path: root/nacl/crypto_stream
author     irungentoo <irungentoo@gmail.com>  2013-07-02 09:53:34 -0400
committer  irungentoo <irungentoo@gmail.com>  2013-07-02 09:53:34 -0400
commit     e2967396ac73cb7410787886cdaf072a184ffc49 (patch)
tree       527a74d25a4a0705fc641994fd35bfab22662034 /nacl/crypto_stream
parent     8928c817df345f29aa0b194743595aa11bd6a8ba (diff)
Added NaCl crypto library.
Diffstat (limited to 'nacl/crypto_stream')
-rw-r--r--  nacl/crypto_stream/aes128ctr/checksum  1
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/afternm.s  12308
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/api.h  3
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/beforenm.s  13694
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/stream.c  14
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/xor.c  15
-rw-r--r--  nacl/crypto_stream/aes128ctr/core2/xor_afternm.s  12407
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/afternm.c  158
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/api.h  3
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/beforenm.c  59
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/common.c  64
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/common.h  788
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/consts.c  14
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/consts.h  28
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/int128.c  128
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/int128.h  47
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/stream.c  28
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/types.h  10
-rw-r--r--  nacl/crypto_stream/aes128ctr/portable/xor_afternm.c  180
-rw-r--r--  nacl/crypto_stream/aes128ctr/used  0
-rw-r--r--  nacl/crypto_stream/measure.c  73
-rw-r--r--  nacl/crypto_stream/salsa20/amd64_xmm6/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa20/amd64_xmm6/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa20/amd64_xmm6/stream.s  4823
-rw-r--r--  nacl/crypto_stream/salsa20/checksum  1
-rw-r--r--  nacl/crypto_stream/salsa20/ref/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa20/ref/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa20/ref/stream.c  49
-rw-r--r--  nacl/crypto_stream/salsa20/ref/xor.c  52
-rw-r--r--  nacl/crypto_stream/salsa20/used  0
-rw-r--r--  nacl/crypto_stream/salsa20/x86_xmm5/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa20/x86_xmm5/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa20/x86_xmm5/stream.s  5078
-rw-r--r--  nacl/crypto_stream/salsa2012/amd64_xmm6/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa2012/amd64_xmm6/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s  4823
-rw-r--r--  nacl/crypto_stream/salsa2012/checksum  1
-rw-r--r--  nacl/crypto_stream/salsa2012/ref/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa2012/ref/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa2012/ref/stream.c  49
-rw-r--r--  nacl/crypto_stream/salsa2012/ref/xor.c  52
-rw-r--r--  nacl/crypto_stream/salsa2012/used  0
-rw-r--r--  nacl/crypto_stream/salsa2012/x86_xmm5/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa2012/x86_xmm5/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa2012/x86_xmm5/stream.s  5078
-rw-r--r--  nacl/crypto_stream/salsa208/amd64_xmm6/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa208/amd64_xmm6/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa208/amd64_xmm6/stream.s  4823
-rw-r--r--  nacl/crypto_stream/salsa208/checksum  1
-rw-r--r--  nacl/crypto_stream/salsa208/ref/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa208/ref/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa208/ref/stream.c  49
-rw-r--r--  nacl/crypto_stream/salsa208/ref/xor.c  52
-rw-r--r--  nacl/crypto_stream/salsa208/used  0
-rw-r--r--  nacl/crypto_stream/salsa208/x86_xmm5/api.h  2
-rw-r--r--  nacl/crypto_stream/salsa208/x86_xmm5/implementors  1
-rw-r--r--  nacl/crypto_stream/salsa208/x86_xmm5/stream.s  5078
-rw-r--r--  nacl/crypto_stream/try.c  124
-rw-r--r--  nacl/crypto_stream/wrapper-stream.cpp  12
-rw-r--r--  nacl/crypto_stream/wrapper-xor.cpp  17
-rw-r--r--  nacl/crypto_stream/xsalsa20/checksum  1
-rw-r--r--  nacl/crypto_stream/xsalsa20/ref/api.h  2
-rw-r--r--  nacl/crypto_stream/xsalsa20/ref/implementors  1
-rw-r--r--  nacl/crypto_stream/xsalsa20/ref/stream.c  22
-rw-r--r--  nacl/crypto_stream/xsalsa20/ref/xor.c  23
-rw-r--r--  nacl/crypto_stream/xsalsa20/selected  0
-rw-r--r--  nacl/crypto_stream/xsalsa20/used  0
67 files changed, 70260 insertions, 0 deletions
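
Every implementation directory added here plugs into NaCl's common crypto_stream interface: crypto_stream(c, clen, n, k) writes clen bytes of keystream derived from key k and nonce n into c, and crypto_stream_xor(c, m, mlen, n, k) XORs a message with that keystream, so encryption and decryption are the same call. A minimal usage sketch against the library's default primitive (xsalsa20 in a standard NaCl build; header name and size macros as documented by NaCl's C API):

    #include <stdio.h>
    #include <string.h>
    #include "crypto_stream.h"          /* NaCl default primitive (xsalsa20) */

    int main(void)
    {
        unsigned char k[crypto_stream_KEYBYTES] = {1};   /* use random bytes in practice */
        unsigned char n[crypto_stream_NONCEBYTES] = {0}; /* never reuse a nonce under one key */
        unsigned char m[16] = "attack at dawn";
        unsigned char c[16], p[16];

        crypto_stream_xor(c, m, sizeof m, n, k);   /* encrypt: m XOR keystream */
        crypto_stream_xor(p, c, sizeof c, n, k);   /* decrypt: identical call */
        printf("%s\n", memcmp(m, p, sizeof m) == 0 ? "round trip ok" : "mismatch");
        return 0;
    }
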
diff --git a/nacl/crypto_stream/aes128ctr/checksum b/nacl/crypto_stream/aes128ctr/checksum
new file mode 100644
index 00000000..92865436
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/checksum
@@ -0,0 +1 @@
6e9966897837aae181e93261ae88fdf0
diff --git a/nacl/crypto_stream/aes128ctr/core2/afternm.s b/nacl/crypto_stream/aes128ctr/core2/afternm.s
new file mode 100644
index 00000000..c1ba79ef
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/afternm.s
@@ -0,0 +1,12308 @@
1# Author: Emilia Käsper and Peter Schwabe
2# Date: 2009-03-19
3# +2010.01.31: minor namespace modifications
4# Public domain
5
6.data
7.p2align 6
8
9RCON: .int 0x00000000, 0x00000000, 0x00000000, 0xffffffff
10ROTB: .int 0x0c000000, 0x00000000, 0x04000000, 0x08000000
11EXPB0: .int 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f
12CTRINC1: .int 0x00000001, 0x00000000, 0x00000000, 0x00000000
13CTRINC2: .int 0x00000002, 0x00000000, 0x00000000, 0x00000000
14CTRINC3: .int 0x00000003, 0x00000000, 0x00000000, 0x00000000
15CTRINC4: .int 0x00000004, 0x00000000, 0x00000000, 0x00000000
16CTRINC5: .int 0x00000005, 0x00000000, 0x00000000, 0x00000000
17CTRINC6: .int 0x00000006, 0x00000000, 0x00000000, 0x00000000
18CTRINC7: .int 0x00000007, 0x00000000, 0x00000000, 0x00000000
19RCTRINC1: .int 0x00000000, 0x00000000, 0x00000000, 0x00000001
20RCTRINC2: .int 0x00000000, 0x00000000, 0x00000000, 0x00000002
21RCTRINC3: .int 0x00000000, 0x00000000, 0x00000000, 0x00000003
22RCTRINC4: .int 0x00000000, 0x00000000, 0x00000000, 0x00000004
23RCTRINC5: .int 0x00000000, 0x00000000, 0x00000000, 0x00000005
24RCTRINC6: .int 0x00000000, 0x00000000, 0x00000000, 0x00000006
25RCTRINC7: .int 0x00000000, 0x00000000, 0x00000000, 0x00000007
26
27SWAP32: .int 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
28M0SWAP: .quad 0x0105090d0004080c , 0x03070b0f02060a0e
29
30BS0: .quad 0x5555555555555555, 0x5555555555555555
31BS1: .quad 0x3333333333333333, 0x3333333333333333
32BS2: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
33ONE: .quad 0xffffffffffffffff, 0xffffffffffffffff
34M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d
35SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d
36SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
37
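# The constants above serve the bit-sliced AES-128-CTR code below:
# CTRINC1..7 and RCTRINC1..7 are per-block counter increments (in the two
# byte orders used by the code), SWAP32 reverses the bytes of every 32-bit
# word, BS0/BS1/BS2 are the 1/2/4-bit interleaving masks for the
# bit-slicing transform, M0/M0SWAP reorder bytes into the bit-sliced
# layout, and SR/SRM0 are the ShiftRows byte shuffles; RCON, ROTB and
# EXPB0 are presumably key-schedule constants.
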
38# qhasm: int64 outp
39
40# qhasm: int64 len
41
42# qhasm: int64 np
43
44# qhasm: int64 c
45
46# qhasm: input outp
47
48# qhasm: input len
49
50# qhasm: input np
51
52# qhasm: input c
53
54# qhasm: int64 lensav
55
56# qhasm: int6464 xmm0
57
58# qhasm: int6464 xmm1
59
60# qhasm: int6464 xmm2
61
62# qhasm: int6464 xmm3
63
64# qhasm: int6464 xmm4
65
66# qhasm: int6464 xmm5
67
68# qhasm: int6464 xmm6
69
70# qhasm: int6464 xmm7
71
72# qhasm: int6464 xmm8
73
74# qhasm: int6464 xmm9
75
76# qhasm: int6464 xmm10
77
78# qhasm: int6464 xmm11
79
80# qhasm: int6464 xmm12
81
82# qhasm: int6464 xmm13
83
84# qhasm: int6464 xmm14
85
86# qhasm: int6464 xmm15
87
88# qhasm: int6464 t
89
90# qhasm: stack1024 bl
91
92# qhasm: stack128 nonce_stack
93
94# qhasm: int64 blp
95
96# qhasm: int64 b
97
98# qhasm: int64 tmp
99
100# qhasm: enter crypto_stream_aes128ctr_core2_afternm
101.text
102.p2align 5
103.globl _crypto_stream_aes128ctr_core2_afternm
104.globl crypto_stream_aes128ctr_core2_afternm
105_crypto_stream_aes128ctr_core2_afternm:
106crypto_stream_aes128ctr_core2_afternm:
107mov %rsp,%r11
108and $31,%r11
109add $160,%r11
110sub %r11,%rsp
111
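# Prologue: align the stack and reserve scratch space (at least 160
# bytes) for the nonce copy and spills used below.
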
112# qhasm: xmm0 = *(int128 *) (np + 0)
113# asm 1: movdqa 0(<np=int64#3),>xmm0=int6464#1
114# asm 2: movdqa 0(<np=%rdx),>xmm0=%xmm0
115movdqa 0(%rdx),%xmm0
116
117# qhasm: nonce_stack = xmm0
118# asm 1: movdqa <xmm0=int6464#1,>nonce_stack=stack128#1
119# asm 2: movdqa <xmm0=%xmm0,>nonce_stack=0(%rsp)
120movdqa %xmm0,0(%rsp)
121
122# qhasm: np = &nonce_stack
123# asm 1: leaq <nonce_stack=stack128#1,>np=int64#3
124# asm 2: leaq <nonce_stack=0(%rsp),>np=%rdx
125leaq 0(%rsp),%rdx
126
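# The 16-byte nonce/counter block is copied to the stack and np is
# repointed at the copy, so the code below works on a local counter
# rather than the caller's buffer.
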
127# qhasm: enc_block:
128._enc_block:
129
130# qhasm: xmm0 = *(int128 *) (np + 0)
131# asm 1: movdqa 0(<np=int64#3),>xmm0=int6464#1
132# asm 2: movdqa 0(<np=%rdx),>xmm0=%xmm0
133movdqa 0(%rdx),%xmm0
134
135# qhasm: xmm1 = xmm0
136# asm 1: movdqa <xmm0=int6464#1,>xmm1=int6464#2
137# asm 2: movdqa <xmm0=%xmm0,>xmm1=%xmm1
138movdqa %xmm0,%xmm1
139
140# qhasm: shuffle bytes of xmm1 by SWAP32
141# asm 1: pshufb SWAP32,<xmm1=int6464#2
142# asm 2: pshufb SWAP32,<xmm1=%xmm1
143pshufb SWAP32,%xmm1
144
145# qhasm: xmm2 = xmm1
146# asm 1: movdqa <xmm1=int6464#2,>xmm2=int6464#3
147# asm 2: movdqa <xmm1=%xmm1,>xmm2=%xmm2
148movdqa %xmm1,%xmm2
149
150# qhasm: xmm3 = xmm1
151# asm 1: movdqa <xmm1=int6464#2,>xmm3=int6464#4
152# asm 2: movdqa <xmm1=%xmm1,>xmm3=%xmm3
153movdqa %xmm1,%xmm3
154
155# qhasm: xmm4 = xmm1
156# asm 1: movdqa <xmm1=int6464#2,>xmm4=int6464#5
157# asm 2: movdqa <xmm1=%xmm1,>xmm4=%xmm4
158movdqa %xmm1,%xmm4
159
160# qhasm: xmm5 = xmm1
161# asm 1: movdqa <xmm1=int6464#2,>xmm5=int6464#6
162# asm 2: movdqa <xmm1=%xmm1,>xmm5=%xmm5
163movdqa %xmm1,%xmm5
164
165# qhasm: xmm6 = xmm1
166# asm 1: movdqa <xmm1=int6464#2,>xmm6=int6464#7
167# asm 2: movdqa <xmm1=%xmm1,>xmm6=%xmm6
168movdqa %xmm1,%xmm6
169
170# qhasm: xmm7 = xmm1
171# asm 1: movdqa <xmm1=int6464#2,>xmm7=int6464#8
172# asm 2: movdqa <xmm1=%xmm1,>xmm7=%xmm7
173movdqa %xmm1,%xmm7
174
175# qhasm: int32323232 xmm1 += RCTRINC1
176# asm 1: paddd RCTRINC1,<xmm1=int6464#2
177# asm 2: paddd RCTRINC1,<xmm1=%xmm1
178paddd RCTRINC1,%xmm1
179
180# qhasm: int32323232 xmm2 += RCTRINC2
181# asm 1: paddd RCTRINC2,<xmm2=int6464#3
182# asm 2: paddd RCTRINC2,<xmm2=%xmm2
183paddd RCTRINC2,%xmm2
184
185# qhasm: int32323232 xmm3 += RCTRINC3
186# asm 1: paddd RCTRINC3,<xmm3=int6464#4
187# asm 2: paddd RCTRINC3,<xmm3=%xmm3
188paddd RCTRINC3,%xmm3
189
190# qhasm: int32323232 xmm4 += RCTRINC4
191# asm 1: paddd RCTRINC4,<xmm4=int6464#5
192# asm 2: paddd RCTRINC4,<xmm4=%xmm4
193paddd RCTRINC4,%xmm4
194
195# qhasm: int32323232 xmm5 += RCTRINC5
196# asm 1: paddd RCTRINC5,<xmm5=int6464#6
197# asm 2: paddd RCTRINC5,<xmm5=%xmm5
198paddd RCTRINC5,%xmm5
199
200# qhasm: int32323232 xmm6 += RCTRINC6
201# asm 1: paddd RCTRINC6,<xmm6=int6464#7
202# asm 2: paddd RCTRINC6,<xmm6=%xmm6
203paddd RCTRINC6,%xmm6
204
205# qhasm: int32323232 xmm7 += RCTRINC7
206# asm 1: paddd RCTRINC7,<xmm7=int6464#8
207# asm 2: paddd RCTRINC7,<xmm7=%xmm7
208paddd RCTRINC7,%xmm7
209
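# Eight copies of the (byte-swapped) counter block have been made and the
# counter word of copies 1..7 incremented by 1..7, so each pass through
# enc_block can produce keystream for eight consecutive 16-byte blocks.
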
210# qhasm: shuffle bytes of xmm0 by M0
211# asm 1: pshufb M0,<xmm0=int6464#1
212# asm 2: pshufb M0,<xmm0=%xmm0
213pshufb M0,%xmm0
214
215# qhasm: shuffle bytes of xmm1 by M0SWAP
216# asm 1: pshufb M0SWAP,<xmm1=int6464#2
217# asm 2: pshufb M0SWAP,<xmm1=%xmm1
218pshufb M0SWAP,%xmm1
219
220# qhasm: shuffle bytes of xmm2 by M0SWAP
221# asm 1: pshufb M0SWAP,<xmm2=int6464#3
222# asm 2: pshufb M0SWAP,<xmm2=%xmm2
223pshufb M0SWAP,%xmm2
224
225# qhasm: shuffle bytes of xmm3 by M0SWAP
226# asm 1: pshufb M0SWAP,<xmm3=int6464#4
227# asm 2: pshufb M0SWAP,<xmm3=%xmm3
228pshufb M0SWAP,%xmm3
229
230# qhasm: shuffle bytes of xmm4 by M0SWAP
231# asm 1: pshufb M0SWAP,<xmm4=int6464#5
232# asm 2: pshufb M0SWAP,<xmm4=%xmm4
233pshufb M0SWAP,%xmm4
234
235# qhasm: shuffle bytes of xmm5 by M0SWAP
236# asm 1: pshufb M0SWAP,<xmm5=int6464#6
237# asm 2: pshufb M0SWAP,<xmm5=%xmm5
238pshufb M0SWAP,%xmm5
239
240# qhasm: shuffle bytes of xmm6 by M0SWAP
241# asm 1: pshufb M0SWAP,<xmm6=int6464#7
242# asm 2: pshufb M0SWAP,<xmm6=%xmm6
243pshufb M0SWAP,%xmm6
244
245# qhasm: shuffle bytes of xmm7 by M0SWAP
246# asm 1: pshufb M0SWAP,<xmm7=int6464#8
247# asm 2: pshufb M0SWAP,<xmm7=%xmm7
248pshufb M0SWAP,%xmm7
249
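# The M0/M0SWAP byte shuffles put the eight counter blocks into the byte
# order expected by the bit-slicing transform that follows.
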
250# qhasm: xmm8 = xmm6
251# asm 1: movdqa <xmm6=int6464#7,>xmm8=int6464#9
252# asm 2: movdqa <xmm6=%xmm6,>xmm8=%xmm8
253movdqa %xmm6,%xmm8
254
255# qhasm: uint6464 xmm8 >>= 1
256# asm 1: psrlq $1,<xmm8=int6464#9
257# asm 2: psrlq $1,<xmm8=%xmm8
258psrlq $1,%xmm8
259
260# qhasm: xmm8 ^= xmm7
261# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
262# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
263pxor %xmm7,%xmm8
264
265# qhasm: xmm8 &= BS0
266# asm 1: pand BS0,<xmm8=int6464#9
267# asm 2: pand BS0,<xmm8=%xmm8
268pand BS0,%xmm8
269
270# qhasm: xmm7 ^= xmm8
271# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
272# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
273pxor %xmm8,%xmm7
274
275# qhasm: uint6464 xmm8 <<= 1
276# asm 1: psllq $1,<xmm8=int6464#9
277# asm 2: psllq $1,<xmm8=%xmm8
278psllq $1,%xmm8
279
280# qhasm: xmm6 ^= xmm8
281# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
282# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
283pxor %xmm8,%xmm6
284
285# qhasm: xmm8 = xmm4
286# asm 1: movdqa <xmm4=int6464#5,>xmm8=int6464#9
287# asm 2: movdqa <xmm4=%xmm4,>xmm8=%xmm8
288movdqa %xmm4,%xmm8
289
290# qhasm: uint6464 xmm8 >>= 1
291# asm 1: psrlq $1,<xmm8=int6464#9
292# asm 2: psrlq $1,<xmm8=%xmm8
293psrlq $1,%xmm8
294
295# qhasm: xmm8 ^= xmm5
296# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
297# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
298pxor %xmm5,%xmm8
299
300# qhasm: xmm8 &= BS0
301# asm 1: pand BS0,<xmm8=int6464#9
302# asm 2: pand BS0,<xmm8=%xmm8
303pand BS0,%xmm8
304
305# qhasm: xmm5 ^= xmm8
306# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
307# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
308pxor %xmm8,%xmm5
309
310# qhasm: uint6464 xmm8 <<= 1
311# asm 1: psllq $1,<xmm8=int6464#9
312# asm 2: psllq $1,<xmm8=%xmm8
313psllq $1,%xmm8
314
315# qhasm: xmm4 ^= xmm8
316# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
317# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
318pxor %xmm8,%xmm4
319
320# qhasm: xmm8 = xmm2
321# asm 1: movdqa <xmm2=int6464#3,>xmm8=int6464#9
322# asm 2: movdqa <xmm2=%xmm2,>xmm8=%xmm8
323movdqa %xmm2,%xmm8
324
325# qhasm: uint6464 xmm8 >>= 1
326# asm 1: psrlq $1,<xmm8=int6464#9
327# asm 2: psrlq $1,<xmm8=%xmm8
328psrlq $1,%xmm8
329
330# qhasm: xmm8 ^= xmm3
331# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#9
332# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm8
333pxor %xmm3,%xmm8
334
335# qhasm: xmm8 &= BS0
336# asm 1: pand BS0,<xmm8=int6464#9
337# asm 2: pand BS0,<xmm8=%xmm8
338pand BS0,%xmm8
339
340# qhasm: xmm3 ^= xmm8
341# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
342# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
343pxor %xmm8,%xmm3
344
345# qhasm: uint6464 xmm8 <<= 1
346# asm 1: psllq $1,<xmm8=int6464#9
347# asm 2: psllq $1,<xmm8=%xmm8
348psllq $1,%xmm8
349
350# qhasm: xmm2 ^= xmm8
351# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
352# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
353pxor %xmm8,%xmm2
354
355# qhasm: xmm8 = xmm0
356# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
357# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
358movdqa %xmm0,%xmm8
359
360# qhasm: uint6464 xmm8 >>= 1
361# asm 1: psrlq $1,<xmm8=int6464#9
362# asm 2: psrlq $1,<xmm8=%xmm8
363psrlq $1,%xmm8
364
365# qhasm: xmm8 ^= xmm1
366# asm 1: pxor <xmm1=int6464#2,<xmm8=int6464#9
367# asm 2: pxor <xmm1=%xmm1,<xmm8=%xmm8
368pxor %xmm1,%xmm8
369
370# qhasm: xmm8 &= BS0
371# asm 1: pand BS0,<xmm8=int6464#9
372# asm 2: pand BS0,<xmm8=%xmm8
373pand BS0,%xmm8
374
375# qhasm: xmm1 ^= xmm8
376# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
377# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
378pxor %xmm8,%xmm1
379
380# qhasm: uint6464 xmm8 <<= 1
381# asm 1: psllq $1,<xmm8=int6464#9
382# asm 2: psllq $1,<xmm8=%xmm8
383psllq $1,%xmm8
384
385# qhasm: xmm0 ^= xmm8
386# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
387# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
388pxor %xmm8,%xmm0
389
390# qhasm: xmm8 = xmm5
391# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#9
392# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm8
393movdqa %xmm5,%xmm8
394
395# qhasm: uint6464 xmm8 >>= 2
396# asm 1: psrlq $2,<xmm8=int6464#9
397# asm 2: psrlq $2,<xmm8=%xmm8
398psrlq $2,%xmm8
399
400# qhasm: xmm8 ^= xmm7
401# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
402# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
403pxor %xmm7,%xmm8
404
405# qhasm: xmm8 &= BS1
406# asm 1: pand BS1,<xmm8=int6464#9
407# asm 2: pand BS1,<xmm8=%xmm8
408pand BS1,%xmm8
409
410# qhasm: xmm7 ^= xmm8
411# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
412# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
413pxor %xmm8,%xmm7
414
415# qhasm: uint6464 xmm8 <<= 2
416# asm 1: psllq $2,<xmm8=int6464#9
417# asm 2: psllq $2,<xmm8=%xmm8
418psllq $2,%xmm8
419
420# qhasm: xmm5 ^= xmm8
421# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
422# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
423pxor %xmm8,%xmm5
424
425# qhasm: xmm8 = xmm4
426# asm 1: movdqa <xmm4=int6464#5,>xmm8=int6464#9
427# asm 2: movdqa <xmm4=%xmm4,>xmm8=%xmm8
428movdqa %xmm4,%xmm8
429
430# qhasm: uint6464 xmm8 >>= 2
431# asm 1: psrlq $2,<xmm8=int6464#9
432# asm 2: psrlq $2,<xmm8=%xmm8
433psrlq $2,%xmm8
434
435# qhasm: xmm8 ^= xmm6
436# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#9
437# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm8
438pxor %xmm6,%xmm8
439
440# qhasm: xmm8 &= BS1
441# asm 1: pand BS1,<xmm8=int6464#9
442# asm 2: pand BS1,<xmm8=%xmm8
443pand BS1,%xmm8
444
445# qhasm: xmm6 ^= xmm8
446# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
447# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
448pxor %xmm8,%xmm6
449
450# qhasm: uint6464 xmm8 <<= 2
451# asm 1: psllq $2,<xmm8=int6464#9
452# asm 2: psllq $2,<xmm8=%xmm8
453psllq $2,%xmm8
454
455# qhasm: xmm4 ^= xmm8
456# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
457# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
458pxor %xmm8,%xmm4
459
460# qhasm: xmm8 = xmm1
461# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#9
462# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm8
463movdqa %xmm1,%xmm8
464
465# qhasm: uint6464 xmm8 >>= 2
466# asm 1: psrlq $2,<xmm8=int6464#9
467# asm 2: psrlq $2,<xmm8=%xmm8
468psrlq $2,%xmm8
469
470# qhasm: xmm8 ^= xmm3
471# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#9
472# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm8
473pxor %xmm3,%xmm8
474
475# qhasm: xmm8 &= BS1
476# asm 1: pand BS1,<xmm8=int6464#9
477# asm 2: pand BS1,<xmm8=%xmm8
478pand BS1,%xmm8
479
480# qhasm: xmm3 ^= xmm8
481# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
482# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
483pxor %xmm8,%xmm3
484
485# qhasm: uint6464 xmm8 <<= 2
486# asm 1: psllq $2,<xmm8=int6464#9
487# asm 2: psllq $2,<xmm8=%xmm8
488psllq $2,%xmm8
489
490# qhasm: xmm1 ^= xmm8
491# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
492# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
493pxor %xmm8,%xmm1
494
495# qhasm: xmm8 = xmm0
496# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
497# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
498movdqa %xmm0,%xmm8
499
500# qhasm: uint6464 xmm8 >>= 2
501# asm 1: psrlq $2,<xmm8=int6464#9
502# asm 2: psrlq $2,<xmm8=%xmm8
503psrlq $2,%xmm8
504
505# qhasm: xmm8 ^= xmm2
506# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#9
507# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm8
508pxor %xmm2,%xmm8
509
510# qhasm: xmm8 &= BS1
511# asm 1: pand BS1,<xmm8=int6464#9
512# asm 2: pand BS1,<xmm8=%xmm8
513pand BS1,%xmm8
514
515# qhasm: xmm2 ^= xmm8
516# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
517# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
518pxor %xmm8,%xmm2
519
520# qhasm: uint6464 xmm8 <<= 2
521# asm 1: psllq $2,<xmm8=int6464#9
522# asm 2: psllq $2,<xmm8=%xmm8
523psllq $2,%xmm8
524
525# qhasm: xmm0 ^= xmm8
526# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
527# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
528pxor %xmm8,%xmm0
529
530# qhasm: xmm8 = xmm3
531# asm 1: movdqa <xmm3=int6464#4,>xmm8=int6464#9
532# asm 2: movdqa <xmm3=%xmm3,>xmm8=%xmm8
533movdqa %xmm3,%xmm8
534
535# qhasm: uint6464 xmm8 >>= 4
536# asm 1: psrlq $4,<xmm8=int6464#9
537# asm 2: psrlq $4,<xmm8=%xmm8
538psrlq $4,%xmm8
539
540# qhasm: xmm8 ^= xmm7
541# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
542# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
543pxor %xmm7,%xmm8
544
545# qhasm: xmm8 &= BS2
546# asm 1: pand BS2,<xmm8=int6464#9
547# asm 2: pand BS2,<xmm8=%xmm8
548pand BS2,%xmm8
549
550# qhasm: xmm7 ^= xmm8
551# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
552# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
553pxor %xmm8,%xmm7
554
555# qhasm: uint6464 xmm8 <<= 4
556# asm 1: psllq $4,<xmm8=int6464#9
557# asm 2: psllq $4,<xmm8=%xmm8
558psllq $4,%xmm8
559
560# qhasm: xmm3 ^= xmm8
561# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
562# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
563pxor %xmm8,%xmm3
564
565# qhasm: xmm8 = xmm2
566# asm 1: movdqa <xmm2=int6464#3,>xmm8=int6464#9
567# asm 2: movdqa <xmm2=%xmm2,>xmm8=%xmm8
568movdqa %xmm2,%xmm8
569
570# qhasm: uint6464 xmm8 >>= 4
571# asm 1: psrlq $4,<xmm8=int6464#9
572# asm 2: psrlq $4,<xmm8=%xmm8
573psrlq $4,%xmm8
574
575# qhasm: xmm8 ^= xmm6
576# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#9
577# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm8
578pxor %xmm6,%xmm8
579
580# qhasm: xmm8 &= BS2
581# asm 1: pand BS2,<xmm8=int6464#9
582# asm 2: pand BS2,<xmm8=%xmm8
583pand BS2,%xmm8
584
585# qhasm: xmm6 ^= xmm8
586# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
587# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
588pxor %xmm8,%xmm6
589
590# qhasm: uint6464 xmm8 <<= 4
591# asm 1: psllq $4,<xmm8=int6464#9
592# asm 2: psllq $4,<xmm8=%xmm8
593psllq $4,%xmm8
594
595# qhasm: xmm2 ^= xmm8
596# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
597# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
598pxor %xmm8,%xmm2
599
600# qhasm: xmm8 = xmm1
601# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#9
602# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm8
603movdqa %xmm1,%xmm8
604
605# qhasm: uint6464 xmm8 >>= 4
606# asm 1: psrlq $4,<xmm8=int6464#9
607# asm 2: psrlq $4,<xmm8=%xmm8
608psrlq $4,%xmm8
609
610# qhasm: xmm8 ^= xmm5
611# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
612# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
613pxor %xmm5,%xmm8
614
615# qhasm: xmm8 &= BS2
616# asm 1: pand BS2,<xmm8=int6464#9
617# asm 2: pand BS2,<xmm8=%xmm8
618pand BS2,%xmm8
619
620# qhasm: xmm5 ^= xmm8
621# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
622# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
623pxor %xmm8,%xmm5
624
625# qhasm: uint6464 xmm8 <<= 4
626# asm 1: psllq $4,<xmm8=int6464#9
627# asm 2: psllq $4,<xmm8=%xmm8
628psllq $4,%xmm8
629
630# qhasm: xmm1 ^= xmm8
631# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
632# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
633pxor %xmm8,%xmm1
634
635# qhasm: xmm8 = xmm0
636# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
637# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
638movdqa %xmm0,%xmm8
639
640# qhasm: uint6464 xmm8 >>= 4
641# asm 1: psrlq $4,<xmm8=int6464#9
642# asm 2: psrlq $4,<xmm8=%xmm8
643psrlq $4,%xmm8
644
645# qhasm: xmm8 ^= xmm4
646# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#9
647# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm8
648pxor %xmm4,%xmm8
649
650# qhasm: xmm8 &= BS2
651# asm 1: pand BS2,<xmm8=int6464#9
652# asm 2: pand BS2,<xmm8=%xmm8
653pand BS2,%xmm8
654
655# qhasm: xmm4 ^= xmm8
656# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
657# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
658pxor %xmm8,%xmm4
659
660# qhasm: uint6464 xmm8 <<= 4
661# asm 1: psllq $4,<xmm8=int6464#9
662# asm 2: psllq $4,<xmm8=%xmm8
663psllq $4,%xmm8
664
665# qhasm: xmm0 ^= xmm8
666# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
667# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
668pxor %xmm8,%xmm0
669
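# The sequence above converts the eight 128-bit words into bit-sliced form
# using the classic swapmove step, repeated with masks BS0/BS1/BS2 and
# shift distances 1, 2 and 4.  In C-like pseudocode (a sketch, not part of
# the generated source), each step is:
#
#     t  = ((a >> n) ^ b) & mask;   /* mask = BS0, BS1 or BS2; n = 1, 2 or 4 */
#     b ^= t;
#     a ^= t << n;
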
670# qhasm: xmm0 ^= *(int128 *)(c + 0)
671# asm 1: pxor 0(<c=int64#4),<xmm0=int6464#1
672# asm 2: pxor 0(<c=%rcx),<xmm0=%xmm0
673pxor 0(%rcx),%xmm0
674
675# qhasm: shuffle bytes of xmm0 by SR
676# asm 1: pshufb SR,<xmm0=int6464#1
677# asm 2: pshufb SR,<xmm0=%xmm0
678pshufb SR,%xmm0
679
680# qhasm: xmm1 ^= *(int128 *)(c + 16)
681# asm 1: pxor 16(<c=int64#4),<xmm1=int6464#2
682# asm 2: pxor 16(<c=%rcx),<xmm1=%xmm1
683pxor 16(%rcx),%xmm1
684
685# qhasm: shuffle bytes of xmm1 by SR
686# asm 1: pshufb SR,<xmm1=int6464#2
687# asm 2: pshufb SR,<xmm1=%xmm1
688pshufb SR,%xmm1
689
690# qhasm: xmm2 ^= *(int128 *)(c + 32)
691# asm 1: pxor 32(<c=int64#4),<xmm2=int6464#3
692# asm 2: pxor 32(<c=%rcx),<xmm2=%xmm2
693pxor 32(%rcx),%xmm2
694
695# qhasm: shuffle bytes of xmm2 by SR
696# asm 1: pshufb SR,<xmm2=int6464#3
697# asm 2: pshufb SR,<xmm2=%xmm2
698pshufb SR,%xmm2
699
700# qhasm: xmm3 ^= *(int128 *)(c + 48)
701# asm 1: pxor 48(<c=int64#4),<xmm3=int6464#4
702# asm 2: pxor 48(<c=%rcx),<xmm3=%xmm3
703pxor 48(%rcx),%xmm3
704
705# qhasm: shuffle bytes of xmm3 by SR
706# asm 1: pshufb SR,<xmm3=int6464#4
707# asm 2: pshufb SR,<xmm3=%xmm3
708pshufb SR,%xmm3
709
710# qhasm: xmm4 ^= *(int128 *)(c + 64)
711# asm 1: pxor 64(<c=int64#4),<xmm4=int6464#5
712# asm 2: pxor 64(<c=%rcx),<xmm4=%xmm4
713pxor 64(%rcx),%xmm4
714
715# qhasm: shuffle bytes of xmm4 by SR
716# asm 1: pshufb SR,<xmm4=int6464#5
717# asm 2: pshufb SR,<xmm4=%xmm4
718pshufb SR,%xmm4
719
720# qhasm: xmm5 ^= *(int128 *)(c + 80)
721# asm 1: pxor 80(<c=int64#4),<xmm5=int6464#6
722# asm 2: pxor 80(<c=%rcx),<xmm5=%xmm5
723pxor 80(%rcx),%xmm5
724
725# qhasm: shuffle bytes of xmm5 by SR
726# asm 1: pshufb SR,<xmm5=int6464#6
727# asm 2: pshufb SR,<xmm5=%xmm5
728pshufb SR,%xmm5
729
730# qhasm: xmm6 ^= *(int128 *)(c + 96)
731# asm 1: pxor 96(<c=int64#4),<xmm6=int6464#7
732# asm 2: pxor 96(<c=%rcx),<xmm6=%xmm6
733pxor 96(%rcx),%xmm6
734
735# qhasm: shuffle bytes of xmm6 by SR
736# asm 1: pshufb SR,<xmm6=int6464#7
737# asm 2: pshufb SR,<xmm6=%xmm6
738pshufb SR,%xmm6
739
740# qhasm: xmm7 ^= *(int128 *)(c + 112)
741# asm 1: pxor 112(<c=int64#4),<xmm7=int6464#8
742# asm 2: pxor 112(<c=%rcx),<xmm7=%xmm7
743pxor 112(%rcx),%xmm7
744
745# qhasm: shuffle bytes of xmm7 by SR
746# asm 1: pshufb SR,<xmm7=int6464#8
747# asm 2: pshufb SR,<xmm7=%xmm7
748pshufb SR,%xmm7
749
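# Add round key and ShiftRows: each bit-sliced word is XORed with 128
# bytes of the expanded key at c+0..c+112 (precomputed by beforenm), and
# the SR byte shuffle applies the ShiftRows permutation before the S-box
# layer below.
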
750# qhasm: xmm5 ^= xmm6
751# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
752# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
753pxor %xmm6,%xmm5
754
755# qhasm: xmm2 ^= xmm1
756# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
757# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
758pxor %xmm1,%xmm2
759
760# qhasm: xmm5 ^= xmm0
761# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
762# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
763pxor %xmm0,%xmm5
764
765# qhasm: xmm6 ^= xmm2
766# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
767# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
768pxor %xmm2,%xmm6
769
770# qhasm: xmm3 ^= xmm0
771# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
772# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
773pxor %xmm0,%xmm3
774
775# qhasm: xmm6 ^= xmm3
776# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
777# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
778pxor %xmm3,%xmm6
779
780# qhasm: xmm3 ^= xmm7
781# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
782# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
783pxor %xmm7,%xmm3
784
785# qhasm: xmm3 ^= xmm4
786# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
787# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
788pxor %xmm4,%xmm3
789
790# qhasm: xmm7 ^= xmm5
791# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
792# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
793pxor %xmm5,%xmm7
794
795# qhasm: xmm3 ^= xmm1
796# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
797# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
798pxor %xmm1,%xmm3
799
800# qhasm: xmm4 ^= xmm5
801# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
802# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
803pxor %xmm5,%xmm4
804
805# qhasm: xmm2 ^= xmm7
806# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
807# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
808pxor %xmm7,%xmm2
809
810# qhasm: xmm1 ^= xmm5
811# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
812# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
813pxor %xmm5,%xmm1
814
815# qhasm: xmm11 = xmm7
816# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
817# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
818movdqa %xmm7,%xmm8
819
820# qhasm: xmm10 = xmm1
821# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
822# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
823movdqa %xmm1,%xmm9
824
825# qhasm: xmm9 = xmm5
826# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
827# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
828movdqa %xmm5,%xmm10
829
830# qhasm: xmm13 = xmm2
831# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
832# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
833movdqa %xmm2,%xmm11
834
835# qhasm: xmm12 = xmm6
836# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
837# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
838movdqa %xmm6,%xmm12
839
840# qhasm: xmm11 ^= xmm4
841# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
842# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
843pxor %xmm4,%xmm8
844
845# qhasm: xmm10 ^= xmm2
846# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
847# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
848pxor %xmm2,%xmm9
849
850# qhasm: xmm9 ^= xmm3
851# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
852# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
853pxor %xmm3,%xmm10
854
855# qhasm: xmm13 ^= xmm4
856# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
857# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
858pxor %xmm4,%xmm11
859
860# qhasm: xmm12 ^= xmm0
861# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
862# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
863pxor %xmm0,%xmm12
864
865# qhasm: xmm14 = xmm11
866# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
867# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
868movdqa %xmm8,%xmm13
869
870# qhasm: xmm8 = xmm10
871# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
872# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
873movdqa %xmm9,%xmm14
874
875# qhasm: xmm15 = xmm11
876# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
877# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
878movdqa %xmm8,%xmm15
879
880# qhasm: xmm10 |= xmm9
881# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
882# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
883por %xmm10,%xmm9
884
885# qhasm: xmm11 |= xmm12
886# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
887# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
888por %xmm12,%xmm8
889
890# qhasm: xmm15 ^= xmm8
891# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
892# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
893pxor %xmm14,%xmm15
894
895# qhasm: xmm14 &= xmm12
896# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
897# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
898pand %xmm12,%xmm13
899
900# qhasm: xmm8 &= xmm9
901# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
902# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
903pand %xmm10,%xmm14
904
905# qhasm: xmm12 ^= xmm9
906# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
907# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
908pxor %xmm10,%xmm12
909
910# qhasm: xmm15 &= xmm12
911# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
912# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
913pand %xmm12,%xmm15
914
915# qhasm: xmm12 = xmm3
916# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
917# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
918movdqa %xmm3,%xmm10
919
920# qhasm: xmm12 ^= xmm0
921# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
922# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
923pxor %xmm0,%xmm10
924
925# qhasm: xmm13 &= xmm12
926# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
927# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
928pand %xmm10,%xmm11
929
930# qhasm: xmm11 ^= xmm13
931# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
932# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
933pxor %xmm11,%xmm8
934
935# qhasm: xmm10 ^= xmm13
936# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
937# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
938pxor %xmm11,%xmm9
939
940# qhasm: xmm13 = xmm7
941# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
942# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
943movdqa %xmm7,%xmm10
944
945# qhasm: xmm13 ^= xmm1
946# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
947# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
948pxor %xmm1,%xmm10
949
950# qhasm: xmm12 = xmm5
951# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
952# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
953movdqa %xmm5,%xmm11
954
955# qhasm: xmm9 = xmm13
956# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
957# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
958movdqa %xmm10,%xmm12
959
960# qhasm: xmm12 ^= xmm6
961# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
962# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
963pxor %xmm6,%xmm11
964
965# qhasm: xmm9 |= xmm12
966# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
967# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
968por %xmm11,%xmm12
969
970# qhasm: xmm13 &= xmm12
971# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
972# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
973pand %xmm11,%xmm10
974
975# qhasm: xmm8 ^= xmm13
976# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
977# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
978pxor %xmm10,%xmm14
979
980# qhasm: xmm11 ^= xmm15
981# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
982# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
983pxor %xmm15,%xmm8
984
985# qhasm: xmm10 ^= xmm14
986# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
987# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
988pxor %xmm13,%xmm9
989
990# qhasm: xmm9 ^= xmm15
991# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
992# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
993pxor %xmm15,%xmm12
994
995# qhasm: xmm8 ^= xmm14
996# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
997# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
998pxor %xmm13,%xmm14
999
1000# qhasm: xmm9 ^= xmm14
1001# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
1002# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
1003pxor %xmm13,%xmm12
1004
1005# qhasm: xmm12 = xmm2
1006# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
1007# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
1008movdqa %xmm2,%xmm10
1009
1010# qhasm: xmm13 = xmm4
1011# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
1012# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
1013movdqa %xmm4,%xmm11
1014
1015# qhasm: xmm14 = xmm1
1016# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
1017# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
1018movdqa %xmm1,%xmm13
1019
1020# qhasm: xmm15 = xmm7
1021# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
1022# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
1023movdqa %xmm7,%xmm15
1024
1025# qhasm: xmm12 &= xmm3
1026# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
1027# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
1028pand %xmm3,%xmm10
1029
1030# qhasm: xmm13 &= xmm0
1031# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
1032# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
1033pand %xmm0,%xmm11
1034
1035# qhasm: xmm14 &= xmm5
1036# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
1037# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
1038pand %xmm5,%xmm13
1039
1040# qhasm: xmm15 |= xmm6
1041# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
1042# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
1043por %xmm6,%xmm15
1044
1045# qhasm: xmm11 ^= xmm12
1046# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
1047# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
1048pxor %xmm10,%xmm8
1049
1050# qhasm: xmm10 ^= xmm13
1051# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
1052# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
1053pxor %xmm11,%xmm9
1054
1055# qhasm: xmm9 ^= xmm14
1056# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
1057# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
1058pxor %xmm13,%xmm12
1059
1060# qhasm: xmm8 ^= xmm15
1061# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
1062# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
1063pxor %xmm15,%xmm14
1064
1065# qhasm: xmm12 = xmm11
1066# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
1067# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
1068movdqa %xmm8,%xmm10
1069
1070# qhasm: xmm12 ^= xmm10
1071# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
1072# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
1073pxor %xmm9,%xmm10
1074
1075# qhasm: xmm11 &= xmm9
1076# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
1077# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
1078pand %xmm12,%xmm8
1079
1080# qhasm: xmm14 = xmm8
1081# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
1082# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
1083movdqa %xmm14,%xmm11
1084
1085# qhasm: xmm14 ^= xmm11
1086# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
1087# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
1088pxor %xmm8,%xmm11
1089
1090# qhasm: xmm15 = xmm12
1091# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
1092# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
1093movdqa %xmm10,%xmm13
1094
1095# qhasm: xmm15 &= xmm14
1096# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
1097# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
1098pand %xmm11,%xmm13
1099
1100# qhasm: xmm15 ^= xmm10
1101# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
1102# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
1103pxor %xmm9,%xmm13
1104
1105# qhasm: xmm13 = xmm9
1106# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
1107# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
1108movdqa %xmm12,%xmm15
1109
1110# qhasm: xmm13 ^= xmm8
1111# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1112# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1113pxor %xmm14,%xmm15
1114
1115# qhasm: xmm11 ^= xmm10
1116# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
1117# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
1118pxor %xmm9,%xmm8
1119
1120# qhasm: xmm13 &= xmm11
1121# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
1122# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
1123pand %xmm8,%xmm15
1124
1125# qhasm: xmm13 ^= xmm8
1126# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1127# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1128pxor %xmm14,%xmm15
1129
1130# qhasm: xmm9 ^= xmm13
1131# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
1132# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
1133pxor %xmm15,%xmm12
1134
1135# qhasm: xmm10 = xmm14
1136# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
1137# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
1138movdqa %xmm11,%xmm8
1139
1140# qhasm: xmm10 ^= xmm13
1141# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
1142# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
1143pxor %xmm15,%xmm8
1144
1145# qhasm: xmm10 &= xmm8
1146# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
1147# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
1148pand %xmm14,%xmm8
1149
1150# qhasm: xmm9 ^= xmm10
1151# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
1152# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
1153pxor %xmm8,%xmm12
1154
1155# qhasm: xmm14 ^= xmm10
1156# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
1157# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
1158pxor %xmm8,%xmm11
1159
1160# qhasm: xmm14 &= xmm15
1161# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
1162# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
1163pand %xmm13,%xmm11
1164
1165# qhasm: xmm14 ^= xmm12
1166# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
1167# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
1168pxor %xmm10,%xmm11
1169
1170# qhasm: xmm12 = xmm6
1171# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
1172# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
1173movdqa %xmm6,%xmm8
1174
1175# qhasm: xmm8 = xmm5
1176# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
1177# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
1178movdqa %xmm5,%xmm9
1179
1180# qhasm: xmm10 = xmm15
1181# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
1182# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
1183movdqa %xmm13,%xmm10
1184
1185# qhasm: xmm10 ^= xmm14
1186# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
1187# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
1188pxor %xmm11,%xmm10
1189
1190# qhasm: xmm10 &= xmm6
1191# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
1192# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
1193pand %xmm6,%xmm10
1194
1195# qhasm: xmm6 ^= xmm5
1196# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1197# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1198pxor %xmm5,%xmm6
1199
1200# qhasm: xmm6 &= xmm14
1201# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
1202# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
1203pand %xmm11,%xmm6
1204
1205# qhasm: xmm5 &= xmm15
1206# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
1207# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
1208pand %xmm13,%xmm5
1209
1210# qhasm: xmm6 ^= xmm5
1211# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1212# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1213pxor %xmm5,%xmm6
1214
1215# qhasm: xmm5 ^= xmm10
1216# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
1217# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
1218pxor %xmm10,%xmm5
1219
1220# qhasm: xmm12 ^= xmm0
1221# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
1222# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
1223pxor %xmm0,%xmm8
1224
1225# qhasm: xmm8 ^= xmm3
1226# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
1227# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
1228pxor %xmm3,%xmm9
1229
1230# qhasm: xmm15 ^= xmm13
1231# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1232# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1233pxor %xmm15,%xmm13
1234
1235# qhasm: xmm14 ^= xmm9
1236# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1237# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1238pxor %xmm12,%xmm11
1239
1240# qhasm: xmm11 = xmm15
1241# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1242# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1243movdqa %xmm13,%xmm10
1244
1245# qhasm: xmm11 ^= xmm14
1246# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1247# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1248pxor %xmm11,%xmm10
1249
1250# qhasm: xmm11 &= xmm12
1251# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1252# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1253pand %xmm8,%xmm10
1254
1255# qhasm: xmm12 ^= xmm8
1256# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1257# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1258pxor %xmm9,%xmm8
1259
1260# qhasm: xmm12 &= xmm14
1261# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1262# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1263pand %xmm11,%xmm8
1264
1265# qhasm: xmm8 &= xmm15
1266# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1267# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1268pand %xmm13,%xmm9
1269
1270# qhasm: xmm8 ^= xmm12
1271# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1272# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1273pxor %xmm8,%xmm9
1274
1275# qhasm: xmm12 ^= xmm11
1276# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1277# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1278pxor %xmm10,%xmm8
1279
1280# qhasm: xmm10 = xmm13
1281# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1282# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1283movdqa %xmm15,%xmm10
1284
1285# qhasm: xmm10 ^= xmm9
1286# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1287# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1288pxor %xmm12,%xmm10
1289
1290# qhasm: xmm10 &= xmm0
1291# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
1292# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
1293pand %xmm0,%xmm10
1294
1295# qhasm: xmm0 ^= xmm3
1296# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1297# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1298pxor %xmm3,%xmm0
1299
1300# qhasm: xmm0 &= xmm9
1301# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
1302# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
1303pand %xmm12,%xmm0
1304
1305# qhasm: xmm3 &= xmm13
1306# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
1307# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
1308pand %xmm15,%xmm3
1309
1310# qhasm: xmm0 ^= xmm3
1311# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1312# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1313pxor %xmm3,%xmm0
1314
1315# qhasm: xmm3 ^= xmm10
1316# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
1317# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
1318pxor %xmm10,%xmm3
1319
1320# qhasm: xmm6 ^= xmm12
1321# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
1322# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
1323pxor %xmm8,%xmm6
1324
1325# qhasm: xmm0 ^= xmm12
1326# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
1327# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
1328pxor %xmm8,%xmm0
1329
1330# qhasm: xmm5 ^= xmm8
1331# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
1332# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
1333pxor %xmm9,%xmm5
1334
1335# qhasm: xmm3 ^= xmm8
1336# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
1337# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
1338pxor %xmm9,%xmm3
1339
1340# qhasm: xmm12 = xmm7
1341# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
1342# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
1343movdqa %xmm7,%xmm8
1344
1345# qhasm: xmm8 = xmm1
1346# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
1347# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
1348movdqa %xmm1,%xmm9
1349
1350# qhasm: xmm12 ^= xmm4
1351# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
1352# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
1353pxor %xmm4,%xmm8
1354
1355# qhasm: xmm8 ^= xmm2
1356# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
1357# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
1358pxor %xmm2,%xmm9
1359
1360# qhasm: xmm11 = xmm15
1361# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1362# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1363movdqa %xmm13,%xmm10
1364
1365# qhasm: xmm11 ^= xmm14
1366# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1367# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1368pxor %xmm11,%xmm10
1369
1370# qhasm: xmm11 &= xmm12
1371# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1372# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1373pand %xmm8,%xmm10
1374
1375# qhasm: xmm12 ^= xmm8
1376# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1377# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1378pxor %xmm9,%xmm8
1379
1380# qhasm: xmm12 &= xmm14
1381# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1382# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1383pand %xmm11,%xmm8
1384
1385# qhasm: xmm8 &= xmm15
1386# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1387# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1388pand %xmm13,%xmm9
1389
1390# qhasm: xmm8 ^= xmm12
1391# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1392# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1393pxor %xmm8,%xmm9
1394
1395# qhasm: xmm12 ^= xmm11
1396# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1397# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1398pxor %xmm10,%xmm8
1399
1400# qhasm: xmm10 = xmm13
1401# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1402# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1403movdqa %xmm15,%xmm10
1404
1405# qhasm: xmm10 ^= xmm9
1406# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1407# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1408pxor %xmm12,%xmm10
1409
1410# qhasm: xmm10 &= xmm4
1411# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
1412# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
1413pand %xmm4,%xmm10
1414
1415# qhasm: xmm4 ^= xmm2
1416# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1417# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1418pxor %xmm2,%xmm4
1419
1420# qhasm: xmm4 &= xmm9
1421# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
1422# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
1423pand %xmm12,%xmm4
1424
1425# qhasm: xmm2 &= xmm13
1426# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
1427# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
1428pand %xmm15,%xmm2
1429
1430# qhasm: xmm4 ^= xmm2
1431# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1432# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1433pxor %xmm2,%xmm4
1434
1435# qhasm: xmm2 ^= xmm10
1436# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
1437# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
1438pxor %xmm10,%xmm2
1439
1440# qhasm: xmm15 ^= xmm13
1441# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1442# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1443pxor %xmm15,%xmm13
1444
1445# qhasm: xmm14 ^= xmm9
1446# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1447# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1448pxor %xmm12,%xmm11
1449
1450# qhasm: xmm11 = xmm15
1451# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1452# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1453movdqa %xmm13,%xmm10
1454
1455# qhasm: xmm11 ^= xmm14
1456# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1457# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1458pxor %xmm11,%xmm10
1459
1460# qhasm: xmm11 &= xmm7
1461# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
1462# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
1463pand %xmm7,%xmm10
1464
1465# qhasm: xmm7 ^= xmm1
1466# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1467# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1468pxor %xmm1,%xmm7
1469
1470# qhasm: xmm7 &= xmm14
1471# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
1472# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
1473pand %xmm11,%xmm7
1474
1475# qhasm: xmm1 &= xmm15
1476# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
1477# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
1478pand %xmm13,%xmm1
1479
1480# qhasm: xmm7 ^= xmm1
1481# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1482# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1483pxor %xmm1,%xmm7
1484
1485# qhasm: xmm1 ^= xmm11
1486# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
1487# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
1488pxor %xmm10,%xmm1
1489
1490# qhasm: xmm7 ^= xmm12
1491# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
1492# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
1493pxor %xmm8,%xmm7
1494
1495# qhasm: xmm4 ^= xmm12
1496# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
1497# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
1498pxor %xmm8,%xmm4
1499
1500# qhasm: xmm1 ^= xmm8
1501# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
1502# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
1503pxor %xmm9,%xmm1
1504
1505# qhasm: xmm2 ^= xmm8
1506# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
1507# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
1508pxor %xmm9,%xmm2
1509
1510# qhasm: xmm7 ^= xmm0
1511# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
1512# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
1513pxor %xmm0,%xmm7
1514
1515# qhasm: xmm1 ^= xmm6
1516# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
1517# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
1518pxor %xmm6,%xmm1
1519
1520# qhasm: xmm4 ^= xmm7
1521# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
1522# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
1523pxor %xmm7,%xmm4
1524
1525# qhasm: xmm6 ^= xmm0
1526# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
1527# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
1528pxor %xmm0,%xmm6
1529
1530# qhasm: xmm0 ^= xmm1
1531# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
1532# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
1533pxor %xmm1,%xmm0
1534
1535# qhasm: xmm1 ^= xmm5
1536# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
1537# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
1538pxor %xmm5,%xmm1
1539
1540# qhasm: xmm5 ^= xmm2
1541# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
1542# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
1543pxor %xmm2,%xmm5
1544
1545# qhasm: xmm4 ^= xmm5
1546# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
1547# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
1548pxor %xmm5,%xmm4
1549
1550# qhasm: xmm2 ^= xmm3
1551# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
1552# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
1553pxor %xmm3,%xmm2
1554
1555# qhasm: xmm3 ^= xmm5
1556# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
1557# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
1558pxor %xmm5,%xmm3
1559
1560# qhasm: xmm6 ^= xmm3
1561# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
1562# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
1563pxor %xmm3,%xmm6
1564
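# The XOR/AND/OR network above is the SubBytes layer: the AES S-box
# evaluated as a Boolean circuit on the bit-sliced state, with no table
# lookups.
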
1565# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
1566# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
1567# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
1568pshufd $0x93,%xmm0,%xmm8
1569
1570# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
1571# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
1572# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
1573pshufd $0x93,%xmm1,%xmm9
1574
1575# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
1576# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
1577# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
1578pshufd $0x93,%xmm4,%xmm10
1579
1580# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
1581# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
1582# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
1583pshufd $0x93,%xmm6,%xmm11
1584
1585# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
1586# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
1587# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
1588pshufd $0x93,%xmm3,%xmm12
1589
1590# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
1591# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
1592# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
1593pshufd $0x93,%xmm7,%xmm13
1594
1595# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
1596# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
1597# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
1598pshufd $0x93,%xmm2,%xmm14
1599
1600# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
1601# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
1602# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
1603pshufd $0x93,%xmm5,%xmm15
1604
1605# qhasm: xmm0 ^= xmm8
1606# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1607# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1608pxor %xmm8,%xmm0
1609
1610# qhasm: xmm1 ^= xmm9
1611# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1612# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1613pxor %xmm9,%xmm1
1614
1615# qhasm: xmm4 ^= xmm10
1616# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1617# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1618pxor %xmm10,%xmm4
1619
1620# qhasm: xmm6 ^= xmm11
1621# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1622# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1623pxor %xmm11,%xmm6
1624
1625# qhasm: xmm3 ^= xmm12
1626# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1627# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1628pxor %xmm12,%xmm3
1629
1630# qhasm: xmm7 ^= xmm13
1631# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1632# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1633pxor %xmm13,%xmm7
1634
1635# qhasm: xmm2 ^= xmm14
1636# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1637# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1638pxor %xmm14,%xmm2
1639
1640# qhasm: xmm5 ^= xmm15
1641# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1642# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1643pxor %xmm15,%xmm5
1644
1645# qhasm: xmm8 ^= xmm5
1646# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
1647# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
1648pxor %xmm5,%xmm8
1649
1650# qhasm: xmm9 ^= xmm0
1651# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
1652# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
1653pxor %xmm0,%xmm9
1654
1655# qhasm: xmm10 ^= xmm1
1656# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
1657# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
1658pxor %xmm1,%xmm10
1659
1660# qhasm: xmm9 ^= xmm5
1661# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
1662# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
1663pxor %xmm5,%xmm9
1664
1665# qhasm: xmm11 ^= xmm4
1666# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
1667# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
1668pxor %xmm4,%xmm11
1669
1670# qhasm: xmm12 ^= xmm6
1671# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
1672# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
1673pxor %xmm6,%xmm12
1674
1675# qhasm: xmm13 ^= xmm3
1676# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
1677# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
1678pxor %xmm3,%xmm13
1679
1680# qhasm: xmm11 ^= xmm5
1681# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
1682# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
1683pxor %xmm5,%xmm11
1684
1685# qhasm: xmm14 ^= xmm7
1686# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
1687# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
1688pxor %xmm7,%xmm14
1689
1690# qhasm: xmm15 ^= xmm2
1691# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
1692# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
1693pxor %xmm2,%xmm15
1694
1695# qhasm: xmm12 ^= xmm5
1696# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
1697# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
1698pxor %xmm5,%xmm12
1699
1700# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
1701# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
1702# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
1703pshufd $0x4E,%xmm0,%xmm0
1704
1705# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
1706# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
1707# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
1708pshufd $0x4E,%xmm1,%xmm1
1709
1710# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
1711# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
1712# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
1713pshufd $0x4E,%xmm4,%xmm4
1714
1715# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
1716# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
1717# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
1718pshufd $0x4E,%xmm6,%xmm6
1719
1720# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
1721# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
1722# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
1723pshufd $0x4E,%xmm3,%xmm3
1724
1725# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
1726# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
1727# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
1728pshufd $0x4E,%xmm7,%xmm7
1729
1730# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
1731# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
1732# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
1733pshufd $0x4E,%xmm2,%xmm2
1734
1735# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
1736# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
1737# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
1738pshufd $0x4E,%xmm5,%xmm5
1739
1740# qhasm: xmm8 ^= xmm0
1741# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
1742# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
1743pxor %xmm0,%xmm8
1744
1745# qhasm: xmm9 ^= xmm1
1746# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
1747# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
1748pxor %xmm1,%xmm9
1749
1750# qhasm: xmm10 ^= xmm4
1751# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
1752# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
1753pxor %xmm4,%xmm10
1754
1755# qhasm: xmm11 ^= xmm6
1756# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
1757# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
1758pxor %xmm6,%xmm11
1759
1760# qhasm: xmm12 ^= xmm3
1761# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
1762# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
1763pxor %xmm3,%xmm12
1764
1765# qhasm: xmm13 ^= xmm7
1766# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
1767# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
1768pxor %xmm7,%xmm13
1769
1770# qhasm: xmm14 ^= xmm2
1771# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
1772# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
1773pxor %xmm2,%xmm14
1774
1775# qhasm: xmm15 ^= xmm5
1776# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
1777# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
1778pxor %xmm5,%xmm15
1779
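# MixColumns on the bit-sliced state: pshufd $0x93 rotates each register
# by one 32-bit word and pshufd $0x4E swaps its 64-bit halves; together
# with the XORs above this mixes the columns of all eight blocks at once.
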
1780# qhasm: xmm8 ^= *(int128 *)(c + 128)
1781# asm 1: pxor 128(<c=int64#4),<xmm8=int6464#9
1782# asm 2: pxor 128(<c=%rcx),<xmm8=%xmm8
1783pxor 128(%rcx),%xmm8
1784
1785# qhasm: shuffle bytes of xmm8 by SR
1786# asm 1: pshufb SR,<xmm8=int6464#9
1787# asm 2: pshufb SR,<xmm8=%xmm8
1788pshufb SR,%xmm8
1789
1790# qhasm: xmm9 ^= *(int128 *)(c + 144)
1791# asm 1: pxor 144(<c=int64#4),<xmm9=int6464#10
1792# asm 2: pxor 144(<c=%rcx),<xmm9=%xmm9
1793pxor 144(%rcx),%xmm9
1794
1795# qhasm: shuffle bytes of xmm9 by SR
1796# asm 1: pshufb SR,<xmm9=int6464#10
1797# asm 2: pshufb SR,<xmm9=%xmm9
1798pshufb SR,%xmm9
1799
1800# qhasm: xmm10 ^= *(int128 *)(c + 160)
1801# asm 1: pxor 160(<c=int64#4),<xmm10=int6464#11
1802# asm 2: pxor 160(<c=%rcx),<xmm10=%xmm10
1803pxor 160(%rcx),%xmm10
1804
1805# qhasm: shuffle bytes of xmm10 by SR
1806# asm 1: pshufb SR,<xmm10=int6464#11
1807# asm 2: pshufb SR,<xmm10=%xmm10
1808pshufb SR,%xmm10
1809
1810# qhasm: xmm11 ^= *(int128 *)(c + 176)
1811# asm 1: pxor 176(<c=int64#4),<xmm11=int6464#12
1812# asm 2: pxor 176(<c=%rcx),<xmm11=%xmm11
1813pxor 176(%rcx),%xmm11
1814
1815# qhasm: shuffle bytes of xmm11 by SR
1816# asm 1: pshufb SR,<xmm11=int6464#12
1817# asm 2: pshufb SR,<xmm11=%xmm11
1818pshufb SR,%xmm11
1819
1820# qhasm: xmm12 ^= *(int128 *)(c + 192)
1821# asm 1: pxor 192(<c=int64#4),<xmm12=int6464#13
1822# asm 2: pxor 192(<c=%rcx),<xmm12=%xmm12
1823pxor 192(%rcx),%xmm12
1824
1825# qhasm: shuffle bytes of xmm12 by SR
1826# asm 1: pshufb SR,<xmm12=int6464#13
1827# asm 2: pshufb SR,<xmm12=%xmm12
1828pshufb SR,%xmm12
1829
1830# qhasm: xmm13 ^= *(int128 *)(c + 208)
1831# asm 1: pxor 208(<c=int64#4),<xmm13=int6464#14
1832# asm 2: pxor 208(<c=%rcx),<xmm13=%xmm13
1833pxor 208(%rcx),%xmm13
1834
1835# qhasm: shuffle bytes of xmm13 by SR
1836# asm 1: pshufb SR,<xmm13=int6464#14
1837# asm 2: pshufb SR,<xmm13=%xmm13
1838pshufb SR,%xmm13
1839
1840# qhasm: xmm14 ^= *(int128 *)(c + 224)
1841# asm 1: pxor 224(<c=int64#4),<xmm14=int6464#15
1842# asm 2: pxor 224(<c=%rcx),<xmm14=%xmm14
1843pxor 224(%rcx),%xmm14
1844
1845# qhasm: shuffle bytes of xmm14 by SR
1846# asm 1: pshufb SR,<xmm14=int6464#15
1847# asm 2: pshufb SR,<xmm14=%xmm14
1848pshufb SR,%xmm14
1849
1850# qhasm: xmm15 ^= *(int128 *)(c + 240)
1851# asm 1: pxor 240(<c=int64#4),<xmm15=int6464#16
1852# asm 2: pxor 240(<c=%rcx),<xmm15=%xmm15
1853pxor 240(%rcx),%xmm15
1854
1855# qhasm: shuffle bytes of xmm15 by SR
1856# asm 1: pshufb SR,<xmm15=int6464#16
1857# asm 2: pshufb SR,<xmm15=%xmm15
1858pshufb SR,%xmm15
1859
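# The long pxor/pand/por sequence that follows appears to evaluate the AES
# S-box (SubBytes) as a boolean circuit on all bitsliced bytes at once,
# starting with a linear input transformation on xmm8..xmm15.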
1860# qhasm: xmm13 ^= xmm14
1861# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
1862# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
1863pxor %xmm14,%xmm13
1864
1865# qhasm: xmm10 ^= xmm9
1866# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
1867# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
1868pxor %xmm9,%xmm10
1869
1870# qhasm: xmm13 ^= xmm8
1871# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
1872# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
1873pxor %xmm8,%xmm13
1874
1875# qhasm: xmm14 ^= xmm10
1876# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
1877# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
1878pxor %xmm10,%xmm14
1879
1880# qhasm: xmm11 ^= xmm8
1881# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
1882# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
1883pxor %xmm8,%xmm11
1884
1885# qhasm: xmm14 ^= xmm11
1886# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
1887# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
1888pxor %xmm11,%xmm14
1889
1890# qhasm: xmm11 ^= xmm15
1891# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
1892# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
1893pxor %xmm15,%xmm11
1894
1895# qhasm: xmm11 ^= xmm12
1896# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
1897# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
1898pxor %xmm12,%xmm11
1899
1900# qhasm: xmm15 ^= xmm13
1901# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
1902# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
1903pxor %xmm13,%xmm15
1904
1905# qhasm: xmm11 ^= xmm9
1906# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
1907# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
1908pxor %xmm9,%xmm11
1909
1910# qhasm: xmm12 ^= xmm13
1911# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
1912# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
1913pxor %xmm13,%xmm12
1914
1915# qhasm: xmm10 ^= xmm15
1916# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
1917# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
1918pxor %xmm15,%xmm10
1919
1920# qhasm: xmm9 ^= xmm13
1921# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
1922# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
1923pxor %xmm13,%xmm9
1924
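# End of the input xors; the movdqa/pand/por/pxor block below is likely the
# nonlinear core of the S-box (inversion in GF(2^8) expressed as logic gates).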
1925# qhasm: xmm3 = xmm15
1926# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
1927# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
1928movdqa %xmm15,%xmm0
1929
1930# qhasm: xmm2 = xmm9
1931# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
1932# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
1933movdqa %xmm9,%xmm1
1934
1935# qhasm: xmm1 = xmm13
1936# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
1937# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
1938movdqa %xmm13,%xmm2
1939
1940# qhasm: xmm5 = xmm10
1941# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
1942# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
1943movdqa %xmm10,%xmm3
1944
1945# qhasm: xmm4 = xmm14
1946# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
1947# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
1948movdqa %xmm14,%xmm4
1949
1950# qhasm: xmm3 ^= xmm12
1951# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
1952# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
1953pxor %xmm12,%xmm0
1954
1955# qhasm: xmm2 ^= xmm10
1956# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
1957# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
1958pxor %xmm10,%xmm1
1959
1960# qhasm: xmm1 ^= xmm11
1961# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
1962# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
1963pxor %xmm11,%xmm2
1964
1965# qhasm: xmm5 ^= xmm12
1966# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
1967# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
1968pxor %xmm12,%xmm3
1969
1970# qhasm: xmm4 ^= xmm8
1971# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
1972# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
1973pxor %xmm8,%xmm4
1974
1975# qhasm: xmm6 = xmm3
1976# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
1977# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
1978movdqa %xmm0,%xmm5
1979
1980# qhasm: xmm0 = xmm2
1981# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
1982# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
1983movdqa %xmm1,%xmm6
1984
1985# qhasm: xmm7 = xmm3
1986# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
1987# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
1988movdqa %xmm0,%xmm7
1989
1990# qhasm: xmm2 |= xmm1
1991# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
1992# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
1993por %xmm2,%xmm1
1994
1995# qhasm: xmm3 |= xmm4
1996# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
1997# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
1998por %xmm4,%xmm0
1999
2000# qhasm: xmm7 ^= xmm0
2001# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
2002# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
2003pxor %xmm6,%xmm7
2004
2005# qhasm: xmm6 &= xmm4
2006# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
2007# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
2008pand %xmm4,%xmm5
2009
2010# qhasm: xmm0 &= xmm1
2011# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
2012# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
2013pand %xmm2,%xmm6
2014
2015# qhasm: xmm4 ^= xmm1
2016# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
2017# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
2018pxor %xmm2,%xmm4
2019
2020# qhasm: xmm7 &= xmm4
2021# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
2022# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
2023pand %xmm4,%xmm7
2024
2025# qhasm: xmm4 = xmm11
2026# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
2027# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
2028movdqa %xmm11,%xmm2
2029
2030# qhasm: xmm4 ^= xmm8
2031# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
2032# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
2033pxor %xmm8,%xmm2
2034
2035# qhasm: xmm5 &= xmm4
2036# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
2037# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
2038pand %xmm2,%xmm3
2039
2040# qhasm: xmm3 ^= xmm5
2041# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
2042# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
2043pxor %xmm3,%xmm0
2044
2045# qhasm: xmm2 ^= xmm5
2046# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
2047# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
2048pxor %xmm3,%xmm1
2049
2050# qhasm: xmm5 = xmm15
2051# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
2052# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
2053movdqa %xmm15,%xmm2
2054
2055# qhasm: xmm5 ^= xmm9
2056# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
2057# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
2058pxor %xmm9,%xmm2
2059
2060# qhasm: xmm4 = xmm13
2061# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
2062# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
2063movdqa %xmm13,%xmm3
2064
2065# qhasm: xmm1 = xmm5
2066# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
2067# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
2068movdqa %xmm2,%xmm4
2069
2070# qhasm: xmm4 ^= xmm14
2071# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
2072# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
2073pxor %xmm14,%xmm3
2074
2075# qhasm: xmm1 |= xmm4
2076# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
2077# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
2078por %xmm3,%xmm4
2079
2080# qhasm: xmm5 &= xmm4
2081# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
2082# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
2083pand %xmm3,%xmm2
2084
2085# qhasm: xmm0 ^= xmm5
2086# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
2087# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
2088pxor %xmm2,%xmm6
2089
2090# qhasm: xmm3 ^= xmm7
2091# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
2092# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
2093pxor %xmm7,%xmm0
2094
2095# qhasm: xmm2 ^= xmm6
2096# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
2097# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
2098pxor %xmm5,%xmm1
2099
2100# qhasm: xmm1 ^= xmm7
2101# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
2102# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
2103pxor %xmm7,%xmm4
2104
2105# qhasm: xmm0 ^= xmm6
2106# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
2107# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
2108pxor %xmm5,%xmm6
2109
2110# qhasm: xmm1 ^= xmm6
2111# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
2112# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
2113pxor %xmm5,%xmm4
2114
2115# qhasm: xmm4 = xmm10
2116# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
2117# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
2118movdqa %xmm10,%xmm2
2119
2120# qhasm: xmm5 = xmm12
2121# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
2122# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
2123movdqa %xmm12,%xmm3
2124
2125# qhasm: xmm6 = xmm9
2126# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
2127# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
2128movdqa %xmm9,%xmm5
2129
2130# qhasm: xmm7 = xmm15
2131# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
2132# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
2133movdqa %xmm15,%xmm7
2134
2135# qhasm: xmm4 &= xmm11
2136# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
2137# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
2138pand %xmm11,%xmm2
2139
2140# qhasm: xmm5 &= xmm8
2141# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
2142# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
2143pand %xmm8,%xmm3
2144
2145# qhasm: xmm6 &= xmm13
2146# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
2147# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
2148pand %xmm13,%xmm5
2149
2150# qhasm: xmm7 |= xmm14
2151# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
2152# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
2153por %xmm14,%xmm7
2154
2155# qhasm: xmm3 ^= xmm4
2156# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
2157# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
2158pxor %xmm2,%xmm0
2159
2160# qhasm: xmm2 ^= xmm5
2161# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
2162# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
2163pxor %xmm3,%xmm1
2164
2165# qhasm: xmm1 ^= xmm6
2166# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
2167# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
2168pxor %xmm5,%xmm4
2169
2170# qhasm: xmm0 ^= xmm7
2171# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
2172# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
2173pxor %xmm7,%xmm6
2174
2175# qhasm: xmm4 = xmm3
2176# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
2177# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
2178movdqa %xmm0,%xmm2
2179
2180# qhasm: xmm4 ^= xmm2
2181# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
2182# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
2183pxor %xmm1,%xmm2
2184
2185# qhasm: xmm3 &= xmm1
2186# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
2187# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
2188pand %xmm4,%xmm0
2189
2190# qhasm: xmm6 = xmm0
2191# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
2192# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
2193movdqa %xmm6,%xmm3
2194
2195# qhasm: xmm6 ^= xmm3
2196# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
2197# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
2198pxor %xmm0,%xmm3
2199
2200# qhasm: xmm7 = xmm4
2201# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
2202# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
2203movdqa %xmm2,%xmm5
2204
2205# qhasm: xmm7 &= xmm6
2206# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
2207# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
2208pand %xmm3,%xmm5
2209
2210# qhasm: xmm7 ^= xmm2
2211# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
2212# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
2213pxor %xmm1,%xmm5
2214
2215# qhasm: xmm5 = xmm1
2216# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
2217# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
2218movdqa %xmm4,%xmm7
2219
2220# qhasm: xmm5 ^= xmm0
2221# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
2222# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
2223pxor %xmm6,%xmm7
2224
2225# qhasm: xmm3 ^= xmm2
2226# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
2227# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
2228pxor %xmm1,%xmm0
2229
2230# qhasm: xmm5 &= xmm3
2231# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
2232# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
2233pand %xmm0,%xmm7
2234
2235# qhasm: xmm5 ^= xmm0
2236# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
2237# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
2238pxor %xmm6,%xmm7
2239
2240# qhasm: xmm1 ^= xmm5
2241# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
2242# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
2243pxor %xmm7,%xmm4
2244
2245# qhasm: xmm2 = xmm6
2246# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
2247# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
2248movdqa %xmm3,%xmm0
2249
2250# qhasm: xmm2 ^= xmm5
2251# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
2252# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
2253pxor %xmm7,%xmm0
2254
2255# qhasm: xmm2 &= xmm0
2256# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
2257# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
2258pand %xmm6,%xmm0
2259
2260# qhasm: xmm1 ^= xmm2
2261# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
2262# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
2263pxor %xmm0,%xmm4
2264
2265# qhasm: xmm6 ^= xmm2
2266# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
2267# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
2268pxor %xmm0,%xmm3
2269
2270# qhasm: xmm6 &= xmm7
2271# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
2272# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
2273pand %xmm5,%xmm3
2274
2275# qhasm: xmm6 ^= xmm4
2276# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
2277# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
2278pxor %xmm2,%xmm3
2279
2280# qhasm: xmm4 = xmm14
2281# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
2282# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
2283movdqa %xmm14,%xmm0
2284
2285# qhasm: xmm0 = xmm13
2286# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
2287# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
2288movdqa %xmm13,%xmm1
2289
2290# qhasm: xmm2 = xmm7
2291# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
2292# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
2293movdqa %xmm5,%xmm2
2294
2295# qhasm: xmm2 ^= xmm6
2296# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
2297# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
2298pxor %xmm3,%xmm2
2299
2300# qhasm: xmm2 &= xmm14
2301# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
2302# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
2303pand %xmm14,%xmm2
2304
2305# qhasm: xmm14 ^= xmm13
2306# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
2307# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
2308pxor %xmm13,%xmm14
2309
2310# qhasm: xmm14 &= xmm6
2311# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
2312# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
2313pand %xmm3,%xmm14
2314
2315# qhasm: xmm13 &= xmm7
2316# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
2317# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
2318pand %xmm5,%xmm13
2319
2320# qhasm: xmm14 ^= xmm13
2321# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
2322# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
2323pxor %xmm13,%xmm14
2324
2325# qhasm: xmm13 ^= xmm2
2326# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
2327# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
2328pxor %xmm2,%xmm13
2329
2330# qhasm: xmm4 ^= xmm8
2331# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
2332# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
2333pxor %xmm8,%xmm0
2334
2335# qhasm: xmm0 ^= xmm11
2336# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
2337# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
2338pxor %xmm11,%xmm1
2339
2340# qhasm: xmm7 ^= xmm5
2341# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
2342# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
2343pxor %xmm7,%xmm5
2344
2345# qhasm: xmm6 ^= xmm1
2346# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
2347# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
2348pxor %xmm4,%xmm3
2349
2350# qhasm: xmm3 = xmm7
2351# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2352# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2353movdqa %xmm5,%xmm2
2354
2355# qhasm: xmm3 ^= xmm6
2356# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2357# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2358pxor %xmm3,%xmm2
2359
2360# qhasm: xmm3 &= xmm4
2361# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
2362# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
2363pand %xmm0,%xmm2
2364
2365# qhasm: xmm4 ^= xmm0
2366# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
2367# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
2368pxor %xmm1,%xmm0
2369
2370# qhasm: xmm4 &= xmm6
2371# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
2372# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
2373pand %xmm3,%xmm0
2374
2375# qhasm: xmm0 &= xmm7
2376# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
2377# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
2378pand %xmm5,%xmm1
2379
2380# qhasm: xmm0 ^= xmm4
2381# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
2382# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
2383pxor %xmm0,%xmm1
2384
2385# qhasm: xmm4 ^= xmm3
2386# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
2387# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
2388pxor %xmm2,%xmm0
2389
2390# qhasm: xmm2 = xmm5
2391# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
2392# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
2393movdqa %xmm7,%xmm2
2394
2395# qhasm: xmm2 ^= xmm1
2396# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
2397# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
2398pxor %xmm4,%xmm2
2399
2400# qhasm: xmm2 &= xmm8
2401# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
2402# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
2403pand %xmm8,%xmm2
2404
2405# qhasm: xmm8 ^= xmm11
2406# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
2407# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
2408pxor %xmm11,%xmm8
2409
2410# qhasm: xmm8 &= xmm1
2411# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
2412# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
2413pand %xmm4,%xmm8
2414
2415# qhasm: xmm11 &= xmm5
2416# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
2417# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
2418pand %xmm7,%xmm11
2419
2420# qhasm: xmm8 ^= xmm11
2421# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
2422# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
2423pxor %xmm11,%xmm8
2424
2425# qhasm: xmm11 ^= xmm2
2426# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
2427# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
2428pxor %xmm2,%xmm11
2429
2430# qhasm: xmm14 ^= xmm4
2431# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
2432# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
2433pxor %xmm0,%xmm14
2434
2435# qhasm: xmm8 ^= xmm4
2436# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
2437# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
2438pxor %xmm0,%xmm8
2439
2440# qhasm: xmm13 ^= xmm0
2441# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
2442# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
2443pxor %xmm1,%xmm13
2444
2445# qhasm: xmm11 ^= xmm0
2446# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
2447# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
2448pxor %xmm1,%xmm11
2449
2450# qhasm: xmm4 = xmm15
2451# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
2452# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
2453movdqa %xmm15,%xmm0
2454
2455# qhasm: xmm0 = xmm9
2456# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
2457# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
2458movdqa %xmm9,%xmm1
2459
2460# qhasm: xmm4 ^= xmm12
2461# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
2462# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
2463pxor %xmm12,%xmm0
2464
2465# qhasm: xmm0 ^= xmm10
2466# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
2467# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
2468pxor %xmm10,%xmm1
2469
2470# qhasm: xmm3 = xmm7
2471# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2472# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2473movdqa %xmm5,%xmm2
2474
2475# qhasm: xmm3 ^= xmm6
2476# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2477# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2478pxor %xmm3,%xmm2
2479
2480# qhasm: xmm3 &= xmm4
2481# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
2482# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
2483pand %xmm0,%xmm2
2484
2485# qhasm: xmm4 ^= xmm0
2486# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
2487# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
2488pxor %xmm1,%xmm0
2489
2490# qhasm: xmm4 &= xmm6
2491# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
2492# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
2493pand %xmm3,%xmm0
2494
2495# qhasm: xmm0 &= xmm7
2496# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
2497# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
2498pand %xmm5,%xmm1
2499
2500# qhasm: xmm0 ^= xmm4
2501# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
2502# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
2503pxor %xmm0,%xmm1
2504
2505# qhasm: xmm4 ^= xmm3
2506# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
2507# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
2508pxor %xmm2,%xmm0
2509
2510# qhasm: xmm2 = xmm5
2511# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
2512# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
2513movdqa %xmm7,%xmm2
2514
2515# qhasm: xmm2 ^= xmm1
2516# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
2517# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
2518pxor %xmm4,%xmm2
2519
2520# qhasm: xmm2 &= xmm12
2521# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
2522# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
2523pand %xmm12,%xmm2
2524
2525# qhasm: xmm12 ^= xmm10
2526# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
2527# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
2528pxor %xmm10,%xmm12
2529
2530# qhasm: xmm12 &= xmm1
2531# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
2532# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
2533pand %xmm4,%xmm12
2534
2535# qhasm: xmm10 &= xmm5
2536# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
2537# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
2538pand %xmm7,%xmm10
2539
2540# qhasm: xmm12 ^= xmm10
2541# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
2542# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
2543pxor %xmm10,%xmm12
2544
2545# qhasm: xmm10 ^= xmm2
2546# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
2547# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
2548pxor %xmm2,%xmm10
2549
2550# qhasm: xmm7 ^= xmm5
2551# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
2552# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
2553pxor %xmm7,%xmm5
2554
2555# qhasm: xmm6 ^= xmm1
2556# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
2557# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
2558pxor %xmm4,%xmm3
2559
2560# qhasm: xmm3 = xmm7
2561# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2562# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2563movdqa %xmm5,%xmm2
2564
2565# qhasm: xmm3 ^= xmm6
2566# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2567# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2568pxor %xmm3,%xmm2
2569
2570# qhasm: xmm3 &= xmm15
2571# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
2572# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
2573pand %xmm15,%xmm2
2574
2575# qhasm: xmm15 ^= xmm9
2576# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
2577# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
2578pxor %xmm9,%xmm15
2579
2580# qhasm: xmm15 &= xmm6
2581# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
2582# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
2583pand %xmm3,%xmm15
2584
2585# qhasm: xmm9 &= xmm7
2586# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
2587# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
2588pand %xmm5,%xmm9
2589
2590# qhasm: xmm15 ^= xmm9
2591# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
2592# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
2593pxor %xmm9,%xmm15
2594
2595# qhasm: xmm9 ^= xmm3
2596# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
2597# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
2598pxor %xmm2,%xmm9
2599
2600# qhasm: xmm15 ^= xmm4
2601# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
2602# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
2603pxor %xmm0,%xmm15
2604
2605# qhasm: xmm12 ^= xmm4
2606# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
2607# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
2608pxor %xmm0,%xmm12
2609
2610# qhasm: xmm9 ^= xmm0
2611# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
2612# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
2613pxor %xmm1,%xmm9
2614
2615# qhasm: xmm10 ^= xmm0
2616# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
2617# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
2618pxor %xmm1,%xmm10
2619
2620# qhasm: xmm15 ^= xmm8
2621# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
2622# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
2623pxor %xmm8,%xmm15
2624
2625# qhasm: xmm9 ^= xmm14
2626# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
2627# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
2628pxor %xmm14,%xmm9
2629
2630# qhasm: xmm12 ^= xmm15
2631# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
2632# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
2633pxor %xmm15,%xmm12
2634
2635# qhasm: xmm14 ^= xmm8
2636# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
2637# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
2638pxor %xmm8,%xmm14
2639
2640# qhasm: xmm8 ^= xmm9
2641# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
2642# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
2643pxor %xmm9,%xmm8
2644
2645# qhasm: xmm9 ^= xmm13
2646# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
2647# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
2648pxor %xmm13,%xmm9
2649
2650# qhasm: xmm13 ^= xmm10
2651# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
2652# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
2653pxor %xmm10,%xmm13
2654
2655# qhasm: xmm12 ^= xmm13
2656# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
2657# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
2658pxor %xmm13,%xmm12
2659
2660# qhasm: xmm10 ^= xmm11
2661# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
2662# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
2663pxor %xmm11,%xmm10
2664
2665# qhasm: xmm11 ^= xmm13
2666# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
2667# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
2668pxor %xmm13,%xmm11
2669
2670# qhasm: xmm14 ^= xmm11
2671# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
2672# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
2673pxor %xmm11,%xmm14
2674
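# The pshufd $0x93 shuffles below rotate the four 32-bit words of each register
# by one position; combined with the xors that follow, this appears to
# implement the MixColumns step of this round.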
2675# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
2676# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
2677# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
2678pshufd $0x93,%xmm8,%xmm0
2679
2680# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
2681# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
2682# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
2683pshufd $0x93,%xmm9,%xmm1
2684
2685# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
2686# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
2687# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
2688pshufd $0x93,%xmm12,%xmm2
2689
2690# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
2691# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
2692# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
2693pshufd $0x93,%xmm14,%xmm3
2694
2695# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
2696# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
2697# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
2698pshufd $0x93,%xmm11,%xmm4
2699
2700# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
2701# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
2702# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
2703pshufd $0x93,%xmm15,%xmm5
2704
2705# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
2706# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
2707# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
2708pshufd $0x93,%xmm10,%xmm6
2709
2710# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
2711# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
2712# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
2713pshufd $0x93,%xmm13,%xmm7
2714
2715# qhasm: xmm8 ^= xmm0
2716# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
2717# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
2718pxor %xmm0,%xmm8
2719
2720# qhasm: xmm9 ^= xmm1
2721# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
2722# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
2723pxor %xmm1,%xmm9
2724
2725# qhasm: xmm12 ^= xmm2
2726# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
2727# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
2728pxor %xmm2,%xmm12
2729
2730# qhasm: xmm14 ^= xmm3
2731# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
2732# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
2733pxor %xmm3,%xmm14
2734
2735# qhasm: xmm11 ^= xmm4
2736# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
2737# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
2738pxor %xmm4,%xmm11
2739
2740# qhasm: xmm15 ^= xmm5
2741# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
2742# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
2743pxor %xmm5,%xmm15
2744
2745# qhasm: xmm10 ^= xmm6
2746# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
2747# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
2748pxor %xmm6,%xmm10
2749
2750# qhasm: xmm13 ^= xmm7
2751# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
2752# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
2753pxor %xmm7,%xmm13
2754
2755# qhasm: xmm0 ^= xmm13
2756# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
2757# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
2758pxor %xmm13,%xmm0
2759
2760# qhasm: xmm1 ^= xmm8
2761# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
2762# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
2763pxor %xmm8,%xmm1
2764
2765# qhasm: xmm2 ^= xmm9
2766# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
2767# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
2768pxor %xmm9,%xmm2
2769
2770# qhasm: xmm1 ^= xmm13
2771# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
2772# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
2773pxor %xmm13,%xmm1
2774
2775# qhasm: xmm3 ^= xmm12
2776# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
2777# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
2778pxor %xmm12,%xmm3
2779
2780# qhasm: xmm4 ^= xmm14
2781# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
2782# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
2783pxor %xmm14,%xmm4
2784
2785# qhasm: xmm5 ^= xmm11
2786# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
2787# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
2788pxor %xmm11,%xmm5
2789
2790# qhasm: xmm3 ^= xmm13
2791# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
2792# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
2793pxor %xmm13,%xmm3
2794
2795# qhasm: xmm6 ^= xmm15
2796# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
2797# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
2798pxor %xmm15,%xmm6
2799
2800# qhasm: xmm7 ^= xmm10
2801# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
2802# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
2803pxor %xmm10,%xmm7
2804
2805# qhasm: xmm4 ^= xmm13
2806# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
2807# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
2808pxor %xmm13,%xmm4
2809
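# As above, the pshufd $0x4E shuffles swap the 64-bit halves to complete the
# MixColumns rotation before the final xors into xmm0..xmm7.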
2810# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
2811# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
2812# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
2813pshufd $0x4E,%xmm8,%xmm8
2814
2815# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
2816# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
2817# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
2818pshufd $0x4E,%xmm9,%xmm9
2819
2820# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
2821# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
2822# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
2823pshufd $0x4E,%xmm12,%xmm12
2824
2825# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
2826# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
2827# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
2828pshufd $0x4E,%xmm14,%xmm14
2829
2830# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
2831# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
2832# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
2833pshufd $0x4E,%xmm11,%xmm11
2834
2835# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
2836# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
2837# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
2838pshufd $0x4E,%xmm15,%xmm15
2839
2840# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
2841# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
2842# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
2843pshufd $0x4E,%xmm10,%xmm10
2844
2845# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
2846# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
2847# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
2848pshufd $0x4E,%xmm13,%xmm13
2849
2850# qhasm: xmm0 ^= xmm8
2851# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
2852# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
2853pxor %xmm8,%xmm0
2854
2855# qhasm: xmm1 ^= xmm9
2856# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
2857# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
2858pxor %xmm9,%xmm1
2859
2860# qhasm: xmm2 ^= xmm12
2861# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
2862# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
2863pxor %xmm12,%xmm2
2864
2865# qhasm: xmm3 ^= xmm14
2866# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
2867# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
2868pxor %xmm14,%xmm3
2869
2870# qhasm: xmm4 ^= xmm11
2871# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
2872# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
2873pxor %xmm11,%xmm4
2874
2875# qhasm: xmm5 ^= xmm15
2876# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
2877# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
2878pxor %xmm15,%xmm5
2879
2880# qhasm: xmm6 ^= xmm10
2881# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
2882# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
2883pxor %xmm10,%xmm6
2884
2885# qhasm: xmm7 ^= xmm13
2886# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
2887# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
2888pxor %xmm13,%xmm7
2889
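# Next round: xor in the round-key material at offsets c + 256 .. c + 368 and
# apply the SR byte permutation to xmm0..xmm7 (likely AddRoundKey followed by
# ShiftRows).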
2890# qhasm: xmm0 ^= *(int128 *)(c + 256)
2891# asm 1: pxor 256(<c=int64#4),<xmm0=int6464#1
2892# asm 2: pxor 256(<c=%rcx),<xmm0=%xmm0
2893pxor 256(%rcx),%xmm0
2894
2895# qhasm: shuffle bytes of xmm0 by SR
2896# asm 1: pshufb SR,<xmm0=int6464#1
2897# asm 2: pshufb SR,<xmm0=%xmm0
2898pshufb SR,%xmm0
2899
2900# qhasm: xmm1 ^= *(int128 *)(c + 272)
2901# asm 1: pxor 272(<c=int64#4),<xmm1=int6464#2
2902# asm 2: pxor 272(<c=%rcx),<xmm1=%xmm1
2903pxor 272(%rcx),%xmm1
2904
2905# qhasm: shuffle bytes of xmm1 by SR
2906# asm 1: pshufb SR,<xmm1=int6464#2
2907# asm 2: pshufb SR,<xmm1=%xmm1
2908pshufb SR,%xmm1
2909
2910# qhasm: xmm2 ^= *(int128 *)(c + 288)
2911# asm 1: pxor 288(<c=int64#4),<xmm2=int6464#3
2912# asm 2: pxor 288(<c=%rcx),<xmm2=%xmm2
2913pxor 288(%rcx),%xmm2
2914
2915# qhasm: shuffle bytes of xmm2 by SR
2916# asm 1: pshufb SR,<xmm2=int6464#3
2917# asm 2: pshufb SR,<xmm2=%xmm2
2918pshufb SR,%xmm2
2919
2920# qhasm: xmm3 ^= *(int128 *)(c + 304)
2921# asm 1: pxor 304(<c=int64#4),<xmm3=int6464#4
2922# asm 2: pxor 304(<c=%rcx),<xmm3=%xmm3
2923pxor 304(%rcx),%xmm3
2924
2925# qhasm: shuffle bytes of xmm3 by SR
2926# asm 1: pshufb SR,<xmm3=int6464#4
2927# asm 2: pshufb SR,<xmm3=%xmm3
2928pshufb SR,%xmm3
2929
2930# qhasm: xmm4 ^= *(int128 *)(c + 320)
2931# asm 1: pxor 320(<c=int64#4),<xmm4=int6464#5
2932# asm 2: pxor 320(<c=%rcx),<xmm4=%xmm4
2933pxor 320(%rcx),%xmm4
2934
2935# qhasm: shuffle bytes of xmm4 by SR
2936# asm 1: pshufb SR,<xmm4=int6464#5
2937# asm 2: pshufb SR,<xmm4=%xmm4
2938pshufb SR,%xmm4
2939
2940# qhasm: xmm5 ^= *(int128 *)(c + 336)
2941# asm 1: pxor 336(<c=int64#4),<xmm5=int6464#6
2942# asm 2: pxor 336(<c=%rcx),<xmm5=%xmm5
2943pxor 336(%rcx),%xmm5
2944
2945# qhasm: shuffle bytes of xmm5 by SR
2946# asm 1: pshufb SR,<xmm5=int6464#6
2947# asm 2: pshufb SR,<xmm5=%xmm5
2948pshufb SR,%xmm5
2949
2950# qhasm: xmm6 ^= *(int128 *)(c + 352)
2951# asm 1: pxor 352(<c=int64#4),<xmm6=int6464#7
2952# asm 2: pxor 352(<c=%rcx),<xmm6=%xmm6
2953pxor 352(%rcx),%xmm6
2954
2955# qhasm: shuffle bytes of xmm6 by SR
2956# asm 1: pshufb SR,<xmm6=int6464#7
2957# asm 2: pshufb SR,<xmm6=%xmm6
2958pshufb SR,%xmm6
2959
2960# qhasm: xmm7 ^= *(int128 *)(c + 368)
2961# asm 1: pxor 368(<c=int64#4),<xmm7=int6464#8
2962# asm 2: pxor 368(<c=%rcx),<xmm7=%xmm7
2963pxor 368(%rcx),%xmm7
2964
2965# qhasm: shuffle bytes of xmm7 by SR
2966# asm 1: pshufb SR,<xmm7=int6464#8
2967# asm 2: pshufb SR,<xmm7=%xmm7
2968pshufb SR,%xmm7
2969
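# Start of this round's bitsliced SubBytes on xmm0..xmm7, apparently mirroring
# the sequence applied to xmm8..xmm15 above.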
2970# qhasm: xmm5 ^= xmm6
2971# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
2972# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
2973pxor %xmm6,%xmm5
2974
2975# qhasm: xmm2 ^= xmm1
2976# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
2977# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
2978pxor %xmm1,%xmm2
2979
2980# qhasm: xmm5 ^= xmm0
2981# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
2982# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
2983pxor %xmm0,%xmm5
2984
2985# qhasm: xmm6 ^= xmm2
2986# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
2987# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
2988pxor %xmm2,%xmm6
2989
2990# qhasm: xmm3 ^= xmm0
2991# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
2992# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
2993pxor %xmm0,%xmm3
2994
2995# qhasm: xmm6 ^= xmm3
2996# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
2997# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
2998pxor %xmm3,%xmm6
2999
3000# qhasm: xmm3 ^= xmm7
3001# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
3002# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
3003pxor %xmm7,%xmm3
3004
3005# qhasm: xmm3 ^= xmm4
3006# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
3007# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
3008pxor %xmm4,%xmm3
3009
3010# qhasm: xmm7 ^= xmm5
3011# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
3012# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
3013pxor %xmm5,%xmm7
3014
3015# qhasm: xmm3 ^= xmm1
3016# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
3017# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
3018pxor %xmm1,%xmm3
3019
3020# qhasm: xmm4 ^= xmm5
3021# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3022# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3023pxor %xmm5,%xmm4
3024
3025# qhasm: xmm2 ^= xmm7
3026# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
3027# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
3028pxor %xmm7,%xmm2
3029
3030# qhasm: xmm1 ^= xmm5
3031# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
3032# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
3033pxor %xmm5,%xmm1
3034
3035# qhasm: xmm11 = xmm7
3036# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
3037# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
3038movdqa %xmm7,%xmm8
3039
3040# qhasm: xmm10 = xmm1
3041# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
3042# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
3043movdqa %xmm1,%xmm9
3044
3045# qhasm: xmm9 = xmm5
3046# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
3047# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
3048movdqa %xmm5,%xmm10
3049
3050# qhasm: xmm13 = xmm2
3051# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
3052# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
3053movdqa %xmm2,%xmm11
3054
3055# qhasm: xmm12 = xmm6
3056# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
3057# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
3058movdqa %xmm6,%xmm12
3059
3060# qhasm: xmm11 ^= xmm4
3061# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
3062# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
3063pxor %xmm4,%xmm8
3064
3065# qhasm: xmm10 ^= xmm2
3066# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
3067# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
3068pxor %xmm2,%xmm9
3069
3070# qhasm: xmm9 ^= xmm3
3071# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
3072# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
3073pxor %xmm3,%xmm10
3074
3075# qhasm: xmm13 ^= xmm4
3076# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
3077# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
3078pxor %xmm4,%xmm11
3079
3080# qhasm: xmm12 ^= xmm0
3081# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
3082# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
3083pxor %xmm0,%xmm12
3084
3085# qhasm: xmm14 = xmm11
3086# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
3087# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
3088movdqa %xmm8,%xmm13
3089
3090# qhasm: xmm8 = xmm10
3091# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
3092# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
3093movdqa %xmm9,%xmm14
3094
3095# qhasm: xmm15 = xmm11
3096# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
3097# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
3098movdqa %xmm8,%xmm15
3099
3100# qhasm: xmm10 |= xmm9
3101# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
3102# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
3103por %xmm10,%xmm9
3104
3105# qhasm: xmm11 |= xmm12
3106# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
3107# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
3108por %xmm12,%xmm8
3109
3110# qhasm: xmm15 ^= xmm8
3111# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
3112# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
3113pxor %xmm14,%xmm15
3114
3115# qhasm: xmm14 &= xmm12
3116# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
3117# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
3118pand %xmm12,%xmm13
3119
3120# qhasm: xmm8 &= xmm9
3121# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
3122# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
3123pand %xmm10,%xmm14
3124
3125# qhasm: xmm12 ^= xmm9
3126# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
3127# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
3128pxor %xmm10,%xmm12
3129
3130# qhasm: xmm15 &= xmm12
3131# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
3132# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
3133pand %xmm12,%xmm15
3134
3135# qhasm: xmm12 = xmm3
3136# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
3137# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
3138movdqa %xmm3,%xmm10
3139
3140# qhasm: xmm12 ^= xmm0
3141# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
3142# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
3143pxor %xmm0,%xmm10
3144
3145# qhasm: xmm13 &= xmm12
3146# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
3147# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
3148pand %xmm10,%xmm11
3149
3150# qhasm: xmm11 ^= xmm13
3151# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
3152# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
3153pxor %xmm11,%xmm8
3154
3155# qhasm: xmm10 ^= xmm13
3156# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3157# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3158pxor %xmm11,%xmm9
3159
3160# qhasm: xmm13 = xmm7
3161# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
3162# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
3163movdqa %xmm7,%xmm10
3164
3165# qhasm: xmm13 ^= xmm1
3166# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
3167# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
3168pxor %xmm1,%xmm10
3169
3170# qhasm: xmm12 = xmm5
3171# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
3172# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
3173movdqa %xmm5,%xmm11
3174
3175# qhasm: xmm9 = xmm13
3176# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
3177# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
3178movdqa %xmm10,%xmm12
3179
3180# qhasm: xmm12 ^= xmm6
3181# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
3182# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
3183pxor %xmm6,%xmm11
3184
3185# qhasm: xmm9 |= xmm12
3186# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
3187# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
3188por %xmm11,%xmm12
3189
3190# qhasm: xmm13 &= xmm12
3191# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
3192# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
3193pand %xmm11,%xmm10
3194
3195# qhasm: xmm8 ^= xmm13
3196# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
3197# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
3198pxor %xmm10,%xmm14
3199
3200# qhasm: xmm11 ^= xmm15
3201# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
3202# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
3203pxor %xmm15,%xmm8
3204
3205# qhasm: xmm10 ^= xmm14
3206# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
3207# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
3208pxor %xmm13,%xmm9
3209
3210# qhasm: xmm9 ^= xmm15
3211# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
3212# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
3213pxor %xmm15,%xmm12
3214
3215# qhasm: xmm8 ^= xmm14
3216# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
3217# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
3218pxor %xmm13,%xmm14
3219
3220# qhasm: xmm9 ^= xmm14
3221# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3222# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3223pxor %xmm13,%xmm12
3224
3225# qhasm: xmm12 = xmm2
3226# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
3227# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
3228movdqa %xmm2,%xmm10
3229
3230# qhasm: xmm13 = xmm4
3231# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
3232# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
3233movdqa %xmm4,%xmm11
3234
3235# qhasm: xmm14 = xmm1
3236# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
3237# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
3238movdqa %xmm1,%xmm13
3239
3240# qhasm: xmm15 = xmm7
3241# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
3242# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
3243movdqa %xmm7,%xmm15
3244
3245# qhasm: xmm12 &= xmm3
3246# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
3247# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
3248pand %xmm3,%xmm10
3249
3250# qhasm: xmm13 &= xmm0
3251# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
3252# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
3253pand %xmm0,%xmm11
3254
3255# qhasm: xmm14 &= xmm5
3256# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
3257# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
3258pand %xmm5,%xmm13
3259
3260# qhasm: xmm15 |= xmm6
3261# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
3262# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
3263por %xmm6,%xmm15
3264
3265# qhasm: xmm11 ^= xmm12
3266# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
3267# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
3268pxor %xmm10,%xmm8
3269
3270# qhasm: xmm10 ^= xmm13
3271# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3272# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3273pxor %xmm11,%xmm9
3274
3275# qhasm: xmm9 ^= xmm14
3276# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3277# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3278pxor %xmm13,%xmm12
3279
3280# qhasm: xmm8 ^= xmm15
3281# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
3282# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
3283pxor %xmm15,%xmm14
3284
3285# qhasm: xmm12 = xmm11
3286# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
3287# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
3288movdqa %xmm8,%xmm10
3289
3290# qhasm: xmm12 ^= xmm10
3291# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
3292# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
3293pxor %xmm9,%xmm10
3294
3295# qhasm: xmm11 &= xmm9
3296# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
3297# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
3298pand %xmm12,%xmm8
3299
3300# qhasm: xmm14 = xmm8
3301# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
3302# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
3303movdqa %xmm14,%xmm11
3304
3305# qhasm: xmm14 ^= xmm11
3306# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
3307# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
3308pxor %xmm8,%xmm11
3309
3310# qhasm: xmm15 = xmm12
3311# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
3312# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
3313movdqa %xmm10,%xmm13
3314
3315# qhasm: xmm15 &= xmm14
3316# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
3317# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
3318pand %xmm11,%xmm13
3319
3320# qhasm: xmm15 ^= xmm10
3321# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
3322# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
3323pxor %xmm9,%xmm13
3324
3325# qhasm: xmm13 = xmm9
3326# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
3327# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
3328movdqa %xmm12,%xmm15
3329
3330# qhasm: xmm13 ^= xmm8
3331# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3332# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3333pxor %xmm14,%xmm15
3334
3335# qhasm: xmm11 ^= xmm10
3336# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
3337# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
3338pxor %xmm9,%xmm8
3339
3340# qhasm: xmm13 &= xmm11
3341# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
3342# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
3343pand %xmm8,%xmm15
3344
3345# qhasm: xmm13 ^= xmm8
3346# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3347# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3348pxor %xmm14,%xmm15
3349
3350# qhasm: xmm9 ^= xmm13
3351# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
3352# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
3353pxor %xmm15,%xmm12
3354
3355# qhasm: xmm10 = xmm14
3356# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
3357# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
3358movdqa %xmm11,%xmm8
3359
3360# qhasm: xmm10 ^= xmm13
3361# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
3362# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
3363pxor %xmm15,%xmm8
3364
3365# qhasm: xmm10 &= xmm8
3366# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
3367# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
3368pand %xmm14,%xmm8
3369
3370# qhasm: xmm9 ^= xmm10
3371# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
3372# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
3373pxor %xmm8,%xmm12
3374
3375# qhasm: xmm14 ^= xmm10
3376# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
3377# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
3378pxor %xmm8,%xmm11
3379
3380# qhasm: xmm14 &= xmm15
3381# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
3382# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
3383pand %xmm13,%xmm11
3384
3385# qhasm: xmm14 ^= xmm12
3386# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
3387# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
3388pxor %xmm10,%xmm11
3389
3390# qhasm: xmm12 = xmm6
3391# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
3392# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
3393movdqa %xmm6,%xmm8
3394
3395# qhasm: xmm8 = xmm5
3396# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
3397# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
3398movdqa %xmm5,%xmm9
3399
3400# qhasm: xmm10 = xmm15
3401# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
3402# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
3403movdqa %xmm13,%xmm10
3404
3405# qhasm: xmm10 ^= xmm14
3406# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
3407# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
3408pxor %xmm11,%xmm10
3409
3410# qhasm: xmm10 &= xmm6
3411# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
3412# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
3413pand %xmm6,%xmm10
3414
3415# qhasm: xmm6 ^= xmm5
3416# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
3417# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
3418pxor %xmm5,%xmm6
3419
3420# qhasm: xmm6 &= xmm14
3421# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
3422# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
3423pand %xmm11,%xmm6
3424
3425# qhasm: xmm5 &= xmm15
3426# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
3427# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
3428pand %xmm13,%xmm5
3429
3430# qhasm: xmm6 ^= xmm5
3431# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
3432# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
3433pxor %xmm5,%xmm6
3434
3435# qhasm: xmm5 ^= xmm10
3436# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
3437# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
3438pxor %xmm10,%xmm5
3439
3440# qhasm: xmm12 ^= xmm0
3441# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
3442# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
3443pxor %xmm0,%xmm8
3444
3445# qhasm: xmm8 ^= xmm3
3446# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
3447# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
3448pxor %xmm3,%xmm9
3449
3450# qhasm: xmm15 ^= xmm13
3451# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3452# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3453pxor %xmm15,%xmm13
3454
3455# qhasm: xmm14 ^= xmm9
3456# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3457# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3458pxor %xmm12,%xmm11
3459
3460# qhasm: xmm11 = xmm15
3461# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3462# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3463movdqa %xmm13,%xmm10
3464
3465# qhasm: xmm11 ^= xmm14
3466# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3467# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3468pxor %xmm11,%xmm10
3469
3470# qhasm: xmm11 &= xmm12
3471# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3472# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3473pand %xmm8,%xmm10
3474
3475# qhasm: xmm12 ^= xmm8
3476# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3477# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3478pxor %xmm9,%xmm8
3479
3480# qhasm: xmm12 &= xmm14
3481# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3482# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3483pand %xmm11,%xmm8
3484
3485# qhasm: xmm8 &= xmm15
3486# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3487# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3488pand %xmm13,%xmm9
3489
3490# qhasm: xmm8 ^= xmm12
3491# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3492# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3493pxor %xmm8,%xmm9
3494
3495# qhasm: xmm12 ^= xmm11
3496# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3497# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3498pxor %xmm10,%xmm8
3499
3500# qhasm: xmm10 = xmm13
3501# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3502# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3503movdqa %xmm15,%xmm10
3504
3505# qhasm: xmm10 ^= xmm9
3506# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3507# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3508pxor %xmm12,%xmm10
3509
3510# qhasm: xmm10 &= xmm0
3511# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
3512# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
3513pand %xmm0,%xmm10
3514
3515# qhasm: xmm0 ^= xmm3
3516# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
3517# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
3518pxor %xmm3,%xmm0
3519
3520# qhasm: xmm0 &= xmm9
3521# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
3522# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
3523pand %xmm12,%xmm0
3524
3525# qhasm: xmm3 &= xmm13
3526# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
3527# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
3528pand %xmm15,%xmm3
3529
3530# qhasm: xmm0 ^= xmm3
3531# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
3532# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
3533pxor %xmm3,%xmm0
3534
3535# qhasm: xmm3 ^= xmm10
3536# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
3537# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
3538pxor %xmm10,%xmm3
3539
3540# qhasm: xmm6 ^= xmm12
3541# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
3542# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
3543pxor %xmm8,%xmm6
3544
3545# qhasm: xmm0 ^= xmm12
3546# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
3547# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
3548pxor %xmm8,%xmm0
3549
3550# qhasm: xmm5 ^= xmm8
3551# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
3552# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
3553pxor %xmm9,%xmm5
3554
3555# qhasm: xmm3 ^= xmm8
3556# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
3557# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
3558pxor %xmm9,%xmm3
3559
3560# qhasm: xmm12 = xmm7
3561# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
3562# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
3563movdqa %xmm7,%xmm8
3564
3565# qhasm: xmm8 = xmm1
3566# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
3567# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
3568movdqa %xmm1,%xmm9
3569
3570# qhasm: xmm12 ^= xmm4
3571# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
3572# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
3573pxor %xmm4,%xmm8
3574
3575# qhasm: xmm8 ^= xmm2
3576# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
3577# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
3578pxor %xmm2,%xmm9
3579
3580# qhasm: xmm11 = xmm15
3581# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3582# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3583movdqa %xmm13,%xmm10
3584
3585# qhasm: xmm11 ^= xmm14
3586# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3587# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3588pxor %xmm11,%xmm10
3589
3590# qhasm: xmm11 &= xmm12
3591# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3592# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3593pand %xmm8,%xmm10
3594
3595# qhasm: xmm12 ^= xmm8
3596# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3597# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3598pxor %xmm9,%xmm8
3599
3600# qhasm: xmm12 &= xmm14
3601# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3602# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3603pand %xmm11,%xmm8
3604
3605# qhasm: xmm8 &= xmm15
3606# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3607# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3608pand %xmm13,%xmm9
3609
3610# qhasm: xmm8 ^= xmm12
3611# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3612# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3613pxor %xmm8,%xmm9
3614
3615# qhasm: xmm12 ^= xmm11
3616# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3617# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3618pxor %xmm10,%xmm8
3619
3620# qhasm: xmm10 = xmm13
3621# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3622# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3623movdqa %xmm15,%xmm10
3624
3625# qhasm: xmm10 ^= xmm9
3626# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3627# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3628pxor %xmm12,%xmm10
3629
3630# qhasm: xmm10 &= xmm4
3631# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
3632# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
3633pand %xmm4,%xmm10
3634
3635# qhasm: xmm4 ^= xmm2
3636# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
3637# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
3638pxor %xmm2,%xmm4
3639
3640# qhasm: xmm4 &= xmm9
3641# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
3642# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
3643pand %xmm12,%xmm4
3644
3645# qhasm: xmm2 &= xmm13
3646# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
3647# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
3648pand %xmm15,%xmm2
3649
3650# qhasm: xmm4 ^= xmm2
3651# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
3652# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
3653pxor %xmm2,%xmm4
3654
3655# qhasm: xmm2 ^= xmm10
3656# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
3657# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
3658pxor %xmm10,%xmm2
3659
3660# qhasm: xmm15 ^= xmm13
3661# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3662# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3663pxor %xmm15,%xmm13
3664
3665# qhasm: xmm14 ^= xmm9
3666# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3667# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3668pxor %xmm12,%xmm11
3669
3670# qhasm: xmm11 = xmm15
3671# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3672# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3673movdqa %xmm13,%xmm10
3674
3675# qhasm: xmm11 ^= xmm14
3676# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3677# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3678pxor %xmm11,%xmm10
3679
3680# qhasm: xmm11 &= xmm7
3681# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
3682# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
3683pand %xmm7,%xmm10
3684
3685# qhasm: xmm7 ^= xmm1
3686# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
3687# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
3688pxor %xmm1,%xmm7
3689
3690# qhasm: xmm7 &= xmm14
3691# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
3692# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
3693pand %xmm11,%xmm7
3694
3695# qhasm: xmm1 &= xmm15
3696# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
3697# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
3698pand %xmm13,%xmm1
3699
3700# qhasm: xmm7 ^= xmm1
3701# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
3702# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
3703pxor %xmm1,%xmm7
3704
3705# qhasm: xmm1 ^= xmm11
3706# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
3707# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
3708pxor %xmm10,%xmm1
3709
3710# qhasm: xmm7 ^= xmm12
3711# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
3712# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
3713pxor %xmm8,%xmm7
3714
3715# qhasm: xmm4 ^= xmm12
3716# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
3717# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
3718pxor %xmm8,%xmm4
3719
3720# qhasm: xmm1 ^= xmm8
3721# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
3722# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
3723pxor %xmm9,%xmm1
3724
3725# qhasm: xmm2 ^= xmm8
3726# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
3727# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
3728pxor %xmm9,%xmm2
3729
3730# qhasm: xmm7 ^= xmm0
3731# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
3732# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
3733pxor %xmm0,%xmm7
3734
3735# qhasm: xmm1 ^= xmm6
3736# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
3737# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
3738pxor %xmm6,%xmm1
3739
3740# qhasm: xmm4 ^= xmm7
3741# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
3742# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
3743pxor %xmm7,%xmm4
3744
3745# qhasm: xmm6 ^= xmm0
3746# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
3747# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
3748pxor %xmm0,%xmm6
3749
3750# qhasm: xmm0 ^= xmm1
3751# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
3752# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
3753pxor %xmm1,%xmm0
3754
3755# qhasm: xmm1 ^= xmm5
3756# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
3757# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
3758pxor %xmm5,%xmm1
3759
3760# qhasm: xmm5 ^= xmm2
3761# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
3762# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
3763pxor %xmm2,%xmm5
3764
3765# qhasm: xmm4 ^= xmm5
3766# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3767# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3768pxor %xmm5,%xmm4
3769
3770# qhasm: xmm2 ^= xmm3
3771# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
3772# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
3773pxor %xmm3,%xmm2
3774
3775# qhasm: xmm3 ^= xmm5
3776# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
3777# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
3778pxor %xmm5,%xmm3
3779
3780# qhasm: xmm6 ^= xmm3
3781# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
3782# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
3783pxor %xmm3,%xmm6
3784
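# MixColumns: each bitsliced state word is combined with copies of itself rotated by one dword (pshufd $0x93) and by two dwords (pshufd $0x4E) in the block below.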
3785# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
3786# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
3787# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
3788pshufd $0x93,%xmm0,%xmm8
3789
3790# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
3791# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
3792# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
3793pshufd $0x93,%xmm1,%xmm9
3794
3795# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
3796# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
3797# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
3798pshufd $0x93,%xmm4,%xmm10
3799
3800# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
3801# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
3802# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
3803pshufd $0x93,%xmm6,%xmm11
3804
3805# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
3806# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
3807# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
3808pshufd $0x93,%xmm3,%xmm12
3809
3810# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
3811# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
3812# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
3813pshufd $0x93,%xmm7,%xmm13
3814
3815# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
3816# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
3817# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
3818pshufd $0x93,%xmm2,%xmm14
3819
3820# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
3821# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
3822# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
3823pshufd $0x93,%xmm5,%xmm15
3824
3825# qhasm: xmm0 ^= xmm8
3826# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
3827# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
3828pxor %xmm8,%xmm0
3829
3830# qhasm: xmm1 ^= xmm9
3831# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
3832# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
3833pxor %xmm9,%xmm1
3834
3835# qhasm: xmm4 ^= xmm10
3836# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
3837# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
3838pxor %xmm10,%xmm4
3839
3840# qhasm: xmm6 ^= xmm11
3841# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
3842# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
3843pxor %xmm11,%xmm6
3844
3845# qhasm: xmm3 ^= xmm12
3846# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
3847# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
3848pxor %xmm12,%xmm3
3849
3850# qhasm: xmm7 ^= xmm13
3851# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
3852# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
3853pxor %xmm13,%xmm7
3854
3855# qhasm: xmm2 ^= xmm14
3856# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
3857# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
3858pxor %xmm14,%xmm2
3859
3860# qhasm: xmm5 ^= xmm15
3861# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
3862# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
3863pxor %xmm15,%xmm5
3864
3865# qhasm: xmm8 ^= xmm5
3866# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
3867# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
3868pxor %xmm5,%xmm8
3869
3870# qhasm: xmm9 ^= xmm0
3871# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
3872# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
3873pxor %xmm0,%xmm9
3874
3875# qhasm: xmm10 ^= xmm1
3876# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
3877# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
3878pxor %xmm1,%xmm10
3879
3880# qhasm: xmm9 ^= xmm5
3881# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
3882# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
3883pxor %xmm5,%xmm9
3884
3885# qhasm: xmm11 ^= xmm4
3886# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
3887# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
3888pxor %xmm4,%xmm11
3889
3890# qhasm: xmm12 ^= xmm6
3891# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
3892# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
3893pxor %xmm6,%xmm12
3894
3895# qhasm: xmm13 ^= xmm3
3896# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
3897# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
3898pxor %xmm3,%xmm13
3899
3900# qhasm: xmm11 ^= xmm5
3901# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
3902# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
3903pxor %xmm5,%xmm11
3904
3905# qhasm: xmm14 ^= xmm7
3906# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
3907# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
3908pxor %xmm7,%xmm14
3909
3910# qhasm: xmm15 ^= xmm2
3911# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
3912# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
3913pxor %xmm2,%xmm15
3914
3915# qhasm: xmm12 ^= xmm5
3916# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
3917# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
3918pxor %xmm5,%xmm12
3919
3920# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
3921# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
3922# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
3923pshufd $0x4E,%xmm0,%xmm0
3924
3925# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
3926# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
3927# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
3928pshufd $0x4E,%xmm1,%xmm1
3929
3930# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
3931# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
3932# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
3933pshufd $0x4E,%xmm4,%xmm4
3934
3935# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
3936# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
3937# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
3938pshufd $0x4E,%xmm6,%xmm6
3939
3940# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
3941# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
3942# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
3943pshufd $0x4E,%xmm3,%xmm3
3944
3945# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
3946# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
3947# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
3948pshufd $0x4E,%xmm7,%xmm7
3949
3950# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
3951# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
3952# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
3953pshufd $0x4E,%xmm2,%xmm2
3954
3955# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
3956# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
3957# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
3958pshufd $0x4E,%xmm5,%xmm5
3959
3960# qhasm: xmm8 ^= xmm0
3961# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
3962# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
3963pxor %xmm0,%xmm8
3964
3965# qhasm: xmm9 ^= xmm1
3966# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
3967# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
3968pxor %xmm1,%xmm9
3969
3970# qhasm: xmm10 ^= xmm4
3971# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
3972# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
3973pxor %xmm4,%xmm10
3974
3975# qhasm: xmm11 ^= xmm6
3976# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
3977# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
3978pxor %xmm6,%xmm11
3979
3980# qhasm: xmm12 ^= xmm3
3981# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
3982# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
3983pxor %xmm3,%xmm12
3984
3985# qhasm: xmm13 ^= xmm7
3986# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
3987# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
3988pxor %xmm7,%xmm13
3989
3990# qhasm: xmm14 ^= xmm2
3991# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
3992# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
3993pxor %xmm2,%xmm14
3994
3995# qhasm: xmm15 ^= xmm5
3996# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
3997# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
3998pxor %xmm5,%xmm15
3999
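# AddRoundKey + ShiftRows: XOR the next 128 bytes of expanded-key material (c + 384 .. c + 496) into the eight bitsliced state registers, then permute the bytes of each register with the ShiftRows constant SR.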
4000# qhasm: xmm8 ^= *(int128 *)(c + 384)
4001# asm 1: pxor 384(<c=int64#4),<xmm8=int6464#9
4002# asm 2: pxor 384(<c=%rcx),<xmm8=%xmm8
4003pxor 384(%rcx),%xmm8
4004
4005# qhasm: shuffle bytes of xmm8 by SR
4006# asm 1: pshufb SR,<xmm8=int6464#9
4007# asm 2: pshufb SR,<xmm8=%xmm8
4008pshufb SR,%xmm8
4009
4010# qhasm: xmm9 ^= *(int128 *)(c + 400)
4011# asm 1: pxor 400(<c=int64#4),<xmm9=int6464#10
4012# asm 2: pxor 400(<c=%rcx),<xmm9=%xmm9
4013pxor 400(%rcx),%xmm9
4014
4015# qhasm: shuffle bytes of xmm9 by SR
4016# asm 1: pshufb SR,<xmm9=int6464#10
4017# asm 2: pshufb SR,<xmm9=%xmm9
4018pshufb SR,%xmm9
4019
4020# qhasm: xmm10 ^= *(int128 *)(c + 416)
4021# asm 1: pxor 416(<c=int64#4),<xmm10=int6464#11
4022# asm 2: pxor 416(<c=%rcx),<xmm10=%xmm10
4023pxor 416(%rcx),%xmm10
4024
4025# qhasm: shuffle bytes of xmm10 by SR
4026# asm 1: pshufb SR,<xmm10=int6464#11
4027# asm 2: pshufb SR,<xmm10=%xmm10
4028pshufb SR,%xmm10
4029
4030# qhasm: xmm11 ^= *(int128 *)(c + 432)
4031# asm 1: pxor 432(<c=int64#4),<xmm11=int6464#12
4032# asm 2: pxor 432(<c=%rcx),<xmm11=%xmm11
4033pxor 432(%rcx),%xmm11
4034
4035# qhasm: shuffle bytes of xmm11 by SR
4036# asm 1: pshufb SR,<xmm11=int6464#12
4037# asm 2: pshufb SR,<xmm11=%xmm11
4038pshufb SR,%xmm11
4039
4040# qhasm: xmm12 ^= *(int128 *)(c + 448)
4041# asm 1: pxor 448(<c=int64#4),<xmm12=int6464#13
4042# asm 2: pxor 448(<c=%rcx),<xmm12=%xmm12
4043pxor 448(%rcx),%xmm12
4044
4045# qhasm: shuffle bytes of xmm12 by SR
4046# asm 1: pshufb SR,<xmm12=int6464#13
4047# asm 2: pshufb SR,<xmm12=%xmm12
4048pshufb SR,%xmm12
4049
4050# qhasm: xmm13 ^= *(int128 *)(c + 464)
4051# asm 1: pxor 464(<c=int64#4),<xmm13=int6464#14
4052# asm 2: pxor 464(<c=%rcx),<xmm13=%xmm13
4053pxor 464(%rcx),%xmm13
4054
4055# qhasm: shuffle bytes of xmm13 by SR
4056# asm 1: pshufb SR,<xmm13=int6464#14
4057# asm 2: pshufb SR,<xmm13=%xmm13
4058pshufb SR,%xmm13
4059
4060# qhasm: xmm14 ^= *(int128 *)(c + 480)
4061# asm 1: pxor 480(<c=int64#4),<xmm14=int6464#15
4062# asm 2: pxor 480(<c=%rcx),<xmm14=%xmm14
4063pxor 480(%rcx),%xmm14
4064
4065# qhasm: shuffle bytes of xmm14 by SR
4066# asm 1: pshufb SR,<xmm14=int6464#15
4067# asm 2: pshufb SR,<xmm14=%xmm14
4068pshufb SR,%xmm14
4069
4070# qhasm: xmm15 ^= *(int128 *)(c + 496)
4071# asm 1: pxor 496(<c=int64#4),<xmm15=int6464#16
4072# asm 2: pxor 496(<c=%rcx),<xmm15=%xmm15
4073pxor 496(%rcx),%xmm15
4074
4075# qhasm: shuffle bytes of xmm15 by SR
4076# asm 1: pshufb SR,<xmm15=int6464#16
4077# asm 2: pshufb SR,<xmm15=%xmm15
4078pshufb SR,%xmm15
4079
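# SubBytes: the pxor/pand/por network that follows evaluates the bitsliced AES S-box (GF(2^8) inversion expressed as boolean operations) on all eight state registers in parallel.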
4080# qhasm: xmm13 ^= xmm14
4081# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
4082# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
4083pxor %xmm14,%xmm13
4084
4085# qhasm: xmm10 ^= xmm9
4086# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
4087# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
4088pxor %xmm9,%xmm10
4089
4090# qhasm: xmm13 ^= xmm8
4091# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
4092# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
4093pxor %xmm8,%xmm13
4094
4095# qhasm: xmm14 ^= xmm10
4096# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
4097# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
4098pxor %xmm10,%xmm14
4099
4100# qhasm: xmm11 ^= xmm8
4101# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
4102# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
4103pxor %xmm8,%xmm11
4104
4105# qhasm: xmm14 ^= xmm11
4106# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
4107# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
4108pxor %xmm11,%xmm14
4109
4110# qhasm: xmm11 ^= xmm15
4111# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
4112# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
4113pxor %xmm15,%xmm11
4114
4115# qhasm: xmm11 ^= xmm12
4116# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
4117# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
4118pxor %xmm12,%xmm11
4119
4120# qhasm: xmm15 ^= xmm13
4121# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
4122# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
4123pxor %xmm13,%xmm15
4124
4125# qhasm: xmm11 ^= xmm9
4126# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
4127# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
4128pxor %xmm9,%xmm11
4129
4130# qhasm: xmm12 ^= xmm13
4131# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
4132# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
4133pxor %xmm13,%xmm12
4134
4135# qhasm: xmm10 ^= xmm15
4136# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
4137# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
4138pxor %xmm15,%xmm10
4139
4140# qhasm: xmm9 ^= xmm13
4141# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
4142# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
4143pxor %xmm13,%xmm9
4144
4145# qhasm: xmm3 = xmm15
4146# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
4147# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
4148movdqa %xmm15,%xmm0
4149
4150# qhasm: xmm2 = xmm9
4151# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
4152# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
4153movdqa %xmm9,%xmm1
4154
4155# qhasm: xmm1 = xmm13
4156# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
4157# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
4158movdqa %xmm13,%xmm2
4159
4160# qhasm: xmm5 = xmm10
4161# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
4162# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
4163movdqa %xmm10,%xmm3
4164
4165# qhasm: xmm4 = xmm14
4166# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
4167# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
4168movdqa %xmm14,%xmm4
4169
4170# qhasm: xmm3 ^= xmm12
4171# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
4172# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
4173pxor %xmm12,%xmm0
4174
4175# qhasm: xmm2 ^= xmm10
4176# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
4177# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
4178pxor %xmm10,%xmm1
4179
4180# qhasm: xmm1 ^= xmm11
4181# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
4182# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
4183pxor %xmm11,%xmm2
4184
4185# qhasm: xmm5 ^= xmm12
4186# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
4187# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
4188pxor %xmm12,%xmm3
4189
4190# qhasm: xmm4 ^= xmm8
4191# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
4192# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
4193pxor %xmm8,%xmm4
4194
4195# qhasm: xmm6 = xmm3
4196# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
4197# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
4198movdqa %xmm0,%xmm5
4199
4200# qhasm: xmm0 = xmm2
4201# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
4202# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
4203movdqa %xmm1,%xmm6
4204
4205# qhasm: xmm7 = xmm3
4206# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
4207# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
4208movdqa %xmm0,%xmm7
4209
4210# qhasm: xmm2 |= xmm1
4211# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
4212# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
4213por %xmm2,%xmm1
4214
4215# qhasm: xmm3 |= xmm4
4216# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
4217# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
4218por %xmm4,%xmm0
4219
4220# qhasm: xmm7 ^= xmm0
4221# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
4222# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
4223pxor %xmm6,%xmm7
4224
4225# qhasm: xmm6 &= xmm4
4226# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
4227# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
4228pand %xmm4,%xmm5
4229
4230# qhasm: xmm0 &= xmm1
4231# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
4232# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
4233pand %xmm2,%xmm6
4234
4235# qhasm: xmm4 ^= xmm1
4236# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
4237# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
4238pxor %xmm2,%xmm4
4239
4240# qhasm: xmm7 &= xmm4
4241# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
4242# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
4243pand %xmm4,%xmm7
4244
4245# qhasm: xmm4 = xmm11
4246# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
4247# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
4248movdqa %xmm11,%xmm2
4249
4250# qhasm: xmm4 ^= xmm8
4251# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
4252# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
4253pxor %xmm8,%xmm2
4254
4255# qhasm: xmm5 &= xmm4
4256# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
4257# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
4258pand %xmm2,%xmm3
4259
4260# qhasm: xmm3 ^= xmm5
4261# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
4262# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
4263pxor %xmm3,%xmm0
4264
4265# qhasm: xmm2 ^= xmm5
4266# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
4267# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
4268pxor %xmm3,%xmm1
4269
4270# qhasm: xmm5 = xmm15
4271# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
4272# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
4273movdqa %xmm15,%xmm2
4274
4275# qhasm: xmm5 ^= xmm9
4276# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
4277# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
4278pxor %xmm9,%xmm2
4279
4280# qhasm: xmm4 = xmm13
4281# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
4282# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
4283movdqa %xmm13,%xmm3
4284
4285# qhasm: xmm1 = xmm5
4286# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
4287# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
4288movdqa %xmm2,%xmm4
4289
4290# qhasm: xmm4 ^= xmm14
4291# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
4292# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
4293pxor %xmm14,%xmm3
4294
4295# qhasm: xmm1 |= xmm4
4296# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
4297# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
4298por %xmm3,%xmm4
4299
4300# qhasm: xmm5 &= xmm4
4301# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
4302# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
4303pand %xmm3,%xmm2
4304
4305# qhasm: xmm0 ^= xmm5
4306# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
4307# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
4308pxor %xmm2,%xmm6
4309
4310# qhasm: xmm3 ^= xmm7
4311# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
4312# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
4313pxor %xmm7,%xmm0
4314
4315# qhasm: xmm2 ^= xmm6
4316# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
4317# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
4318pxor %xmm5,%xmm1
4319
4320# qhasm: xmm1 ^= xmm7
4321# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
4322# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
4323pxor %xmm7,%xmm4
4324
4325# qhasm: xmm0 ^= xmm6
4326# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
4327# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
4328pxor %xmm5,%xmm6
4329
4330# qhasm: xmm1 ^= xmm6
4331# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
4332# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
4333pxor %xmm5,%xmm4
4334
4335# qhasm: xmm4 = xmm10
4336# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
4337# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
4338movdqa %xmm10,%xmm2
4339
4340# qhasm: xmm5 = xmm12
4341# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
4342# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
4343movdqa %xmm12,%xmm3
4344
4345# qhasm: xmm6 = xmm9
4346# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
4347# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
4348movdqa %xmm9,%xmm5
4349
4350# qhasm: xmm7 = xmm15
4351# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
4352# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
4353movdqa %xmm15,%xmm7
4354
4355# qhasm: xmm4 &= xmm11
4356# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
4357# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
4358pand %xmm11,%xmm2
4359
4360# qhasm: xmm5 &= xmm8
4361# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
4362# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
4363pand %xmm8,%xmm3
4364
4365# qhasm: xmm6 &= xmm13
4366# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
4367# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
4368pand %xmm13,%xmm5
4369
4370# qhasm: xmm7 |= xmm14
4371# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
4372# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
4373por %xmm14,%xmm7
4374
4375# qhasm: xmm3 ^= xmm4
4376# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
4377# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
4378pxor %xmm2,%xmm0
4379
4380# qhasm: xmm2 ^= xmm5
4381# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
4382# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
4383pxor %xmm3,%xmm1
4384
4385# qhasm: xmm1 ^= xmm6
4386# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
4387# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
4388pxor %xmm5,%xmm4
4389
4390# qhasm: xmm0 ^= xmm7
4391# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
4392# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
4393pxor %xmm7,%xmm6
4394
4395# qhasm: xmm4 = xmm3
4396# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
4397# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
4398movdqa %xmm0,%xmm2
4399
4400# qhasm: xmm4 ^= xmm2
4401# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
4402# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
4403pxor %xmm1,%xmm2
4404
4405# qhasm: xmm3 &= xmm1
4406# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
4407# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
4408pand %xmm4,%xmm0
4409
4410# qhasm: xmm6 = xmm0
4411# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
4412# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
4413movdqa %xmm6,%xmm3
4414
4415# qhasm: xmm6 ^= xmm3
4416# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
4417# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
4418pxor %xmm0,%xmm3
4419
4420# qhasm: xmm7 = xmm4
4421# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
4422# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
4423movdqa %xmm2,%xmm5
4424
4425# qhasm: xmm7 &= xmm6
4426# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
4427# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
4428pand %xmm3,%xmm5
4429
4430# qhasm: xmm7 ^= xmm2
4431# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
4432# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
4433pxor %xmm1,%xmm5
4434
4435# qhasm: xmm5 = xmm1
4436# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
4437# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
4438movdqa %xmm4,%xmm7
4439
4440# qhasm: xmm5 ^= xmm0
4441# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
4442# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
4443pxor %xmm6,%xmm7
4444
4445# qhasm: xmm3 ^= xmm2
4446# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
4447# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
4448pxor %xmm1,%xmm0
4449
4450# qhasm: xmm5 &= xmm3
4451# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
4452# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
4453pand %xmm0,%xmm7
4454
4455# qhasm: xmm5 ^= xmm0
4456# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
4457# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
4458pxor %xmm6,%xmm7
4459
4460# qhasm: xmm1 ^= xmm5
4461# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
4462# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
4463pxor %xmm7,%xmm4
4464
4465# qhasm: xmm2 = xmm6
4466# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
4467# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
4468movdqa %xmm3,%xmm0
4469
4470# qhasm: xmm2 ^= xmm5
4471# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
4472# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
4473pxor %xmm7,%xmm0
4474
4475# qhasm: xmm2 &= xmm0
4476# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
4477# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
4478pand %xmm6,%xmm0
4479
4480# qhasm: xmm1 ^= xmm2
4481# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
4482# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
4483pxor %xmm0,%xmm4
4484
4485# qhasm: xmm6 ^= xmm2
4486# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
4487# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
4488pxor %xmm0,%xmm3
4489
4490# qhasm: xmm6 &= xmm7
4491# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
4492# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
4493pand %xmm5,%xmm3
4494
4495# qhasm: xmm6 ^= xmm4
4496# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
4497# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
4498pxor %xmm2,%xmm3
4499
4500# qhasm: xmm4 = xmm14
4501# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
4502# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
4503movdqa %xmm14,%xmm0
4504
4505# qhasm: xmm0 = xmm13
4506# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
4507# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
4508movdqa %xmm13,%xmm1
4509
4510# qhasm: xmm2 = xmm7
4511# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
4512# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
4513movdqa %xmm5,%xmm2
4514
4515# qhasm: xmm2 ^= xmm6
4516# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
4517# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
4518pxor %xmm3,%xmm2
4519
4520# qhasm: xmm2 &= xmm14
4521# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
4522# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
4523pand %xmm14,%xmm2
4524
4525# qhasm: xmm14 ^= xmm13
4526# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
4527# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
4528pxor %xmm13,%xmm14
4529
4530# qhasm: xmm14 &= xmm6
4531# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
4532# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
4533pand %xmm3,%xmm14
4534
4535# qhasm: xmm13 &= xmm7
4536# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
4537# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
4538pand %xmm5,%xmm13
4539
4540# qhasm: xmm14 ^= xmm13
4541# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
4542# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
4543pxor %xmm13,%xmm14
4544
4545# qhasm: xmm13 ^= xmm2
4546# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
4547# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
4548pxor %xmm2,%xmm13
4549
4550# qhasm: xmm4 ^= xmm8
4551# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
4552# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
4553pxor %xmm8,%xmm0
4554
4555# qhasm: xmm0 ^= xmm11
4556# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
4557# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
4558pxor %xmm11,%xmm1
4559
4560# qhasm: xmm7 ^= xmm5
4561# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
4562# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
4563pxor %xmm7,%xmm5
4564
4565# qhasm: xmm6 ^= xmm1
4566# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
4567# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
4568pxor %xmm4,%xmm3
4569
4570# qhasm: xmm3 = xmm7
4571# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4572# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4573movdqa %xmm5,%xmm2
4574
4575# qhasm: xmm3 ^= xmm6
4576# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4577# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4578pxor %xmm3,%xmm2
4579
4580# qhasm: xmm3 &= xmm4
4581# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
4582# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
4583pand %xmm0,%xmm2
4584
4585# qhasm: xmm4 ^= xmm0
4586# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
4587# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
4588pxor %xmm1,%xmm0
4589
4590# qhasm: xmm4 &= xmm6
4591# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
4592# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
4593pand %xmm3,%xmm0
4594
4595# qhasm: xmm0 &= xmm7
4596# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
4597# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
4598pand %xmm5,%xmm1
4599
4600# qhasm: xmm0 ^= xmm4
4601# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
4602# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
4603pxor %xmm0,%xmm1
4604
4605# qhasm: xmm4 ^= xmm3
4606# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
4607# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
4608pxor %xmm2,%xmm0
4609
4610# qhasm: xmm2 = xmm5
4611# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
4612# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
4613movdqa %xmm7,%xmm2
4614
4615# qhasm: xmm2 ^= xmm1
4616# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
4617# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
4618pxor %xmm4,%xmm2
4619
4620# qhasm: xmm2 &= xmm8
4621# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
4622# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
4623pand %xmm8,%xmm2
4624
4625# qhasm: xmm8 ^= xmm11
4626# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
4627# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
4628pxor %xmm11,%xmm8
4629
4630# qhasm: xmm8 &= xmm1
4631# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
4632# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
4633pand %xmm4,%xmm8
4634
4635# qhasm: xmm11 &= xmm5
4636# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
4637# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
4638pand %xmm7,%xmm11
4639
4640# qhasm: xmm8 ^= xmm11
4641# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
4642# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
4643pxor %xmm11,%xmm8
4644
4645# qhasm: xmm11 ^= xmm2
4646# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
4647# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
4648pxor %xmm2,%xmm11
4649
4650# qhasm: xmm14 ^= xmm4
4651# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
4652# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
4653pxor %xmm0,%xmm14
4654
4655# qhasm: xmm8 ^= xmm4
4656# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
4657# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
4658pxor %xmm0,%xmm8
4659
4660# qhasm: xmm13 ^= xmm0
4661# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
4662# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
4663pxor %xmm1,%xmm13
4664
4665# qhasm: xmm11 ^= xmm0
4666# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
4667# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
4668pxor %xmm1,%xmm11
4669
4670# qhasm: xmm4 = xmm15
4671# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
4672# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
4673movdqa %xmm15,%xmm0
4674
4675# qhasm: xmm0 = xmm9
4676# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
4677# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
4678movdqa %xmm9,%xmm1
4679
4680# qhasm: xmm4 ^= xmm12
4681# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
4682# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
4683pxor %xmm12,%xmm0
4684
4685# qhasm: xmm0 ^= xmm10
4686# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
4687# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
4688pxor %xmm10,%xmm1
4689
4690# qhasm: xmm3 = xmm7
4691# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4692# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4693movdqa %xmm5,%xmm2
4694
4695# qhasm: xmm3 ^= xmm6
4696# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4697# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4698pxor %xmm3,%xmm2
4699
4700# qhasm: xmm3 &= xmm4
4701# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
4702# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
4703pand %xmm0,%xmm2
4704
4705# qhasm: xmm4 ^= xmm0
4706# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
4707# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
4708pxor %xmm1,%xmm0
4709
4710# qhasm: xmm4 &= xmm6
4711# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
4712# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
4713pand %xmm3,%xmm0
4714
4715# qhasm: xmm0 &= xmm7
4716# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
4717# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
4718pand %xmm5,%xmm1
4719
4720# qhasm: xmm0 ^= xmm4
4721# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
4722# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
4723pxor %xmm0,%xmm1
4724
4725# qhasm: xmm4 ^= xmm3
4726# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
4727# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
4728pxor %xmm2,%xmm0
4729
4730# qhasm: xmm2 = xmm5
4731# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
4732# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
4733movdqa %xmm7,%xmm2
4734
4735# qhasm: xmm2 ^= xmm1
4736# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
4737# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
4738pxor %xmm4,%xmm2
4739
4740# qhasm: xmm2 &= xmm12
4741# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
4742# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
4743pand %xmm12,%xmm2
4744
4745# qhasm: xmm12 ^= xmm10
4746# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
4747# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
4748pxor %xmm10,%xmm12
4749
4750# qhasm: xmm12 &= xmm1
4751# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
4752# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
4753pand %xmm4,%xmm12
4754
4755# qhasm: xmm10 &= xmm5
4756# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
4757# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
4758pand %xmm7,%xmm10
4759
4760# qhasm: xmm12 ^= xmm10
4761# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
4762# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
4763pxor %xmm10,%xmm12
4764
4765# qhasm: xmm10 ^= xmm2
4766# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
4767# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
4768pxor %xmm2,%xmm10
4769
4770# qhasm: xmm7 ^= xmm5
4771# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
4772# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
4773pxor %xmm7,%xmm5
4774
4775# qhasm: xmm6 ^= xmm1
4776# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
4777# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
4778pxor %xmm4,%xmm3
4779
4780# qhasm: xmm3 = xmm7
4781# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4782# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4783movdqa %xmm5,%xmm2
4784
4785# qhasm: xmm3 ^= xmm6
4786# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4787# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4788pxor %xmm3,%xmm2
4789
4790# qhasm: xmm3 &= xmm15
4791# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
4792# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
4793pand %xmm15,%xmm2
4794
4795# qhasm: xmm15 ^= xmm9
4796# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
4797# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
4798pxor %xmm9,%xmm15
4799
4800# qhasm: xmm15 &= xmm6
4801# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
4802# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
4803pand %xmm3,%xmm15
4804
4805# qhasm: xmm9 &= xmm7
4806# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
4807# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
4808pand %xmm5,%xmm9
4809
4810# qhasm: xmm15 ^= xmm9
4811# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
4812# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
4813pxor %xmm9,%xmm15
4814
4815# qhasm: xmm9 ^= xmm3
4816# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
4817# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
4818pxor %xmm2,%xmm9
4819
4820# qhasm: xmm15 ^= xmm4
4821# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
4822# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
4823pxor %xmm0,%xmm15
4824
4825# qhasm: xmm12 ^= xmm4
4826# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
4827# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
4828pxor %xmm0,%xmm12
4829
4830# qhasm: xmm9 ^= xmm0
4831# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
4832# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
4833pxor %xmm1,%xmm9
4834
4835# qhasm: xmm10 ^= xmm0
4836# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
4837# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
4838pxor %xmm1,%xmm10
4839
4840# qhasm: xmm15 ^= xmm8
4841# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
4842# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
4843pxor %xmm8,%xmm15
4844
4845# qhasm: xmm9 ^= xmm14
4846# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
4847# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
4848pxor %xmm14,%xmm9
4849
4850# qhasm: xmm12 ^= xmm15
4851# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
4852# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
4853pxor %xmm15,%xmm12
4854
4855# qhasm: xmm14 ^= xmm8
4856# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
4857# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
4858pxor %xmm8,%xmm14
4859
4860# qhasm: xmm8 ^= xmm9
4861# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
4862# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
4863pxor %xmm9,%xmm8
4864
4865# qhasm: xmm9 ^= xmm13
4866# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
4867# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
4868pxor %xmm13,%xmm9
4869
4870# qhasm: xmm13 ^= xmm10
4871# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
4872# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
4873pxor %xmm10,%xmm13
4874
4875# qhasm: xmm12 ^= xmm13
4876# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
4877# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
4878pxor %xmm13,%xmm12
4879
4880# qhasm: xmm10 ^= xmm11
4881# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
4882# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
4883pxor %xmm11,%xmm10
4884
4885# qhasm: xmm11 ^= xmm13
4886# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
4887# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
4888pxor %xmm13,%xmm11
4889
4890# qhasm: xmm14 ^= xmm11
4891# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
4892# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
4893pxor %xmm11,%xmm14
4894
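# MixColumns for this round, again via the pshufd $0x93 / $0x4E rotate-and-XOR pattern.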
4895# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
4896# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
4897# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
4898pshufd $0x93,%xmm8,%xmm0
4899
4900# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
4901# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
4902# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
4903pshufd $0x93,%xmm9,%xmm1
4904
4905# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
4906# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
4907# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
4908pshufd $0x93,%xmm12,%xmm2
4909
4910# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
4911# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
4912# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
4913pshufd $0x93,%xmm14,%xmm3
4914
4915# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
4916# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
4917# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
4918pshufd $0x93,%xmm11,%xmm4
4919
4920# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
4921# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
4922# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
4923pshufd $0x93,%xmm15,%xmm5
4924
4925# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
4926# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
4927# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
4928pshufd $0x93,%xmm10,%xmm6
4929
4930# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
4931# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
4932# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
4933pshufd $0x93,%xmm13,%xmm7
4934
4935# qhasm: xmm8 ^= xmm0
4936# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
4937# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
4938pxor %xmm0,%xmm8
4939
4940# qhasm: xmm9 ^= xmm1
4941# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
4942# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
4943pxor %xmm1,%xmm9
4944
4945# qhasm: xmm12 ^= xmm2
4946# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
4947# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
4948pxor %xmm2,%xmm12
4949
4950# qhasm: xmm14 ^= xmm3
4951# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
4952# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
4953pxor %xmm3,%xmm14
4954
4955# qhasm: xmm11 ^= xmm4
4956# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
4957# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
4958pxor %xmm4,%xmm11
4959
4960# qhasm: xmm15 ^= xmm5
4961# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
4962# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
4963pxor %xmm5,%xmm15
4964
4965# qhasm: xmm10 ^= xmm6
4966# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
4967# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
4968pxor %xmm6,%xmm10
4969
4970# qhasm: xmm13 ^= xmm7
4971# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
4972# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
4973pxor %xmm7,%xmm13
4974
4975# qhasm: xmm0 ^= xmm13
4976# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
4977# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
4978pxor %xmm13,%xmm0
4979
4980# qhasm: xmm1 ^= xmm8
4981# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
4982# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
4983pxor %xmm8,%xmm1
4984
4985# qhasm: xmm2 ^= xmm9
4986# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
4987# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
4988pxor %xmm9,%xmm2
4989
4990# qhasm: xmm1 ^= xmm13
4991# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
4992# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
4993pxor %xmm13,%xmm1
4994
4995# qhasm: xmm3 ^= xmm12
4996# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
4997# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
4998pxor %xmm12,%xmm3
4999
5000# qhasm: xmm4 ^= xmm14
5001# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
5002# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
5003pxor %xmm14,%xmm4
5004
5005# qhasm: xmm5 ^= xmm11
5006# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
5007# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
5008pxor %xmm11,%xmm5
5009
5010# qhasm: xmm3 ^= xmm13
5011# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
5012# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
5013pxor %xmm13,%xmm3
5014
5015# qhasm: xmm6 ^= xmm15
5016# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
5017# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
5018pxor %xmm15,%xmm6
5019
5020# qhasm: xmm7 ^= xmm10
5021# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
5022# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
5023pxor %xmm10,%xmm7
5024
5025# qhasm: xmm4 ^= xmm13
5026# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
5027# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
5028pxor %xmm13,%xmm4
5029
5030# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
5031# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
5032# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
5033pshufd $0x4E,%xmm8,%xmm8
5034
5035# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
5036# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
5037# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
5038pshufd $0x4E,%xmm9,%xmm9
5039
5040# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
5041# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
5042# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
5043pshufd $0x4E,%xmm12,%xmm12
5044
5045# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
5046# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
5047# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
5048pshufd $0x4E,%xmm14,%xmm14
5049
5050# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
5051# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
5052# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
5053pshufd $0x4E,%xmm11,%xmm11
5054
5055# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
5056# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
5057# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
5058pshufd $0x4E,%xmm15,%xmm15
5059
5060# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
5061# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
5062# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
5063pshufd $0x4E,%xmm10,%xmm10
5064
5065# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
5066# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
5067# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
5068pshufd $0x4E,%xmm13,%xmm13
5069
5070# qhasm: xmm0 ^= xmm8
5071# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5072# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5073pxor %xmm8,%xmm0
5074
5075# qhasm: xmm1 ^= xmm9
5076# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5077# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5078pxor %xmm9,%xmm1
5079
5080# qhasm: xmm2 ^= xmm12
5081# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
5082# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
5083pxor %xmm12,%xmm2
5084
5085# qhasm: xmm3 ^= xmm14
5086# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
5087# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
5088pxor %xmm14,%xmm3
5089
5090# qhasm: xmm4 ^= xmm11
5091# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
5092# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
5093pxor %xmm11,%xmm4
5094
5095# qhasm: xmm5 ^= xmm15
5096# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
5097# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
5098pxor %xmm15,%xmm5
5099
5100# qhasm: xmm6 ^= xmm10
5101# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
5102# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
5103pxor %xmm10,%xmm6
5104
5105# qhasm: xmm7 ^= xmm13
5106# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
5107# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
5108pxor %xmm13,%xmm7
5109
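# AddRoundKey + ShiftRows for the next round, using the expanded-key block at c + 512 .. c + 624 and the SR byte shuffle.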
5110# qhasm: xmm0 ^= *(int128 *)(c + 512)
5111# asm 1: pxor 512(<c=int64#4),<xmm0=int6464#1
5112# asm 2: pxor 512(<c=%rcx),<xmm0=%xmm0
5113pxor 512(%rcx),%xmm0
5114
5115# qhasm: shuffle bytes of xmm0 by SR
5116# asm 1: pshufb SR,<xmm0=int6464#1
5117# asm 2: pshufb SR,<xmm0=%xmm0
5118pshufb SR,%xmm0
5119
5120# qhasm: xmm1 ^= *(int128 *)(c + 528)
5121# asm 1: pxor 528(<c=int64#4),<xmm1=int6464#2
5122# asm 2: pxor 528(<c=%rcx),<xmm1=%xmm1
5123pxor 528(%rcx),%xmm1
5124
5125# qhasm: shuffle bytes of xmm1 by SR
5126# asm 1: pshufb SR,<xmm1=int6464#2
5127# asm 2: pshufb SR,<xmm1=%xmm1
5128pshufb SR,%xmm1
5129
5130# qhasm: xmm2 ^= *(int128 *)(c + 544)
5131# asm 1: pxor 544(<c=int64#4),<xmm2=int6464#3
5132# asm 2: pxor 544(<c=%rcx),<xmm2=%xmm2
5133pxor 544(%rcx),%xmm2
5134
5135# qhasm: shuffle bytes of xmm2 by SR
5136# asm 1: pshufb SR,<xmm2=int6464#3
5137# asm 2: pshufb SR,<xmm2=%xmm2
5138pshufb SR,%xmm2
5139
5140# qhasm: xmm3 ^= *(int128 *)(c + 560)
5141# asm 1: pxor 560(<c=int64#4),<xmm3=int6464#4
5142# asm 2: pxor 560(<c=%rcx),<xmm3=%xmm3
5143pxor 560(%rcx),%xmm3
5144
5145# qhasm: shuffle bytes of xmm3 by SR
5146# asm 1: pshufb SR,<xmm3=int6464#4
5147# asm 2: pshufb SR,<xmm3=%xmm3
5148pshufb SR,%xmm3
5149
5150# qhasm: xmm4 ^= *(int128 *)(c + 576)
5151# asm 1: pxor 576(<c=int64#4),<xmm4=int6464#5
5152# asm 2: pxor 576(<c=%rcx),<xmm4=%xmm4
5153pxor 576(%rcx),%xmm4
5154
5155# qhasm: shuffle bytes of xmm4 by SR
5156# asm 1: pshufb SR,<xmm4=int6464#5
5157# asm 2: pshufb SR,<xmm4=%xmm4
5158pshufb SR,%xmm4
5159
5160# qhasm: xmm5 ^= *(int128 *)(c + 592)
5161# asm 1: pxor 592(<c=int64#4),<xmm5=int6464#6
5162# asm 2: pxor 592(<c=%rcx),<xmm5=%xmm5
5163pxor 592(%rcx),%xmm5
5164
5165# qhasm: shuffle bytes of xmm5 by SR
5166# asm 1: pshufb SR,<xmm5=int6464#6
5167# asm 2: pshufb SR,<xmm5=%xmm5
5168pshufb SR,%xmm5
5169
5170# qhasm: xmm6 ^= *(int128 *)(c + 608)
5171# asm 1: pxor 608(<c=int64#4),<xmm6=int6464#7
5172# asm 2: pxor 608(<c=%rcx),<xmm6=%xmm6
5173pxor 608(%rcx),%xmm6
5174
5175# qhasm: shuffle bytes of xmm6 by SR
5176# asm 1: pshufb SR,<xmm6=int6464#7
5177# asm 2: pshufb SR,<xmm6=%xmm6
5178pshufb SR,%xmm6
5179
5180# qhasm: xmm7 ^= *(int128 *)(c + 624)
5181# asm 1: pxor 624(<c=int64#4),<xmm7=int6464#8
5182# asm 2: pxor 624(<c=%rcx),<xmm7=%xmm7
5183pxor 624(%rcx),%xmm7
5184
5185# qhasm: shuffle bytes of xmm7 by SR
5186# asm 1: pshufb SR,<xmm7=int6464#8
5187# asm 2: pshufb SR,<xmm7=%xmm7
5188pshufb SR,%xmm7
5189
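# SubBytes: bitsliced S-box evaluation for the next round.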
5190# qhasm: xmm5 ^= xmm6
5191# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
5192# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
5193pxor %xmm6,%xmm5
5194
5195# qhasm: xmm2 ^= xmm1
5196# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
5197# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
5198pxor %xmm1,%xmm2
5199
5200# qhasm: xmm5 ^= xmm0
5201# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
5202# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
5203pxor %xmm0,%xmm5
5204
5205# qhasm: xmm6 ^= xmm2
5206# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
5207# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
5208pxor %xmm2,%xmm6
5209
5210# qhasm: xmm3 ^= xmm0
5211# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
5212# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
5213pxor %xmm0,%xmm3
5214
5215# qhasm: xmm6 ^= xmm3
5216# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
5217# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
5218pxor %xmm3,%xmm6
5219
5220# qhasm: xmm3 ^= xmm7
5221# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
5222# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
5223pxor %xmm7,%xmm3
5224
5225# qhasm: xmm3 ^= xmm4
5226# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
5227# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
5228pxor %xmm4,%xmm3
5229
5230# qhasm: xmm7 ^= xmm5
5231# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
5232# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
5233pxor %xmm5,%xmm7
5234
5235# qhasm: xmm3 ^= xmm1
5236# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
5237# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
5238pxor %xmm1,%xmm3
5239
5240# qhasm: xmm4 ^= xmm5
5241# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
5242# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
5243pxor %xmm5,%xmm4
5244
5245# qhasm: xmm2 ^= xmm7
5246# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
5247# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
5248pxor %xmm7,%xmm2
5249
5250# qhasm: xmm1 ^= xmm5
5251# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
5252# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
5253pxor %xmm5,%xmm1
5254
5255# qhasm: xmm11 = xmm7
5256# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
5257# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
5258movdqa %xmm7,%xmm8
5259
5260# qhasm: xmm10 = xmm1
5261# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
5262# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
5263movdqa %xmm1,%xmm9
5264
5265# qhasm: xmm9 = xmm5
5266# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
5267# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
5268movdqa %xmm5,%xmm10
5269
5270# qhasm: xmm13 = xmm2
5271# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
5272# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
5273movdqa %xmm2,%xmm11
5274
5275# qhasm: xmm12 = xmm6
5276# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
5277# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
5278movdqa %xmm6,%xmm12
5279
5280# qhasm: xmm11 ^= xmm4
5281# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
5282# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
5283pxor %xmm4,%xmm8
5284
5285# qhasm: xmm10 ^= xmm2
5286# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
5287# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
5288pxor %xmm2,%xmm9
5289
5290# qhasm: xmm9 ^= xmm3
5291# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
5292# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
5293pxor %xmm3,%xmm10
5294
5295# qhasm: xmm13 ^= xmm4
5296# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
5297# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
5298pxor %xmm4,%xmm11
5299
5300# qhasm: xmm12 ^= xmm0
5301# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
5302# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
5303pxor %xmm0,%xmm12
5304
5305# qhasm: xmm14 = xmm11
5306# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
5307# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
5308movdqa %xmm8,%xmm13
5309
5310# qhasm: xmm8 = xmm10
5311# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
5312# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
5313movdqa %xmm9,%xmm14
5314
5315# qhasm: xmm15 = xmm11
5316# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
5317# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
5318movdqa %xmm8,%xmm15
5319
5320# qhasm: xmm10 |= xmm9
5321# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
5322# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
5323por %xmm10,%xmm9
5324
5325# qhasm: xmm11 |= xmm12
5326# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
5327# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
5328por %xmm12,%xmm8
5329
5330# qhasm: xmm15 ^= xmm8
5331# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
5332# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
5333pxor %xmm14,%xmm15
5334
5335# qhasm: xmm14 &= xmm12
5336# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
5337# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
5338pand %xmm12,%xmm13
5339
5340# qhasm: xmm8 &= xmm9
5341# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
5342# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
5343pand %xmm10,%xmm14
5344
5345# qhasm: xmm12 ^= xmm9
5346# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
5347# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
5348pxor %xmm10,%xmm12
5349
5350# qhasm: xmm15 &= xmm12
5351# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
5352# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
5353pand %xmm12,%xmm15
5354
5355# qhasm: xmm12 = xmm3
5356# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
5357# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
5358movdqa %xmm3,%xmm10
5359
5360# qhasm: xmm12 ^= xmm0
5361# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
5362# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
5363pxor %xmm0,%xmm10
5364
5365# qhasm: xmm13 &= xmm12
5366# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
5367# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
5368pand %xmm10,%xmm11
5369
5370# qhasm: xmm11 ^= xmm13
5371# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
5372# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
5373pxor %xmm11,%xmm8
5374
5375# qhasm: xmm10 ^= xmm13
5376# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
5377# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
5378pxor %xmm11,%xmm9
5379
5380# qhasm: xmm13 = xmm7
5381# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
5382# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
5383movdqa %xmm7,%xmm10
5384
5385# qhasm: xmm13 ^= xmm1
5386# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
5387# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
5388pxor %xmm1,%xmm10
5389
5390# qhasm: xmm12 = xmm5
5391# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
5392# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
5393movdqa %xmm5,%xmm11
5394
5395# qhasm: xmm9 = xmm13
5396# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
5397# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
5398movdqa %xmm10,%xmm12
5399
5400# qhasm: xmm12 ^= xmm6
5401# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
5402# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
5403pxor %xmm6,%xmm11
5404
5405# qhasm: xmm9 |= xmm12
5406# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
5407# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
5408por %xmm11,%xmm12
5409
5410# qhasm: xmm13 &= xmm12
5411# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
5412# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
5413pand %xmm11,%xmm10
5414
5415# qhasm: xmm8 ^= xmm13
5416# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
5417# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
5418pxor %xmm10,%xmm14
5419
5420# qhasm: xmm11 ^= xmm15
5421# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
5422# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
5423pxor %xmm15,%xmm8
5424
5425# qhasm: xmm10 ^= xmm14
5426# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
5427# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
5428pxor %xmm13,%xmm9
5429
5430# qhasm: xmm9 ^= xmm15
5431# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
5432# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
5433pxor %xmm15,%xmm12
5434
5435# qhasm: xmm8 ^= xmm14
5436# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
5437# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
5438pxor %xmm13,%xmm14
5439
5440# qhasm: xmm9 ^= xmm14
5441# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
5442# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
5443pxor %xmm13,%xmm12
5444
5445# qhasm: xmm12 = xmm2
5446# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
5447# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
5448movdqa %xmm2,%xmm10
5449
5450# qhasm: xmm13 = xmm4
5451# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
5452# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
5453movdqa %xmm4,%xmm11
5454
5455# qhasm: xmm14 = xmm1
5456# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
5457# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
5458movdqa %xmm1,%xmm13
5459
5460# qhasm: xmm15 = xmm7
5461# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
5462# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
5463movdqa %xmm7,%xmm15
5464
5465# qhasm: xmm12 &= xmm3
5466# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
5467# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
5468pand %xmm3,%xmm10
5469
5470# qhasm: xmm13 &= xmm0
5471# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
5472# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
5473pand %xmm0,%xmm11
5474
5475# qhasm: xmm14 &= xmm5
5476# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
5477# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
5478pand %xmm5,%xmm13
5479
5480# qhasm: xmm15 |= xmm6
5481# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
5482# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
5483por %xmm6,%xmm15
5484
5485# qhasm: xmm11 ^= xmm12
5486# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
5487# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
5488pxor %xmm10,%xmm8
5489
5490# qhasm: xmm10 ^= xmm13
5491# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
5492# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
5493pxor %xmm11,%xmm9
5494
5495# qhasm: xmm9 ^= xmm14
5496# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
5497# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
5498pxor %xmm13,%xmm12
5499
5500# qhasm: xmm8 ^= xmm15
5501# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
5502# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
5503pxor %xmm15,%xmm14
5504
5505# qhasm: xmm12 = xmm11
5506# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
5507# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
5508movdqa %xmm8,%xmm10
5509
5510# qhasm: xmm12 ^= xmm10
5511# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
5512# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
5513pxor %xmm9,%xmm10
5514
5515# qhasm: xmm11 &= xmm9
5516# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
5517# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
5518pand %xmm12,%xmm8
5519
5520# qhasm: xmm14 = xmm8
5521# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
5522# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
5523movdqa %xmm14,%xmm11
5524
5525# qhasm: xmm14 ^= xmm11
5526# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
5527# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
5528pxor %xmm8,%xmm11
5529
5530# qhasm: xmm15 = xmm12
5531# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
5532# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
5533movdqa %xmm10,%xmm13
5534
5535# qhasm: xmm15 &= xmm14
5536# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
5537# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
5538pand %xmm11,%xmm13
5539
5540# qhasm: xmm15 ^= xmm10
5541# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
5542# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
5543pxor %xmm9,%xmm13
5544
5545# qhasm: xmm13 = xmm9
5546# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
5547# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
5548movdqa %xmm12,%xmm15
5549
5550# qhasm: xmm13 ^= xmm8
5551# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
5552# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
5553pxor %xmm14,%xmm15
5554
5555# qhasm: xmm11 ^= xmm10
5556# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
5557# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
5558pxor %xmm9,%xmm8
5559
5560# qhasm: xmm13 &= xmm11
5561# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
5562# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
5563pand %xmm8,%xmm15
5564
5565# qhasm: xmm13 ^= xmm8
5566# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
5567# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
5568pxor %xmm14,%xmm15
5569
5570# qhasm: xmm9 ^= xmm13
5571# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
5572# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
5573pxor %xmm15,%xmm12
5574
5575# qhasm: xmm10 = xmm14
5576# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
5577# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
5578movdqa %xmm11,%xmm8
5579
5580# qhasm: xmm10 ^= xmm13
5581# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
5582# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
5583pxor %xmm15,%xmm8
5584
5585# qhasm: xmm10 &= xmm8
5586# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
5587# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
5588pand %xmm14,%xmm8
5589
5590# qhasm: xmm9 ^= xmm10
5591# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
5592# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
5593pxor %xmm8,%xmm12
5594
5595# qhasm: xmm14 ^= xmm10
5596# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
5597# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
5598pxor %xmm8,%xmm11
5599
5600# qhasm: xmm14 &= xmm15
5601# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
5602# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
5603pand %xmm13,%xmm11
5604
5605# qhasm: xmm14 ^= xmm12
5606# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
5607# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
5608pxor %xmm10,%xmm11
5609
5610# qhasm: xmm12 = xmm6
5611# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
5612# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
5613movdqa %xmm6,%xmm8
5614
5615# qhasm: xmm8 = xmm5
5616# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
5617# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
5618movdqa %xmm5,%xmm9
5619
5620# qhasm: xmm10 = xmm15
5621# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
5622# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
5623movdqa %xmm13,%xmm10
5624
5625# qhasm: xmm10 ^= xmm14
5626# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
5627# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
5628pxor %xmm11,%xmm10
5629
5630# qhasm: xmm10 &= xmm6
5631# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
5632# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
5633pand %xmm6,%xmm10
5634
5635# qhasm: xmm6 ^= xmm5
5636# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
5637# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
5638pxor %xmm5,%xmm6
5639
5640# qhasm: xmm6 &= xmm14
5641# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
5642# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
5643pand %xmm11,%xmm6
5644
5645# qhasm: xmm5 &= xmm15
5646# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
5647# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
5648pand %xmm13,%xmm5
5649
5650# qhasm: xmm6 ^= xmm5
5651# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
5652# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
5653pxor %xmm5,%xmm6
5654
5655# qhasm: xmm5 ^= xmm10
5656# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
5657# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
5658pxor %xmm10,%xmm5
5659
5660# qhasm: xmm12 ^= xmm0
5661# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
5662# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
5663pxor %xmm0,%xmm8
5664
5665# qhasm: xmm8 ^= xmm3
5666# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
5667# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
5668pxor %xmm3,%xmm9
5669
5670# qhasm: xmm15 ^= xmm13
5671# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5672# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5673pxor %xmm15,%xmm13
5674
5675# qhasm: xmm14 ^= xmm9
5676# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5677# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5678pxor %xmm12,%xmm11
5679
5680# qhasm: xmm11 = xmm15
5681# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5682# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5683movdqa %xmm13,%xmm10
5684
5685# qhasm: xmm11 ^= xmm14
5686# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5687# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5688pxor %xmm11,%xmm10
5689
5690# qhasm: xmm11 &= xmm12
5691# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5692# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5693pand %xmm8,%xmm10
5694
5695# qhasm: xmm12 ^= xmm8
5696# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5697# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5698pxor %xmm9,%xmm8
5699
5700# qhasm: xmm12 &= xmm14
5701# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5702# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5703pand %xmm11,%xmm8
5704
5705# qhasm: xmm8 &= xmm15
5706# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5707# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5708pand %xmm13,%xmm9
5709
5710# qhasm: xmm8 ^= xmm12
5711# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5712# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5713pxor %xmm8,%xmm9
5714
5715# qhasm: xmm12 ^= xmm11
5716# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5717# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5718pxor %xmm10,%xmm8
5719
5720# qhasm: xmm10 = xmm13
5721# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5722# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5723movdqa %xmm15,%xmm10
5724
5725# qhasm: xmm10 ^= xmm9
5726# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5727# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5728pxor %xmm12,%xmm10
5729
5730# qhasm: xmm10 &= xmm0
5731# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
5732# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
5733pand %xmm0,%xmm10
5734
5735# qhasm: xmm0 ^= xmm3
5736# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
5737# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
5738pxor %xmm3,%xmm0
5739
5740# qhasm: xmm0 &= xmm9
5741# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
5742# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
5743pand %xmm12,%xmm0
5744
5745# qhasm: xmm3 &= xmm13
5746# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
5747# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
5748pand %xmm15,%xmm3
5749
5750# qhasm: xmm0 ^= xmm3
5751# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
5752# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
5753pxor %xmm3,%xmm0
5754
5755# qhasm: xmm3 ^= xmm10
5756# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
5757# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
5758pxor %xmm10,%xmm3
5759
5760# qhasm: xmm6 ^= xmm12
5761# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
5762# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
5763pxor %xmm8,%xmm6
5764
5765# qhasm: xmm0 ^= xmm12
5766# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
5767# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
5768pxor %xmm8,%xmm0
5769
5770# qhasm: xmm5 ^= xmm8
5771# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
5772# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
5773pxor %xmm9,%xmm5
5774
5775# qhasm: xmm3 ^= xmm8
5776# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
5777# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
5778pxor %xmm9,%xmm3
5779
5780# qhasm: xmm12 = xmm7
5781# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
5782# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
5783movdqa %xmm7,%xmm8
5784
5785# qhasm: xmm8 = xmm1
5786# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
5787# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
5788movdqa %xmm1,%xmm9
5789
5790# qhasm: xmm12 ^= xmm4
5791# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
5792# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
5793pxor %xmm4,%xmm8
5794
5795# qhasm: xmm8 ^= xmm2
5796# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
5797# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
5798pxor %xmm2,%xmm9
5799
5800# qhasm: xmm11 = xmm15
5801# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5802# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5803movdqa %xmm13,%xmm10
5804
5805# qhasm: xmm11 ^= xmm14
5806# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5807# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5808pxor %xmm11,%xmm10
5809
5810# qhasm: xmm11 &= xmm12
5811# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5812# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5813pand %xmm8,%xmm10
5814
5815# qhasm: xmm12 ^= xmm8
5816# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5817# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5818pxor %xmm9,%xmm8
5819
5820# qhasm: xmm12 &= xmm14
5821# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5822# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5823pand %xmm11,%xmm8
5824
5825# qhasm: xmm8 &= xmm15
5826# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5827# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5828pand %xmm13,%xmm9
5829
5830# qhasm: xmm8 ^= xmm12
5831# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5832# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5833pxor %xmm8,%xmm9
5834
5835# qhasm: xmm12 ^= xmm11
5836# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5837# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5838pxor %xmm10,%xmm8
5839
5840# qhasm: xmm10 = xmm13
5841# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5842# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5843movdqa %xmm15,%xmm10
5844
5845# qhasm: xmm10 ^= xmm9
5846# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5847# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5848pxor %xmm12,%xmm10
5849
5850# qhasm: xmm10 &= xmm4
5851# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
5852# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
5853pand %xmm4,%xmm10
5854
5855# qhasm: xmm4 ^= xmm2
5856# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
5857# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
5858pxor %xmm2,%xmm4
5859
5860# qhasm: xmm4 &= xmm9
5861# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
5862# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
5863pand %xmm12,%xmm4
5864
5865# qhasm: xmm2 &= xmm13
5866# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
5867# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
5868pand %xmm15,%xmm2
5869
5870# qhasm: xmm4 ^= xmm2
5871# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
5872# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
5873pxor %xmm2,%xmm4
5874
5875# qhasm: xmm2 ^= xmm10
5876# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5877# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5878pxor %xmm10,%xmm2
5879
5880# qhasm: xmm15 ^= xmm13
5881# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5882# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5883pxor %xmm15,%xmm13
5884
5885# qhasm: xmm14 ^= xmm9
5886# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5887# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5888pxor %xmm12,%xmm11
5889
5890# qhasm: xmm11 = xmm15
5891# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5892# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5893movdqa %xmm13,%xmm10
5894
5895# qhasm: xmm11 ^= xmm14
5896# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5897# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5898pxor %xmm11,%xmm10
5899
5900# qhasm: xmm11 &= xmm7
5901# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
5902# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
5903pand %xmm7,%xmm10
5904
5905# qhasm: xmm7 ^= xmm1
5906# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
5907# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
5908pxor %xmm1,%xmm7
5909
5910# qhasm: xmm7 &= xmm14
5911# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
5912# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
5913pand %xmm11,%xmm7
5914
5915# qhasm: xmm1 &= xmm15
5916# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
5917# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
5918pand %xmm13,%xmm1
5919
5920# qhasm: xmm7 ^= xmm1
5921# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
5922# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
5923pxor %xmm1,%xmm7
5924
5925# qhasm: xmm1 ^= xmm11
5926# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
5927# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
5928pxor %xmm10,%xmm1
5929
5930# qhasm: xmm7 ^= xmm12
5931# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
5932# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
5933pxor %xmm8,%xmm7
5934
5935# qhasm: xmm4 ^= xmm12
5936# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
5937# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
5938pxor %xmm8,%xmm4
5939
5940# qhasm: xmm1 ^= xmm8
5941# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
5942# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
5943pxor %xmm9,%xmm1
5944
5945# qhasm: xmm2 ^= xmm8
5946# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
5947# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
5948pxor %xmm9,%xmm2
5949
5950# qhasm: xmm7 ^= xmm0
5951# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
5952# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
5953pxor %xmm0,%xmm7
5954
5955# qhasm: xmm1 ^= xmm6
5956# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
5957# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
5958pxor %xmm6,%xmm1
5959
5960# qhasm: xmm4 ^= xmm7
5961# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
5962# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
5963pxor %xmm7,%xmm4
5964
5965# qhasm: xmm6 ^= xmm0
5966# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
5967# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
5968pxor %xmm0,%xmm6
5969
5970# qhasm: xmm0 ^= xmm1
5971# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
5972# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
5973pxor %xmm1,%xmm0
5974
5975# qhasm: xmm1 ^= xmm5
5976# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
5977# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
5978pxor %xmm5,%xmm1
5979
5980# qhasm: xmm5 ^= xmm2
5981# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
5982# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
5983pxor %xmm2,%xmm5
5984
5985# qhasm: xmm4 ^= xmm5
5986# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
5987# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
5988pxor %xmm5,%xmm4
5989
5990# qhasm: xmm2 ^= xmm3
5991# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
5992# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
5993pxor %xmm3,%xmm2
5994
5995# qhasm: xmm3 ^= xmm5
5996# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
5997# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
5998pxor %xmm5,%xmm3
5999
6000# qhasm: xmm6 ^= xmm3
6001# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
6002# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
6003pxor %xmm3,%xmm6
6004
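# note: the pshufd-by-0x93 and pshufd-by-0x4E sequences below, together with
# the xor chains between them, appear to implement the AES MixColumns step on
# the bitsliced state held in xmm0..xmm7 (annotation added for orientation;
# the interpretation is inferred, not stated by the generated qhasm comments).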
6005# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
6006# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
6007# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
6008pshufd $0x93,%xmm0,%xmm8
6009
6010# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
6011# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
6012# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
6013pshufd $0x93,%xmm1,%xmm9
6014
6015# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
6016# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
6017# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
6018pshufd $0x93,%xmm4,%xmm10
6019
6020# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
6021# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
6022# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
6023pshufd $0x93,%xmm6,%xmm11
6024
6025# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
6026# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
6027# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
6028pshufd $0x93,%xmm3,%xmm12
6029
6030# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
6031# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
6032# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
6033pshufd $0x93,%xmm7,%xmm13
6034
6035# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
6036# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
6037# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
6038pshufd $0x93,%xmm2,%xmm14
6039
6040# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
6041# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
6042# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
6043pshufd $0x93,%xmm5,%xmm15
6044
6045# qhasm: xmm0 ^= xmm8
6046# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
6047# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
6048pxor %xmm8,%xmm0
6049
6050# qhasm: xmm1 ^= xmm9
6051# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
6052# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
6053pxor %xmm9,%xmm1
6054
6055# qhasm: xmm4 ^= xmm10
6056# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
6057# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
6058pxor %xmm10,%xmm4
6059
6060# qhasm: xmm6 ^= xmm11
6061# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
6062# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
6063pxor %xmm11,%xmm6
6064
6065# qhasm: xmm3 ^= xmm12
6066# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
6067# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
6068pxor %xmm12,%xmm3
6069
6070# qhasm: xmm7 ^= xmm13
6071# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
6072# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
6073pxor %xmm13,%xmm7
6074
6075# qhasm: xmm2 ^= xmm14
6076# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
6077# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
6078pxor %xmm14,%xmm2
6079
6080# qhasm: xmm5 ^= xmm15
6081# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
6082# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
6083pxor %xmm15,%xmm5
6084
6085# qhasm: xmm8 ^= xmm5
6086# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
6087# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
6088pxor %xmm5,%xmm8
6089
6090# qhasm: xmm9 ^= xmm0
6091# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
6092# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
6093pxor %xmm0,%xmm9
6094
6095# qhasm: xmm10 ^= xmm1
6096# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
6097# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
6098pxor %xmm1,%xmm10
6099
6100# qhasm: xmm9 ^= xmm5
6101# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
6102# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
6103pxor %xmm5,%xmm9
6104
6105# qhasm: xmm11 ^= xmm4
6106# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
6107# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
6108pxor %xmm4,%xmm11
6109
6110# qhasm: xmm12 ^= xmm6
6111# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
6112# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
6113pxor %xmm6,%xmm12
6114
6115# qhasm: xmm13 ^= xmm3
6116# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
6117# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
6118pxor %xmm3,%xmm13
6119
6120# qhasm: xmm11 ^= xmm5
6121# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
6122# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
6123pxor %xmm5,%xmm11
6124
6125# qhasm: xmm14 ^= xmm7
6126# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
6127# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
6128pxor %xmm7,%xmm14
6129
6130# qhasm: xmm15 ^= xmm2
6131# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
6132# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
6133pxor %xmm2,%xmm15
6134
6135# qhasm: xmm12 ^= xmm5
6136# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
6137# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
6138pxor %xmm5,%xmm12
6139
6140# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
6141# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
6142# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
6143pshufd $0x4E,%xmm0,%xmm0
6144
6145# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
6146# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
6147# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
6148pshufd $0x4E,%xmm1,%xmm1
6149
6150# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
6151# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
6152# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
6153pshufd $0x4E,%xmm4,%xmm4
6154
6155# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
6156# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
6157# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
6158pshufd $0x4E,%xmm6,%xmm6
6159
6160# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
6161# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
6162# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
6163pshufd $0x4E,%xmm3,%xmm3
6164
6165# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
6166# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
6167# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
6168pshufd $0x4E,%xmm7,%xmm7
6169
6170# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
6171# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
6172# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
6173pshufd $0x4E,%xmm2,%xmm2
6174
6175# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
6176# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
6177# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
6178pshufd $0x4E,%xmm5,%xmm5
6179
6180# qhasm: xmm8 ^= xmm0
6181# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
6182# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
6183pxor %xmm0,%xmm8
6184
6185# qhasm: xmm9 ^= xmm1
6186# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
6187# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
6188pxor %xmm1,%xmm9
6189
6190# qhasm: xmm10 ^= xmm4
6191# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
6192# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
6193pxor %xmm4,%xmm10
6194
6195# qhasm: xmm11 ^= xmm6
6196# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
6197# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
6198pxor %xmm6,%xmm11
6199
6200# qhasm: xmm12 ^= xmm3
6201# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
6202# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
6203pxor %xmm3,%xmm12
6204
6205# qhasm: xmm13 ^= xmm7
6206# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
6207# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
6208pxor %xmm7,%xmm13
6209
6210# qhasm: xmm14 ^= xmm2
6211# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
6212# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
6213pxor %xmm2,%xmm14
6214
6215# qhasm: xmm15 ^= xmm5
6216# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
6217# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
6218pxor %xmm5,%xmm15
6219
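# note: the eight pxor loads from 640(%rcx)..752(%rcx) below appear to add the
# next 128-byte block of bitsliced round-key material held in the buffer named
# "c", and the following pshufb by SR looks like the ShiftRows byte permutation
# applied to each state register (inferred annotation, hedged).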
6220# qhasm: xmm8 ^= *(int128 *)(c + 640)
6221# asm 1: pxor 640(<c=int64#4),<xmm8=int6464#9
6222# asm 2: pxor 640(<c=%rcx),<xmm8=%xmm8
6223pxor 640(%rcx),%xmm8
6224
6225# qhasm: shuffle bytes of xmm8 by SR
6226# asm 1: pshufb SR,<xmm8=int6464#9
6227# asm 2: pshufb SR,<xmm8=%xmm8
6228pshufb SR,%xmm8
6229
6230# qhasm: xmm9 ^= *(int128 *)(c + 656)
6231# asm 1: pxor 656(<c=int64#4),<xmm9=int6464#10
6232# asm 2: pxor 656(<c=%rcx),<xmm9=%xmm9
6233pxor 656(%rcx),%xmm9
6234
6235# qhasm: shuffle bytes of xmm9 by SR
6236# asm 1: pshufb SR,<xmm9=int6464#10
6237# asm 2: pshufb SR,<xmm9=%xmm9
6238pshufb SR,%xmm9
6239
6240# qhasm: xmm10 ^= *(int128 *)(c + 672)
6241# asm 1: pxor 672(<c=int64#4),<xmm10=int6464#11
6242# asm 2: pxor 672(<c=%rcx),<xmm10=%xmm10
6243pxor 672(%rcx),%xmm10
6244
6245# qhasm: shuffle bytes of xmm10 by SR
6246# asm 1: pshufb SR,<xmm10=int6464#11
6247# asm 2: pshufb SR,<xmm10=%xmm10
6248pshufb SR,%xmm10
6249
6250# qhasm: xmm11 ^= *(int128 *)(c + 688)
6251# asm 1: pxor 688(<c=int64#4),<xmm11=int6464#12
6252# asm 2: pxor 688(<c=%rcx),<xmm11=%xmm11
6253pxor 688(%rcx),%xmm11
6254
6255# qhasm: shuffle bytes of xmm11 by SR
6256# asm 1: pshufb SR,<xmm11=int6464#12
6257# asm 2: pshufb SR,<xmm11=%xmm11
6258pshufb SR,%xmm11
6259
6260# qhasm: xmm12 ^= *(int128 *)(c + 704)
6261# asm 1: pxor 704(<c=int64#4),<xmm12=int6464#13
6262# asm 2: pxor 704(<c=%rcx),<xmm12=%xmm12
6263pxor 704(%rcx),%xmm12
6264
6265# qhasm: shuffle bytes of xmm12 by SR
6266# asm 1: pshufb SR,<xmm12=int6464#13
6267# asm 2: pshufb SR,<xmm12=%xmm12
6268pshufb SR,%xmm12
6269
6270# qhasm: xmm13 ^= *(int128 *)(c + 720)
6271# asm 1: pxor 720(<c=int64#4),<xmm13=int6464#14
6272# asm 2: pxor 720(<c=%rcx),<xmm13=%xmm13
6273pxor 720(%rcx),%xmm13
6274
6275# qhasm: shuffle bytes of xmm13 by SR
6276# asm 1: pshufb SR,<xmm13=int6464#14
6277# asm 2: pshufb SR,<xmm13=%xmm13
6278pshufb SR,%xmm13
6279
6280# qhasm: xmm14 ^= *(int128 *)(c + 736)
6281# asm 1: pxor 736(<c=int64#4),<xmm14=int6464#15
6282# asm 2: pxor 736(<c=%rcx),<xmm14=%xmm14
6283pxor 736(%rcx),%xmm14
6284
6285# qhasm: shuffle bytes of xmm14 by SR
6286# asm 1: pshufb SR,<xmm14=int6464#15
6287# asm 2: pshufb SR,<xmm14=%xmm14
6288pshufb SR,%xmm14
6289
6290# qhasm: xmm15 ^= *(int128 *)(c + 752)
6291# asm 1: pxor 752(<c=int64#4),<xmm15=int6464#16
6292# asm 2: pxor 752(<c=%rcx),<xmm15=%xmm15
6293pxor 752(%rcx),%xmm15
6294
6295# qhasm: shuffle bytes of xmm15 by SR
6296# asm 1: pshufb SR,<xmm15=int6464#16
6297# asm 2: pshufb SR,<xmm15=%xmm15
6298pshufb SR,%xmm15
6299
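# note: the long pxor/pand/por/movdqa network that follows appears to evaluate
# the bitsliced AES S-box (a GF(2^8) inversion expressed as a Boolean circuit)
# on xmm8..xmm15 for the next round (inferred annotation, hedged).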
6300# qhasm: xmm13 ^= xmm14
6301# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
6302# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
6303pxor %xmm14,%xmm13
6304
6305# qhasm: xmm10 ^= xmm9
6306# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
6307# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
6308pxor %xmm9,%xmm10
6309
6310# qhasm: xmm13 ^= xmm8
6311# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
6312# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
6313pxor %xmm8,%xmm13
6314
6315# qhasm: xmm14 ^= xmm10
6316# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
6317# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
6318pxor %xmm10,%xmm14
6319
6320# qhasm: xmm11 ^= xmm8
6321# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
6322# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
6323pxor %xmm8,%xmm11
6324
6325# qhasm: xmm14 ^= xmm11
6326# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
6327# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
6328pxor %xmm11,%xmm14
6329
6330# qhasm: xmm11 ^= xmm15
6331# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
6332# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
6333pxor %xmm15,%xmm11
6334
6335# qhasm: xmm11 ^= xmm12
6336# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
6337# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
6338pxor %xmm12,%xmm11
6339
6340# qhasm: xmm15 ^= xmm13
6341# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
6342# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
6343pxor %xmm13,%xmm15
6344
6345# qhasm: xmm11 ^= xmm9
6346# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
6347# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
6348pxor %xmm9,%xmm11
6349
6350# qhasm: xmm12 ^= xmm13
6351# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
6352# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
6353pxor %xmm13,%xmm12
6354
6355# qhasm: xmm10 ^= xmm15
6356# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
6357# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
6358pxor %xmm15,%xmm10
6359
6360# qhasm: xmm9 ^= xmm13
6361# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
6362# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
6363pxor %xmm13,%xmm9
6364
6365# qhasm: xmm3 = xmm15
6366# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
6367# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
6368movdqa %xmm15,%xmm0
6369
6370# qhasm: xmm2 = xmm9
6371# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
6372# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
6373movdqa %xmm9,%xmm1
6374
6375# qhasm: xmm1 = xmm13
6376# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
6377# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
6378movdqa %xmm13,%xmm2
6379
6380# qhasm: xmm5 = xmm10
6381# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
6382# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
6383movdqa %xmm10,%xmm3
6384
6385# qhasm: xmm4 = xmm14
6386# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
6387# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
6388movdqa %xmm14,%xmm4
6389
6390# qhasm: xmm3 ^= xmm12
6391# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
6392# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
6393pxor %xmm12,%xmm0
6394
6395# qhasm: xmm2 ^= xmm10
6396# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
6397# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
6398pxor %xmm10,%xmm1
6399
6400# qhasm: xmm1 ^= xmm11
6401# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
6402# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
6403pxor %xmm11,%xmm2
6404
6405# qhasm: xmm5 ^= xmm12
6406# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
6407# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
6408pxor %xmm12,%xmm3
6409
6410# qhasm: xmm4 ^= xmm8
6411# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
6412# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
6413pxor %xmm8,%xmm4
6414
6415# qhasm: xmm6 = xmm3
6416# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
6417# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
6418movdqa %xmm0,%xmm5
6419
6420# qhasm: xmm0 = xmm2
6421# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
6422# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
6423movdqa %xmm1,%xmm6
6424
6425# qhasm: xmm7 = xmm3
6426# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
6427# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
6428movdqa %xmm0,%xmm7
6429
6430# qhasm: xmm2 |= xmm1
6431# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
6432# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
6433por %xmm2,%xmm1
6434
6435# qhasm: xmm3 |= xmm4
6436# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
6437# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
6438por %xmm4,%xmm0
6439
6440# qhasm: xmm7 ^= xmm0
6441# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
6442# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
6443pxor %xmm6,%xmm7
6444
6445# qhasm: xmm6 &= xmm4
6446# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
6447# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
6448pand %xmm4,%xmm5
6449
6450# qhasm: xmm0 &= xmm1
6451# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
6452# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
6453pand %xmm2,%xmm6
6454
6455# qhasm: xmm4 ^= xmm1
6456# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
6457# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
6458pxor %xmm2,%xmm4
6459
6460# qhasm: xmm7 &= xmm4
6461# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
6462# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
6463pand %xmm4,%xmm7
6464
6465# qhasm: xmm4 = xmm11
6466# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
6467# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
6468movdqa %xmm11,%xmm2
6469
6470# qhasm: xmm4 ^= xmm8
6471# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
6472# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
6473pxor %xmm8,%xmm2
6474
6475# qhasm: xmm5 &= xmm4
6476# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
6477# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
6478pand %xmm2,%xmm3
6479
6480# qhasm: xmm3 ^= xmm5
6481# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
6482# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
6483pxor %xmm3,%xmm0
6484
6485# qhasm: xmm2 ^= xmm5
6486# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
6487# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
6488pxor %xmm3,%xmm1
6489
6490# qhasm: xmm5 = xmm15
6491# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
6492# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
6493movdqa %xmm15,%xmm2
6494
6495# qhasm: xmm5 ^= xmm9
6496# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
6497# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
6498pxor %xmm9,%xmm2
6499
6500# qhasm: xmm4 = xmm13
6501# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
6502# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
6503movdqa %xmm13,%xmm3
6504
6505# qhasm: xmm1 = xmm5
6506# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
6507# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
6508movdqa %xmm2,%xmm4
6509
6510# qhasm: xmm4 ^= xmm14
6511# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
6512# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
6513pxor %xmm14,%xmm3
6514
6515# qhasm: xmm1 |= xmm4
6516# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
6517# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
6518por %xmm3,%xmm4
6519
6520# qhasm: xmm5 &= xmm4
6521# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
6522# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
6523pand %xmm3,%xmm2
6524
6525# qhasm: xmm0 ^= xmm5
6526# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
6527# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
6528pxor %xmm2,%xmm6
6529
6530# qhasm: xmm3 ^= xmm7
6531# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
6532# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
6533pxor %xmm7,%xmm0
6534
6535# qhasm: xmm2 ^= xmm6
6536# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
6537# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
6538pxor %xmm5,%xmm1
6539
6540# qhasm: xmm1 ^= xmm7
6541# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
6542# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
6543pxor %xmm7,%xmm4
6544
6545# qhasm: xmm0 ^= xmm6
6546# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
6547# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
6548pxor %xmm5,%xmm6
6549
6550# qhasm: xmm1 ^= xmm6
6551# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
6552# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
6553pxor %xmm5,%xmm4
6554
6555# qhasm: xmm4 = xmm10
6556# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
6557# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
6558movdqa %xmm10,%xmm2
6559
6560# qhasm: xmm5 = xmm12
6561# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
6562# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
6563movdqa %xmm12,%xmm3
6564
6565# qhasm: xmm6 = xmm9
6566# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
6567# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
6568movdqa %xmm9,%xmm5
6569
6570# qhasm: xmm7 = xmm15
6571# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
6572# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
6573movdqa %xmm15,%xmm7
6574
6575# qhasm: xmm4 &= xmm11
6576# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
6577# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
6578pand %xmm11,%xmm2
6579
6580# qhasm: xmm5 &= xmm8
6581# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
6582# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
6583pand %xmm8,%xmm3
6584
6585# qhasm: xmm6 &= xmm13
6586# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
6587# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
6588pand %xmm13,%xmm5
6589
6590# qhasm: xmm7 |= xmm14
6591# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
6592# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
6593por %xmm14,%xmm7
6594
6595# qhasm: xmm3 ^= xmm4
6596# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
6597# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
6598pxor %xmm2,%xmm0
6599
6600# qhasm: xmm2 ^= xmm5
6601# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
6602# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
6603pxor %xmm3,%xmm1
6604
6605# qhasm: xmm1 ^= xmm6
6606# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
6607# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
6608pxor %xmm5,%xmm4
6609
6610# qhasm: xmm0 ^= xmm7
6611# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
6612# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
6613pxor %xmm7,%xmm6
6614
6615# qhasm: xmm4 = xmm3
6616# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
6617# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
6618movdqa %xmm0,%xmm2
6619
6620# qhasm: xmm4 ^= xmm2
6621# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
6622# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
6623pxor %xmm1,%xmm2
6624
6625# qhasm: xmm3 &= xmm1
6626# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
6627# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
6628pand %xmm4,%xmm0
6629
6630# qhasm: xmm6 = xmm0
6631# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
6632# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
6633movdqa %xmm6,%xmm3
6634
6635# qhasm: xmm6 ^= xmm3
6636# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
6637# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
6638pxor %xmm0,%xmm3
6639
6640# qhasm: xmm7 = xmm4
6641# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
6642# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
6643movdqa %xmm2,%xmm5
6644
6645# qhasm: xmm7 &= xmm6
6646# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
6647# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
6648pand %xmm3,%xmm5
6649
6650# qhasm: xmm7 ^= xmm2
6651# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
6652# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
6653pxor %xmm1,%xmm5
6654
6655# qhasm: xmm5 = xmm1
6656# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
6657# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
6658movdqa %xmm4,%xmm7
6659
6660# qhasm: xmm5 ^= xmm0
6661# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
6662# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
6663pxor %xmm6,%xmm7
6664
6665# qhasm: xmm3 ^= xmm2
6666# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
6667# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
6668pxor %xmm1,%xmm0
6669
6670# qhasm: xmm5 &= xmm3
6671# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
6672# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
6673pand %xmm0,%xmm7
6674
6675# qhasm: xmm5 ^= xmm0
6676# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
6677# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
6678pxor %xmm6,%xmm7
6679
6680# qhasm: xmm1 ^= xmm5
6681# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
6682# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
6683pxor %xmm7,%xmm4
6684
6685# qhasm: xmm2 = xmm6
6686# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
6687# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
6688movdqa %xmm3,%xmm0
6689
6690# qhasm: xmm2 ^= xmm5
6691# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
6692# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
6693pxor %xmm7,%xmm0
6694
6695# qhasm: xmm2 &= xmm0
6696# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
6697# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
6698pand %xmm6,%xmm0
6699
6700# qhasm: xmm1 ^= xmm2
6701# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
6702# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
6703pxor %xmm0,%xmm4
6704
6705# qhasm: xmm6 ^= xmm2
6706# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
6707# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
6708pxor %xmm0,%xmm3
6709
6710# qhasm: xmm6 &= xmm7
6711# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
6712# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
6713pand %xmm5,%xmm3
6714
6715# qhasm: xmm6 ^= xmm4
6716# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
6717# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
6718pxor %xmm2,%xmm3
6719
6720# qhasm: xmm4 = xmm14
6721# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
6722# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
6723movdqa %xmm14,%xmm0
6724
6725# qhasm: xmm0 = xmm13
6726# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
6727# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
6728movdqa %xmm13,%xmm1
6729
6730# qhasm: xmm2 = xmm7
6731# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
6732# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
6733movdqa %xmm5,%xmm2
6734
6735# qhasm: xmm2 ^= xmm6
6736# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
6737# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
6738pxor %xmm3,%xmm2
6739
6740# qhasm: xmm2 &= xmm14
6741# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
6742# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
6743pand %xmm14,%xmm2
6744
6745# qhasm: xmm14 ^= xmm13
6746# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
6747# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
6748pxor %xmm13,%xmm14
6749
6750# qhasm: xmm14 &= xmm6
6751# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
6752# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
6753pand %xmm3,%xmm14
6754
6755# qhasm: xmm13 &= xmm7
6756# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
6757# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
6758pand %xmm5,%xmm13
6759
6760# qhasm: xmm14 ^= xmm13
6761# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
6762# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
6763pxor %xmm13,%xmm14
6764
6765# qhasm: xmm13 ^= xmm2
6766# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
6767# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
6768pxor %xmm2,%xmm13
6769
6770# qhasm: xmm4 ^= xmm8
6771# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
6772# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
6773pxor %xmm8,%xmm0
6774
6775# qhasm: xmm0 ^= xmm11
6776# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
6777# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
6778pxor %xmm11,%xmm1
6779
6780# qhasm: xmm7 ^= xmm5
6781# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
6782# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
6783pxor %xmm7,%xmm5
6784
6785# qhasm: xmm6 ^= xmm1
6786# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
6787# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
6788pxor %xmm4,%xmm3
6789
6790# qhasm: xmm3 = xmm7
6791# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
6792# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
6793movdqa %xmm5,%xmm2
6794
6795# qhasm: xmm3 ^= xmm6
6796# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
6797# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
6798pxor %xmm3,%xmm2
6799
6800# qhasm: xmm3 &= xmm4
6801# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
6802# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
6803pand %xmm0,%xmm2
6804
6805# qhasm: xmm4 ^= xmm0
6806# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
6807# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
6808pxor %xmm1,%xmm0
6809
6810# qhasm: xmm4 &= xmm6
6811# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
6812# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
6813pand %xmm3,%xmm0
6814
6815# qhasm: xmm0 &= xmm7
6816# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
6817# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
6818pand %xmm5,%xmm1
6819
6820# qhasm: xmm0 ^= xmm4
6821# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
6822# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
6823pxor %xmm0,%xmm1
6824
6825# qhasm: xmm4 ^= xmm3
6826# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
6827# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
6828pxor %xmm2,%xmm0
6829
6830# qhasm: xmm2 = xmm5
6831# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
6832# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
6833movdqa %xmm7,%xmm2
6834
6835# qhasm: xmm2 ^= xmm1
6836# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
6837# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
6838pxor %xmm4,%xmm2
6839
6840# qhasm: xmm2 &= xmm8
6841# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
6842# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
6843pand %xmm8,%xmm2
6844
6845# qhasm: xmm8 ^= xmm11
6846# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
6847# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
6848pxor %xmm11,%xmm8
6849
6850# qhasm: xmm8 &= xmm1
6851# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
6852# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
6853pand %xmm4,%xmm8
6854
6855# qhasm: xmm11 &= xmm5
6856# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
6857# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
6858pand %xmm7,%xmm11
6859
6860# qhasm: xmm8 ^= xmm11
6861# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
6862# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
6863pxor %xmm11,%xmm8
6864
6865# qhasm: xmm11 ^= xmm2
6866# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
6867# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
6868pxor %xmm2,%xmm11
6869
6870# qhasm: xmm14 ^= xmm4
6871# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
6872# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
6873pxor %xmm0,%xmm14
6874
6875# qhasm: xmm8 ^= xmm4
6876# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
6877# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
6878pxor %xmm0,%xmm8
6879
6880# qhasm: xmm13 ^= xmm0
6881# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
6882# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
6883pxor %xmm1,%xmm13
6884
6885# qhasm: xmm11 ^= xmm0
6886# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
6887# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
6888pxor %xmm1,%xmm11
6889
6890# qhasm: xmm4 = xmm15
6891# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
6892# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
6893movdqa %xmm15,%xmm0
6894
6895# qhasm: xmm0 = xmm9
6896# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
6897# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
6898movdqa %xmm9,%xmm1
6899
6900# qhasm: xmm4 ^= xmm12
6901# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
6902# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
6903pxor %xmm12,%xmm0
6904
6905# qhasm: xmm0 ^= xmm10
6906# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
6907# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
6908pxor %xmm10,%xmm1
6909
6910# qhasm: xmm3 = xmm7
6911# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
6912# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
6913movdqa %xmm5,%xmm2
6914
6915# qhasm: xmm3 ^= xmm6
6916# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
6917# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
6918pxor %xmm3,%xmm2
6919
6920# qhasm: xmm3 &= xmm4
6921# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
6922# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
6923pand %xmm0,%xmm2
6924
6925# qhasm: xmm4 ^= xmm0
6926# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
6927# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
6928pxor %xmm1,%xmm0
6929
6930# qhasm: xmm4 &= xmm6
6931# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
6932# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
6933pand %xmm3,%xmm0
6934
6935# qhasm: xmm0 &= xmm7
6936# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
6937# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
6938pand %xmm5,%xmm1
6939
6940# qhasm: xmm0 ^= xmm4
6941# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
6942# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
6943pxor %xmm0,%xmm1
6944
6945# qhasm: xmm4 ^= xmm3
6946# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
6947# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
6948pxor %xmm2,%xmm0
6949
6950# qhasm: xmm2 = xmm5
6951# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
6952# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
6953movdqa %xmm7,%xmm2
6954
6955# qhasm: xmm2 ^= xmm1
6956# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
6957# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
6958pxor %xmm4,%xmm2
6959
6960# qhasm: xmm2 &= xmm12
6961# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
6962# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
6963pand %xmm12,%xmm2
6964
6965# qhasm: xmm12 ^= xmm10
6966# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
6967# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
6968pxor %xmm10,%xmm12
6969
6970# qhasm: xmm12 &= xmm1
6971# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
6972# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
6973pand %xmm4,%xmm12
6974
6975# qhasm: xmm10 &= xmm5
6976# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
6977# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
6978pand %xmm7,%xmm10
6979
6980# qhasm: xmm12 ^= xmm10
6981# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
6982# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
6983pxor %xmm10,%xmm12
6984
6985# qhasm: xmm10 ^= xmm2
6986# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
6987# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
6988pxor %xmm2,%xmm10
6989
6990# qhasm: xmm7 ^= xmm5
6991# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
6992# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
6993pxor %xmm7,%xmm5
6994
6995# qhasm: xmm6 ^= xmm1
6996# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
6997# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
6998pxor %xmm4,%xmm3
6999
7000# qhasm: xmm3 = xmm7
7001# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
7002# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
7003movdqa %xmm5,%xmm2
7004
7005# qhasm: xmm3 ^= xmm6
7006# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
7007# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
7008pxor %xmm3,%xmm2
7009
7010# qhasm: xmm3 &= xmm15
7011# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
7012# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
7013pand %xmm15,%xmm2
7014
7015# qhasm: xmm15 ^= xmm9
7016# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
7017# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
7018pxor %xmm9,%xmm15
7019
7020# qhasm: xmm15 &= xmm6
7021# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
7022# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
7023pand %xmm3,%xmm15
7024
7025# qhasm: xmm9 &= xmm7
7026# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
7027# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
7028pand %xmm5,%xmm9
7029
7030# qhasm: xmm15 ^= xmm9
7031# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
7032# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
7033pxor %xmm9,%xmm15
7034
7035# qhasm: xmm9 ^= xmm3
7036# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
7037# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
7038pxor %xmm2,%xmm9
7039
7040# qhasm: xmm15 ^= xmm4
7041# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
7042# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
7043pxor %xmm0,%xmm15
7044
7045# qhasm: xmm12 ^= xmm4
7046# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
7047# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
7048pxor %xmm0,%xmm12
7049
7050# qhasm: xmm9 ^= xmm0
7051# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
7052# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
7053pxor %xmm1,%xmm9
7054
7055# qhasm: xmm10 ^= xmm0
7056# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
7057# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
7058pxor %xmm1,%xmm10
7059
7060# qhasm: xmm15 ^= xmm8
7061# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
7062# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
7063pxor %xmm8,%xmm15
7064
7065# qhasm: xmm9 ^= xmm14
7066# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
7067# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
7068pxor %xmm14,%xmm9
7069
7070# qhasm: xmm12 ^= xmm15
7071# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
7072# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
7073pxor %xmm15,%xmm12
7074
7075# qhasm: xmm14 ^= xmm8
7076# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
7077# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
7078pxor %xmm8,%xmm14
7079
7080# qhasm: xmm8 ^= xmm9
7081# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
7082# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
7083pxor %xmm9,%xmm8
7084
7085# qhasm: xmm9 ^= xmm13
7086# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
7087# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
7088pxor %xmm13,%xmm9
7089
7090# qhasm: xmm13 ^= xmm10
7091# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
7092# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
7093pxor %xmm10,%xmm13
7094
7095# qhasm: xmm12 ^= xmm13
7096# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
7097# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
7098pxor %xmm13,%xmm12
7099
7100# qhasm: xmm10 ^= xmm11
7101# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
7102# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
7103pxor %xmm11,%xmm10
7104
7105# qhasm: xmm11 ^= xmm13
7106# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
7107# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
7108pxor %xmm13,%xmm11
7109
7110# qhasm: xmm14 ^= xmm11
7111# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
7112# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
7113pxor %xmm11,%xmm14
7114
7115# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
7116# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
7117# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
7118pshufd $0x93,%xmm8,%xmm0
7119
7120# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
7121# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
7122# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
7123pshufd $0x93,%xmm9,%xmm1
7124
7125# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
7126# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
7127# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
7128pshufd $0x93,%xmm12,%xmm2
7129
7130# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
7131# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
7132# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
7133pshufd $0x93,%xmm14,%xmm3
7134
7135# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
7136# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
7137# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
7138pshufd $0x93,%xmm11,%xmm4
7139
7140# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
7141# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
7142# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
7143pshufd $0x93,%xmm15,%xmm5
7144
7145# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
7146# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
7147# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
7148pshufd $0x93,%xmm10,%xmm6
7149
7150# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
7151# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
7152# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
7153pshufd $0x93,%xmm13,%xmm7
7154
7155# qhasm: xmm8 ^= xmm0
7156# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
7157# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
7158pxor %xmm0,%xmm8
7159
7160# qhasm: xmm9 ^= xmm1
7161# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
7162# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
7163pxor %xmm1,%xmm9
7164
7165# qhasm: xmm12 ^= xmm2
7166# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
7167# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
7168pxor %xmm2,%xmm12
7169
7170# qhasm: xmm14 ^= xmm3
7171# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
7172# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
7173pxor %xmm3,%xmm14
7174
7175# qhasm: xmm11 ^= xmm4
7176# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
7177# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
7178pxor %xmm4,%xmm11
7179
7180# qhasm: xmm15 ^= xmm5
7181# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
7182# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
7183pxor %xmm5,%xmm15
7184
7185# qhasm: xmm10 ^= xmm6
7186# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
7187# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
7188pxor %xmm6,%xmm10
7189
7190# qhasm: xmm13 ^= xmm7
7191# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
7192# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
7193pxor %xmm7,%xmm13
7194
7195# qhasm: xmm0 ^= xmm13
7196# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
7197# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
7198pxor %xmm13,%xmm0
7199
7200# qhasm: xmm1 ^= xmm8
7201# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
7202# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
7203pxor %xmm8,%xmm1
7204
7205# qhasm: xmm2 ^= xmm9
7206# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
7207# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
7208pxor %xmm9,%xmm2
7209
7210# qhasm: xmm1 ^= xmm13
7211# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
7212# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
7213pxor %xmm13,%xmm1
7214
7215# qhasm: xmm3 ^= xmm12
7216# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
7217# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
7218pxor %xmm12,%xmm3
7219
7220# qhasm: xmm4 ^= xmm14
7221# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
7222# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
7223pxor %xmm14,%xmm4
7224
7225# qhasm: xmm5 ^= xmm11
7226# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
7227# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
7228pxor %xmm11,%xmm5
7229
7230# qhasm: xmm3 ^= xmm13
7231# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
7232# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
7233pxor %xmm13,%xmm3
7234
7235# qhasm: xmm6 ^= xmm15
7236# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
7237# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
7238pxor %xmm15,%xmm6
7239
7240# qhasm: xmm7 ^= xmm10
7241# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
7242# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
7243pxor %xmm10,%xmm7
7244
7245# qhasm: xmm4 ^= xmm13
7246# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
7247# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
7248pxor %xmm13,%xmm4
7249
7250# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
7251# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
7252# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
7253pshufd $0x4E,%xmm8,%xmm8
7254
7255# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
7256# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
7257# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
7258pshufd $0x4E,%xmm9,%xmm9
7259
7260# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
7261# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
7262# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
7263pshufd $0x4E,%xmm12,%xmm12
7264
7265# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
7266# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
7267# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
7268pshufd $0x4E,%xmm14,%xmm14
7269
7270# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
7271# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
7272# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
7273pshufd $0x4E,%xmm11,%xmm11
7274
7275# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
7276# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
7277# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
7278pshufd $0x4E,%xmm15,%xmm15
7279
7280# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
7281# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
7282# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
7283pshufd $0x4E,%xmm10,%xmm10
7284
7285# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
7286# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
7287# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
7288pshufd $0x4E,%xmm13,%xmm13
7289
7290# qhasm: xmm0 ^= xmm8
7291# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
7292# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
7293pxor %xmm8,%xmm0
7294
7295# qhasm: xmm1 ^= xmm9
7296# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
7297# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
7298pxor %xmm9,%xmm1
7299
7300# qhasm: xmm2 ^= xmm12
7301# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
7302# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
7303pxor %xmm12,%xmm2
7304
7305# qhasm: xmm3 ^= xmm14
7306# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
7307# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
7308pxor %xmm14,%xmm3
7309
7310# qhasm: xmm4 ^= xmm11
7311# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
7312# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
7313pxor %xmm11,%xmm4
7314
7315# qhasm: xmm5 ^= xmm15
7316# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
7317# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
7318pxor %xmm15,%xmm5
7319
7320# qhasm: xmm6 ^= xmm10
7321# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
7322# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
7323pxor %xmm10,%xmm6
7324
7325# qhasm: xmm7 ^= xmm13
7326# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
7327# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
7328pxor %xmm13,%xmm7
7329
7330# qhasm: xmm0 ^= *(int128 *)(c + 768)
7331# asm 1: pxor 768(<c=int64#4),<xmm0=int6464#1
7332# asm 2: pxor 768(<c=%rcx),<xmm0=%xmm0
7333pxor 768(%rcx),%xmm0
7334
7335# qhasm: shuffle bytes of xmm0 by SR
7336# asm 1: pshufb SR,<xmm0=int6464#1
7337# asm 2: pshufb SR,<xmm0=%xmm0
7338pshufb SR,%xmm0
7339
7340# qhasm: xmm1 ^= *(int128 *)(c + 784)
7341# asm 1: pxor 784(<c=int64#4),<xmm1=int6464#2
7342# asm 2: pxor 784(<c=%rcx),<xmm1=%xmm1
7343pxor 784(%rcx),%xmm1
7344
7345# qhasm: shuffle bytes of xmm1 by SR
7346# asm 1: pshufb SR,<xmm1=int6464#2
7347# asm 2: pshufb SR,<xmm1=%xmm1
7348pshufb SR,%xmm1
7349
7350# qhasm: xmm2 ^= *(int128 *)(c + 800)
7351# asm 1: pxor 800(<c=int64#4),<xmm2=int6464#3
7352# asm 2: pxor 800(<c=%rcx),<xmm2=%xmm2
7353pxor 800(%rcx),%xmm2
7354
7355# qhasm: shuffle bytes of xmm2 by SR
7356# asm 1: pshufb SR,<xmm2=int6464#3
7357# asm 2: pshufb SR,<xmm2=%xmm2
7358pshufb SR,%xmm2
7359
7360# qhasm: xmm3 ^= *(int128 *)(c + 816)
7361# asm 1: pxor 816(<c=int64#4),<xmm3=int6464#4
7362# asm 2: pxor 816(<c=%rcx),<xmm3=%xmm3
7363pxor 816(%rcx),%xmm3
7364
7365# qhasm: shuffle bytes of xmm3 by SR
7366# asm 1: pshufb SR,<xmm3=int6464#4
7367# asm 2: pshufb SR,<xmm3=%xmm3
7368pshufb SR,%xmm3
7369
7370# qhasm: xmm4 ^= *(int128 *)(c + 832)
7371# asm 1: pxor 832(<c=int64#4),<xmm4=int6464#5
7372# asm 2: pxor 832(<c=%rcx),<xmm4=%xmm4
7373pxor 832(%rcx),%xmm4
7374
7375# qhasm: shuffle bytes of xmm4 by SR
7376# asm 1: pshufb SR,<xmm4=int6464#5
7377# asm 2: pshufb SR,<xmm4=%xmm4
7378pshufb SR,%xmm4
7379
7380# qhasm: xmm5 ^= *(int128 *)(c + 848)
7381# asm 1: pxor 848(<c=int64#4),<xmm5=int6464#6
7382# asm 2: pxor 848(<c=%rcx),<xmm5=%xmm5
7383pxor 848(%rcx),%xmm5
7384
7385# qhasm: shuffle bytes of xmm5 by SR
7386# asm 1: pshufb SR,<xmm5=int6464#6
7387# asm 2: pshufb SR,<xmm5=%xmm5
7388pshufb SR,%xmm5
7389
7390# qhasm: xmm6 ^= *(int128 *)(c + 864)
7391# asm 1: pxor 864(<c=int64#4),<xmm6=int6464#7
7392# asm 2: pxor 864(<c=%rcx),<xmm6=%xmm6
7393pxor 864(%rcx),%xmm6
7394
7395# qhasm: shuffle bytes of xmm6 by SR
7396# asm 1: pshufb SR,<xmm6=int6464#7
7397# asm 2: pshufb SR,<xmm6=%xmm6
7398pshufb SR,%xmm6
7399
7400# qhasm: xmm7 ^= *(int128 *)(c + 880)
7401# asm 1: pxor 880(<c=int64#4),<xmm7=int6464#8
7402# asm 2: pxor 880(<c=%rcx),<xmm7=%xmm7
7403pxor 880(%rcx),%xmm7
7404
7405# qhasm: shuffle bytes of xmm7 by SR
7406# asm 1: pshufb SR,<xmm7=int6464#8
7407# asm 2: pshufb SR,<xmm7=%xmm7
7408pshufb SR,%xmm7
7409
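# [added note, inferred, not in the original source] The long run of
# pxor/movdqa/pand/por that follows appears to be the bitsliced AES S-box:
# a linear input transform, a nonlinear core built from ANDs/ORs, and a
# linear output transform computed across the slices xmm0..xmm7.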
7410# qhasm: xmm5 ^= xmm6
7411# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
7412# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
7413pxor %xmm6,%xmm5
7414
7415# qhasm: xmm2 ^= xmm1
7416# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
7417# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
7418pxor %xmm1,%xmm2
7419
7420# qhasm: xmm5 ^= xmm0
7421# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
7422# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
7423pxor %xmm0,%xmm5
7424
7425# qhasm: xmm6 ^= xmm2
7426# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
7427# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
7428pxor %xmm2,%xmm6
7429
7430# qhasm: xmm3 ^= xmm0
7431# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
7432# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
7433pxor %xmm0,%xmm3
7434
7435# qhasm: xmm6 ^= xmm3
7436# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
7437# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
7438pxor %xmm3,%xmm6
7439
7440# qhasm: xmm3 ^= xmm7
7441# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
7442# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
7443pxor %xmm7,%xmm3
7444
7445# qhasm: xmm3 ^= xmm4
7446# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
7447# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
7448pxor %xmm4,%xmm3
7449
7450# qhasm: xmm7 ^= xmm5
7451# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
7452# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
7453pxor %xmm5,%xmm7
7454
7455# qhasm: xmm3 ^= xmm1
7456# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
7457# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
7458pxor %xmm1,%xmm3
7459
7460# qhasm: xmm4 ^= xmm5
7461# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
7462# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
7463pxor %xmm5,%xmm4
7464
7465# qhasm: xmm2 ^= xmm7
7466# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
7467# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
7468pxor %xmm7,%xmm2
7469
7470# qhasm: xmm1 ^= xmm5
7471# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
7472# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
7473pxor %xmm5,%xmm1
7474
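# [added note, inferred, not in the original source] The input linear layer
# seems complete here; the movdqa/pand/por block below looks like the
# nonlinear core of the S-box circuit, evaluated in the temporaries
# xmm8..xmm15.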
7475# qhasm: xmm11 = xmm7
7476# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
7477# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
7478movdqa %xmm7,%xmm8
7479
7480# qhasm: xmm10 = xmm1
7481# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
7482# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
7483movdqa %xmm1,%xmm9
7484
7485# qhasm: xmm9 = xmm5
7486# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
7487# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
7488movdqa %xmm5,%xmm10
7489
7490# qhasm: xmm13 = xmm2
7491# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
7492# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
7493movdqa %xmm2,%xmm11
7494
7495# qhasm: xmm12 = xmm6
7496# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
7497# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
7498movdqa %xmm6,%xmm12
7499
7500# qhasm: xmm11 ^= xmm4
7501# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
7502# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
7503pxor %xmm4,%xmm8
7504
7505# qhasm: xmm10 ^= xmm2
7506# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
7507# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
7508pxor %xmm2,%xmm9
7509
7510# qhasm: xmm9 ^= xmm3
7511# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
7512# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
7513pxor %xmm3,%xmm10
7514
7515# qhasm: xmm13 ^= xmm4
7516# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
7517# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
7518pxor %xmm4,%xmm11
7519
7520# qhasm: xmm12 ^= xmm0
7521# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
7522# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
7523pxor %xmm0,%xmm12
7524
7525# qhasm: xmm14 = xmm11
7526# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
7527# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
7528movdqa %xmm8,%xmm13
7529
7530# qhasm: xmm8 = xmm10
7531# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
7532# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
7533movdqa %xmm9,%xmm14
7534
7535# qhasm: xmm15 = xmm11
7536# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
7537# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
7538movdqa %xmm8,%xmm15
7539
7540# qhasm: xmm10 |= xmm9
7541# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
7542# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
7543por %xmm10,%xmm9
7544
7545# qhasm: xmm11 |= xmm12
7546# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
7547# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
7548por %xmm12,%xmm8
7549
7550# qhasm: xmm15 ^= xmm8
7551# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
7552# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
7553pxor %xmm14,%xmm15
7554
7555# qhasm: xmm14 &= xmm12
7556# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
7557# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
7558pand %xmm12,%xmm13
7559
7560# qhasm: xmm8 &= xmm9
7561# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
7562# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
7563pand %xmm10,%xmm14
7564
7565# qhasm: xmm12 ^= xmm9
7566# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
7567# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
7568pxor %xmm10,%xmm12
7569
7570# qhasm: xmm15 &= xmm12
7571# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
7572# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
7573pand %xmm12,%xmm15
7574
7575# qhasm: xmm12 = xmm3
7576# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
7577# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
7578movdqa %xmm3,%xmm10
7579
7580# qhasm: xmm12 ^= xmm0
7581# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
7582# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
7583pxor %xmm0,%xmm10
7584
7585# qhasm: xmm13 &= xmm12
7586# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
7587# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
7588pand %xmm10,%xmm11
7589
7590# qhasm: xmm11 ^= xmm13
7591# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
7592# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
7593pxor %xmm11,%xmm8
7594
7595# qhasm: xmm10 ^= xmm13
7596# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7597# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7598pxor %xmm11,%xmm9
7599
7600# qhasm: xmm13 = xmm7
7601# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
7602# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
7603movdqa %xmm7,%xmm10
7604
7605# qhasm: xmm13 ^= xmm1
7606# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
7607# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
7608pxor %xmm1,%xmm10
7609
7610# qhasm: xmm12 = xmm5
7611# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
7612# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
7613movdqa %xmm5,%xmm11
7614
7615# qhasm: xmm9 = xmm13
7616# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
7617# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
7618movdqa %xmm10,%xmm12
7619
7620# qhasm: xmm12 ^= xmm6
7621# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
7622# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
7623pxor %xmm6,%xmm11
7624
7625# qhasm: xmm9 |= xmm12
7626# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
7627# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
7628por %xmm11,%xmm12
7629
7630# qhasm: xmm13 &= xmm12
7631# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
7632# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
7633pand %xmm11,%xmm10
7634
7635# qhasm: xmm8 ^= xmm13
7636# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
7637# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
7638pxor %xmm10,%xmm14
7639
7640# qhasm: xmm11 ^= xmm15
7641# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
7642# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
7643pxor %xmm15,%xmm8
7644
7645# qhasm: xmm10 ^= xmm14
7646# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
7647# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
7648pxor %xmm13,%xmm9
7649
7650# qhasm: xmm9 ^= xmm15
7651# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
7652# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
7653pxor %xmm15,%xmm12
7654
7655# qhasm: xmm8 ^= xmm14
7656# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
7657# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
7658pxor %xmm13,%xmm14
7659
7660# qhasm: xmm9 ^= xmm14
7661# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7662# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7663pxor %xmm13,%xmm12
7664
7665# qhasm: xmm12 = xmm2
7666# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
7667# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
7668movdqa %xmm2,%xmm10
7669
7670# qhasm: xmm13 = xmm4
7671# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
7672# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
7673movdqa %xmm4,%xmm11
7674
7675# qhasm: xmm14 = xmm1
7676# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
7677# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
7678movdqa %xmm1,%xmm13
7679
7680# qhasm: xmm15 = xmm7
7681# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
7682# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
7683movdqa %xmm7,%xmm15
7684
7685# qhasm: xmm12 &= xmm3
7686# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
7687# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
7688pand %xmm3,%xmm10
7689
7690# qhasm: xmm13 &= xmm0
7691# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
7692# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
7693pand %xmm0,%xmm11
7694
7695# qhasm: xmm14 &= xmm5
7696# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
7697# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
7698pand %xmm5,%xmm13
7699
7700# qhasm: xmm15 |= xmm6
7701# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
7702# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
7703por %xmm6,%xmm15
7704
7705# qhasm: xmm11 ^= xmm12
7706# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
7707# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
7708pxor %xmm10,%xmm8
7709
7710# qhasm: xmm10 ^= xmm13
7711# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7712# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7713pxor %xmm11,%xmm9
7714
7715# qhasm: xmm9 ^= xmm14
7716# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7717# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7718pxor %xmm13,%xmm12
7719
7720# qhasm: xmm8 ^= xmm15
7721# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
7722# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
7723pxor %xmm15,%xmm14
7724
7725# qhasm: xmm12 = xmm11
7726# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
7727# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
7728movdqa %xmm8,%xmm10
7729
7730# qhasm: xmm12 ^= xmm10
7731# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
7732# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
7733pxor %xmm9,%xmm10
7734
7735# qhasm: xmm11 &= xmm9
7736# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
7737# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
7738pand %xmm12,%xmm8
7739
7740# qhasm: xmm14 = xmm8
7741# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
7742# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
7743movdqa %xmm14,%xmm11
7744
7745# qhasm: xmm14 ^= xmm11
7746# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
7747# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
7748pxor %xmm8,%xmm11
7749
7750# qhasm: xmm15 = xmm12
7751# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
7752# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
7753movdqa %xmm10,%xmm13
7754
7755# qhasm: xmm15 &= xmm14
7756# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
7757# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
7758pand %xmm11,%xmm13
7759
7760# qhasm: xmm15 ^= xmm10
7761# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
7762# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
7763pxor %xmm9,%xmm13
7764
7765# qhasm: xmm13 = xmm9
7766# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
7767# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
7768movdqa %xmm12,%xmm15
7769
7770# qhasm: xmm13 ^= xmm8
7771# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7772# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7773pxor %xmm14,%xmm15
7774
7775# qhasm: xmm11 ^= xmm10
7776# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
7777# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
7778pxor %xmm9,%xmm8
7779
7780# qhasm: xmm13 &= xmm11
7781# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
7782# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
7783pand %xmm8,%xmm15
7784
7785# qhasm: xmm13 ^= xmm8
7786# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7787# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7788pxor %xmm14,%xmm15
7789
7790# qhasm: xmm9 ^= xmm13
7791# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
7792# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
7793pxor %xmm15,%xmm12
7794
7795# qhasm: xmm10 = xmm14
7796# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
7797# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
7798movdqa %xmm11,%xmm8
7799
7800# qhasm: xmm10 ^= xmm13
7801# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
7802# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
7803pxor %xmm15,%xmm8
7804
7805# qhasm: xmm10 &= xmm8
7806# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
7807# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
7808pand %xmm14,%xmm8
7809
7810# qhasm: xmm9 ^= xmm10
7811# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
7812# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
7813pxor %xmm8,%xmm12
7814
7815# qhasm: xmm14 ^= xmm10
7816# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
7817# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
7818pxor %xmm8,%xmm11
7819
7820# qhasm: xmm14 &= xmm15
7821# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
7822# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
7823pand %xmm13,%xmm11
7824
7825# qhasm: xmm14 ^= xmm12
7826# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
7827# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
7828pxor %xmm10,%xmm11
7829
7830# qhasm: xmm12 = xmm6
7831# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
7832# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
7833movdqa %xmm6,%xmm8
7834
7835# qhasm: xmm8 = xmm5
7836# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
7837# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
7838movdqa %xmm5,%xmm9
7839
7840# qhasm: xmm10 = xmm15
7841# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
7842# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
7843movdqa %xmm13,%xmm10
7844
7845# qhasm: xmm10 ^= xmm14
7846# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
7847# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
7848pxor %xmm11,%xmm10
7849
7850# qhasm: xmm10 &= xmm6
7851# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
7852# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
7853pand %xmm6,%xmm10
7854
7855# qhasm: xmm6 ^= xmm5
7856# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
7857# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
7858pxor %xmm5,%xmm6
7859
7860# qhasm: xmm6 &= xmm14
7861# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
7862# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
7863pand %xmm11,%xmm6
7864
7865# qhasm: xmm5 &= xmm15
7866# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
7867# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
7868pand %xmm13,%xmm5
7869
7870# qhasm: xmm6 ^= xmm5
7871# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
7872# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
7873pxor %xmm5,%xmm6
7874
7875# qhasm: xmm5 ^= xmm10
7876# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
7877# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
7878pxor %xmm10,%xmm5
7879
7880# qhasm: xmm12 ^= xmm0
7881# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
7882# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
7883pxor %xmm0,%xmm8
7884
7885# qhasm: xmm8 ^= xmm3
7886# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
7887# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
7888pxor %xmm3,%xmm9
7889
7890# qhasm: xmm15 ^= xmm13
7891# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
7892# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
7893pxor %xmm15,%xmm13
7894
7895# qhasm: xmm14 ^= xmm9
7896# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
7897# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
7898pxor %xmm12,%xmm11
7899
7900# qhasm: xmm11 = xmm15
7901# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
7902# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
7903movdqa %xmm13,%xmm10
7904
7905# qhasm: xmm11 ^= xmm14
7906# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
7907# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
7908pxor %xmm11,%xmm10
7909
7910# qhasm: xmm11 &= xmm12
7911# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
7912# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
7913pand %xmm8,%xmm10
7914
7915# qhasm: xmm12 ^= xmm8
7916# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
7917# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
7918pxor %xmm9,%xmm8
7919
7920# qhasm: xmm12 &= xmm14
7921# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
7922# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
7923pand %xmm11,%xmm8
7924
7925# qhasm: xmm8 &= xmm15
7926# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
7927# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
7928pand %xmm13,%xmm9
7929
7930# qhasm: xmm8 ^= xmm12
7931# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
7932# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
7933pxor %xmm8,%xmm9
7934
7935# qhasm: xmm12 ^= xmm11
7936# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
7937# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
7938pxor %xmm10,%xmm8
7939
7940# qhasm: xmm10 = xmm13
7941# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
7942# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
7943movdqa %xmm15,%xmm10
7944
7945# qhasm: xmm10 ^= xmm9
7946# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
7947# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
7948pxor %xmm12,%xmm10
7949
7950# qhasm: xmm10 &= xmm0
7951# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
7952# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
7953pand %xmm0,%xmm10
7954
7955# qhasm: xmm0 ^= xmm3
7956# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
7957# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
7958pxor %xmm3,%xmm0
7959
7960# qhasm: xmm0 &= xmm9
7961# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
7962# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
7963pand %xmm12,%xmm0
7964
7965# qhasm: xmm3 &= xmm13
7966# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
7967# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
7968pand %xmm15,%xmm3
7969
7970# qhasm: xmm0 ^= xmm3
7971# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
7972# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
7973pxor %xmm3,%xmm0
7974
7975# qhasm: xmm3 ^= xmm10
7976# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
7977# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
7978pxor %xmm10,%xmm3
7979
7980# qhasm: xmm6 ^= xmm12
7981# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
7982# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
7983pxor %xmm8,%xmm6
7984
7985# qhasm: xmm0 ^= xmm12
7986# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
7987# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
7988pxor %xmm8,%xmm0
7989
7990# qhasm: xmm5 ^= xmm8
7991# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
7992# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
7993pxor %xmm9,%xmm5
7994
7995# qhasm: xmm3 ^= xmm8
7996# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
7997# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
7998pxor %xmm9,%xmm3
7999
8000# qhasm: xmm12 = xmm7
8001# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
8002# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
8003movdqa %xmm7,%xmm8
8004
8005# qhasm: xmm8 = xmm1
8006# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
8007# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
8008movdqa %xmm1,%xmm9
8009
8010# qhasm: xmm12 ^= xmm4
8011# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
8012# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
8013pxor %xmm4,%xmm8
8014
8015# qhasm: xmm8 ^= xmm2
8016# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
8017# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
8018pxor %xmm2,%xmm9
8019
8020# qhasm: xmm11 = xmm15
8021# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
8022# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
8023movdqa %xmm13,%xmm10
8024
8025# qhasm: xmm11 ^= xmm14
8026# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
8027# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
8028pxor %xmm11,%xmm10
8029
8030# qhasm: xmm11 &= xmm12
8031# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
8032# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
8033pand %xmm8,%xmm10
8034
8035# qhasm: xmm12 ^= xmm8
8036# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
8037# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
8038pxor %xmm9,%xmm8
8039
8040# qhasm: xmm12 &= xmm14
8041# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
8042# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
8043pand %xmm11,%xmm8
8044
8045# qhasm: xmm8 &= xmm15
8046# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
8047# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
8048pand %xmm13,%xmm9
8049
8050# qhasm: xmm8 ^= xmm12
8051# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
8052# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
8053pxor %xmm8,%xmm9
8054
8055# qhasm: xmm12 ^= xmm11
8056# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
8057# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
8058pxor %xmm10,%xmm8
8059
8060# qhasm: xmm10 = xmm13
8061# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
8062# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
8063movdqa %xmm15,%xmm10
8064
8065# qhasm: xmm10 ^= xmm9
8066# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
8067# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
8068pxor %xmm12,%xmm10
8069
8070# qhasm: xmm10 &= xmm4
8071# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
8072# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
8073pand %xmm4,%xmm10
8074
8075# qhasm: xmm4 ^= xmm2
8076# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
8077# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
8078pxor %xmm2,%xmm4
8079
8080# qhasm: xmm4 &= xmm9
8081# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
8082# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
8083pand %xmm12,%xmm4
8084
8085# qhasm: xmm2 &= xmm13
8086# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
8087# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
8088pand %xmm15,%xmm2
8089
8090# qhasm: xmm4 ^= xmm2
8091# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
8092# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
8093pxor %xmm2,%xmm4
8094
8095# qhasm: xmm2 ^= xmm10
8096# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
8097# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
8098pxor %xmm10,%xmm2
8099
8100# qhasm: xmm15 ^= xmm13
8101# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
8102# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
8103pxor %xmm15,%xmm13
8104
8105# qhasm: xmm14 ^= xmm9
8106# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
8107# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
8108pxor %xmm12,%xmm11
8109
8110# qhasm: xmm11 = xmm15
8111# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
8112# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
8113movdqa %xmm13,%xmm10
8114
8115# qhasm: xmm11 ^= xmm14
8116# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
8117# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
8118pxor %xmm11,%xmm10
8119
8120# qhasm: xmm11 &= xmm7
8121# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
8122# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
8123pand %xmm7,%xmm10
8124
8125# qhasm: xmm7 ^= xmm1
8126# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
8127# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
8128pxor %xmm1,%xmm7
8129
8130# qhasm: xmm7 &= xmm14
8131# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
8132# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
8133pand %xmm11,%xmm7
8134
8135# qhasm: xmm1 &= xmm15
8136# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
8137# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
8138pand %xmm13,%xmm1
8139
8140# qhasm: xmm7 ^= xmm1
8141# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
8142# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
8143pxor %xmm1,%xmm7
8144
8145# qhasm: xmm1 ^= xmm11
8146# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
8147# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
8148pxor %xmm10,%xmm1
8149
8150# qhasm: xmm7 ^= xmm12
8151# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
8152# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
8153pxor %xmm8,%xmm7
8154
8155# qhasm: xmm4 ^= xmm12
8156# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
8157# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
8158pxor %xmm8,%xmm4
8159
8160# qhasm: xmm1 ^= xmm8
8161# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
8162# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
8163pxor %xmm9,%xmm1
8164
8165# qhasm: xmm2 ^= xmm8
8166# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
8167# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
8168pxor %xmm9,%xmm2
8169
8170# qhasm: xmm7 ^= xmm0
8171# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
8172# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
8173pxor %xmm0,%xmm7
8174
8175# qhasm: xmm1 ^= xmm6
8176# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
8177# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
8178pxor %xmm6,%xmm1
8179
8180# qhasm: xmm4 ^= xmm7
8181# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
8182# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
8183pxor %xmm7,%xmm4
8184
8185# qhasm: xmm6 ^= xmm0
8186# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
8187# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
8188pxor %xmm0,%xmm6
8189
8190# qhasm: xmm0 ^= xmm1
8191# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
8192# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
8193pxor %xmm1,%xmm0
8194
8195# qhasm: xmm1 ^= xmm5
8196# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
8197# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
8198pxor %xmm5,%xmm1
8199
8200# qhasm: xmm5 ^= xmm2
8201# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
8202# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
8203pxor %xmm2,%xmm5
8204
8205# qhasm: xmm4 ^= xmm5
8206# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
8207# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
8208pxor %xmm5,%xmm4
8209
8210# qhasm: xmm2 ^= xmm3
8211# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
8212# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
8213pxor %xmm3,%xmm2
8214
8215# qhasm: xmm3 ^= xmm5
8216# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
8217# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
8218pxor %xmm5,%xmm3
8219
8220# qhasm: xmm6 ^= xmm3
8221# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
8222# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
8223pxor %xmm3,%xmm6
8224
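# [added note, inferred, not in the original source] The S-box outputs appear
# to be in place; the pshufd $0x93 and $0x4E dword rotations with XOR
# accumulation below look like the MixColumns step applied to the bitsliced
# state.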
8225# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
8226# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
8227# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
8228pshufd $0x93,%xmm0,%xmm8
8229
8230# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
8231# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
8232# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
8233pshufd $0x93,%xmm1,%xmm9
8234
8235# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
8236# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
8237# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
8238pshufd $0x93,%xmm4,%xmm10
8239
8240# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
8241# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
8242# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
8243pshufd $0x93,%xmm6,%xmm11
8244
8245# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
8246# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
8247# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
8248pshufd $0x93,%xmm3,%xmm12
8249
8250# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
8251# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
8252# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
8253pshufd $0x93,%xmm7,%xmm13
8254
8255# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
8256# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
8257# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
8258pshufd $0x93,%xmm2,%xmm14
8259
8260# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
8261# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
8262# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
8263pshufd $0x93,%xmm5,%xmm15
8264
8265# qhasm: xmm0 ^= xmm8
8266# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8267# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8268pxor %xmm8,%xmm0
8269
8270# qhasm: xmm1 ^= xmm9
8271# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8272# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8273pxor %xmm9,%xmm1
8274
8275# qhasm: xmm4 ^= xmm10
8276# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
8277# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
8278pxor %xmm10,%xmm4
8279
8280# qhasm: xmm6 ^= xmm11
8281# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
8282# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
8283pxor %xmm11,%xmm6
8284
8285# qhasm: xmm3 ^= xmm12
8286# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
8287# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
8288pxor %xmm12,%xmm3
8289
8290# qhasm: xmm7 ^= xmm13
8291# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
8292# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
8293pxor %xmm13,%xmm7
8294
8295# qhasm: xmm2 ^= xmm14
8296# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
8297# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
8298pxor %xmm14,%xmm2
8299
8300# qhasm: xmm5 ^= xmm15
8301# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
8302# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
8303pxor %xmm15,%xmm5
8304
8305# qhasm: xmm8 ^= xmm5
8306# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
8307# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
8308pxor %xmm5,%xmm8
8309
8310# qhasm: xmm9 ^= xmm0
8311# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
8312# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
8313pxor %xmm0,%xmm9
8314
8315# qhasm: xmm10 ^= xmm1
8316# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
8317# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
8318pxor %xmm1,%xmm10
8319
8320# qhasm: xmm9 ^= xmm5
8321# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
8322# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
8323pxor %xmm5,%xmm9
8324
8325# qhasm: xmm11 ^= xmm4
8326# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
8327# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
8328pxor %xmm4,%xmm11
8329
8330# qhasm: xmm12 ^= xmm6
8331# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
8332# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
8333pxor %xmm6,%xmm12
8334
8335# qhasm: xmm13 ^= xmm3
8336# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
8337# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
8338pxor %xmm3,%xmm13
8339
8340# qhasm: xmm11 ^= xmm5
8341# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
8342# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
8343pxor %xmm5,%xmm11
8344
8345# qhasm: xmm14 ^= xmm7
8346# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
8347# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
8348pxor %xmm7,%xmm14
8349
8350# qhasm: xmm15 ^= xmm2
8351# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
8352# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
8353pxor %xmm2,%xmm15
8354
8355# qhasm: xmm12 ^= xmm5
8356# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
8357# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
8358pxor %xmm5,%xmm12
8359
8360# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
8361# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
8362# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
8363pshufd $0x4E,%xmm0,%xmm0
8364
8365# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
8366# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
8367# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
8368pshufd $0x4E,%xmm1,%xmm1
8369
8370# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
8371# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
8372# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
8373pshufd $0x4E,%xmm4,%xmm4
8374
8375# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
8376# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
8377# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
8378pshufd $0x4E,%xmm6,%xmm6
8379
8380# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
8381# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
8382# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
8383pshufd $0x4E,%xmm3,%xmm3
8384
8385# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
8386# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
8387# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
8388pshufd $0x4E,%xmm7,%xmm7
8389
8390# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
8391# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
8392# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
8393pshufd $0x4E,%xmm2,%xmm2
8394
8395# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
8396# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
8397# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
8398pshufd $0x4E,%xmm5,%xmm5
8399
8400# qhasm: xmm8 ^= xmm0
8401# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
8402# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
8403pxor %xmm0,%xmm8
8404
8405# qhasm: xmm9 ^= xmm1
8406# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
8407# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
8408pxor %xmm1,%xmm9
8409
8410# qhasm: xmm10 ^= xmm4
8411# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
8412# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
8413pxor %xmm4,%xmm10
8414
8415# qhasm: xmm11 ^= xmm6
8416# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
8417# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
8418pxor %xmm6,%xmm11
8419
8420# qhasm: xmm12 ^= xmm3
8421# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
8422# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
8423pxor %xmm3,%xmm12
8424
8425# qhasm: xmm13 ^= xmm7
8426# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
8427# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
8428pxor %xmm7,%xmm13
8429
8430# qhasm: xmm14 ^= xmm2
8431# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
8432# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
8433pxor %xmm2,%xmm14
8434
8435# qhasm: xmm15 ^= xmm5
8436# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
8437# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
8438pxor %xmm5,%xmm15
8439
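# [added note, inferred, not in the original source] Next round: the
# constants at c+896..c+1008 below look like the following bitsliced round
# key, XORed into the state registers (now xmm8..xmm15), again followed by
# the SR byte shuffle (ShiftRows) on each slice.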
8440# qhasm: xmm8 ^= *(int128 *)(c + 896)
8441# asm 1: pxor 896(<c=int64#4),<xmm8=int6464#9
8442# asm 2: pxor 896(<c=%rcx),<xmm8=%xmm8
8443pxor 896(%rcx),%xmm8
8444
8445# qhasm: shuffle bytes of xmm8 by SR
8446# asm 1: pshufb SR,<xmm8=int6464#9
8447# asm 2: pshufb SR,<xmm8=%xmm8
8448pshufb SR,%xmm8
8449
8450# qhasm: xmm9 ^= *(int128 *)(c + 912)
8451# asm 1: pxor 912(<c=int64#4),<xmm9=int6464#10
8452# asm 2: pxor 912(<c=%rcx),<xmm9=%xmm9
8453pxor 912(%rcx),%xmm9
8454
8455# qhasm: shuffle bytes of xmm9 by SR
8456# asm 1: pshufb SR,<xmm9=int6464#10
8457# asm 2: pshufb SR,<xmm9=%xmm9
8458pshufb SR,%xmm9
8459
8460# qhasm: xmm10 ^= *(int128 *)(c + 928)
8461# asm 1: pxor 928(<c=int64#4),<xmm10=int6464#11
8462# asm 2: pxor 928(<c=%rcx),<xmm10=%xmm10
8463pxor 928(%rcx),%xmm10
8464
8465# qhasm: shuffle bytes of xmm10 by SR
8466# asm 1: pshufb SR,<xmm10=int6464#11
8467# asm 2: pshufb SR,<xmm10=%xmm10
8468pshufb SR,%xmm10
8469
8470# qhasm: xmm11 ^= *(int128 *)(c + 944)
8471# asm 1: pxor 944(<c=int64#4),<xmm11=int6464#12
8472# asm 2: pxor 944(<c=%rcx),<xmm11=%xmm11
8473pxor 944(%rcx),%xmm11
8474
8475# qhasm: shuffle bytes of xmm11 by SR
8476# asm 1: pshufb SR,<xmm11=int6464#12
8477# asm 2: pshufb SR,<xmm11=%xmm11
8478pshufb SR,%xmm11
8479
8480# qhasm: xmm12 ^= *(int128 *)(c + 960)
8481# asm 1: pxor 960(<c=int64#4),<xmm12=int6464#13
8482# asm 2: pxor 960(<c=%rcx),<xmm12=%xmm12
8483pxor 960(%rcx),%xmm12
8484
8485# qhasm: shuffle bytes of xmm12 by SR
8486# asm 1: pshufb SR,<xmm12=int6464#13
8487# asm 2: pshufb SR,<xmm12=%xmm12
8488pshufb SR,%xmm12
8489
8490# qhasm: xmm13 ^= *(int128 *)(c + 976)
8491# asm 1: pxor 976(<c=int64#4),<xmm13=int6464#14
8492# asm 2: pxor 976(<c=%rcx),<xmm13=%xmm13
8493pxor 976(%rcx),%xmm13
8494
8495# qhasm: shuffle bytes of xmm13 by SR
8496# asm 1: pshufb SR,<xmm13=int6464#14
8497# asm 2: pshufb SR,<xmm13=%xmm13
8498pshufb SR,%xmm13
8499
8500# qhasm: xmm14 ^= *(int128 *)(c + 992)
8501# asm 1: pxor 992(<c=int64#4),<xmm14=int6464#15
8502# asm 2: pxor 992(<c=%rcx),<xmm14=%xmm14
8503pxor 992(%rcx),%xmm14
8504
8505# qhasm: shuffle bytes of xmm14 by SR
8506# asm 1: pshufb SR,<xmm14=int6464#15
8507# asm 2: pshufb SR,<xmm14=%xmm14
8508pshufb SR,%xmm14
8509
8510# qhasm: xmm15 ^= *(int128 *)(c + 1008)
8511# asm 1: pxor 1008(<c=int64#4),<xmm15=int6464#16
8512# asm 2: pxor 1008(<c=%rcx),<xmm15=%xmm15
8513pxor 1008(%rcx),%xmm15
8514
8515# qhasm: shuffle bytes of xmm15 by SR
8516# asm 1: pshufb SR,<xmm15=int6464#16
8517# asm 2: pshufb SR,<xmm15=%xmm15
8518pshufb SR,%xmm15
8519
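# [added note, inferred, not in the original source] As in the previous
# round, the pxor/pand/por sequence below appears to be the bitsliced S-box,
# this time applied to the state held in xmm8..xmm15.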
8520# qhasm: xmm13 ^= xmm14
8521# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
8522# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
8523pxor %xmm14,%xmm13
8524
8525# qhasm: xmm10 ^= xmm9
8526# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
8527# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
8528pxor %xmm9,%xmm10
8529
8530# qhasm: xmm13 ^= xmm8
8531# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
8532# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
8533pxor %xmm8,%xmm13
8534
8535# qhasm: xmm14 ^= xmm10
8536# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
8537# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
8538pxor %xmm10,%xmm14
8539
8540# qhasm: xmm11 ^= xmm8
8541# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
8542# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
8543pxor %xmm8,%xmm11
8544
8545# qhasm: xmm14 ^= xmm11
8546# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
8547# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
8548pxor %xmm11,%xmm14
8549
8550# qhasm: xmm11 ^= xmm15
8551# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
8552# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
8553pxor %xmm15,%xmm11
8554
8555# qhasm: xmm11 ^= xmm12
8556# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
8557# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
8558pxor %xmm12,%xmm11
8559
8560# qhasm: xmm15 ^= xmm13
8561# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
8562# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
8563pxor %xmm13,%xmm15
8564
8565# qhasm: xmm11 ^= xmm9
8566# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
8567# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
8568pxor %xmm9,%xmm11
8569
8570# qhasm: xmm12 ^= xmm13
8571# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
8572# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
8573pxor %xmm13,%xmm12
8574
8575# qhasm: xmm10 ^= xmm15
8576# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
8577# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
8578pxor %xmm15,%xmm10
8579
8580# qhasm: xmm9 ^= xmm13
8581# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
8582# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
8583pxor %xmm13,%xmm9
8584
8585# qhasm: xmm3 = xmm15
8586# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
8587# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
8588movdqa %xmm15,%xmm0
8589
8590# qhasm: xmm2 = xmm9
8591# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
8592# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
8593movdqa %xmm9,%xmm1
8594
8595# qhasm: xmm1 = xmm13
8596# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
8597# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
8598movdqa %xmm13,%xmm2
8599
8600# qhasm: xmm5 = xmm10
8601# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
8602# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
8603movdqa %xmm10,%xmm3
8604
8605# qhasm: xmm4 = xmm14
8606# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
8607# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
8608movdqa %xmm14,%xmm4
8609
8610# qhasm: xmm3 ^= xmm12
8611# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
8612# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
8613pxor %xmm12,%xmm0
8614
8615# qhasm: xmm2 ^= xmm10
8616# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
8617# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
8618pxor %xmm10,%xmm1
8619
8620# qhasm: xmm1 ^= xmm11
8621# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
8622# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
8623pxor %xmm11,%xmm2
8624
8625# qhasm: xmm5 ^= xmm12
8626# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
8627# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
8628pxor %xmm12,%xmm3
8629
8630# qhasm: xmm4 ^= xmm8
8631# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
8632# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
8633pxor %xmm8,%xmm4
8634
8635# qhasm: xmm6 = xmm3
8636# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
8637# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
8638movdqa %xmm0,%xmm5
8639
8640# qhasm: xmm0 = xmm2
8641# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
8642# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
8643movdqa %xmm1,%xmm6
8644
8645# qhasm: xmm7 = xmm3
8646# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
8647# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
8648movdqa %xmm0,%xmm7
8649
8650# qhasm: xmm2 |= xmm1
8651# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
8652# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
8653por %xmm2,%xmm1
8654
8655# qhasm: xmm3 |= xmm4
8656# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
8657# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
8658por %xmm4,%xmm0
8659
8660# qhasm: xmm7 ^= xmm0
8661# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
8662# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
8663pxor %xmm6,%xmm7
8664
8665# qhasm: xmm6 &= xmm4
8666# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
8667# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
8668pand %xmm4,%xmm5
8669
8670# qhasm: xmm0 &= xmm1
8671# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
8672# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
8673pand %xmm2,%xmm6
8674
8675# qhasm: xmm4 ^= xmm1
8676# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
8677# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
8678pxor %xmm2,%xmm4
8679
8680# qhasm: xmm7 &= xmm4
8681# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
8682# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
8683pand %xmm4,%xmm7
8684
8685# qhasm: xmm4 = xmm11
8686# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
8687# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
8688movdqa %xmm11,%xmm2
8689
8690# qhasm: xmm4 ^= xmm8
8691# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
8692# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
8693pxor %xmm8,%xmm2
8694
8695# qhasm: xmm5 &= xmm4
8696# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
8697# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
8698pand %xmm2,%xmm3
8699
8700# qhasm: xmm3 ^= xmm5
8701# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
8702# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
8703pxor %xmm3,%xmm0
8704
8705# qhasm: xmm2 ^= xmm5
8706# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
8707# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
8708pxor %xmm3,%xmm1
8709
8710# qhasm: xmm5 = xmm15
8711# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
8712# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
8713movdqa %xmm15,%xmm2
8714
8715# qhasm: xmm5 ^= xmm9
8716# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
8717# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
8718pxor %xmm9,%xmm2
8719
8720# qhasm: xmm4 = xmm13
8721# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
8722# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
8723movdqa %xmm13,%xmm3
8724
8725# qhasm: xmm1 = xmm5
8726# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
8727# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
8728movdqa %xmm2,%xmm4
8729
8730# qhasm: xmm4 ^= xmm14
8731# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
8732# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
8733pxor %xmm14,%xmm3
8734
8735# qhasm: xmm1 |= xmm4
8736# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
8737# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
8738por %xmm3,%xmm4
8739
8740# qhasm: xmm5 &= xmm4
8741# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
8742# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
8743pand %xmm3,%xmm2
8744
8745# qhasm: xmm0 ^= xmm5
8746# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
8747# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
8748pxor %xmm2,%xmm6
8749
8750# qhasm: xmm3 ^= xmm7
8751# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
8752# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
8753pxor %xmm7,%xmm0
8754
8755# qhasm: xmm2 ^= xmm6
8756# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
8757# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
8758pxor %xmm5,%xmm1
8759
8760# qhasm: xmm1 ^= xmm7
8761# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
8762# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
8763pxor %xmm7,%xmm4
8764
8765# qhasm: xmm0 ^= xmm6
8766# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
8767# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
8768pxor %xmm5,%xmm6
8769
8770# qhasm: xmm1 ^= xmm6
8771# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
8772# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
8773pxor %xmm5,%xmm4
8774
8775# qhasm: xmm4 = xmm10
8776# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
8777# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
8778movdqa %xmm10,%xmm2
8779
8780# qhasm: xmm5 = xmm12
8781# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
8782# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
8783movdqa %xmm12,%xmm3
8784
8785# qhasm: xmm6 = xmm9
8786# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
8787# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
8788movdqa %xmm9,%xmm5
8789
8790# qhasm: xmm7 = xmm15
8791# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
8792# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
8793movdqa %xmm15,%xmm7
8794
8795# qhasm: xmm4 &= xmm11
8796# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
8797# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
8798pand %xmm11,%xmm2
8799
8800# qhasm: xmm5 &= xmm8
8801# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
8802# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
8803pand %xmm8,%xmm3
8804
8805# qhasm: xmm6 &= xmm13
8806# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
8807# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
8808pand %xmm13,%xmm5
8809
8810# qhasm: xmm7 |= xmm14
8811# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
8812# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
8813por %xmm14,%xmm7
8814
8815# qhasm: xmm3 ^= xmm4
8816# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
8817# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
8818pxor %xmm2,%xmm0
8819
8820# qhasm: xmm2 ^= xmm5
8821# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
8822# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
8823pxor %xmm3,%xmm1
8824
8825# qhasm: xmm1 ^= xmm6
8826# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
8827# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
8828pxor %xmm5,%xmm4
8829
8830# qhasm: xmm0 ^= xmm7
8831# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
8832# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
8833pxor %xmm7,%xmm6
8834
8835# qhasm: xmm4 = xmm3
8836# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
8837# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
8838movdqa %xmm0,%xmm2
8839
8840# qhasm: xmm4 ^= xmm2
8841# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
8842# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
8843pxor %xmm1,%xmm2
8844
8845# qhasm: xmm3 &= xmm1
8846# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
8847# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
8848pand %xmm4,%xmm0
8849
8850# qhasm: xmm6 = xmm0
8851# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
8852# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
8853movdqa %xmm6,%xmm3
8854
8855# qhasm: xmm6 ^= xmm3
8856# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
8857# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
8858pxor %xmm0,%xmm3
8859
8860# qhasm: xmm7 = xmm4
8861# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
8862# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
8863movdqa %xmm2,%xmm5
8864
8865# qhasm: xmm7 &= xmm6
8866# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
8867# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
8868pand %xmm3,%xmm5
8869
8870# qhasm: xmm7 ^= xmm2
8871# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
8872# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
8873pxor %xmm1,%xmm5
8874
8875# qhasm: xmm5 = xmm1
8876# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
8877# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
8878movdqa %xmm4,%xmm7
8879
8880# qhasm: xmm5 ^= xmm0
8881# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
8882# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
8883pxor %xmm6,%xmm7
8884
8885# qhasm: xmm3 ^= xmm2
8886# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
8887# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
8888pxor %xmm1,%xmm0
8889
8890# qhasm: xmm5 &= xmm3
8891# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
8892# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
8893pand %xmm0,%xmm7
8894
8895# qhasm: xmm5 ^= xmm0
8896# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
8897# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
8898pxor %xmm6,%xmm7
8899
8900# qhasm: xmm1 ^= xmm5
8901# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
8902# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
8903pxor %xmm7,%xmm4
8904
8905# qhasm: xmm2 = xmm6
8906# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
8907# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
8908movdqa %xmm3,%xmm0
8909
8910# qhasm: xmm2 ^= xmm5
8911# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
8912# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
8913pxor %xmm7,%xmm0
8914
8915# qhasm: xmm2 &= xmm0
8916# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
8917# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
8918pand %xmm6,%xmm0
8919
8920# qhasm: xmm1 ^= xmm2
8921# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
8922# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
8923pxor %xmm0,%xmm4
8924
8925# qhasm: xmm6 ^= xmm2
8926# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
8927# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
8928pxor %xmm0,%xmm3
8929
8930# qhasm: xmm6 &= xmm7
8931# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
8932# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
8933pand %xmm5,%xmm3
8934
8935# qhasm: xmm6 ^= xmm4
8936# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
8937# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
8938pxor %xmm2,%xmm3
8939
8940# qhasm: xmm4 = xmm14
8941# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
8942# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
8943movdqa %xmm14,%xmm0
8944
8945# qhasm: xmm0 = xmm13
8946# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
8947# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
8948movdqa %xmm13,%xmm1
8949
8950# qhasm: xmm2 = xmm7
8951# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
8952# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
8953movdqa %xmm5,%xmm2
8954
8955# qhasm: xmm2 ^= xmm6
8956# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
8957# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
8958pxor %xmm3,%xmm2
8959
8960# qhasm: xmm2 &= xmm14
8961# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
8962# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
8963pand %xmm14,%xmm2
8964
8965# qhasm: xmm14 ^= xmm13
8966# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
8967# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
8968pxor %xmm13,%xmm14
8969
8970# qhasm: xmm14 &= xmm6
8971# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
8972# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
8973pand %xmm3,%xmm14
8974
8975# qhasm: xmm13 &= xmm7
8976# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
8977# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
8978pand %xmm5,%xmm13
8979
8980# qhasm: xmm14 ^= xmm13
8981# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
8982# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
8983pxor %xmm13,%xmm14
8984
8985# qhasm: xmm13 ^= xmm2
8986# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
8987# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
8988pxor %xmm2,%xmm13
8989
8990# qhasm: xmm4 ^= xmm8
8991# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
8992# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
8993pxor %xmm8,%xmm0
8994
8995# qhasm: xmm0 ^= xmm11
8996# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
8997# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
8998pxor %xmm11,%xmm1
8999
9000# qhasm: xmm7 ^= xmm5
9001# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
9002# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
9003pxor %xmm7,%xmm5
9004
9005# qhasm: xmm6 ^= xmm1
9006# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
9007# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
9008pxor %xmm4,%xmm3
9009
9010# qhasm: xmm3 = xmm7
9011# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9012# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9013movdqa %xmm5,%xmm2
9014
9015# qhasm: xmm3 ^= xmm6
9016# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9017# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9018pxor %xmm3,%xmm2
9019
9020# qhasm: xmm3 &= xmm4
9021# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
9022# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
9023pand %xmm0,%xmm2
9024
9025# qhasm: xmm4 ^= xmm0
9026# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
9027# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
9028pxor %xmm1,%xmm0
9029
9030# qhasm: xmm4 &= xmm6
9031# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
9032# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
9033pand %xmm3,%xmm0
9034
9035# qhasm: xmm0 &= xmm7
9036# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
9037# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
9038pand %xmm5,%xmm1
9039
9040# qhasm: xmm0 ^= xmm4
9041# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
9042# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
9043pxor %xmm0,%xmm1
9044
9045# qhasm: xmm4 ^= xmm3
9046# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
9047# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
9048pxor %xmm2,%xmm0
9049
9050# qhasm: xmm2 = xmm5
9051# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
9052# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
9053movdqa %xmm7,%xmm2
9054
9055# qhasm: xmm2 ^= xmm1
9056# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
9057# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
9058pxor %xmm4,%xmm2
9059
9060# qhasm: xmm2 &= xmm8
9061# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
9062# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
9063pand %xmm8,%xmm2
9064
9065# qhasm: xmm8 ^= xmm11
9066# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
9067# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
9068pxor %xmm11,%xmm8
9069
9070# qhasm: xmm8 &= xmm1
9071# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
9072# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
9073pand %xmm4,%xmm8
9074
9075# qhasm: xmm11 &= xmm5
9076# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
9077# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
9078pand %xmm7,%xmm11
9079
9080# qhasm: xmm8 ^= xmm11
9081# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
9082# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
9083pxor %xmm11,%xmm8
9084
9085# qhasm: xmm11 ^= xmm2
9086# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
9087# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
9088pxor %xmm2,%xmm11
9089
9090# qhasm: xmm14 ^= xmm4
9091# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
9092# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
9093pxor %xmm0,%xmm14
9094
9095# qhasm: xmm8 ^= xmm4
9096# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
9097# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
9098pxor %xmm0,%xmm8
9099
9100# qhasm: xmm13 ^= xmm0
9101# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
9102# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
9103pxor %xmm1,%xmm13
9104
9105# qhasm: xmm11 ^= xmm0
9106# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
9107# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
9108pxor %xmm1,%xmm11
9109
9110# qhasm: xmm4 = xmm15
9111# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
9112# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
9113movdqa %xmm15,%xmm0
9114
9115# qhasm: xmm0 = xmm9
9116# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
9117# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
9118movdqa %xmm9,%xmm1
9119
9120# qhasm: xmm4 ^= xmm12
9121# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
9122# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
9123pxor %xmm12,%xmm0
9124
9125# qhasm: xmm0 ^= xmm10
9126# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
9127# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
9128pxor %xmm10,%xmm1
9129
9130# qhasm: xmm3 = xmm7
9131# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9132# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9133movdqa %xmm5,%xmm2
9134
9135# qhasm: xmm3 ^= xmm6
9136# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9137# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9138pxor %xmm3,%xmm2
9139
9140# qhasm: xmm3 &= xmm4
9141# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
9142# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
9143pand %xmm0,%xmm2
9144
9145# qhasm: xmm4 ^= xmm0
9146# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
9147# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
9148pxor %xmm1,%xmm0
9149
9150# qhasm: xmm4 &= xmm6
9151# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
9152# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
9153pand %xmm3,%xmm0
9154
9155# qhasm: xmm0 &= xmm7
9156# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
9157# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
9158pand %xmm5,%xmm1
9159
9160# qhasm: xmm0 ^= xmm4
9161# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
9162# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
9163pxor %xmm0,%xmm1
9164
9165# qhasm: xmm4 ^= xmm3
9166# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
9167# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
9168pxor %xmm2,%xmm0
9169
9170# qhasm: xmm2 = xmm5
9171# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
9172# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
9173movdqa %xmm7,%xmm2
9174
9175# qhasm: xmm2 ^= xmm1
9176# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
9177# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
9178pxor %xmm4,%xmm2
9179
9180# qhasm: xmm2 &= xmm12
9181# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
9182# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
9183pand %xmm12,%xmm2
9184
9185# qhasm: xmm12 ^= xmm10
9186# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
9187# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
9188pxor %xmm10,%xmm12
9189
9190# qhasm: xmm12 &= xmm1
9191# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
9192# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
9193pand %xmm4,%xmm12
9194
9195# qhasm: xmm10 &= xmm5
9196# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
9197# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
9198pand %xmm7,%xmm10
9199
9200# qhasm: xmm12 ^= xmm10
9201# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
9202# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
9203pxor %xmm10,%xmm12
9204
9205# qhasm: xmm10 ^= xmm2
9206# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
9207# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
9208pxor %xmm2,%xmm10
9209
9210# qhasm: xmm7 ^= xmm5
9211# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
9212# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
9213pxor %xmm7,%xmm5
9214
9215# qhasm: xmm6 ^= xmm1
9216# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
9217# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
9218pxor %xmm4,%xmm3
9219
9220# qhasm: xmm3 = xmm7
9221# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9222# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9223movdqa %xmm5,%xmm2
9224
9225# qhasm: xmm3 ^= xmm6
9226# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9227# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9228pxor %xmm3,%xmm2
9229
9230# qhasm: xmm3 &= xmm15
9231# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
9232# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
9233pand %xmm15,%xmm2
9234
9235# qhasm: xmm15 ^= xmm9
9236# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
9237# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
9238pxor %xmm9,%xmm15
9239
9240# qhasm: xmm15 &= xmm6
9241# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
9242# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
9243pand %xmm3,%xmm15
9244
9245# qhasm: xmm9 &= xmm7
9246# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
9247# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
9248pand %xmm5,%xmm9
9249
9250# qhasm: xmm15 ^= xmm9
9251# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
9252# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
9253pxor %xmm9,%xmm15
9254
9255# qhasm: xmm9 ^= xmm3
9256# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
9257# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
9258pxor %xmm2,%xmm9
9259
9260# qhasm: xmm15 ^= xmm4
9261# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
9262# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
9263pxor %xmm0,%xmm15
9264
9265# qhasm: xmm12 ^= xmm4
9266# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
9267# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
9268pxor %xmm0,%xmm12
9269
9270# qhasm: xmm9 ^= xmm0
9271# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
9272# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
9273pxor %xmm1,%xmm9
9274
9275# qhasm: xmm10 ^= xmm0
9276# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
9277# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
9278pxor %xmm1,%xmm10
9279
9280# qhasm: xmm15 ^= xmm8
9281# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
9282# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
9283pxor %xmm8,%xmm15
9284
9285# qhasm: xmm9 ^= xmm14
9286# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
9287# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
9288pxor %xmm14,%xmm9
9289
9290# qhasm: xmm12 ^= xmm15
9291# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
9292# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
9293pxor %xmm15,%xmm12
9294
9295# qhasm: xmm14 ^= xmm8
9296# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
9297# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
9298pxor %xmm8,%xmm14
9299
9300# qhasm: xmm8 ^= xmm9
9301# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
9302# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
9303pxor %xmm9,%xmm8
9304
9305# qhasm: xmm9 ^= xmm13
9306# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
9307# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
9308pxor %xmm13,%xmm9
9309
9310# qhasm: xmm13 ^= xmm10
9311# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
9312# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
9313pxor %xmm10,%xmm13
9314
9315# qhasm: xmm12 ^= xmm13
9316# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
9317# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
9318pxor %xmm13,%xmm12
9319
9320# qhasm: xmm10 ^= xmm11
9321# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
9322# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
9323pxor %xmm11,%xmm10
9324
9325# qhasm: xmm11 ^= xmm13
9326# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
9327# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
9328pxor %xmm13,%xmm11
9329
9330# qhasm: xmm14 ^= xmm11
9331# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
9332# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
9333pxor %xmm11,%xmm14
9334
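# note (added annotation, not part of the generated qhasm output): the
# pshufd $0x93 / pshufd $0x4E shuffles and pxor chains below appear to
# implement the MixColumns step of a bitsliced AES round by rotating the
# 32-bit words of each state register and XORing the rotated copies in.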
9335# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
9336# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
9337# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
9338pshufd $0x93,%xmm8,%xmm0
9339
9340# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
9341# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
9342# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
9343pshufd $0x93,%xmm9,%xmm1
9344
9345# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
9346# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
9347# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
9348pshufd $0x93,%xmm12,%xmm2
9349
9350# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
9351# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
9352# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
9353pshufd $0x93,%xmm14,%xmm3
9354
9355# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
9356# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
9357# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
9358pshufd $0x93,%xmm11,%xmm4
9359
9360# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
9361# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
9362# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
9363pshufd $0x93,%xmm15,%xmm5
9364
9365# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
9366# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
9367# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
9368pshufd $0x93,%xmm10,%xmm6
9369
9370# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
9371# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
9372# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
9373pshufd $0x93,%xmm13,%xmm7
9374
9375# qhasm: xmm8 ^= xmm0
9376# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
9377# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
9378pxor %xmm0,%xmm8
9379
9380# qhasm: xmm9 ^= xmm1
9381# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
9382# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
9383pxor %xmm1,%xmm9
9384
9385# qhasm: xmm12 ^= xmm2
9386# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
9387# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
9388pxor %xmm2,%xmm12
9389
9390# qhasm: xmm14 ^= xmm3
9391# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
9392# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
9393pxor %xmm3,%xmm14
9394
9395# qhasm: xmm11 ^= xmm4
9396# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
9397# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
9398pxor %xmm4,%xmm11
9399
9400# qhasm: xmm15 ^= xmm5
9401# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
9402# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
9403pxor %xmm5,%xmm15
9404
9405# qhasm: xmm10 ^= xmm6
9406# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
9407# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
9408pxor %xmm6,%xmm10
9409
9410# qhasm: xmm13 ^= xmm7
9411# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
9412# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
9413pxor %xmm7,%xmm13
9414
9415# qhasm: xmm0 ^= xmm13
9416# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
9417# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
9418pxor %xmm13,%xmm0
9419
9420# qhasm: xmm1 ^= xmm8
9421# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
9422# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
9423pxor %xmm8,%xmm1
9424
9425# qhasm: xmm2 ^= xmm9
9426# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
9427# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
9428pxor %xmm9,%xmm2
9429
9430# qhasm: xmm1 ^= xmm13
9431# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
9432# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
9433pxor %xmm13,%xmm1
9434
9435# qhasm: xmm3 ^= xmm12
9436# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
9437# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
9438pxor %xmm12,%xmm3
9439
9440# qhasm: xmm4 ^= xmm14
9441# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
9442# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
9443pxor %xmm14,%xmm4
9444
9445# qhasm: xmm5 ^= xmm11
9446# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
9447# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
9448pxor %xmm11,%xmm5
9449
9450# qhasm: xmm3 ^= xmm13
9451# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
9452# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
9453pxor %xmm13,%xmm3
9454
9455# qhasm: xmm6 ^= xmm15
9456# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
9457# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
9458pxor %xmm15,%xmm6
9459
9460# qhasm: xmm7 ^= xmm10
9461# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
9462# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
9463pxor %xmm10,%xmm7
9464
9465# qhasm: xmm4 ^= xmm13
9466# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
9467# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
9468pxor %xmm13,%xmm4
9469
9470# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
9471# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
9472# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
9473pshufd $0x4E,%xmm8,%xmm8
9474
9475# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
9476# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
9477# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
9478pshufd $0x4E,%xmm9,%xmm9
9479
9480# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
9481# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
9482# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
9483pshufd $0x4E,%xmm12,%xmm12
9484
9485# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
9486# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
9487# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
9488pshufd $0x4E,%xmm14,%xmm14
9489
9490# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
9491# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
9492# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
9493pshufd $0x4E,%xmm11,%xmm11
9494
9495# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
9496# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
9497# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
9498pshufd $0x4E,%xmm15,%xmm15
9499
9500# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
9501# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
9502# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
9503pshufd $0x4E,%xmm10,%xmm10
9504
9505# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
9506# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
9507# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
9508pshufd $0x4E,%xmm13,%xmm13
9509
9510# qhasm: xmm0 ^= xmm8
9511# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9512# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9513pxor %xmm8,%xmm0
9514
9515# qhasm: xmm1 ^= xmm9
9516# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9517# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9518pxor %xmm9,%xmm1
9519
9520# qhasm: xmm2 ^= xmm12
9521# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9522# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9523pxor %xmm12,%xmm2
9524
9525# qhasm: xmm3 ^= xmm14
9526# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9527# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9528pxor %xmm14,%xmm3
9529
9530# qhasm: xmm4 ^= xmm11
9531# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9532# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9533pxor %xmm11,%xmm4
9534
9535# qhasm: xmm5 ^= xmm15
9536# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9537# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9538pxor %xmm15,%xmm5
9539
9540# qhasm: xmm6 ^= xmm10
9541# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9542# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9543pxor %xmm10,%xmm6
9544
9545# qhasm: xmm7 ^= xmm13
9546# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9547# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9548pxor %xmm13,%xmm7
9549
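# note (added annotation): the block below appears to XOR the next 128-byte
# bitsliced round key (offsets 1024..1136 from the key-schedule pointer in
# %rcx) into the eight state registers and then apply the ShiftRows byte
# permutation to each of them via the SR shuffle mask.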
9550# qhasm: xmm0 ^= *(int128 *)(c + 1024)
9551# asm 1: pxor 1024(<c=int64#4),<xmm0=int6464#1
9552# asm 2: pxor 1024(<c=%rcx),<xmm0=%xmm0
9553pxor 1024(%rcx),%xmm0
9554
9555# qhasm: shuffle bytes of xmm0 by SR
9556# asm 1: pshufb SR,<xmm0=int6464#1
9557# asm 2: pshufb SR,<xmm0=%xmm0
9558pshufb SR,%xmm0
9559
9560# qhasm: xmm1 ^= *(int128 *)(c + 1040)
9561# asm 1: pxor 1040(<c=int64#4),<xmm1=int6464#2
9562# asm 2: pxor 1040(<c=%rcx),<xmm1=%xmm1
9563pxor 1040(%rcx),%xmm1
9564
9565# qhasm: shuffle bytes of xmm1 by SR
9566# asm 1: pshufb SR,<xmm1=int6464#2
9567# asm 2: pshufb SR,<xmm1=%xmm1
9568pshufb SR,%xmm1
9569
9570# qhasm: xmm2 ^= *(int128 *)(c + 1056)
9571# asm 1: pxor 1056(<c=int64#4),<xmm2=int6464#3
9572# asm 2: pxor 1056(<c=%rcx),<xmm2=%xmm2
9573pxor 1056(%rcx),%xmm2
9574
9575# qhasm: shuffle bytes of xmm2 by SR
9576# asm 1: pshufb SR,<xmm2=int6464#3
9577# asm 2: pshufb SR,<xmm2=%xmm2
9578pshufb SR,%xmm2
9579
9580# qhasm: xmm3 ^= *(int128 *)(c + 1072)
9581# asm 1: pxor 1072(<c=int64#4),<xmm3=int6464#4
9582# asm 2: pxor 1072(<c=%rcx),<xmm3=%xmm3
9583pxor 1072(%rcx),%xmm3
9584
9585# qhasm: shuffle bytes of xmm3 by SR
9586# asm 1: pshufb SR,<xmm3=int6464#4
9587# asm 2: pshufb SR,<xmm3=%xmm3
9588pshufb SR,%xmm3
9589
9590# qhasm: xmm4 ^= *(int128 *)(c + 1088)
9591# asm 1: pxor 1088(<c=int64#4),<xmm4=int6464#5
9592# asm 2: pxor 1088(<c=%rcx),<xmm4=%xmm4
9593pxor 1088(%rcx),%xmm4
9594
9595# qhasm: shuffle bytes of xmm4 by SR
9596# asm 1: pshufb SR,<xmm4=int6464#5
9597# asm 2: pshufb SR,<xmm4=%xmm4
9598pshufb SR,%xmm4
9599
9600# qhasm: xmm5 ^= *(int128 *)(c + 1104)
9601# asm 1: pxor 1104(<c=int64#4),<xmm5=int6464#6
9602# asm 2: pxor 1104(<c=%rcx),<xmm5=%xmm5
9603pxor 1104(%rcx),%xmm5
9604
9605# qhasm: shuffle bytes of xmm5 by SR
9606# asm 1: pshufb SR,<xmm5=int6464#6
9607# asm 2: pshufb SR,<xmm5=%xmm5
9608pshufb SR,%xmm5
9609
9610# qhasm: xmm6 ^= *(int128 *)(c + 1120)
9611# asm 1: pxor 1120(<c=int64#4),<xmm6=int6464#7
9612# asm 2: pxor 1120(<c=%rcx),<xmm6=%xmm6
9613pxor 1120(%rcx),%xmm6
9614
9615# qhasm: shuffle bytes of xmm6 by SR
9616# asm 1: pshufb SR,<xmm6=int6464#7
9617# asm 2: pshufb SR,<xmm6=%xmm6
9618pshufb SR,%xmm6
9619
9620# qhasm: xmm7 ^= *(int128 *)(c + 1136)
9621# asm 1: pxor 1136(<c=int64#4),<xmm7=int6464#8
9622# asm 2: pxor 1136(<c=%rcx),<xmm7=%xmm7
9623pxor 1136(%rcx),%xmm7
9624
9625# qhasm: shuffle bytes of xmm7 by SR
9626# asm 1: pshufb SR,<xmm7=int6464#8
9627# asm 2: pshufb SR,<xmm7=%xmm7
9628pshufb SR,%xmm7
9629
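# note (added annotation): the long movdqa/pxor/pand/por sequence that
# follows appears to be the bitsliced AES S-box (SubBytes), evaluated with
# boolean operations on the eight state registers instead of table lookups.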
9630# qhasm: xmm5 ^= xmm6
9631# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
9632# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
9633pxor %xmm6,%xmm5
9634
9635# qhasm: xmm2 ^= xmm1
9636# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
9637# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
9638pxor %xmm1,%xmm2
9639
9640# qhasm: xmm5 ^= xmm0
9641# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
9642# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
9643pxor %xmm0,%xmm5
9644
9645# qhasm: xmm6 ^= xmm2
9646# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
9647# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
9648pxor %xmm2,%xmm6
9649
9650# qhasm: xmm3 ^= xmm0
9651# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
9652# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
9653pxor %xmm0,%xmm3
9654
9655# qhasm: xmm6 ^= xmm3
9656# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
9657# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
9658pxor %xmm3,%xmm6
9659
9660# qhasm: xmm3 ^= xmm7
9661# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
9662# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
9663pxor %xmm7,%xmm3
9664
9665# qhasm: xmm3 ^= xmm4
9666# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
9667# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
9668pxor %xmm4,%xmm3
9669
9670# qhasm: xmm7 ^= xmm5
9671# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
9672# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
9673pxor %xmm5,%xmm7
9674
9675# qhasm: xmm3 ^= xmm1
9676# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
9677# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
9678pxor %xmm1,%xmm3
9679
9680# qhasm: xmm4 ^= xmm5
9681# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
9682# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
9683pxor %xmm5,%xmm4
9684
9685# qhasm: xmm2 ^= xmm7
9686# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
9687# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
9688pxor %xmm7,%xmm2
9689
9690# qhasm: xmm1 ^= xmm5
9691# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
9692# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
9693pxor %xmm5,%xmm1
9694
9695# qhasm: xmm11 = xmm7
9696# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
9697# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
9698movdqa %xmm7,%xmm8
9699
9700# qhasm: xmm10 = xmm1
9701# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
9702# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
9703movdqa %xmm1,%xmm9
9704
9705# qhasm: xmm9 = xmm5
9706# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
9707# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
9708movdqa %xmm5,%xmm10
9709
9710# qhasm: xmm13 = xmm2
9711# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
9712# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
9713movdqa %xmm2,%xmm11
9714
9715# qhasm: xmm12 = xmm6
9716# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
9717# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
9718movdqa %xmm6,%xmm12
9719
9720# qhasm: xmm11 ^= xmm4
9721# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
9722# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
9723pxor %xmm4,%xmm8
9724
9725# qhasm: xmm10 ^= xmm2
9726# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
9727# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
9728pxor %xmm2,%xmm9
9729
9730# qhasm: xmm9 ^= xmm3
9731# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
9732# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
9733pxor %xmm3,%xmm10
9734
9735# qhasm: xmm13 ^= xmm4
9736# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
9737# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
9738pxor %xmm4,%xmm11
9739
9740# qhasm: xmm12 ^= xmm0
9741# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
9742# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
9743pxor %xmm0,%xmm12
9744
9745# qhasm: xmm14 = xmm11
9746# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
9747# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
9748movdqa %xmm8,%xmm13
9749
9750# qhasm: xmm8 = xmm10
9751# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
9752# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
9753movdqa %xmm9,%xmm14
9754
9755# qhasm: xmm15 = xmm11
9756# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
9757# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
9758movdqa %xmm8,%xmm15
9759
9760# qhasm: xmm10 |= xmm9
9761# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
9762# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
9763por %xmm10,%xmm9
9764
9765# qhasm: xmm11 |= xmm12
9766# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
9767# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
9768por %xmm12,%xmm8
9769
9770# qhasm: xmm15 ^= xmm8
9771# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
9772# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
9773pxor %xmm14,%xmm15
9774
9775# qhasm: xmm14 &= xmm12
9776# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
9777# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
9778pand %xmm12,%xmm13
9779
9780# qhasm: xmm8 &= xmm9
9781# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
9782# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
9783pand %xmm10,%xmm14
9784
9785# qhasm: xmm12 ^= xmm9
9786# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
9787# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
9788pxor %xmm10,%xmm12
9789
9790# qhasm: xmm15 &= xmm12
9791# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
9792# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
9793pand %xmm12,%xmm15
9794
9795# qhasm: xmm12 = xmm3
9796# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
9797# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
9798movdqa %xmm3,%xmm10
9799
9800# qhasm: xmm12 ^= xmm0
9801# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
9802# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
9803pxor %xmm0,%xmm10
9804
9805# qhasm: xmm13 &= xmm12
9806# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
9807# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
9808pand %xmm10,%xmm11
9809
9810# qhasm: xmm11 ^= xmm13
9811# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
9812# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
9813pxor %xmm11,%xmm8
9814
9815# qhasm: xmm10 ^= xmm13
9816# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
9817# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
9818pxor %xmm11,%xmm9
9819
9820# qhasm: xmm13 = xmm7
9821# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
9822# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
9823movdqa %xmm7,%xmm10
9824
9825# qhasm: xmm13 ^= xmm1
9826# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
9827# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
9828pxor %xmm1,%xmm10
9829
9830# qhasm: xmm12 = xmm5
9831# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
9832# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
9833movdqa %xmm5,%xmm11
9834
9835# qhasm: xmm9 = xmm13
9836# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
9837# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
9838movdqa %xmm10,%xmm12
9839
9840# qhasm: xmm12 ^= xmm6
9841# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
9842# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
9843pxor %xmm6,%xmm11
9844
9845# qhasm: xmm9 |= xmm12
9846# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
9847# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
9848por %xmm11,%xmm12
9849
9850# qhasm: xmm13 &= xmm12
9851# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
9852# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
9853pand %xmm11,%xmm10
9854
9855# qhasm: xmm8 ^= xmm13
9856# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
9857# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
9858pxor %xmm10,%xmm14
9859
9860# qhasm: xmm11 ^= xmm15
9861# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
9862# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
9863pxor %xmm15,%xmm8
9864
9865# qhasm: xmm10 ^= xmm14
9866# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
9867# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
9868pxor %xmm13,%xmm9
9869
9870# qhasm: xmm9 ^= xmm15
9871# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
9872# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
9873pxor %xmm15,%xmm12
9874
9875# qhasm: xmm8 ^= xmm14
9876# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
9877# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
9878pxor %xmm13,%xmm14
9879
9880# qhasm: xmm9 ^= xmm14
9881# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
9882# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
9883pxor %xmm13,%xmm12
9884
9885# qhasm: xmm12 = xmm2
9886# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
9887# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
9888movdqa %xmm2,%xmm10
9889
9890# qhasm: xmm13 = xmm4
9891# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
9892# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
9893movdqa %xmm4,%xmm11
9894
9895# qhasm: xmm14 = xmm1
9896# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
9897# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
9898movdqa %xmm1,%xmm13
9899
9900# qhasm: xmm15 = xmm7
9901# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
9902# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
9903movdqa %xmm7,%xmm15
9904
9905# qhasm: xmm12 &= xmm3
9906# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
9907# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
9908pand %xmm3,%xmm10
9909
9910# qhasm: xmm13 &= xmm0
9911# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
9912# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
9913pand %xmm0,%xmm11
9914
9915# qhasm: xmm14 &= xmm5
9916# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
9917# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
9918pand %xmm5,%xmm13
9919
9920# qhasm: xmm15 |= xmm6
9921# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
9922# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
9923por %xmm6,%xmm15
9924
9925# qhasm: xmm11 ^= xmm12
9926# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
9927# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
9928pxor %xmm10,%xmm8
9929
9930# qhasm: xmm10 ^= xmm13
9931# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
9932# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
9933pxor %xmm11,%xmm9
9934
9935# qhasm: xmm9 ^= xmm14
9936# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
9937# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
9938pxor %xmm13,%xmm12
9939
9940# qhasm: xmm8 ^= xmm15
9941# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
9942# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
9943pxor %xmm15,%xmm14
9944
9945# qhasm: xmm12 = xmm11
9946# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
9947# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
9948movdqa %xmm8,%xmm10
9949
9950# qhasm: xmm12 ^= xmm10
9951# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
9952# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
9953pxor %xmm9,%xmm10
9954
9955# qhasm: xmm11 &= xmm9
9956# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
9957# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
9958pand %xmm12,%xmm8
9959
9960# qhasm: xmm14 = xmm8
9961# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
9962# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
9963movdqa %xmm14,%xmm11
9964
9965# qhasm: xmm14 ^= xmm11
9966# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
9967# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
9968pxor %xmm8,%xmm11
9969
9970# qhasm: xmm15 = xmm12
9971# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
9972# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
9973movdqa %xmm10,%xmm13
9974
9975# qhasm: xmm15 &= xmm14
9976# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
9977# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
9978pand %xmm11,%xmm13
9979
9980# qhasm: xmm15 ^= xmm10
9981# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
9982# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
9983pxor %xmm9,%xmm13
9984
9985# qhasm: xmm13 = xmm9
9986# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
9987# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
9988movdqa %xmm12,%xmm15
9989
9990# qhasm: xmm13 ^= xmm8
9991# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
9992# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
9993pxor %xmm14,%xmm15
9994
9995# qhasm: xmm11 ^= xmm10
9996# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
9997# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
9998pxor %xmm9,%xmm8
9999
10000# qhasm: xmm13 &= xmm11
10001# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
10002# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
10003pand %xmm8,%xmm15
10004
10005# qhasm: xmm13 ^= xmm8
10006# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
10007# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
10008pxor %xmm14,%xmm15
10009
10010# qhasm: xmm9 ^= xmm13
10011# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
10012# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
10013pxor %xmm15,%xmm12
10014
10015# qhasm: xmm10 = xmm14
10016# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
10017# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
10018movdqa %xmm11,%xmm8
10019
10020# qhasm: xmm10 ^= xmm13
10021# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
10022# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
10023pxor %xmm15,%xmm8
10024
10025# qhasm: xmm10 &= xmm8
10026# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
10027# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
10028pand %xmm14,%xmm8
10029
10030# qhasm: xmm9 ^= xmm10
10031# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
10032# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
10033pxor %xmm8,%xmm12
10034
10035# qhasm: xmm14 ^= xmm10
10036# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
10037# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
10038pxor %xmm8,%xmm11
10039
10040# qhasm: xmm14 &= xmm15
10041# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
10042# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
10043pand %xmm13,%xmm11
10044
10045# qhasm: xmm14 ^= xmm12
10046# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
10047# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
10048pxor %xmm10,%xmm11
10049
10050# qhasm: xmm12 = xmm6
10051# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
10052# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
10053movdqa %xmm6,%xmm8
10054
10055# qhasm: xmm8 = xmm5
10056# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
10057# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
10058movdqa %xmm5,%xmm9
10059
10060# qhasm: xmm10 = xmm15
10061# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
10062# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
10063movdqa %xmm13,%xmm10
10064
10065# qhasm: xmm10 ^= xmm14
10066# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
10067# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
10068pxor %xmm11,%xmm10
10069
10070# qhasm: xmm10 &= xmm6
10071# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
10072# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
10073pand %xmm6,%xmm10
10074
10075# qhasm: xmm6 ^= xmm5
10076# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
10077# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
10078pxor %xmm5,%xmm6
10079
10080# qhasm: xmm6 &= xmm14
10081# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
10082# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
10083pand %xmm11,%xmm6
10084
10085# qhasm: xmm5 &= xmm15
10086# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
10087# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
10088pand %xmm13,%xmm5
10089
10090# qhasm: xmm6 ^= xmm5
10091# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
10092# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
10093pxor %xmm5,%xmm6
10094
10095# qhasm: xmm5 ^= xmm10
10096# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
10097# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
10098pxor %xmm10,%xmm5
10099
10100# qhasm: xmm12 ^= xmm0
10101# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
10102# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
10103pxor %xmm0,%xmm8
10104
10105# qhasm: xmm8 ^= xmm3
10106# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
10107# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
10108pxor %xmm3,%xmm9
10109
10110# qhasm: xmm15 ^= xmm13
10111# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10112# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10113pxor %xmm15,%xmm13
10114
10115# qhasm: xmm14 ^= xmm9
10116# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10117# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10118pxor %xmm12,%xmm11
10119
10120# qhasm: xmm11 = xmm15
10121# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10122# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10123movdqa %xmm13,%xmm10
10124
10125# qhasm: xmm11 ^= xmm14
10126# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10127# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10128pxor %xmm11,%xmm10
10129
10130# qhasm: xmm11 &= xmm12
10131# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10132# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10133pand %xmm8,%xmm10
10134
10135# qhasm: xmm12 ^= xmm8
10136# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10137# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10138pxor %xmm9,%xmm8
10139
10140# qhasm: xmm12 &= xmm14
10141# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10142# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10143pand %xmm11,%xmm8
10144
10145# qhasm: xmm8 &= xmm15
10146# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10147# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10148pand %xmm13,%xmm9
10149
10150# qhasm: xmm8 ^= xmm12
10151# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10152# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10153pxor %xmm8,%xmm9
10154
10155# qhasm: xmm12 ^= xmm11
10156# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10157# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10158pxor %xmm10,%xmm8
10159
10160# qhasm: xmm10 = xmm13
10161# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10162# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10163movdqa %xmm15,%xmm10
10164
10165# qhasm: xmm10 ^= xmm9
10166# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10167# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10168pxor %xmm12,%xmm10
10169
10170# qhasm: xmm10 &= xmm0
10171# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
10172# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
10173pand %xmm0,%xmm10
10174
10175# qhasm: xmm0 ^= xmm3
10176# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
10177# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
10178pxor %xmm3,%xmm0
10179
10180# qhasm: xmm0 &= xmm9
10181# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
10182# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
10183pand %xmm12,%xmm0
10184
10185# qhasm: xmm3 &= xmm13
10186# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
10187# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
10188pand %xmm15,%xmm3
10189
10190# qhasm: xmm0 ^= xmm3
10191# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
10192# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
10193pxor %xmm3,%xmm0
10194
10195# qhasm: xmm3 ^= xmm10
10196# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
10197# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
10198pxor %xmm10,%xmm3
10199
10200# qhasm: xmm6 ^= xmm12
10201# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
10202# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
10203pxor %xmm8,%xmm6
10204
10205# qhasm: xmm0 ^= xmm12
10206# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
10207# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
10208pxor %xmm8,%xmm0
10209
10210# qhasm: xmm5 ^= xmm8
10211# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
10212# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
10213pxor %xmm9,%xmm5
10214
10215# qhasm: xmm3 ^= xmm8
10216# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
10217# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
10218pxor %xmm9,%xmm3
10219
10220# qhasm: xmm12 = xmm7
10221# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
10222# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
10223movdqa %xmm7,%xmm8
10224
10225# qhasm: xmm8 = xmm1
10226# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
10227# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
10228movdqa %xmm1,%xmm9
10229
10230# qhasm: xmm12 ^= xmm4
10231# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
10232# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
10233pxor %xmm4,%xmm8
10234
10235# qhasm: xmm8 ^= xmm2
10236# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
10237# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
10238pxor %xmm2,%xmm9
10239
10240# qhasm: xmm11 = xmm15
10241# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10242# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10243movdqa %xmm13,%xmm10
10244
10245# qhasm: xmm11 ^= xmm14
10246# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10247# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10248pxor %xmm11,%xmm10
10249
10250# qhasm: xmm11 &= xmm12
10251# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10252# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10253pand %xmm8,%xmm10
10254
10255# qhasm: xmm12 ^= xmm8
10256# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10257# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10258pxor %xmm9,%xmm8
10259
10260# qhasm: xmm12 &= xmm14
10261# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10262# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10263pand %xmm11,%xmm8
10264
10265# qhasm: xmm8 &= xmm15
10266# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10267# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10268pand %xmm13,%xmm9
10269
10270# qhasm: xmm8 ^= xmm12
10271# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10272# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10273pxor %xmm8,%xmm9
10274
10275# qhasm: xmm12 ^= xmm11
10276# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10277# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10278pxor %xmm10,%xmm8
10279
10280# qhasm: xmm10 = xmm13
10281# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10282# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10283movdqa %xmm15,%xmm10
10284
10285# qhasm: xmm10 ^= xmm9
10286# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10287# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10288pxor %xmm12,%xmm10
10289
10290# qhasm: xmm10 &= xmm4
10291# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
10292# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
10293pand %xmm4,%xmm10
10294
10295# qhasm: xmm4 ^= xmm2
10296# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
10297# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
10298pxor %xmm2,%xmm4
10299
10300# qhasm: xmm4 &= xmm9
10301# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
10302# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
10303pand %xmm12,%xmm4
10304
10305# qhasm: xmm2 &= xmm13
10306# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
10307# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
10308pand %xmm15,%xmm2
10309
10310# qhasm: xmm4 ^= xmm2
10311# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
10312# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
10313pxor %xmm2,%xmm4
10314
10315# qhasm: xmm2 ^= xmm10
10316# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10317# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10318pxor %xmm10,%xmm2
10319
10320# qhasm: xmm15 ^= xmm13
10321# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10322# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10323pxor %xmm15,%xmm13
10324
10325# qhasm: xmm14 ^= xmm9
10326# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10327# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10328pxor %xmm12,%xmm11
10329
10330# qhasm: xmm11 = xmm15
10331# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10332# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10333movdqa %xmm13,%xmm10
10334
10335# qhasm: xmm11 ^= xmm14
10336# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10337# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10338pxor %xmm11,%xmm10
10339
10340# qhasm: xmm11 &= xmm7
10341# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
10342# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
10343pand %xmm7,%xmm10
10344
10345# qhasm: xmm7 ^= xmm1
10346# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
10347# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
10348pxor %xmm1,%xmm7
10349
10350# qhasm: xmm7 &= xmm14
10351# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
10352# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
10353pand %xmm11,%xmm7
10354
10355# qhasm: xmm1 &= xmm15
10356# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
10357# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
10358pand %xmm13,%xmm1
10359
10360# qhasm: xmm7 ^= xmm1
10361# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
10362# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
10363pxor %xmm1,%xmm7
10364
10365# qhasm: xmm1 ^= xmm11
10366# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
10367# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
10368pxor %xmm10,%xmm1
10369
10370# qhasm: xmm7 ^= xmm12
10371# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
10372# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
10373pxor %xmm8,%xmm7
10374
10375# qhasm: xmm4 ^= xmm12
10376# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
10377# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
10378pxor %xmm8,%xmm4
10379
10380# qhasm: xmm1 ^= xmm8
10381# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
10382# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
10383pxor %xmm9,%xmm1
10384
10385# qhasm: xmm2 ^= xmm8
10386# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
10387# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
10388pxor %xmm9,%xmm2
10389
10390# qhasm: xmm7 ^= xmm0
10391# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
10392# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
10393pxor %xmm0,%xmm7
10394
10395# qhasm: xmm1 ^= xmm6
10396# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
10397# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
10398pxor %xmm6,%xmm1
10399
10400# qhasm: xmm4 ^= xmm7
10401# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
10402# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
10403pxor %xmm7,%xmm4
10404
10405# qhasm: xmm6 ^= xmm0
10406# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
10407# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
10408pxor %xmm0,%xmm6
10409
10410# qhasm: xmm0 ^= xmm1
10411# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
10412# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
10413pxor %xmm1,%xmm0
10414
10415# qhasm: xmm1 ^= xmm5
10416# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
10417# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
10418pxor %xmm5,%xmm1
10419
10420# qhasm: xmm5 ^= xmm2
10421# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
10422# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
10423pxor %xmm2,%xmm5
10424
10425# qhasm: xmm4 ^= xmm5
10426# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
10427# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
10428pxor %xmm5,%xmm4
10429
10430# qhasm: xmm2 ^= xmm3
10431# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
10432# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
10433pxor %xmm3,%xmm2
10434
10435# qhasm: xmm3 ^= xmm5
10436# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
10437# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
10438pxor %xmm5,%xmm3
10439
10440# qhasm: xmm6 ^= xmm3
10441# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
10442# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
10443pxor %xmm3,%xmm6
10444
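# note (added annotation): as in the previous round, the shuffle-and-XOR
# block below appears to be another bitsliced MixColumns step.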
10445# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
10446# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
10447# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
10448pshufd $0x93,%xmm0,%xmm8
10449
10450# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
10451# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
10452# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
10453pshufd $0x93,%xmm1,%xmm9
10454
10455# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
10456# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
10457# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
10458pshufd $0x93,%xmm4,%xmm10
10459
10460# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
10461# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
10462# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
10463pshufd $0x93,%xmm6,%xmm11
10464
10465# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
10466# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
10467# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
10468pshufd $0x93,%xmm3,%xmm12
10469
10470# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
10471# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
10472# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
10473pshufd $0x93,%xmm7,%xmm13
10474
10475# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
10476# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
10477# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
10478pshufd $0x93,%xmm2,%xmm14
10479
10480# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
10481# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
10482# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
10483pshufd $0x93,%xmm5,%xmm15
10484
10485# qhasm: xmm0 ^= xmm8
10486# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10487# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10488pxor %xmm8,%xmm0
10489
10490# qhasm: xmm1 ^= xmm9
10491# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10492# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10493pxor %xmm9,%xmm1
10494
10495# qhasm: xmm4 ^= xmm10
10496# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
10497# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
10498pxor %xmm10,%xmm4
10499
10500# qhasm: xmm6 ^= xmm11
10501# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
10502# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
10503pxor %xmm11,%xmm6
10504
10505# qhasm: xmm3 ^= xmm12
10506# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
10507# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
10508pxor %xmm12,%xmm3
10509
10510# qhasm: xmm7 ^= xmm13
10511# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
10512# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
10513pxor %xmm13,%xmm7
10514
10515# qhasm: xmm2 ^= xmm14
10516# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
10517# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
10518pxor %xmm14,%xmm2
10519
10520# qhasm: xmm5 ^= xmm15
10521# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
10522# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
10523pxor %xmm15,%xmm5
10524
10525# qhasm: xmm8 ^= xmm5
10526# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
10527# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
10528pxor %xmm5,%xmm8
10529
10530# qhasm: xmm9 ^= xmm0
10531# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
10532# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
10533pxor %xmm0,%xmm9
10534
10535# qhasm: xmm10 ^= xmm1
10536# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
10537# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
10538pxor %xmm1,%xmm10
10539
10540# qhasm: xmm9 ^= xmm5
10541# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
10542# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
10543pxor %xmm5,%xmm9
10544
10545# qhasm: xmm11 ^= xmm4
10546# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
10547# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
10548pxor %xmm4,%xmm11
10549
10550# qhasm: xmm12 ^= xmm6
10551# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
10552# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
10553pxor %xmm6,%xmm12
10554
10555# qhasm: xmm13 ^= xmm3
10556# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
10557# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
10558pxor %xmm3,%xmm13
10559
10560# qhasm: xmm11 ^= xmm5
10561# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
10562# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
10563pxor %xmm5,%xmm11
10564
10565# qhasm: xmm14 ^= xmm7
10566# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
10567# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
10568pxor %xmm7,%xmm14
10569
10570# qhasm: xmm15 ^= xmm2
10571# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
10572# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
10573pxor %xmm2,%xmm15
10574
10575# qhasm: xmm12 ^= xmm5
10576# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
10577# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
10578pxor %xmm5,%xmm12
10579
10580# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
10581# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
10582# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
10583pshufd $0x4E,%xmm0,%xmm0
10584
10585# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
10586# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
10587# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
10588pshufd $0x4E,%xmm1,%xmm1
10589
10590# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
10591# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
10592# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
10593pshufd $0x4E,%xmm4,%xmm4
10594
10595# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
10596# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
10597# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
10598pshufd $0x4E,%xmm6,%xmm6
10599
10600# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
10601# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
10602# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
10603pshufd $0x4E,%xmm3,%xmm3
10604
10605# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
10606# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
10607# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
10608pshufd $0x4E,%xmm7,%xmm7
10609
10610# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
10611# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
10612# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
10613pshufd $0x4E,%xmm2,%xmm2
10614
10615# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
10616# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
10617# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
10618pshufd $0x4E,%xmm5,%xmm5
10619
10620# qhasm: xmm8 ^= xmm0
10621# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
10622# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
10623pxor %xmm0,%xmm8
10624
10625# qhasm: xmm9 ^= xmm1
10626# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
10627# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
10628pxor %xmm1,%xmm9
10629
10630# qhasm: xmm10 ^= xmm4
10631# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
10632# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
10633pxor %xmm4,%xmm10
10634
10635# qhasm: xmm11 ^= xmm6
10636# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
10637# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
10638pxor %xmm6,%xmm11
10639
10640# qhasm: xmm12 ^= xmm3
10641# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
10642# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
10643pxor %xmm3,%xmm12
10644
10645# qhasm: xmm13 ^= xmm7
10646# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
10647# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
10648pxor %xmm7,%xmm13
10649
10650# qhasm: xmm14 ^= xmm2
10651# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
10652# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
10653pxor %xmm2,%xmm14
10654
10655# qhasm: xmm15 ^= xmm5
10656# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
10657# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
10658pxor %xmm5,%xmm15
10659
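# note (added annotation): the block below appears to XOR the following
# 128-byte bitsliced round key (offsets 1152..1264 from %rcx) into the
# state and apply the SRM0 byte-shuffle, a ShiftRows-style permutation
# used in place of SR at this point in the round sequence.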
10660# qhasm: xmm8 ^= *(int128 *)(c + 1152)
10661# asm 1: pxor 1152(<c=int64#4),<xmm8=int6464#9
10662# asm 2: pxor 1152(<c=%rcx),<xmm8=%xmm8
10663pxor 1152(%rcx),%xmm8
10664
10665# qhasm: shuffle bytes of xmm8 by SRM0
10666# asm 1: pshufb SRM0,<xmm8=int6464#9
10667# asm 2: pshufb SRM0,<xmm8=%xmm8
10668pshufb SRM0,%xmm8
10669
10670# qhasm: xmm9 ^= *(int128 *)(c + 1168)
10671# asm 1: pxor 1168(<c=int64#4),<xmm9=int6464#10
10672# asm 2: pxor 1168(<c=%rcx),<xmm9=%xmm9
10673pxor 1168(%rcx),%xmm9
10674
10675# qhasm: shuffle bytes of xmm9 by SRM0
10676# asm 1: pshufb SRM0,<xmm9=int6464#10
10677# asm 2: pshufb SRM0,<xmm9=%xmm9
10678pshufb SRM0,%xmm9
10679
10680# qhasm: xmm10 ^= *(int128 *)(c + 1184)
10681# asm 1: pxor 1184(<c=int64#4),<xmm10=int6464#11
10682# asm 2: pxor 1184(<c=%rcx),<xmm10=%xmm10
10683pxor 1184(%rcx),%xmm10
10684
10685# qhasm: shuffle bytes of xmm10 by SRM0
10686# asm 1: pshufb SRM0,<xmm10=int6464#11
10687# asm 2: pshufb SRM0,<xmm10=%xmm10
10688pshufb SRM0,%xmm10
10689
10690# qhasm: xmm11 ^= *(int128 *)(c + 1200)
10691# asm 1: pxor 1200(<c=int64#4),<xmm11=int6464#12
10692# asm 2: pxor 1200(<c=%rcx),<xmm11=%xmm11
10693pxor 1200(%rcx),%xmm11
10694
10695# qhasm: shuffle bytes of xmm11 by SRM0
10696# asm 1: pshufb SRM0,<xmm11=int6464#12
10697# asm 2: pshufb SRM0,<xmm11=%xmm11
10698pshufb SRM0,%xmm11
10699
10700# qhasm: xmm12 ^= *(int128 *)(c + 1216)
10701# asm 1: pxor 1216(<c=int64#4),<xmm12=int6464#13
10702# asm 2: pxor 1216(<c=%rcx),<xmm12=%xmm12
10703pxor 1216(%rcx),%xmm12
10704
10705# qhasm: shuffle bytes of xmm12 by SRM0
10706# asm 1: pshufb SRM0,<xmm12=int6464#13
10707# asm 2: pshufb SRM0,<xmm12=%xmm12
10708pshufb SRM0,%xmm12
10709
10710# qhasm: xmm13 ^= *(int128 *)(c + 1232)
10711# asm 1: pxor 1232(<c=int64#4),<xmm13=int6464#14
10712# asm 2: pxor 1232(<c=%rcx),<xmm13=%xmm13
10713pxor 1232(%rcx),%xmm13
10714
10715# qhasm: shuffle bytes of xmm13 by SRM0
10716# asm 1: pshufb SRM0,<xmm13=int6464#14
10717# asm 2: pshufb SRM0,<xmm13=%xmm13
10718pshufb SRM0,%xmm13
10719
10720# qhasm: xmm14 ^= *(int128 *)(c + 1248)
10721# asm 1: pxor 1248(<c=int64#4),<xmm14=int6464#15
10722# asm 2: pxor 1248(<c=%rcx),<xmm14=%xmm14
10723pxor 1248(%rcx),%xmm14
10724
10725# qhasm: shuffle bytes of xmm14 by SRM0
10726# asm 1: pshufb SRM0,<xmm14=int6464#15
10727# asm 2: pshufb SRM0,<xmm14=%xmm14
10728pshufb SRM0,%xmm14
10729
10730# qhasm: xmm15 ^= *(int128 *)(c + 1264)
10731# asm 1: pxor 1264(<c=int64#4),<xmm15=int6464#16
10732# asm 2: pxor 1264(<c=%rcx),<xmm15=%xmm15
10733pxor 1264(%rcx),%xmm15
10734
10735# qhasm: shuffle bytes of xmm15 by SRM0
10736# asm 1: pshufb SRM0,<xmm15=int6464#16
10737# asm 2: pshufb SRM0,<xmm15=%xmm15
10738pshufb SRM0,%xmm15
10739
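# note (added annotation): another bitsliced SubBytes computation appears
# to start here, this time operating on registers xmm8..xmm15.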
10740# qhasm: xmm13 ^= xmm14
10741# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
10742# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
10743pxor %xmm14,%xmm13
10744
10745# qhasm: xmm10 ^= xmm9
10746# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
10747# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
10748pxor %xmm9,%xmm10
10749
10750# qhasm: xmm13 ^= xmm8
10751# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
10752# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
10753pxor %xmm8,%xmm13
10754
10755# qhasm: xmm14 ^= xmm10
10756# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
10757# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
10758pxor %xmm10,%xmm14
10759
10760# qhasm: xmm11 ^= xmm8
10761# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
10762# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
10763pxor %xmm8,%xmm11
10764
10765# qhasm: xmm14 ^= xmm11
10766# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
10767# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
10768pxor %xmm11,%xmm14
10769
10770# qhasm: xmm11 ^= xmm15
10771# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
10772# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
10773pxor %xmm15,%xmm11
10774
10775# qhasm: xmm11 ^= xmm12
10776# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
10777# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
10778pxor %xmm12,%xmm11
10779
10780# qhasm: xmm15 ^= xmm13
10781# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
10782# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
10783pxor %xmm13,%xmm15
10784
10785# qhasm: xmm11 ^= xmm9
10786# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
10787# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
10788pxor %xmm9,%xmm11
10789
10790# qhasm: xmm12 ^= xmm13
10791# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
10792# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
10793pxor %xmm13,%xmm12
10794
10795# qhasm: xmm10 ^= xmm15
10796# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
10797# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
10798pxor %xmm15,%xmm10
10799
10800# qhasm: xmm9 ^= xmm13
10801# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
10802# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
10803pxor %xmm13,%xmm9
10804
10805# qhasm: xmm3 = xmm15
10806# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
10807# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
10808movdqa %xmm15,%xmm0
10809
10810# qhasm: xmm2 = xmm9
10811# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
10812# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
10813movdqa %xmm9,%xmm1
10814
10815# qhasm: xmm1 = xmm13
10816# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
10817# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
10818movdqa %xmm13,%xmm2
10819
10820# qhasm: xmm5 = xmm10
10821# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
10822# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
10823movdqa %xmm10,%xmm3
10824
10825# qhasm: xmm4 = xmm14
10826# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
10827# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
10828movdqa %xmm14,%xmm4
10829
10830# qhasm: xmm3 ^= xmm12
10831# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
10832# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
10833pxor %xmm12,%xmm0
10834
10835# qhasm: xmm2 ^= xmm10
10836# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
10837# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
10838pxor %xmm10,%xmm1
10839
10840# qhasm: xmm1 ^= xmm11
10841# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
10842# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
10843pxor %xmm11,%xmm2
10844
10845# qhasm: xmm5 ^= xmm12
10846# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
10847# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
10848pxor %xmm12,%xmm3
10849
10850# qhasm: xmm4 ^= xmm8
10851# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
10852# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
10853pxor %xmm8,%xmm4
10854
10855# qhasm: xmm6 = xmm3
10856# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
10857# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
10858movdqa %xmm0,%xmm5
10859
10860# qhasm: xmm0 = xmm2
10861# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
10862# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
10863movdqa %xmm1,%xmm6
10864
10865# qhasm: xmm7 = xmm3
10866# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
10867# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
10868movdqa %xmm0,%xmm7
10869
10870# qhasm: xmm2 |= xmm1
10871# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
10872# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
10873por %xmm2,%xmm1
10874
10875# qhasm: xmm3 |= xmm4
10876# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
10877# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
10878por %xmm4,%xmm0
10879
10880# qhasm: xmm7 ^= xmm0
10881# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
10882# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
10883pxor %xmm6,%xmm7
10884
10885# qhasm: xmm6 &= xmm4
10886# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
10887# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
10888pand %xmm4,%xmm5
10889
10890# qhasm: xmm0 &= xmm1
10891# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
10892# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
10893pand %xmm2,%xmm6
10894
10895# qhasm: xmm4 ^= xmm1
10896# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
10897# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
10898pxor %xmm2,%xmm4
10899
10900# qhasm: xmm7 &= xmm4
10901# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
10902# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
10903pand %xmm4,%xmm7
10904
10905# qhasm: xmm4 = xmm11
10906# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
10907# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
10908movdqa %xmm11,%xmm2
10909
10910# qhasm: xmm4 ^= xmm8
10911# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
10912# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
10913pxor %xmm8,%xmm2
10914
10915# qhasm: xmm5 &= xmm4
10916# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
10917# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
10918pand %xmm2,%xmm3
10919
10920# qhasm: xmm3 ^= xmm5
10921# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
10922# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
10923pxor %xmm3,%xmm0
10924
10925# qhasm: xmm2 ^= xmm5
10926# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
10927# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
10928pxor %xmm3,%xmm1
10929
10930# qhasm: xmm5 = xmm15
10931# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
10932# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
10933movdqa %xmm15,%xmm2
10934
10935# qhasm: xmm5 ^= xmm9
10936# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
10937# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
10938pxor %xmm9,%xmm2
10939
10940# qhasm: xmm4 = xmm13
10941# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
10942# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
10943movdqa %xmm13,%xmm3
10944
10945# qhasm: xmm1 = xmm5
10946# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
10947# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
10948movdqa %xmm2,%xmm4
10949
10950# qhasm: xmm4 ^= xmm14
10951# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
10952# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
10953pxor %xmm14,%xmm3
10954
10955# qhasm: xmm1 |= xmm4
10956# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
10957# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
10958por %xmm3,%xmm4
10959
10960# qhasm: xmm5 &= xmm4
10961# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
10962# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
10963pand %xmm3,%xmm2
10964
10965# qhasm: xmm0 ^= xmm5
10966# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
10967# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
10968pxor %xmm2,%xmm6
10969
10970# qhasm: xmm3 ^= xmm7
10971# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
10972# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
10973pxor %xmm7,%xmm0
10974
10975# qhasm: xmm2 ^= xmm6
10976# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
10977# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
10978pxor %xmm5,%xmm1
10979
10980# qhasm: xmm1 ^= xmm7
10981# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
10982# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
10983pxor %xmm7,%xmm4
10984
10985# qhasm: xmm0 ^= xmm6
10986# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
10987# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
10988pxor %xmm5,%xmm6
10989
10990# qhasm: xmm1 ^= xmm6
10991# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
10992# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
10993pxor %xmm5,%xmm4
10994
10995# qhasm: xmm4 = xmm10
10996# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
10997# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
10998movdqa %xmm10,%xmm2
10999
11000# qhasm: xmm5 = xmm12
11001# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
11002# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
11003movdqa %xmm12,%xmm3
11004
11005# qhasm: xmm6 = xmm9
11006# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
11007# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
11008movdqa %xmm9,%xmm5
11009
11010# qhasm: xmm7 = xmm15
11011# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
11012# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
11013movdqa %xmm15,%xmm7
11014
11015# qhasm: xmm4 &= xmm11
11016# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
11017# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
11018pand %xmm11,%xmm2
11019
11020# qhasm: xmm5 &= xmm8
11021# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
11022# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
11023pand %xmm8,%xmm3
11024
11025# qhasm: xmm6 &= xmm13
11026# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
11027# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
11028pand %xmm13,%xmm5
11029
11030# qhasm: xmm7 |= xmm14
11031# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
11032# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
11033por %xmm14,%xmm7
11034
11035# qhasm: xmm3 ^= xmm4
11036# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
11037# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
11038pxor %xmm2,%xmm0
11039
11040# qhasm: xmm2 ^= xmm5
11041# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
11042# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
11043pxor %xmm3,%xmm1
11044
11045# qhasm: xmm1 ^= xmm6
11046# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
11047# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
11048pxor %xmm5,%xmm4
11049
11050# qhasm: xmm0 ^= xmm7
11051# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
11052# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
11053pxor %xmm7,%xmm6
11054
11055# qhasm: xmm4 = xmm3
11056# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
11057# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
11058movdqa %xmm0,%xmm2
11059
11060# qhasm: xmm4 ^= xmm2
11061# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
11062# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
11063pxor %xmm1,%xmm2
11064
11065# qhasm: xmm3 &= xmm1
11066# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
11067# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
11068pand %xmm4,%xmm0
11069
11070# qhasm: xmm6 = xmm0
11071# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
11072# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
11073movdqa %xmm6,%xmm3
11074
11075# qhasm: xmm6 ^= xmm3
11076# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
11077# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
11078pxor %xmm0,%xmm3
11079
11080# qhasm: xmm7 = xmm4
11081# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
11082# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
11083movdqa %xmm2,%xmm5
11084
11085# qhasm: xmm7 &= xmm6
11086# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
11087# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
11088pand %xmm3,%xmm5
11089
11090# qhasm: xmm7 ^= xmm2
11091# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
11092# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
11093pxor %xmm1,%xmm5
11094
11095# qhasm: xmm5 = xmm1
11096# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
11097# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
11098movdqa %xmm4,%xmm7
11099
11100# qhasm: xmm5 ^= xmm0
11101# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
11102# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
11103pxor %xmm6,%xmm7
11104
11105# qhasm: xmm3 ^= xmm2
11106# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
11107# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
11108pxor %xmm1,%xmm0
11109
11110# qhasm: xmm5 &= xmm3
11111# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
11112# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
11113pand %xmm0,%xmm7
11114
11115# qhasm: xmm5 ^= xmm0
11116# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
11117# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
11118pxor %xmm6,%xmm7
11119
11120# qhasm: xmm1 ^= xmm5
11121# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
11122# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
11123pxor %xmm7,%xmm4
11124
11125# qhasm: xmm2 = xmm6
11126# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
11127# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
11128movdqa %xmm3,%xmm0
11129
11130# qhasm: xmm2 ^= xmm5
11131# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
11132# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
11133pxor %xmm7,%xmm0
11134
11135# qhasm: xmm2 &= xmm0
11136# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
11137# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
11138pand %xmm6,%xmm0
11139
11140# qhasm: xmm1 ^= xmm2
11141# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
11142# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
11143pxor %xmm0,%xmm4
11144
11145# qhasm: xmm6 ^= xmm2
11146# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
11147# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
11148pxor %xmm0,%xmm3
11149
11150# qhasm: xmm6 &= xmm7
11151# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
11152# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
11153pand %xmm5,%xmm3
11154
11155# qhasm: xmm6 ^= xmm4
11156# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
11157# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
11158pxor %xmm2,%xmm3
11159
11160# qhasm: xmm4 = xmm14
11161# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
11162# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
11163movdqa %xmm14,%xmm0
11164
11165# qhasm: xmm0 = xmm13
11166# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
11167# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
11168movdqa %xmm13,%xmm1
11169
11170# qhasm: xmm2 = xmm7
11171# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
11172# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
11173movdqa %xmm5,%xmm2
11174
11175# qhasm: xmm2 ^= xmm6
11176# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
11177# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
11178pxor %xmm3,%xmm2
11179
11180# qhasm: xmm2 &= xmm14
11181# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
11182# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
11183pand %xmm14,%xmm2
11184
11185# qhasm: xmm14 ^= xmm13
11186# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
11187# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
11188pxor %xmm13,%xmm14
11189
11190# qhasm: xmm14 &= xmm6
11191# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
11192# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
11193pand %xmm3,%xmm14
11194
11195# qhasm: xmm13 &= xmm7
11196# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
11197# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
11198pand %xmm5,%xmm13
11199
11200# qhasm: xmm14 ^= xmm13
11201# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
11202# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
11203pxor %xmm13,%xmm14
11204
11205# qhasm: xmm13 ^= xmm2
11206# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
11207# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
11208pxor %xmm2,%xmm13
11209
11210# qhasm: xmm4 ^= xmm8
11211# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
11212# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
11213pxor %xmm8,%xmm0
11214
11215# qhasm: xmm0 ^= xmm11
11216# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
11217# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
11218pxor %xmm11,%xmm1
11219
11220# qhasm: xmm7 ^= xmm5
11221# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
11222# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
11223pxor %xmm7,%xmm5
11224
11225# qhasm: xmm6 ^= xmm1
11226# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
11227# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
11228pxor %xmm4,%xmm3
11229
11230# qhasm: xmm3 = xmm7
11231# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11232# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11233movdqa %xmm5,%xmm2
11234
11235# qhasm: xmm3 ^= xmm6
11236# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11237# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11238pxor %xmm3,%xmm2
11239
11240# qhasm: xmm3 &= xmm4
11241# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
11242# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
11243pand %xmm0,%xmm2
11244
11245# qhasm: xmm4 ^= xmm0
11246# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
11247# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
11248pxor %xmm1,%xmm0
11249
11250# qhasm: xmm4 &= xmm6
11251# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
11252# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
11253pand %xmm3,%xmm0
11254
11255# qhasm: xmm0 &= xmm7
11256# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
11257# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
11258pand %xmm5,%xmm1
11259
11260# qhasm: xmm0 ^= xmm4
11261# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
11262# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
11263pxor %xmm0,%xmm1
11264
11265# qhasm: xmm4 ^= xmm3
11266# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
11267# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
11268pxor %xmm2,%xmm0
11269
11270# qhasm: xmm2 = xmm5
11271# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
11272# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
11273movdqa %xmm7,%xmm2
11274
11275# qhasm: xmm2 ^= xmm1
11276# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
11277# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
11278pxor %xmm4,%xmm2
11279
11280# qhasm: xmm2 &= xmm8
11281# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
11282# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
11283pand %xmm8,%xmm2
11284
11285# qhasm: xmm8 ^= xmm11
11286# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
11287# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
11288pxor %xmm11,%xmm8
11289
11290# qhasm: xmm8 &= xmm1
11291# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
11292# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
11293pand %xmm4,%xmm8
11294
11295# qhasm: xmm11 &= xmm5
11296# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
11297# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
11298pand %xmm7,%xmm11
11299
11300# qhasm: xmm8 ^= xmm11
11301# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
11302# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
11303pxor %xmm11,%xmm8
11304
11305# qhasm: xmm11 ^= xmm2
11306# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
11307# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
11308pxor %xmm2,%xmm11
11309
11310# qhasm: xmm14 ^= xmm4
11311# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
11312# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
11313pxor %xmm0,%xmm14
11314
11315# qhasm: xmm8 ^= xmm4
11316# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
11317# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
11318pxor %xmm0,%xmm8
11319
11320# qhasm: xmm13 ^= xmm0
11321# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
11322# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
11323pxor %xmm1,%xmm13
11324
11325# qhasm: xmm11 ^= xmm0
11326# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
11327# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
11328pxor %xmm1,%xmm11
11329
11330# qhasm: xmm4 = xmm15
11331# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
11332# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
11333movdqa %xmm15,%xmm0
11334
11335# qhasm: xmm0 = xmm9
11336# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
11337# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
11338movdqa %xmm9,%xmm1
11339
11340# qhasm: xmm4 ^= xmm12
11341# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
11342# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
11343pxor %xmm12,%xmm0
11344
11345# qhasm: xmm0 ^= xmm10
11346# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
11347# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
11348pxor %xmm10,%xmm1
11349
11350# qhasm: xmm3 = xmm7
11351# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11352# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11353movdqa %xmm5,%xmm2
11354
11355# qhasm: xmm3 ^= xmm6
11356# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11357# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11358pxor %xmm3,%xmm2
11359
11360# qhasm: xmm3 &= xmm4
11361# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
11362# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
11363pand %xmm0,%xmm2
11364
11365# qhasm: xmm4 ^= xmm0
11366# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
11367# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
11368pxor %xmm1,%xmm0
11369
11370# qhasm: xmm4 &= xmm6
11371# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
11372# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
11373pand %xmm3,%xmm0
11374
11375# qhasm: xmm0 &= xmm7
11376# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
11377# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
11378pand %xmm5,%xmm1
11379
11380# qhasm: xmm0 ^= xmm4
11381# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
11382# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
11383pxor %xmm0,%xmm1
11384
11385# qhasm: xmm4 ^= xmm3
11386# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
11387# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
11388pxor %xmm2,%xmm0
11389
11390# qhasm: xmm2 = xmm5
11391# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
11392# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
11393movdqa %xmm7,%xmm2
11394
11395# qhasm: xmm2 ^= xmm1
11396# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
11397# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
11398pxor %xmm4,%xmm2
11399
11400# qhasm: xmm2 &= xmm12
11401# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
11402# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
11403pand %xmm12,%xmm2
11404
11405# qhasm: xmm12 ^= xmm10
11406# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
11407# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
11408pxor %xmm10,%xmm12
11409
11410# qhasm: xmm12 &= xmm1
11411# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
11412# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
11413pand %xmm4,%xmm12
11414
11415# qhasm: xmm10 &= xmm5
11416# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
11417# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
11418pand %xmm7,%xmm10
11419
11420# qhasm: xmm12 ^= xmm10
11421# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
11422# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
11423pxor %xmm10,%xmm12
11424
11425# qhasm: xmm10 ^= xmm2
11426# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
11427# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
11428pxor %xmm2,%xmm10
11429
11430# qhasm: xmm7 ^= xmm5
11431# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
11432# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
11433pxor %xmm7,%xmm5
11434
11435# qhasm: xmm6 ^= xmm1
11436# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
11437# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
11438pxor %xmm4,%xmm3
11439
11440# qhasm: xmm3 = xmm7
11441# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11442# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11443movdqa %xmm5,%xmm2
11444
11445# qhasm: xmm3 ^= xmm6
11446# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11447# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11448pxor %xmm3,%xmm2
11449
11450# qhasm: xmm3 &= xmm15
11451# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
11452# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
11453pand %xmm15,%xmm2
11454
11455# qhasm: xmm15 ^= xmm9
11456# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
11457# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
11458pxor %xmm9,%xmm15
11459
11460# qhasm: xmm15 &= xmm6
11461# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
11462# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
11463pand %xmm3,%xmm15
11464
11465# qhasm: xmm9 &= xmm7
11466# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
11467# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
11468pand %xmm5,%xmm9
11469
11470# qhasm: xmm15 ^= xmm9
11471# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
11472# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
11473pxor %xmm9,%xmm15
11474
11475# qhasm: xmm9 ^= xmm3
11476# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
11477# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
11478pxor %xmm2,%xmm9
11479
11480# qhasm: xmm15 ^= xmm4
11481# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
11482# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
11483pxor %xmm0,%xmm15
11484
11485# qhasm: xmm12 ^= xmm4
11486# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
11487# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
11488pxor %xmm0,%xmm12
11489
11490# qhasm: xmm9 ^= xmm0
11491# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
11492# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
11493pxor %xmm1,%xmm9
11494
11495# qhasm: xmm10 ^= xmm0
11496# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
11497# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
11498pxor %xmm1,%xmm10
11499
11500# qhasm: xmm15 ^= xmm8
11501# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
11502# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
11503pxor %xmm8,%xmm15
11504
11505# qhasm: xmm9 ^= xmm14
11506# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
11507# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
11508pxor %xmm14,%xmm9
11509
11510# qhasm: xmm12 ^= xmm15
11511# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
11512# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
11513pxor %xmm15,%xmm12
11514
11515# qhasm: xmm14 ^= xmm8
11516# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
11517# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
11518pxor %xmm8,%xmm14
11519
11520# qhasm: xmm8 ^= xmm9
11521# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
11522# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
11523pxor %xmm9,%xmm8
11524
11525# qhasm: xmm9 ^= xmm13
11526# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
11527# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
11528pxor %xmm13,%xmm9
11529
11530# qhasm: xmm13 ^= xmm10
11531# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
11532# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
11533pxor %xmm10,%xmm13
11534
11535# qhasm: xmm12 ^= xmm13
11536# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
11537# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
11538pxor %xmm13,%xmm12
11539
11540# qhasm: xmm10 ^= xmm11
11541# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
11542# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
11543pxor %xmm11,%xmm10
11544
11545# qhasm: xmm11 ^= xmm13
11546# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
11547# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
11548pxor %xmm13,%xmm11
11549
11550# qhasm: xmm14 ^= xmm11
11551# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
11552# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
11553pxor %xmm11,%xmm14
11554
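# Note: the pxor loads below appear to mix in the last of the eleven bitsliced round
# keys; the expanded key at c is laid out as 11 round keys of 128 bytes each, so
# offsets 1280..1392 hold round key 10.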
11555# qhasm: xmm8 ^= *(int128 *)(c + 1280)
11556# asm 1: pxor 1280(<c=int64#4),<xmm8=int6464#9
11557# asm 2: pxor 1280(<c=%rcx),<xmm8=%xmm8
11558pxor 1280(%rcx),%xmm8
11559
11560# qhasm: xmm9 ^= *(int128 *)(c + 1296)
11561# asm 1: pxor 1296(<c=int64#4),<xmm9=int6464#10
11562# asm 2: pxor 1296(<c=%rcx),<xmm9=%xmm9
11563pxor 1296(%rcx),%xmm9
11564
11565# qhasm: xmm12 ^= *(int128 *)(c + 1312)
11566# asm 1: pxor 1312(<c=int64#4),<xmm12=int6464#13
11567# asm 2: pxor 1312(<c=%rcx),<xmm12=%xmm12
11568pxor 1312(%rcx),%xmm12
11569
11570# qhasm: xmm14 ^= *(int128 *)(c + 1328)
11571# asm 1: pxor 1328(<c=int64#4),<xmm14=int6464#15
11572# asm 2: pxor 1328(<c=%rcx),<xmm14=%xmm14
11573pxor 1328(%rcx),%xmm14
11574
11575# qhasm: xmm11 ^= *(int128 *)(c + 1344)
11576# asm 1: pxor 1344(<c=int64#4),<xmm11=int6464#12
11577# asm 2: pxor 1344(<c=%rcx),<xmm11=%xmm11
11578pxor 1344(%rcx),%xmm11
11579
11580# qhasm: xmm15 ^= *(int128 *)(c + 1360)
11581# asm 1: pxor 1360(<c=int64#4),<xmm15=int6464#16
11582# asm 2: pxor 1360(<c=%rcx),<xmm15=%xmm15
11583pxor 1360(%rcx),%xmm15
11584
11585# qhasm: xmm10 ^= *(int128 *)(c + 1376)
11586# asm 1: pxor 1376(<c=int64#4),<xmm10=int6464#11
11587# asm 2: pxor 1376(<c=%rcx),<xmm10=%xmm10
11588pxor 1376(%rcx),%xmm10
11589
11590# qhasm: xmm13 ^= *(int128 *)(c + 1392)
11591# asm 1: pxor 1392(<c=int64#4),<xmm13=int6464#14
11592# asm 2: pxor 1392(<c=%rcx),<xmm13=%xmm13
11593pxor 1392(%rcx),%xmm13
11594
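# Undo the bitslice interleaving: each pair of registers exchanges the bit groups
# selected by BS0 (0x55..), BS1 (0x33..) and BS2 (0x0f..) at shift distances 1, 2
# and 4 (the swap-move pattern), turning the eight bitsliced words back into eight
# consecutive 16-byte keystream blocks.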
11595# qhasm: xmm0 = xmm10
11596# asm 1: movdqa <xmm10=int6464#11,>xmm0=int6464#1
11597# asm 2: movdqa <xmm10=%xmm10,>xmm0=%xmm0
11598movdqa %xmm10,%xmm0
11599
11600# qhasm: uint6464 xmm0 >>= 1
11601# asm 1: psrlq $1,<xmm0=int6464#1
11602# asm 2: psrlq $1,<xmm0=%xmm0
11603psrlq $1,%xmm0
11604
11605# qhasm: xmm0 ^= xmm13
11606# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11607# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11608pxor %xmm13,%xmm0
11609
11610# qhasm: xmm0 &= BS0
11611# asm 1: pand BS0,<xmm0=int6464#1
11612# asm 2: pand BS0,<xmm0=%xmm0
11613pand BS0,%xmm0
11614
11615# qhasm: xmm13 ^= xmm0
11616# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11617# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11618pxor %xmm0,%xmm13
11619
11620# qhasm: uint6464 xmm0 <<= 1
11621# asm 1: psllq $1,<xmm0=int6464#1
11622# asm 2: psllq $1,<xmm0=%xmm0
11623psllq $1,%xmm0
11624
11625# qhasm: xmm10 ^= xmm0
11626# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11627# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11628pxor %xmm0,%xmm10
11629
11630# qhasm: xmm0 = xmm11
11631# asm 1: movdqa <xmm11=int6464#12,>xmm0=int6464#1
11632# asm 2: movdqa <xmm11=%xmm11,>xmm0=%xmm0
11633movdqa %xmm11,%xmm0
11634
11635# qhasm: uint6464 xmm0 >>= 1
11636# asm 1: psrlq $1,<xmm0=int6464#1
11637# asm 2: psrlq $1,<xmm0=%xmm0
11638psrlq $1,%xmm0
11639
11640# qhasm: xmm0 ^= xmm15
11641# asm 1: pxor <xmm15=int6464#16,<xmm0=int6464#1
11642# asm 2: pxor <xmm15=%xmm15,<xmm0=%xmm0
11643pxor %xmm15,%xmm0
11644
11645# qhasm: xmm0 &= BS0
11646# asm 1: pand BS0,<xmm0=int6464#1
11647# asm 2: pand BS0,<xmm0=%xmm0
11648pand BS0,%xmm0
11649
11650# qhasm: xmm15 ^= xmm0
11651# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11652# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11653pxor %xmm0,%xmm15
11654
11655# qhasm: uint6464 xmm0 <<= 1
11656# asm 1: psllq $1,<xmm0=int6464#1
11657# asm 2: psllq $1,<xmm0=%xmm0
11658psllq $1,%xmm0
11659
11660# qhasm: xmm11 ^= xmm0
11661# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
11662# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
11663pxor %xmm0,%xmm11
11664
11665# qhasm: xmm0 = xmm12
11666# asm 1: movdqa <xmm12=int6464#13,>xmm0=int6464#1
11667# asm 2: movdqa <xmm12=%xmm12,>xmm0=%xmm0
11668movdqa %xmm12,%xmm0
11669
11670# qhasm: uint6464 xmm0 >>= 1
11671# asm 1: psrlq $1,<xmm0=int6464#1
11672# asm 2: psrlq $1,<xmm0=%xmm0
11673psrlq $1,%xmm0
11674
11675# qhasm: xmm0 ^= xmm14
11676# asm 1: pxor <xmm14=int6464#15,<xmm0=int6464#1
11677# asm 2: pxor <xmm14=%xmm14,<xmm0=%xmm0
11678pxor %xmm14,%xmm0
11679
11680# qhasm: xmm0 &= BS0
11681# asm 1: pand BS0,<xmm0=int6464#1
11682# asm 2: pand BS0,<xmm0=%xmm0
11683pand BS0,%xmm0
11684
11685# qhasm: xmm14 ^= xmm0
11686# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11687# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11688pxor %xmm0,%xmm14
11689
11690# qhasm: uint6464 xmm0 <<= 1
11691# asm 1: psllq $1,<xmm0=int6464#1
11692# asm 2: psllq $1,<xmm0=%xmm0
11693psllq $1,%xmm0
11694
11695# qhasm: xmm12 ^= xmm0
11696# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11697# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11698pxor %xmm0,%xmm12
11699
11700# qhasm: xmm0 = xmm8
11701# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11702# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11703movdqa %xmm8,%xmm0
11704
11705# qhasm: uint6464 xmm0 >>= 1
11706# asm 1: psrlq $1,<xmm0=int6464#1
11707# asm 2: psrlq $1,<xmm0=%xmm0
11708psrlq $1,%xmm0
11709
11710# qhasm: xmm0 ^= xmm9
11711# asm 1: pxor <xmm9=int6464#10,<xmm0=int6464#1
11712# asm 2: pxor <xmm9=%xmm9,<xmm0=%xmm0
11713pxor %xmm9,%xmm0
11714
11715# qhasm: xmm0 &= BS0
11716# asm 1: pand BS0,<xmm0=int6464#1
11717# asm 2: pand BS0,<xmm0=%xmm0
11718pand BS0,%xmm0
11719
11720# qhasm: xmm9 ^= xmm0
11721# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11722# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11723pxor %xmm0,%xmm9
11724
11725# qhasm: uint6464 xmm0 <<= 1
11726# asm 1: psllq $1,<xmm0=int6464#1
11727# asm 2: psllq $1,<xmm0=%xmm0
11728psllq $1,%xmm0
11729
11730# qhasm: xmm8 ^= xmm0
11731# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
11732# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
11733pxor %xmm0,%xmm8
11734
11735# qhasm: xmm0 = xmm15
11736# asm 1: movdqa <xmm15=int6464#16,>xmm0=int6464#1
11737# asm 2: movdqa <xmm15=%xmm15,>xmm0=%xmm0
11738movdqa %xmm15,%xmm0
11739
11740# qhasm: uint6464 xmm0 >>= 2
11741# asm 1: psrlq $2,<xmm0=int6464#1
11742# asm 2: psrlq $2,<xmm0=%xmm0
11743psrlq $2,%xmm0
11744
11745# qhasm: xmm0 ^= xmm13
11746# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11747# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11748pxor %xmm13,%xmm0
11749
11750# qhasm: xmm0 &= BS1
11751# asm 1: pand BS1,<xmm0=int6464#1
11752# asm 2: pand BS1,<xmm0=%xmm0
11753pand BS1,%xmm0
11754
11755# qhasm: xmm13 ^= xmm0
11756# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11757# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11758pxor %xmm0,%xmm13
11759
11760# qhasm: uint6464 xmm0 <<= 2
11761# asm 1: psllq $2,<xmm0=int6464#1
11762# asm 2: psllq $2,<xmm0=%xmm0
11763psllq $2,%xmm0
11764
11765# qhasm: xmm15 ^= xmm0
11766# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11767# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11768pxor %xmm0,%xmm15
11769
11770# qhasm: xmm0 = xmm11
11771# asm 1: movdqa <xmm11=int6464#12,>xmm0=int6464#1
11772# asm 2: movdqa <xmm11=%xmm11,>xmm0=%xmm0
11773movdqa %xmm11,%xmm0
11774
11775# qhasm: uint6464 xmm0 >>= 2
11776# asm 1: psrlq $2,<xmm0=int6464#1
11777# asm 2: psrlq $2,<xmm0=%xmm0
11778psrlq $2,%xmm0
11779
11780# qhasm: xmm0 ^= xmm10
11781# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#1
11782# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm0
11783pxor %xmm10,%xmm0
11784
11785# qhasm: xmm0 &= BS1
11786# asm 1: pand BS1,<xmm0=int6464#1
11787# asm 2: pand BS1,<xmm0=%xmm0
11788pand BS1,%xmm0
11789
11790# qhasm: xmm10 ^= xmm0
11791# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11792# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11793pxor %xmm0,%xmm10
11794
11795# qhasm: uint6464 xmm0 <<= 2
11796# asm 1: psllq $2,<xmm0=int6464#1
11797# asm 2: psllq $2,<xmm0=%xmm0
11798psllq $2,%xmm0
11799
11800# qhasm: xmm11 ^= xmm0
11801# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
11802# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
11803pxor %xmm0,%xmm11
11804
11805# qhasm: xmm0 = xmm9
11806# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#1
11807# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm0
11808movdqa %xmm9,%xmm0
11809
11810# qhasm: uint6464 xmm0 >>= 2
11811# asm 1: psrlq $2,<xmm0=int6464#1
11812# asm 2: psrlq $2,<xmm0=%xmm0
11813psrlq $2,%xmm0
11814
11815# qhasm: xmm0 ^= xmm14
11816# asm 1: pxor <xmm14=int6464#15,<xmm0=int6464#1
11817# asm 2: pxor <xmm14=%xmm14,<xmm0=%xmm0
11818pxor %xmm14,%xmm0
11819
11820# qhasm: xmm0 &= BS1
11821# asm 1: pand BS1,<xmm0=int6464#1
11822# asm 2: pand BS1,<xmm0=%xmm0
11823pand BS1,%xmm0
11824
11825# qhasm: xmm14 ^= xmm0
11826# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11827# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11828pxor %xmm0,%xmm14
11829
11830# qhasm: uint6464 xmm0 <<= 2
11831# asm 1: psllq $2,<xmm0=int6464#1
11832# asm 2: psllq $2,<xmm0=%xmm0
11833psllq $2,%xmm0
11834
11835# qhasm: xmm9 ^= xmm0
11836# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11837# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11838pxor %xmm0,%xmm9
11839
11840# qhasm: xmm0 = xmm8
11841# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11842# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11843movdqa %xmm8,%xmm0
11844
11845# qhasm: uint6464 xmm0 >>= 2
11846# asm 1: psrlq $2,<xmm0=int6464#1
11847# asm 2: psrlq $2,<xmm0=%xmm0
11848psrlq $2,%xmm0
11849
11850# qhasm: xmm0 ^= xmm12
11851# asm 1: pxor <xmm12=int6464#13,<xmm0=int6464#1
11852# asm 2: pxor <xmm12=%xmm12,<xmm0=%xmm0
11853pxor %xmm12,%xmm0
11854
11855# qhasm: xmm0 &= BS1
11856# asm 1: pand BS1,<xmm0=int6464#1
11857# asm 2: pand BS1,<xmm0=%xmm0
11858pand BS1,%xmm0
11859
11860# qhasm: xmm12 ^= xmm0
11861# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11862# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11863pxor %xmm0,%xmm12
11864
11865# qhasm: uint6464 xmm0 <<= 2
11866# asm 1: psllq $2,<xmm0=int6464#1
11867# asm 2: psllq $2,<xmm0=%xmm0
11868psllq $2,%xmm0
11869
11870# qhasm: xmm8 ^= xmm0
11871# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
11872# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
11873pxor %xmm0,%xmm8
11874
11875# qhasm: xmm0 = xmm14
11876# asm 1: movdqa <xmm14=int6464#15,>xmm0=int6464#1
11877# asm 2: movdqa <xmm14=%xmm14,>xmm0=%xmm0
11878movdqa %xmm14,%xmm0
11879
11880# qhasm: uint6464 xmm0 >>= 4
11881# asm 1: psrlq $4,<xmm0=int6464#1
11882# asm 2: psrlq $4,<xmm0=%xmm0
11883psrlq $4,%xmm0
11884
11885# qhasm: xmm0 ^= xmm13
11886# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11887# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11888pxor %xmm13,%xmm0
11889
11890# qhasm: xmm0 &= BS2
11891# asm 1: pand BS2,<xmm0=int6464#1
11892# asm 2: pand BS2,<xmm0=%xmm0
11893pand BS2,%xmm0
11894
11895# qhasm: xmm13 ^= xmm0
11896# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11897# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11898pxor %xmm0,%xmm13
11899
11900# qhasm: uint6464 xmm0 <<= 4
11901# asm 1: psllq $4,<xmm0=int6464#1
11902# asm 2: psllq $4,<xmm0=%xmm0
11903psllq $4,%xmm0
11904
11905# qhasm: xmm14 ^= xmm0
11906# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11907# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11908pxor %xmm0,%xmm14
11909
11910# qhasm: xmm0 = xmm12
11911# asm 1: movdqa <xmm12=int6464#13,>xmm0=int6464#1
11912# asm 2: movdqa <xmm12=%xmm12,>xmm0=%xmm0
11913movdqa %xmm12,%xmm0
11914
11915# qhasm: uint6464 xmm0 >>= 4
11916# asm 1: psrlq $4,<xmm0=int6464#1
11917# asm 2: psrlq $4,<xmm0=%xmm0
11918psrlq $4,%xmm0
11919
11920# qhasm: xmm0 ^= xmm10
11921# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#1
11922# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm0
11923pxor %xmm10,%xmm0
11924
11925# qhasm: xmm0 &= BS2
11926# asm 1: pand BS2,<xmm0=int6464#1
11927# asm 2: pand BS2,<xmm0=%xmm0
11928pand BS2,%xmm0
11929
11930# qhasm: xmm10 ^= xmm0
11931# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11932# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11933pxor %xmm0,%xmm10
11934
11935# qhasm: uint6464 xmm0 <<= 4
11936# asm 1: psllq $4,<xmm0=int6464#1
11937# asm 2: psllq $4,<xmm0=%xmm0
11938psllq $4,%xmm0
11939
11940# qhasm: xmm12 ^= xmm0
11941# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11942# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11943pxor %xmm0,%xmm12
11944
11945# qhasm: xmm0 = xmm9
11946# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#1
11947# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm0
11948movdqa %xmm9,%xmm0
11949
11950# qhasm: uint6464 xmm0 >>= 4
11951# asm 1: psrlq $4,<xmm0=int6464#1
11952# asm 2: psrlq $4,<xmm0=%xmm0
11953psrlq $4,%xmm0
11954
11955# qhasm: xmm0 ^= xmm15
11956# asm 1: pxor <xmm15=int6464#16,<xmm0=int6464#1
11957# asm 2: pxor <xmm15=%xmm15,<xmm0=%xmm0
11958pxor %xmm15,%xmm0
11959
11960# qhasm: xmm0 &= BS2
11961# asm 1: pand BS2,<xmm0=int6464#1
11962# asm 2: pand BS2,<xmm0=%xmm0
11963pand BS2,%xmm0
11964
11965# qhasm: xmm15 ^= xmm0
11966# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11967# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11968pxor %xmm0,%xmm15
11969
11970# qhasm: uint6464 xmm0 <<= 4
11971# asm 1: psllq $4,<xmm0=int6464#1
11972# asm 2: psllq $4,<xmm0=%xmm0
11973psllq $4,%xmm0
11974
11975# qhasm: xmm9 ^= xmm0
11976# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11977# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11978pxor %xmm0,%xmm9
11979
11980# qhasm: xmm0 = xmm8
11981# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11982# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11983movdqa %xmm8,%xmm0
11984
11985# qhasm: uint6464 xmm0 >>= 4
11986# asm 1: psrlq $4,<xmm0=int6464#1
11987# asm 2: psrlq $4,<xmm0=%xmm0
11988psrlq $4,%xmm0
11989
11990# qhasm: xmm0 ^= xmm11
11991# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#1
11992# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm0
11993pxor %xmm11,%xmm0
11994
11995# qhasm: xmm0 &= BS2
11996# asm 1: pand BS2,<xmm0=int6464#1
11997# asm 2: pand BS2,<xmm0=%xmm0
11998pand BS2,%xmm0
11999
12000# qhasm: xmm11 ^= xmm0
12001# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
12002# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
12003pxor %xmm0,%xmm11
12004
12005# qhasm: uint6464 xmm0 <<= 4
12006# asm 1: psllq $4,<xmm0=int6464#1
12007# asm 2: psllq $4,<xmm0=%xmm0
12008psllq $4,%xmm0
12009
12010# qhasm: xmm8 ^= xmm0
12011# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
12012# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
12013pxor %xmm0,%xmm8
12014
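# 128 keystream bytes are ready; branch on how much output is still requested.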
12015# qhasm: unsigned<? =? len-128
12016# asm 1: cmp $128,<len=int64#2
12017# asm 2: cmp $128,<len=%rsi
12018cmp $128,%rsi
12019# comment:fp stack unchanged by jump
12020
12021# qhasm: goto partial if unsigned<
12022jb ._partial
12023# comment:fp stack unchanged by jump
12024
12025# qhasm: goto full if =
12026je ._full
12027
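# More than 128 bytes remain: bump the big-endian 32-bit counter in the last four
# nonce bytes by 8 (eight blocks were just produced), store this batch and loop.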
12028# qhasm: tmp = *(uint32 *)(np + 12)
12029# asm 1: movl 12(<np=int64#3),>tmp=int64#5d
12030# asm 2: movl 12(<np=%rdx),>tmp=%r8d
12031movl 12(%rdx),%r8d
12032
12033# qhasm: (uint32) bswap tmp
12034# asm 1: bswap <tmp=int64#5d
12035# asm 2: bswap <tmp=%r8d
12036bswap %r8d
12037
12038# qhasm: tmp += 8
12039# asm 1: add $8,<tmp=int64#5
12040# asm 2: add $8,<tmp=%r8
12041add $8,%r8
12042
12043# qhasm: (uint32) bswap tmp
12044# asm 1: bswap <tmp=int64#5d
12045# asm 2: bswap <tmp=%r8d
12046bswap %r8d
12047
12048# qhasm: *(uint32 *)(np + 12) = tmp
12049# asm 1: movl <tmp=int64#5d,12(<np=int64#3)
12050# asm 2: movl <tmp=%r8d,12(<np=%rdx)
12051movl %r8d,12(%rdx)
12052
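# Write the eight 16-byte blocks to outp; the register order 8,9,12,14,11,15,10,13
# is the block ordering used internally (the partial/full paths below store in the
# same order).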
12053# qhasm: *(int128 *) (outp + 0) = xmm8
12054# asm 1: movdqa <xmm8=int6464#9,0(<outp=int64#1)
12055# asm 2: movdqa <xmm8=%xmm8,0(<outp=%rdi)
12056movdqa %xmm8,0(%rdi)
12057
12058# qhasm: *(int128 *) (outp + 16) = xmm9
12059# asm 1: movdqa <xmm9=int6464#10,16(<outp=int64#1)
12060# asm 2: movdqa <xmm9=%xmm9,16(<outp=%rdi)
12061movdqa %xmm9,16(%rdi)
12062
12063# qhasm: *(int128 *) (outp + 32) = xmm12
12064# asm 1: movdqa <xmm12=int6464#13,32(<outp=int64#1)
12065# asm 2: movdqa <xmm12=%xmm12,32(<outp=%rdi)
12066movdqa %xmm12,32(%rdi)
12067
12068# qhasm: *(int128 *) (outp + 48) = xmm14
12069# asm 1: movdqa <xmm14=int6464#15,48(<outp=int64#1)
12070# asm 2: movdqa <xmm14=%xmm14,48(<outp=%rdi)
12071movdqa %xmm14,48(%rdi)
12072
12073# qhasm: *(int128 *) (outp + 64) = xmm11
12074# asm 1: movdqa <xmm11=int6464#12,64(<outp=int64#1)
12075# asm 2: movdqa <xmm11=%xmm11,64(<outp=%rdi)
12076movdqa %xmm11,64(%rdi)
12077
12078# qhasm: *(int128 *) (outp + 80) = xmm15
12079# asm 1: movdqa <xmm15=int6464#16,80(<outp=int64#1)
12080# asm 2: movdqa <xmm15=%xmm15,80(<outp=%rdi)
12081movdqa %xmm15,80(%rdi)
12082
12083# qhasm: *(int128 *) (outp + 96) = xmm10
12084# asm 1: movdqa <xmm10=int6464#11,96(<outp=int64#1)
12085# asm 2: movdqa <xmm10=%xmm10,96(<outp=%rdi)
12086movdqa %xmm10,96(%rdi)
12087
12088# qhasm: *(int128 *) (outp + 112) = xmm13
12089# asm 1: movdqa <xmm13=int6464#14,112(<outp=int64#1)
12090# asm 2: movdqa <xmm13=%xmm13,112(<outp=%rdi)
12091movdqa %xmm13,112(%rdi)
12092
12093# qhasm: len -= 128
12094# asm 1: sub $128,<len=int64#2
12095# asm 2: sub $128,<len=%rsi
12096sub $128,%rsi
12097
12098# qhasm: outp += 128
12099# asm 1: add $128,<outp=int64#1
12100# asm 2: add $128,<outp=%rdi
12101add $128,%rdi
12102# comment:fp stack unchanged by jump
12103
12104# qhasm: goto enc_block
12105jmp ._enc_block
12106
12107# qhasm: partial:
12108._partial:
12109
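# Fewer than 128 bytes remain: advance the counter by the number of whole 16-byte
# blocks (len >> 4), spill the eight blocks to the stack buffer bl, then copy the
# requested tail out one byte at a time.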
12110# qhasm: lensav = len
12111# asm 1: mov <len=int64#2,>lensav=int64#4
12112# asm 2: mov <len=%rsi,>lensav=%rcx
12113mov %rsi,%rcx
12114
12115# qhasm: (uint32) len >>= 4
12116# asm 1: shr $4,<len=int64#2d
12117# asm 2: shr $4,<len=%esi
12118shr $4,%esi
12119
12120# qhasm: tmp = *(uint32 *)(np + 12)
12121# asm 1: movl 12(<np=int64#3),>tmp=int64#5d
12122# asm 2: movl 12(<np=%rdx),>tmp=%r8d
12123movl 12(%rdx),%r8d
12124
12125# qhasm: (uint32) bswap tmp
12126# asm 1: bswap <tmp=int64#5d
12127# asm 2: bswap <tmp=%r8d
12128bswap %r8d
12129
12130# qhasm: tmp += len
12131# asm 1: add <len=int64#2,<tmp=int64#5
12132# asm 2: add <len=%rsi,<tmp=%r8
12133add %rsi,%r8
12134
12135# qhasm: (uint32) bswap tmp
12136# asm 1: bswap <tmp=int64#5d
12137# asm 2: bswap <tmp=%r8d
12138bswap %r8d
12139
12140# qhasm: *(uint32 *)(np + 12) = tmp
12141# asm 1: movl <tmp=int64#5d,12(<np=int64#3)
12142# asm 2: movl <tmp=%r8d,12(<np=%rdx)
12143movl %r8d,12(%rdx)
12144
12145# qhasm: blp = &bl
12146# asm 1: leaq <bl=stack1024#1,>blp=int64#2
12147# asm 2: leaq <bl=32(%rsp),>blp=%rsi
12148leaq 32(%rsp),%rsi
12149
12150# qhasm: *(int128 *)(blp + 0) = xmm8
12151# asm 1: movdqa <xmm8=int6464#9,0(<blp=int64#2)
12152# asm 2: movdqa <xmm8=%xmm8,0(<blp=%rsi)
12153movdqa %xmm8,0(%rsi)
12154
12155# qhasm: *(int128 *)(blp + 16) = xmm9
12156# asm 1: movdqa <xmm9=int6464#10,16(<blp=int64#2)
12157# asm 2: movdqa <xmm9=%xmm9,16(<blp=%rsi)
12158movdqa %xmm9,16(%rsi)
12159
12160# qhasm: *(int128 *)(blp + 32) = xmm12
12161# asm 1: movdqa <xmm12=int6464#13,32(<blp=int64#2)
12162# asm 2: movdqa <xmm12=%xmm12,32(<blp=%rsi)
12163movdqa %xmm12,32(%rsi)
12164
12165# qhasm: *(int128 *)(blp + 48) = xmm14
12166# asm 1: movdqa <xmm14=int6464#15,48(<blp=int64#2)
12167# asm 2: movdqa <xmm14=%xmm14,48(<blp=%rsi)
12168movdqa %xmm14,48(%rsi)
12169
12170# qhasm: *(int128 *)(blp + 64) = xmm11
12171# asm 1: movdqa <xmm11=int6464#12,64(<blp=int64#2)
12172# asm 2: movdqa <xmm11=%xmm11,64(<blp=%rsi)
12173movdqa %xmm11,64(%rsi)
12174
12175# qhasm: *(int128 *)(blp + 80) = xmm15
12176# asm 1: movdqa <xmm15=int6464#16,80(<blp=int64#2)
12177# asm 2: movdqa <xmm15=%xmm15,80(<blp=%rsi)
12178movdqa %xmm15,80(%rsi)
12179
12180# qhasm: *(int128 *)(blp + 96) = xmm10
12181# asm 1: movdqa <xmm10=int6464#11,96(<blp=int64#2)
12182# asm 2: movdqa <xmm10=%xmm10,96(<blp=%rsi)
12183movdqa %xmm10,96(%rsi)
12184
12185# qhasm: *(int128 *)(blp + 112) = xmm13
12186# asm 1: movdqa <xmm13=int6464#14,112(<blp=int64#2)
12187# asm 2: movdqa <xmm13=%xmm13,112(<blp=%rsi)
12188movdqa %xmm13,112(%rsi)
12189
12190# qhasm: bytes:
12191._bytes:
12192
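# Byte-copy loop: emit one byte per iteration from the stack buffer until lensav is 0.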
12193# qhasm: =? lensav-0
12194# asm 1: cmp $0,<lensav=int64#4
12195# asm 2: cmp $0,<lensav=%rcx
12196cmp $0,%rcx
12197# comment:fp stack unchanged by jump
12198
12199# qhasm: goto end if =
12200je ._end
12201
12202# qhasm: b = *(uint8 *)(blp + 0)
12203# asm 1: movzbq 0(<blp=int64#2),>b=int64#3
12204# asm 2: movzbq 0(<blp=%rsi),>b=%rdx
12205movzbq 0(%rsi),%rdx
12206
12207# qhasm: *(uint8 *)(outp + 0) = b
12208# asm 1: movb <b=int64#3b,0(<outp=int64#1)
12209# asm 2: movb <b=%dl,0(<outp=%rdi)
12210movb %dl,0(%rdi)
12211
12212# qhasm: blp += 1
12213# asm 1: add $1,<blp=int64#2
12214# asm 2: add $1,<blp=%rsi
12215add $1,%rsi
12216
12217# qhasm: outp +=1
12218# asm 1: add $1,<outp=int64#1
12219# asm 2: add $1,<outp=%rdi
12220add $1,%rdi
12221
12222# qhasm: lensav -= 1
12223# asm 1: sub $1,<lensav=int64#4
12224# asm 2: sub $1,<lensav=%rcx
12225sub $1,%rcx
12226# comment:fp stack unchanged by jump
12227
12228# qhasm: goto bytes
12229jmp ._bytes
12230
12231# qhasm: full:
12232._full:
12233
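# Exactly the requested amount is ready: update the counter in the nonce, store the
# eight blocks straight to outp and fall through to end.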
12234# qhasm: tmp = *(uint32 *)(np + 12)
12235# asm 1: movl 12(<np=int64#3),>tmp=int64#4d
12236# asm 2: movl 12(<np=%rdx),>tmp=%ecx
12237movl 12(%rdx),%ecx
12238
12239# qhasm: (uint32) bswap tmp
12240# asm 1: bswap <tmp=int64#4d
12241# asm 2: bswap <tmp=%ecx
12242bswap %ecx
12243
12244# qhasm: tmp += len
12245# asm 1: add <len=int64#2,<tmp=int64#4
12246# asm 2: add <len=%rsi,<tmp=%rcx
12247add %rsi,%rcx
12248
12249# qhasm: (uint32) bswap tmp
12250# asm 1: bswap <tmp=int64#4d
12251# asm 2: bswap <tmp=%ecx
12252bswap %ecx
12253
12254# qhasm: *(uint32 *)(np + 12) = tmp
12255# asm 1: movl <tmp=int64#4d,12(<np=int64#3)
12256# asm 2: movl <tmp=%ecx,12(<np=%rdx)
12257movl %ecx,12(%rdx)
12258
12259# qhasm: *(int128 *) (outp + 0) = xmm8
12260# asm 1: movdqa <xmm8=int6464#9,0(<outp=int64#1)
12261# asm 2: movdqa <xmm8=%xmm8,0(<outp=%rdi)
12262movdqa %xmm8,0(%rdi)
12263
12264# qhasm: *(int128 *) (outp + 16) = xmm9
12265# asm 1: movdqa <xmm9=int6464#10,16(<outp=int64#1)
12266# asm 2: movdqa <xmm9=%xmm9,16(<outp=%rdi)
12267movdqa %xmm9,16(%rdi)
12268
12269# qhasm: *(int128 *) (outp + 32) = xmm12
12270# asm 1: movdqa <xmm12=int6464#13,32(<outp=int64#1)
12271# asm 2: movdqa <xmm12=%xmm12,32(<outp=%rdi)
12272movdqa %xmm12,32(%rdi)
12273
12274# qhasm: *(int128 *) (outp + 48) = xmm14
12275# asm 1: movdqa <xmm14=int6464#15,48(<outp=int64#1)
12276# asm 2: movdqa <xmm14=%xmm14,48(<outp=%rdi)
12277movdqa %xmm14,48(%rdi)
12278
12279# qhasm: *(int128 *) (outp + 64) = xmm11
12280# asm 1: movdqa <xmm11=int6464#12,64(<outp=int64#1)
12281# asm 2: movdqa <xmm11=%xmm11,64(<outp=%rdi)
12282movdqa %xmm11,64(%rdi)
12283
12284# qhasm: *(int128 *) (outp + 80) = xmm15
12285# asm 1: movdqa <xmm15=int6464#16,80(<outp=int64#1)
12286# asm 2: movdqa <xmm15=%xmm15,80(<outp=%rdi)
12287movdqa %xmm15,80(%rdi)
12288
12289# qhasm: *(int128 *) (outp + 96) = xmm10
12290# asm 1: movdqa <xmm10=int6464#11,96(<outp=int64#1)
12291# asm 2: movdqa <xmm10=%xmm10,96(<outp=%rdi)
12292movdqa %xmm10,96(%rdi)
12293
12294# qhasm: *(int128 *) (outp + 112) = xmm13
12295# asm 1: movdqa <xmm13=int6464#14,112(<outp=int64#1)
12296# asm 2: movdqa <xmm13=%xmm13,112(<outp=%rdi)
12297movdqa %xmm13,112(%rdi)
12298# comment:fp stack unchanged by fallthrough
12299
12300# qhasm: end:
12301._end:
12302
12303# qhasm: leave
12304add %r11,%rsp
12305mov %rdi,%rax
12306mov %rsi,%rdx
12307xor %rax,%rax
12308ret
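
The interleave/de-interleave passes in this file (and in beforenm.s below) are instances of the swap-move trick on the BS0/BS1/BS2 masks. A minimal C sketch of that primitive, assuming 64-bit lanes to model the psrlq/psllq shifts and using the hypothetical helper name swapmove:

    #include <stdint.h>

    /* Exchange the bits of *a and *b selected by mask m, at distance n:
       bit p of *b swaps with bit p+n of *a wherever m has bit p set. */
    static void swapmove(uint64_t *a, uint64_t *b, uint64_t m, unsigned n)
    {
        uint64_t t = ((*a >> n) ^ *b) & m;  /* positions where the two bits differ */
        *b ^= t;                            /* flip those bits in b */
        *a ^= t << n;                       /* and the matching bits in a */
    }

    /* Matches, e.g., the BS0 step on xmm10/xmm13 above:
       swapmove(&x10, &x13, 0x5555555555555555ULL, 1);
       followed by the BS1 (0x33.., shift 2) and BS2 (0x0f.., shift 4) steps. */
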
diff --git a/nacl/crypto_stream/aes128ctr/core2/api.h b/nacl/crypto_stream/aes128ctr/core2/api.h
new file mode 100644
index 00000000..62fc8d88
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/api.h
@@ -0,0 +1,3 @@
1#define CRYPTO_KEYBYTES 16
2#define CRYPTO_NONCEBYTES 16
3#define CRYPTO_BEFORENMBYTES 1408
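/* Note: 1408 = 11 AES-128 round keys x 128 bytes each (8 bitsliced 16-byte words per round key). */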
diff --git a/nacl/crypto_stream/aes128ctr/core2/beforenm.s b/nacl/crypto_stream/aes128ctr/core2/beforenm.s
new file mode 100644
index 00000000..689ad8c3
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/beforenm.s
@@ -0,0 +1,13694 @@
1# Author: Emilia Käsper and Peter Schwabe
2# Date: 2009-03-19
3# +2010.01.31: minor namespace modifications
4# Public domain
5
6.data
7.p2align 6
8
9RCON: .int 0x00000000, 0x00000000, 0x00000000, 0xffffffff
10ROTB: .int 0x0c000000, 0x00000000, 0x04000000, 0x08000000
11EXPB0: .int 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f
12CTRINC1: .int 0x00000001, 0x00000000, 0x00000000, 0x00000000
13CTRINC2: .int 0x00000002, 0x00000000, 0x00000000, 0x00000000
14CTRINC3: .int 0x00000003, 0x00000000, 0x00000000, 0x00000000
15CTRINC4: .int 0x00000004, 0x00000000, 0x00000000, 0x00000000
16CTRINC5: .int 0x00000005, 0x00000000, 0x00000000, 0x00000000
17CTRINC6: .int 0x00000006, 0x00000000, 0x00000000, 0x00000000
18CTRINC7: .int 0x00000007, 0x00000000, 0x00000000, 0x00000000
19RCTRINC1: .int 0x00000000, 0x00000000, 0x00000000, 0x00000001
20RCTRINC2: .int 0x00000000, 0x00000000, 0x00000000, 0x00000002
21RCTRINC3: .int 0x00000000, 0x00000000, 0x00000000, 0x00000003
22RCTRINC4: .int 0x00000000, 0x00000000, 0x00000000, 0x00000004
23RCTRINC5: .int 0x00000000, 0x00000000, 0x00000000, 0x00000005
24RCTRINC6: .int 0x00000000, 0x00000000, 0x00000000, 0x00000006
25RCTRINC7: .int 0x00000000, 0x00000000, 0x00000000, 0x00000007
26
27SWAP32: .int 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
28M0SWAP: .quad 0x0105090d0004080c , 0x03070b0f02060a0e
29
30BS0: .quad 0x5555555555555555, 0x5555555555555555
31BS1: .quad 0x3333333333333333, 0x3333333333333333
32BS2: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
33ONE: .quad 0xffffffffffffffff, 0xffffffffffffffff
34M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d
35SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d
36SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
37
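# BS0/BS1/BS2 are the bit-interleave masks (alternating bits, bit pairs, nibbles);
# M0, SWAP32, SR and SRM0 appear to be pshufb byte-permutation tables; RCON, ROTB and
# EXPB0 belong to the key schedule; the CTRINC/RCTRINC vectors hold the counter
# increments 1..7 for the parallel blocks.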
38# qhasm: int64 arg1
39
40# qhasm: int64 arg2
41
42# qhasm: input arg1
43
44# qhasm: input arg2
45
46# qhasm: int64 r11_caller
47
48# qhasm: int64 r12_caller
49
50# qhasm: int64 r13_caller
51
52# qhasm: int64 r14_caller
53
54# qhasm: int64 r15_caller
55
56# qhasm: int64 rbx_caller
57
58# qhasm: int64 rbp_caller
59
60# qhasm: caller r11_caller
61
62# qhasm: caller r12_caller
63
64# qhasm: caller r13_caller
65
66# qhasm: caller r14_caller
67
68# qhasm: caller r15_caller
69
70# qhasm: caller rbx_caller
71
72# qhasm: caller rbp_caller
73
74# qhasm: int64 sboxp
75
76# qhasm: int64 c
77
78# qhasm: int64 k
79
80# qhasm: int64 x0
81
82# qhasm: int64 x1
83
84# qhasm: int64 x2
85
86# qhasm: int64 x3
87
88# qhasm: int64 e
89
90# qhasm: int64 q0
91
92# qhasm: int64 q1
93
94# qhasm: int64 q2
95
96# qhasm: int64 q3
97
98# qhasm: int6464 xmm0
99
100# qhasm: int6464 xmm1
101
102# qhasm: int6464 xmm2
103
104# qhasm: int6464 xmm3
105
106# qhasm: int6464 xmm4
107
108# qhasm: int6464 xmm5
109
110# qhasm: int6464 xmm6
111
112# qhasm: int6464 xmm7
113
114# qhasm: int6464 xmm8
115
116# qhasm: int6464 xmm9
117
118# qhasm: int6464 xmm10
119
120# qhasm: int6464 xmm11
121
122# qhasm: int6464 xmm12
123
124# qhasm: int6464 xmm13
125
126# qhasm: int6464 xmm14
127
128# qhasm: int6464 xmm15
129
130# qhasm: int6464 t
131
132# qhasm: enter crypto_stream_aes128ctr_core2_beforenm
133.text
134.p2align 5
135.globl _crypto_stream_aes128ctr_core2_beforenm
136.globl crypto_stream_aes128ctr_core2_beforenm
137_crypto_stream_aes128ctr_core2_beforenm:
138crypto_stream_aes128ctr_core2_beforenm:
139mov %rsp,%r11
140and $31,%r11
141add $0,%r11
142sub %r11,%rsp
143
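# Prologue: r11 = rsp mod 32 is subtracted from rsp, aligning the stack so movdqa
# spills are safe; the same r11 is added back before returning.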
144# qhasm: c = arg1
145# asm 1: mov <arg1=int64#1,>c=int64#1
146# asm 2: mov <arg1=%rdi,>c=%rdi
147mov %rdi,%rdi
148
149# qhasm: k = arg2
150# asm 1: mov <arg2=int64#2,>k=int64#2
151# asm 2: mov <arg2=%rsi,>k=%rsi
152mov %rsi,%rsi
153
154# qhasm: xmm0 = *(int128 *) (k + 0)
155# asm 1: movdqa 0(<k=int64#2),>xmm0=int6464#1
156# asm 2: movdqa 0(<k=%rsi),>xmm0=%xmm0
157movdqa 0(%rsi),%xmm0
158
159# qhasm: shuffle bytes of xmm0 by M0
160# asm 1: pshufb M0,<xmm0=int6464#1
161# asm 2: pshufb M0,<xmm0=%xmm0
162pshufb M0,%xmm0
163
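# The 16 key bytes are permuted by M0 into bitslicing input order; the key is then
# replicated into xmm1..xmm7 so all eight registers can be bitsliced together.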
164# qhasm: xmm1 = xmm0
165# asm 1: movdqa <xmm0=int6464#1,>xmm1=int6464#2
166# asm 2: movdqa <xmm0=%xmm0,>xmm1=%xmm1
167movdqa %xmm0,%xmm1
168
169# qhasm: xmm2 = xmm0
170# asm 1: movdqa <xmm0=int6464#1,>xmm2=int6464#3
171# asm 2: movdqa <xmm0=%xmm0,>xmm2=%xmm2
172movdqa %xmm0,%xmm2
173
174# qhasm: xmm3 = xmm0
175# asm 1: movdqa <xmm0=int6464#1,>xmm3=int6464#4
176# asm 2: movdqa <xmm0=%xmm0,>xmm3=%xmm3
177movdqa %xmm0,%xmm3
178
179# qhasm: xmm4 = xmm0
180# asm 1: movdqa <xmm0=int6464#1,>xmm4=int6464#5
181# asm 2: movdqa <xmm0=%xmm0,>xmm4=%xmm4
182movdqa %xmm0,%xmm4
183
184# qhasm: xmm5 = xmm0
185# asm 1: movdqa <xmm0=int6464#1,>xmm5=int6464#6
186# asm 2: movdqa <xmm0=%xmm0,>xmm5=%xmm5
187movdqa %xmm0,%xmm5
188
189# qhasm: xmm6 = xmm0
190# asm 1: movdqa <xmm0=int6464#1,>xmm6=int6464#7
191# asm 2: movdqa <xmm0=%xmm0,>xmm6=%xmm6
192movdqa %xmm0,%xmm6
193
194# qhasm: xmm7 = xmm0
195# asm 1: movdqa <xmm0=int6464#1,>xmm7=int6464#8
196# asm 2: movdqa <xmm0=%xmm0,>xmm7=%xmm7
197movdqa %xmm0,%xmm7
198
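# Bitslice the key: the same swap-move ladder as in afternm.s (masks BS0/BS1/BS2,
# shifts 1/2/4) redistributes the bits so that register j ends up holding bit j of
# every state byte.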
199# qhasm: t = xmm6
200# asm 1: movdqa <xmm6=int6464#7,>t=int6464#9
201# asm 2: movdqa <xmm6=%xmm6,>t=%xmm8
202movdqa %xmm6,%xmm8
203
204# qhasm: uint6464 t >>= 1
205# asm 1: psrlq $1,<t=int6464#9
206# asm 2: psrlq $1,<t=%xmm8
207psrlq $1,%xmm8
208
209# qhasm: t ^= xmm7
210# asm 1: pxor <xmm7=int6464#8,<t=int6464#9
211# asm 2: pxor <xmm7=%xmm7,<t=%xmm8
212pxor %xmm7,%xmm8
213
214# qhasm: t &= BS0
215# asm 1: pand BS0,<t=int6464#9
216# asm 2: pand BS0,<t=%xmm8
217pand BS0,%xmm8
218
219# qhasm: xmm7 ^= t
220# asm 1: pxor <t=int6464#9,<xmm7=int6464#8
221# asm 2: pxor <t=%xmm8,<xmm7=%xmm7
222pxor %xmm8,%xmm7
223
224# qhasm: uint6464 t <<= 1
225# asm 1: psllq $1,<t=int6464#9
226# asm 2: psllq $1,<t=%xmm8
227psllq $1,%xmm8
228
229# qhasm: xmm6 ^= t
230# asm 1: pxor <t=int6464#9,<xmm6=int6464#7
231# asm 2: pxor <t=%xmm8,<xmm6=%xmm6
232pxor %xmm8,%xmm6
233
234# qhasm: t = xmm4
235# asm 1: movdqa <xmm4=int6464#5,>t=int6464#9
236# asm 2: movdqa <xmm4=%xmm4,>t=%xmm8
237movdqa %xmm4,%xmm8
238
239# qhasm: uint6464 t >>= 1
240# asm 1: psrlq $1,<t=int6464#9
241# asm 2: psrlq $1,<t=%xmm8
242psrlq $1,%xmm8
243
244# qhasm: t ^= xmm5
245# asm 1: pxor <xmm5=int6464#6,<t=int6464#9
246# asm 2: pxor <xmm5=%xmm5,<t=%xmm8
247pxor %xmm5,%xmm8
248
249# qhasm: t &= BS0
250# asm 1: pand BS0,<t=int6464#9
251# asm 2: pand BS0,<t=%xmm8
252pand BS0,%xmm8
253
254# qhasm: xmm5 ^= t
255# asm 1: pxor <t=int6464#9,<xmm5=int6464#6
256# asm 2: pxor <t=%xmm8,<xmm5=%xmm5
257pxor %xmm8,%xmm5
258
259# qhasm: uint6464 t <<= 1
260# asm 1: psllq $1,<t=int6464#9
261# asm 2: psllq $1,<t=%xmm8
262psllq $1,%xmm8
263
264# qhasm: xmm4 ^= t
265# asm 1: pxor <t=int6464#9,<xmm4=int6464#5
266# asm 2: pxor <t=%xmm8,<xmm4=%xmm4
267pxor %xmm8,%xmm4
268
269# qhasm: t = xmm2
270# asm 1: movdqa <xmm2=int6464#3,>t=int6464#9
271# asm 2: movdqa <xmm2=%xmm2,>t=%xmm8
272movdqa %xmm2,%xmm8
273
274# qhasm: uint6464 t >>= 1
275# asm 1: psrlq $1,<t=int6464#9
276# asm 2: psrlq $1,<t=%xmm8
277psrlq $1,%xmm8
278
279# qhasm: t ^= xmm3
280# asm 1: pxor <xmm3=int6464#4,<t=int6464#9
281# asm 2: pxor <xmm3=%xmm3,<t=%xmm8
282pxor %xmm3,%xmm8
283
284# qhasm: t &= BS0
285# asm 1: pand BS0,<t=int6464#9
286# asm 2: pand BS0,<t=%xmm8
287pand BS0,%xmm8
288
289# qhasm: xmm3 ^= t
290# asm 1: pxor <t=int6464#9,<xmm3=int6464#4
291# asm 2: pxor <t=%xmm8,<xmm3=%xmm3
292pxor %xmm8,%xmm3
293
294# qhasm: uint6464 t <<= 1
295# asm 1: psllq $1,<t=int6464#9
296# asm 2: psllq $1,<t=%xmm8
297psllq $1,%xmm8
298
299# qhasm: xmm2 ^= t
300# asm 1: pxor <t=int6464#9,<xmm2=int6464#3
301# asm 2: pxor <t=%xmm8,<xmm2=%xmm2
302pxor %xmm8,%xmm2
303
304# qhasm: t = xmm0
305# asm 1: movdqa <xmm0=int6464#1,>t=int6464#9
306# asm 2: movdqa <xmm0=%xmm0,>t=%xmm8
307movdqa %xmm0,%xmm8
308
309# qhasm: uint6464 t >>= 1
310# asm 1: psrlq $1,<t=int6464#9
311# asm 2: psrlq $1,<t=%xmm8
312psrlq $1,%xmm8
313
314# qhasm: t ^= xmm1
315# asm 1: pxor <xmm1=int6464#2,<t=int6464#9
316# asm 2: pxor <xmm1=%xmm1,<t=%xmm8
317pxor %xmm1,%xmm8
318
319# qhasm: t &= BS0
320# asm 1: pand BS0,<t=int6464#9
321# asm 2: pand BS0,<t=%xmm8
322pand BS0,%xmm8
323
324# qhasm: xmm1 ^= t
325# asm 1: pxor <t=int6464#9,<xmm1=int6464#2
326# asm 2: pxor <t=%xmm8,<xmm1=%xmm1
327pxor %xmm8,%xmm1
328
329# qhasm: uint6464 t <<= 1
330# asm 1: psllq $1,<t=int6464#9
331# asm 2: psllq $1,<t=%xmm8
332psllq $1,%xmm8
333
334# qhasm: xmm0 ^= t
335# asm 1: pxor <t=int6464#9,<xmm0=int6464#1
336# asm 2: pxor <t=%xmm8,<xmm0=%xmm0
337pxor %xmm8,%xmm0
338
339# qhasm: t = xmm5
340# asm 1: movdqa <xmm5=int6464#6,>t=int6464#9
341# asm 2: movdqa <xmm5=%xmm5,>t=%xmm8
342movdqa %xmm5,%xmm8
343
344# qhasm: uint6464 t >>= 2
345# asm 1: psrlq $2,<t=int6464#9
346# asm 2: psrlq $2,<t=%xmm8
347psrlq $2,%xmm8
348
349# qhasm: t ^= xmm7
350# asm 1: pxor <xmm7=int6464#8,<t=int6464#9
351# asm 2: pxor <xmm7=%xmm7,<t=%xmm8
352pxor %xmm7,%xmm8
353
354# qhasm: t &= BS1
355# asm 1: pand BS1,<t=int6464#9
356# asm 2: pand BS1,<t=%xmm8
357pand BS1,%xmm8
358
359# qhasm: xmm7 ^= t
360# asm 1: pxor <t=int6464#9,<xmm7=int6464#8
361# asm 2: pxor <t=%xmm8,<xmm7=%xmm7
362pxor %xmm8,%xmm7
363
364# qhasm: uint6464 t <<= 2
365# asm 1: psllq $2,<t=int6464#9
366# asm 2: psllq $2,<t=%xmm8
367psllq $2,%xmm8
368
369# qhasm: xmm5 ^= t
370# asm 1: pxor <t=int6464#9,<xmm5=int6464#6
371# asm 2: pxor <t=%xmm8,<xmm5=%xmm5
372pxor %xmm8,%xmm5
373
374# qhasm: t = xmm4
375# asm 1: movdqa <xmm4=int6464#5,>t=int6464#9
376# asm 2: movdqa <xmm4=%xmm4,>t=%xmm8
377movdqa %xmm4,%xmm8
378
379# qhasm: uint6464 t >>= 2
380# asm 1: psrlq $2,<t=int6464#9
381# asm 2: psrlq $2,<t=%xmm8
382psrlq $2,%xmm8
383
384# qhasm: t ^= xmm6
385# asm 1: pxor <xmm6=int6464#7,<t=int6464#9
386# asm 2: pxor <xmm6=%xmm6,<t=%xmm8
387pxor %xmm6,%xmm8
388
389# qhasm: t &= BS1
390# asm 1: pand BS1,<t=int6464#9
391# asm 2: pand BS1,<t=%xmm8
392pand BS1,%xmm8
393
394# qhasm: xmm6 ^= t
395# asm 1: pxor <t=int6464#9,<xmm6=int6464#7
396# asm 2: pxor <t=%xmm8,<xmm6=%xmm6
397pxor %xmm8,%xmm6
398
399# qhasm: uint6464 t <<= 2
400# asm 1: psllq $2,<t=int6464#9
401# asm 2: psllq $2,<t=%xmm8
402psllq $2,%xmm8
403
404# qhasm: xmm4 ^= t
405# asm 1: pxor <t=int6464#9,<xmm4=int6464#5
406# asm 2: pxor <t=%xmm8,<xmm4=%xmm4
407pxor %xmm8,%xmm4
408
409# qhasm: t = xmm1
410# asm 1: movdqa <xmm1=int6464#2,>t=int6464#9
411# asm 2: movdqa <xmm1=%xmm1,>t=%xmm8
412movdqa %xmm1,%xmm8
413
414# qhasm: uint6464 t >>= 2
415# asm 1: psrlq $2,<t=int6464#9
416# asm 2: psrlq $2,<t=%xmm8
417psrlq $2,%xmm8
418
419# qhasm: t ^= xmm3
420# asm 1: pxor <xmm3=int6464#4,<t=int6464#9
421# asm 2: pxor <xmm3=%xmm3,<t=%xmm8
422pxor %xmm3,%xmm8
423
424# qhasm: t &= BS1
425# asm 1: pand BS1,<t=int6464#9
426# asm 2: pand BS1,<t=%xmm8
427pand BS1,%xmm8
428
429# qhasm: xmm3 ^= t
430# asm 1: pxor <t=int6464#9,<xmm3=int6464#4
431# asm 2: pxor <t=%xmm8,<xmm3=%xmm3
432pxor %xmm8,%xmm3
433
434# qhasm: uint6464 t <<= 2
435# asm 1: psllq $2,<t=int6464#9
436# asm 2: psllq $2,<t=%xmm8
437psllq $2,%xmm8
438
439# qhasm: xmm1 ^= t
440# asm 1: pxor <t=int6464#9,<xmm1=int6464#2
441# asm 2: pxor <t=%xmm8,<xmm1=%xmm1
442pxor %xmm8,%xmm1
443
444# qhasm: t = xmm0
445# asm 1: movdqa <xmm0=int6464#1,>t=int6464#9
446# asm 2: movdqa <xmm0=%xmm0,>t=%xmm8
447movdqa %xmm0,%xmm8
448
449# qhasm: uint6464 t >>= 2
450# asm 1: psrlq $2,<t=int6464#9
451# asm 2: psrlq $2,<t=%xmm8
452psrlq $2,%xmm8
453
454# qhasm: t ^= xmm2
455# asm 1: pxor <xmm2=int6464#3,<t=int6464#9
456# asm 2: pxor <xmm2=%xmm2,<t=%xmm8
457pxor %xmm2,%xmm8
458
459# qhasm: t &= BS1
460# asm 1: pand BS1,<t=int6464#9
461# asm 2: pand BS1,<t=%xmm8
462pand BS1,%xmm8
463
464# qhasm: xmm2 ^= t
465# asm 1: pxor <t=int6464#9,<xmm2=int6464#3
466# asm 2: pxor <t=%xmm8,<xmm2=%xmm2
467pxor %xmm8,%xmm2
468
469# qhasm: uint6464 t <<= 2
470# asm 1: psllq $2,<t=int6464#9
471# asm 2: psllq $2,<t=%xmm8
472psllq $2,%xmm8
473
474# qhasm: xmm0 ^= t
475# asm 1: pxor <t=int6464#9,<xmm0=int6464#1
476# asm 2: pxor <t=%xmm8,<xmm0=%xmm0
477pxor %xmm8,%xmm0
478
479# qhasm: t = xmm3
480# asm 1: movdqa <xmm3=int6464#4,>t=int6464#9
481# asm 2: movdqa <xmm3=%xmm3,>t=%xmm8
482movdqa %xmm3,%xmm8
483
484# qhasm: uint6464 t >>= 4
485# asm 1: psrlq $4,<t=int6464#9
486# asm 2: psrlq $4,<t=%xmm8
487psrlq $4,%xmm8
488
489# qhasm: t ^= xmm7
490# asm 1: pxor <xmm7=int6464#8,<t=int6464#9
491# asm 2: pxor <xmm7=%xmm7,<t=%xmm8
492pxor %xmm7,%xmm8
493
494# qhasm: t &= BS2
495# asm 1: pand BS2,<t=int6464#9
496# asm 2: pand BS2,<t=%xmm8
497pand BS2,%xmm8
498
499# qhasm: xmm7 ^= t
500# asm 1: pxor <t=int6464#9,<xmm7=int6464#8
501# asm 2: pxor <t=%xmm8,<xmm7=%xmm7
502pxor %xmm8,%xmm7
503
504# qhasm: uint6464 t <<= 4
505# asm 1: psllq $4,<t=int6464#9
506# asm 2: psllq $4,<t=%xmm8
507psllq $4,%xmm8
508
509# qhasm: xmm3 ^= t
510# asm 1: pxor <t=int6464#9,<xmm3=int6464#4
511# asm 2: pxor <t=%xmm8,<xmm3=%xmm3
512pxor %xmm8,%xmm3
513
514# qhasm: t = xmm2
515# asm 1: movdqa <xmm2=int6464#3,>t=int6464#9
516# asm 2: movdqa <xmm2=%xmm2,>t=%xmm8
517movdqa %xmm2,%xmm8
518
519# qhasm: uint6464 t >>= 4
520# asm 1: psrlq $4,<t=int6464#9
521# asm 2: psrlq $4,<t=%xmm8
522psrlq $4,%xmm8
523
524# qhasm: t ^= xmm6
525# asm 1: pxor <xmm6=int6464#7,<t=int6464#9
526# asm 2: pxor <xmm6=%xmm6,<t=%xmm8
527pxor %xmm6,%xmm8
528
529# qhasm: t &= BS2
530# asm 1: pand BS2,<t=int6464#9
531# asm 2: pand BS2,<t=%xmm8
532pand BS2,%xmm8
533
534# qhasm: xmm6 ^= t
535# asm 1: pxor <t=int6464#9,<xmm6=int6464#7
536# asm 2: pxor <t=%xmm8,<xmm6=%xmm6
537pxor %xmm8,%xmm6
538
539# qhasm: uint6464 t <<= 4
540# asm 1: psllq $4,<t=int6464#9
541# asm 2: psllq $4,<t=%xmm8
542psllq $4,%xmm8
543
544# qhasm: xmm2 ^= t
545# asm 1: pxor <t=int6464#9,<xmm2=int6464#3
546# asm 2: pxor <t=%xmm8,<xmm2=%xmm2
547pxor %xmm8,%xmm2
548
549# qhasm: t = xmm1
550# asm 1: movdqa <xmm1=int6464#2,>t=int6464#9
551# asm 2: movdqa <xmm1=%xmm1,>t=%xmm8
552movdqa %xmm1,%xmm8
553
554# qhasm: uint6464 t >>= 4
555# asm 1: psrlq $4,<t=int6464#9
556# asm 2: psrlq $4,<t=%xmm8
557psrlq $4,%xmm8
558
559# qhasm: t ^= xmm5
560# asm 1: pxor <xmm5=int6464#6,<t=int6464#9
561# asm 2: pxor <xmm5=%xmm5,<t=%xmm8
562pxor %xmm5,%xmm8
563
564# qhasm: t &= BS2
565# asm 1: pand BS2,<t=int6464#9
566# asm 2: pand BS2,<t=%xmm8
567pand BS2,%xmm8
568
569# qhasm: xmm5 ^= t
570# asm 1: pxor <t=int6464#9,<xmm5=int6464#6
571# asm 2: pxor <t=%xmm8,<xmm5=%xmm5
572pxor %xmm8,%xmm5
573
574# qhasm: uint6464 t <<= 4
575# asm 1: psllq $4,<t=int6464#9
576# asm 2: psllq $4,<t=%xmm8
577psllq $4,%xmm8
578
579# qhasm: xmm1 ^= t
580# asm 1: pxor <t=int6464#9,<xmm1=int6464#2
581# asm 2: pxor <t=%xmm8,<xmm1=%xmm1
582pxor %xmm8,%xmm1
583
584# qhasm: t = xmm0
585# asm 1: movdqa <xmm0=int6464#1,>t=int6464#9
586# asm 2: movdqa <xmm0=%xmm0,>t=%xmm8
587movdqa %xmm0,%xmm8
588
589# qhasm: uint6464 t >>= 4
590# asm 1: psrlq $4,<t=int6464#9
591# asm 2: psrlq $4,<t=%xmm8
592psrlq $4,%xmm8
593
594# qhasm: t ^= xmm4
595# asm 1: pxor <xmm4=int6464#5,<t=int6464#9
596# asm 2: pxor <xmm4=%xmm4,<t=%xmm8
597pxor %xmm4,%xmm8
598
599# qhasm: t &= BS2
600# asm 1: pand BS2,<t=int6464#9
601# asm 2: pand BS2,<t=%xmm8
602pand BS2,%xmm8
603
604# qhasm: xmm4 ^= t
605# asm 1: pxor <t=int6464#9,<xmm4=int6464#5
606# asm 2: pxor <t=%xmm8,<xmm4=%xmm4
607pxor %xmm8,%xmm4
608
609# qhasm: uint6464 t <<= 4
610# asm 1: psllq $4,<t=int6464#9
611# asm 2: psllq $4,<t=%xmm8
612psllq $4,%xmm8
613
614# qhasm: xmm0 ^= t
615# asm 1: pxor <t=int6464#9,<xmm0=int6464#1
616# asm 2: pxor <t=%xmm8,<xmm0=%xmm0
617pxor %xmm8,%xmm0
618
619# qhasm: *(int128 *) (c + 0) = xmm0
620# asm 1: movdqa <xmm0=int6464#1,0(<c=int64#1)
621# asm 2: movdqa <xmm0=%xmm0,0(<c=%rdi)
622movdqa %xmm0,0(%rdi)
623
624# qhasm: *(int128 *) (c + 16) = xmm1
625# asm 1: movdqa <xmm1=int6464#2,16(<c=int64#1)
626# asm 2: movdqa <xmm1=%xmm1,16(<c=%rdi)
627movdqa %xmm1,16(%rdi)
628
629# qhasm: *(int128 *) (c + 32) = xmm2
630# asm 1: movdqa <xmm2=int6464#3,32(<c=int64#1)
631# asm 2: movdqa <xmm2=%xmm2,32(<c=%rdi)
632movdqa %xmm2,32(%rdi)
633
634# qhasm: *(int128 *) (c + 48) = xmm3
635# asm 1: movdqa <xmm3=int6464#4,48(<c=int64#1)
636# asm 2: movdqa <xmm3=%xmm3,48(<c=%rdi)
637movdqa %xmm3,48(%rdi)
638
639# qhasm: *(int128 *) (c + 64) = xmm4
640# asm 1: movdqa <xmm4=int6464#5,64(<c=int64#1)
641# asm 2: movdqa <xmm4=%xmm4,64(<c=%rdi)
642movdqa %xmm4,64(%rdi)
643
644# qhasm: *(int128 *) (c + 80) = xmm5
645# asm 1: movdqa <xmm5=int6464#6,80(<c=int64#1)
646# asm 2: movdqa <xmm5=%xmm5,80(<c=%rdi)
647movdqa %xmm5,80(%rdi)
648
649# qhasm: *(int128 *) (c + 96) = xmm6
650# asm 1: movdqa <xmm6=int6464#7,96(<c=int64#1)
651# asm 2: movdqa <xmm6=%xmm6,96(<c=%rdi)
652movdqa %xmm6,96(%rdi)
653
654# qhasm: *(int128 *) (c + 112) = xmm7
655# asm 1: movdqa <xmm7=int6464#8,112(<c=int64#1)
656# asm 2: movdqa <xmm7=%xmm7,112(<c=%rdi)
657movdqa %xmm7,112(%rdi)
658
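# Bitsliced round key 0 (the permuted original key) is now at c+0..c+112; the
# remaining ten round keys are derived below and stored at successive 128-byte
# offsets.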
659# qhasm: shuffle bytes of xmm0 by ROTB
660# asm 1: pshufb ROTB,<xmm0=int6464#1
661# asm 2: pshufb ROTB,<xmm0=%xmm0
662pshufb ROTB,%xmm0
663
664# qhasm: shuffle bytes of xmm1 by ROTB
665# asm 1: pshufb ROTB,<xmm1=int6464#2
666# asm 2: pshufb ROTB,<xmm1=%xmm1
667pshufb ROTB,%xmm1
668
669# qhasm: shuffle bytes of xmm2 by ROTB
670# asm 1: pshufb ROTB,<xmm2=int6464#3
671# asm 2: pshufb ROTB,<xmm2=%xmm2
672pshufb ROTB,%xmm2
673
674# qhasm: shuffle bytes of xmm3 by ROTB
675# asm 1: pshufb ROTB,<xmm3=int6464#4
676# asm 2: pshufb ROTB,<xmm3=%xmm3
677pshufb ROTB,%xmm3
678
679# qhasm: shuffle bytes of xmm4 by ROTB
680# asm 1: pshufb ROTB,<xmm4=int6464#5
681# asm 2: pshufb ROTB,<xmm4=%xmm4
682pshufb ROTB,%xmm4
683
684# qhasm: shuffle bytes of xmm5 by ROTB
685# asm 1: pshufb ROTB,<xmm5=int6464#6
686# asm 2: pshufb ROTB,<xmm5=%xmm5
687pshufb ROTB,%xmm5
688
689# qhasm: shuffle bytes of xmm6 by ROTB
690# asm 1: pshufb ROTB,<xmm6=int6464#7
691# asm 2: pshufb ROTB,<xmm6=%xmm6
692pshufb ROTB,%xmm6
693
694# qhasm: shuffle bytes of xmm7 by ROTB
695# asm 1: pshufb ROTB,<xmm7=int6464#8
696# asm 2: pshufb ROTB,<xmm7=%xmm7
697pshufb ROTB,%xmm7
698
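# After the ROTB rotation the code enters the bitsliced AES S-box (used here for the
# key schedule); the xor ladder that follows appears to be its input basis change.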
699# qhasm: xmm5 ^= xmm6
700# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
701# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
702pxor %xmm6,%xmm5
703
704# qhasm: xmm2 ^= xmm1
705# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
706# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
707pxor %xmm1,%xmm2
708
709# qhasm: xmm5 ^= xmm0
710# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
711# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
712pxor %xmm0,%xmm5
713
714# qhasm: xmm6 ^= xmm2
715# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
716# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
717pxor %xmm2,%xmm6
718
719# qhasm: xmm3 ^= xmm0
720# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
721# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
722pxor %xmm0,%xmm3
723
724# qhasm: xmm6 ^= xmm3
725# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
726# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
727pxor %xmm3,%xmm6
728
729# qhasm: xmm3 ^= xmm7
730# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
731# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
732pxor %xmm7,%xmm3
733
734# qhasm: xmm3 ^= xmm4
735# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
736# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
737pxor %xmm4,%xmm3
738
739# qhasm: xmm7 ^= xmm5
740# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
741# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
742pxor %xmm5,%xmm7
743
744# qhasm: xmm3 ^= xmm1
745# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
746# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
747pxor %xmm1,%xmm3
748
749# qhasm: xmm4 ^= xmm5
750# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
751# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
752pxor %xmm5,%xmm4
753
754# qhasm: xmm2 ^= xmm7
755# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
756# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
757pxor %xmm7,%xmm2
758
759# qhasm: xmm1 ^= xmm5
760# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
761# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
762pxor %xmm5,%xmm1
763
764# qhasm: xmm11 = xmm7
765# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
766# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
767movdqa %xmm7,%xmm8
768
769# qhasm: xmm10 = xmm1
770# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
771# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
772movdqa %xmm1,%xmm9
773
774# qhasm: xmm9 = xmm5
775# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
776# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
777movdqa %xmm5,%xmm10
778
779# qhasm: xmm13 = xmm2
780# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
781# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
782movdqa %xmm2,%xmm11
783
784# qhasm: xmm12 = xmm6
785# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
786# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
787movdqa %xmm6,%xmm12
788
789# qhasm: xmm11 ^= xmm4
790# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
791# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
792pxor %xmm4,%xmm8
793
794# qhasm: xmm10 ^= xmm2
795# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
796# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
797pxor %xmm2,%xmm9
798
799# qhasm: xmm9 ^= xmm3
800# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
801# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
802pxor %xmm3,%xmm10
803
804# qhasm: xmm13 ^= xmm4
805# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
806# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
807pxor %xmm4,%xmm11
808
809# qhasm: xmm12 ^= xmm0
810# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
811# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
812pxor %xmm0,%xmm12
813
814# qhasm: xmm14 = xmm11
815# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
816# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
817movdqa %xmm8,%xmm13
818
819# qhasm: xmm8 = xmm10
820# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
821# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
822movdqa %xmm9,%xmm14
823
824# qhasm: xmm15 = xmm11
825# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
826# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
827movdqa %xmm8,%xmm15
828
829# qhasm: xmm10 |= xmm9
830# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
831# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
832por %xmm10,%xmm9
833
834# qhasm: xmm11 |= xmm12
835# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
836# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
837por %xmm12,%xmm8
838
839# qhasm: xmm15 ^= xmm8
840# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
841# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
842pxor %xmm14,%xmm15
843
844# qhasm: xmm14 &= xmm12
845# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
846# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
847pand %xmm12,%xmm13
848
849# qhasm: xmm8 &= xmm9
850# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
851# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
852pand %xmm10,%xmm14
853
854# qhasm: xmm12 ^= xmm9
855# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
856# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
857pxor %xmm10,%xmm12
858
859# qhasm: xmm15 &= xmm12
860# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
861# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
862pand %xmm12,%xmm15
863
864# qhasm: xmm12 = xmm3
865# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
866# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
867movdqa %xmm3,%xmm10
868
869# qhasm: xmm12 ^= xmm0
870# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
871# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
872pxor %xmm0,%xmm10
873
874# qhasm: xmm13 &= xmm12
875# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
876# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
877pand %xmm10,%xmm11
878
879# qhasm: xmm11 ^= xmm13
880# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
881# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
882pxor %xmm11,%xmm8
883
884# qhasm: xmm10 ^= xmm13
885# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
886# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
887pxor %xmm11,%xmm9
888
889# qhasm: xmm13 = xmm7
890# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
891# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
892movdqa %xmm7,%xmm10
893
894# qhasm: xmm13 ^= xmm1
895# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
896# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
897pxor %xmm1,%xmm10
898
899# qhasm: xmm12 = xmm5
900# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
901# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
902movdqa %xmm5,%xmm11
903
904# qhasm: xmm9 = xmm13
905# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
906# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
907movdqa %xmm10,%xmm12
908
909# qhasm: xmm12 ^= xmm6
910# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
911# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
912pxor %xmm6,%xmm11
913
914# qhasm: xmm9 |= xmm12
915# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
916# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
917por %xmm11,%xmm12
918
919# qhasm: xmm13 &= xmm12
920# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
921# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
922pand %xmm11,%xmm10
923
924# qhasm: xmm8 ^= xmm13
925# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
926# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
927pxor %xmm10,%xmm14
928
929# qhasm: xmm11 ^= xmm15
930# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
931# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
932pxor %xmm15,%xmm8
933
934# qhasm: xmm10 ^= xmm14
935# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
936# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
937pxor %xmm13,%xmm9
938
939# qhasm: xmm9 ^= xmm15
940# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
941# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
942pxor %xmm15,%xmm12
943
944# qhasm: xmm8 ^= xmm14
945# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
946# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
947pxor %xmm13,%xmm14
948
949# qhasm: xmm9 ^= xmm14
950# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
951# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
952pxor %xmm13,%xmm12
953
954# qhasm: xmm12 = xmm2
955# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
956# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
957movdqa %xmm2,%xmm10
958
959# qhasm: xmm13 = xmm4
960# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
961# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
962movdqa %xmm4,%xmm11
963
964# qhasm: xmm14 = xmm1
965# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
966# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
967movdqa %xmm1,%xmm13
968
969# qhasm: xmm15 = xmm7
970# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
971# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
972movdqa %xmm7,%xmm15
973
974# qhasm: xmm12 &= xmm3
975# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
976# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
977pand %xmm3,%xmm10
978
979# qhasm: xmm13 &= xmm0
980# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
981# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
982pand %xmm0,%xmm11
983
984# qhasm: xmm14 &= xmm5
985# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
986# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
987pand %xmm5,%xmm13
988
989# qhasm: xmm15 |= xmm6
990# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
991# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
992por %xmm6,%xmm15
993
994# qhasm: xmm11 ^= xmm12
995# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
996# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
997pxor %xmm10,%xmm8
998
999# qhasm: xmm10 ^= xmm13
1000# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
1001# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
1002pxor %xmm11,%xmm9
1003
1004# qhasm: xmm9 ^= xmm14
1005# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
1006# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
1007pxor %xmm13,%xmm12
1008
1009# qhasm: xmm8 ^= xmm15
1010# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
1011# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
1012pxor %xmm15,%xmm14
1013
1014# qhasm: xmm12 = xmm11
1015# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
1016# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
1017movdqa %xmm8,%xmm10
1018
1019# qhasm: xmm12 ^= xmm10
1020# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
1021# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
1022pxor %xmm9,%xmm10
1023
1024# qhasm: xmm11 &= xmm9
1025# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
1026# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
1027pand %xmm12,%xmm8
1028
1029# qhasm: xmm14 = xmm8
1030# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
1031# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
1032movdqa %xmm14,%xmm11
1033
1034# qhasm: xmm14 ^= xmm11
1035# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
1036# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
1037pxor %xmm8,%xmm11
1038
1039# qhasm: xmm15 = xmm12
1040# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
1041# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
1042movdqa %xmm10,%xmm13
1043
1044# qhasm: xmm15 &= xmm14
1045# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
1046# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
1047pand %xmm11,%xmm13
1048
1049# qhasm: xmm15 ^= xmm10
1050# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
1051# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
1052pxor %xmm9,%xmm13
1053
1054# qhasm: xmm13 = xmm9
1055# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
1056# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
1057movdqa %xmm12,%xmm15
1058
1059# qhasm: xmm13 ^= xmm8
1060# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1061# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1062pxor %xmm14,%xmm15
1063
1064# qhasm: xmm11 ^= xmm10
1065# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
1066# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
1067pxor %xmm9,%xmm8
1068
1069# qhasm: xmm13 &= xmm11
1070# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
1071# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
1072pand %xmm8,%xmm15
1073
1074# qhasm: xmm13 ^= xmm8
1075# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1076# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1077pxor %xmm14,%xmm15
1078
1079# qhasm: xmm9 ^= xmm13
1080# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
1081# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
1082pxor %xmm15,%xmm12
1083
1084# qhasm: xmm10 = xmm14
1085# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
1086# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
1087movdqa %xmm11,%xmm8
1088
1089# qhasm: xmm10 ^= xmm13
1090# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
1091# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
1092pxor %xmm15,%xmm8
1093
1094# qhasm: xmm10 &= xmm8
1095# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
1096# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
1097pand %xmm14,%xmm8
1098
1099# qhasm: xmm9 ^= xmm10
1100# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
1101# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
1102pxor %xmm8,%xmm12
1103
1104# qhasm: xmm14 ^= xmm10
1105# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
1106# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
1107pxor %xmm8,%xmm11
1108
1109# qhasm: xmm14 &= xmm15
1110# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
1111# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
1112pand %xmm13,%xmm11
1113
1114# qhasm: xmm14 ^= xmm12
1115# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
1116# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
1117pxor %xmm10,%xmm11
1118
1119# qhasm: xmm12 = xmm6
1120# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
1121# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
1122movdqa %xmm6,%xmm8
1123
1124# qhasm: xmm8 = xmm5
1125# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
1126# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
1127movdqa %xmm5,%xmm9
1128
1129# qhasm: xmm10 = xmm15
1130# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
1131# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
1132movdqa %xmm13,%xmm10
1133
1134# qhasm: xmm10 ^= xmm14
1135# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
1136# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
1137pxor %xmm11,%xmm10
1138
1139# qhasm: xmm10 &= xmm6
1140# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
1141# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
1142pand %xmm6,%xmm10
1143
1144# qhasm: xmm6 ^= xmm5
1145# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1146# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1147pxor %xmm5,%xmm6
1148
1149# qhasm: xmm6 &= xmm14
1150# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
1151# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
1152pand %xmm11,%xmm6
1153
1154# qhasm: xmm5 &= xmm15
1155# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
1156# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
1157pand %xmm13,%xmm5
1158
1159# qhasm: xmm6 ^= xmm5
1160# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1161# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1162pxor %xmm5,%xmm6
1163
1164# qhasm: xmm5 ^= xmm10
1165# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
1166# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
1167pxor %xmm10,%xmm5
1168
1169# qhasm: xmm12 ^= xmm0
1170# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
1171# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
1172pxor %xmm0,%xmm8
1173
1174# qhasm: xmm8 ^= xmm3
1175# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
1176# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
1177pxor %xmm3,%xmm9
1178
1179# qhasm: xmm15 ^= xmm13
1180# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1181# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1182pxor %xmm15,%xmm13
1183
1184# qhasm: xmm14 ^= xmm9
1185# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1186# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1187pxor %xmm12,%xmm11
1188
1189# qhasm: xmm11 = xmm15
1190# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1191# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1192movdqa %xmm13,%xmm10
1193
1194# qhasm: xmm11 ^= xmm14
1195# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1196# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1197pxor %xmm11,%xmm10
1198
1199# qhasm: xmm11 &= xmm12
1200# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1201# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1202pand %xmm8,%xmm10
1203
1204# qhasm: xmm12 ^= xmm8
1205# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1206# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1207pxor %xmm9,%xmm8
1208
1209# qhasm: xmm12 &= xmm14
1210# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1211# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1212pand %xmm11,%xmm8
1213
1214# qhasm: xmm8 &= xmm15
1215# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1216# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1217pand %xmm13,%xmm9
1218
1219# qhasm: xmm8 ^= xmm12
1220# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1221# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1222pxor %xmm8,%xmm9
1223
1224# qhasm: xmm12 ^= xmm11
1225# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1226# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1227pxor %xmm10,%xmm8
1228
1229# qhasm: xmm10 = xmm13
1230# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1231# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1232movdqa %xmm15,%xmm10
1233
1234# qhasm: xmm10 ^= xmm9
1235# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1236# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1237pxor %xmm12,%xmm10
1238
1239# qhasm: xmm10 &= xmm0
1240# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
1241# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
1242pand %xmm0,%xmm10
1243
1244# qhasm: xmm0 ^= xmm3
1245# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1246# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1247pxor %xmm3,%xmm0
1248
1249# qhasm: xmm0 &= xmm9
1250# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
1251# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
1252pand %xmm12,%xmm0
1253
1254# qhasm: xmm3 &= xmm13
1255# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
1256# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
1257pand %xmm15,%xmm3
1258
1259# qhasm: xmm0 ^= xmm3
1260# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1261# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1262pxor %xmm3,%xmm0
1263
1264# qhasm: xmm3 ^= xmm10
1265# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
1266# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
1267pxor %xmm10,%xmm3
1268
1269# qhasm: xmm6 ^= xmm12
1270# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
1271# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
1272pxor %xmm8,%xmm6
1273
1274# qhasm: xmm0 ^= xmm12
1275# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
1276# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
1277pxor %xmm8,%xmm0
1278
1279# qhasm: xmm5 ^= xmm8
1280# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
1281# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
1282pxor %xmm9,%xmm5
1283
1284# qhasm: xmm3 ^= xmm8
1285# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
1286# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
1287pxor %xmm9,%xmm3
1288
1289# qhasm: xmm12 = xmm7
1290# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
1291# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
1292movdqa %xmm7,%xmm8
1293
1294# qhasm: xmm8 = xmm1
1295# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
1296# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
1297movdqa %xmm1,%xmm9
1298
1299# qhasm: xmm12 ^= xmm4
1300# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
1301# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
1302pxor %xmm4,%xmm8
1303
1304# qhasm: xmm8 ^= xmm2
1305# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
1306# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
1307pxor %xmm2,%xmm9
1308
1309# qhasm: xmm11 = xmm15
1310# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1311# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1312movdqa %xmm13,%xmm10
1313
1314# qhasm: xmm11 ^= xmm14
1315# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1316# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1317pxor %xmm11,%xmm10
1318
1319# qhasm: xmm11 &= xmm12
1320# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1321# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1322pand %xmm8,%xmm10
1323
1324# qhasm: xmm12 ^= xmm8
1325# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1326# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1327pxor %xmm9,%xmm8
1328
1329# qhasm: xmm12 &= xmm14
1330# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1331# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1332pand %xmm11,%xmm8
1333
1334# qhasm: xmm8 &= xmm15
1335# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1336# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1337pand %xmm13,%xmm9
1338
1339# qhasm: xmm8 ^= xmm12
1340# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1341# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1342pxor %xmm8,%xmm9
1343
1344# qhasm: xmm12 ^= xmm11
1345# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1346# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1347pxor %xmm10,%xmm8
1348
1349# qhasm: xmm10 = xmm13
1350# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1351# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1352movdqa %xmm15,%xmm10
1353
1354# qhasm: xmm10 ^= xmm9
1355# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1356# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1357pxor %xmm12,%xmm10
1358
1359# qhasm: xmm10 &= xmm4
1360# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
1361# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
1362pand %xmm4,%xmm10
1363
1364# qhasm: xmm4 ^= xmm2
1365# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1366# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1367pxor %xmm2,%xmm4
1368
1369# qhasm: xmm4 &= xmm9
1370# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
1371# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
1372pand %xmm12,%xmm4
1373
1374# qhasm: xmm2 &= xmm13
1375# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
1376# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
1377pand %xmm15,%xmm2
1378
1379# qhasm: xmm4 ^= xmm2
1380# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1381# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1382pxor %xmm2,%xmm4
1383
1384# qhasm: xmm2 ^= xmm10
1385# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
1386# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
1387pxor %xmm10,%xmm2
1388
1389# qhasm: xmm15 ^= xmm13
1390# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1391# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1392pxor %xmm15,%xmm13
1393
1394# qhasm: xmm14 ^= xmm9
1395# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1396# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1397pxor %xmm12,%xmm11
1398
1399# qhasm: xmm11 = xmm15
1400# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1401# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1402movdqa %xmm13,%xmm10
1403
1404# qhasm: xmm11 ^= xmm14
1405# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1406# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1407pxor %xmm11,%xmm10
1408
1409# qhasm: xmm11 &= xmm7
1410# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
1411# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
1412pand %xmm7,%xmm10
1413
1414# qhasm: xmm7 ^= xmm1
1415# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1416# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1417pxor %xmm1,%xmm7
1418
1419# qhasm: xmm7 &= xmm14
1420# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
1421# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
1422pand %xmm11,%xmm7
1423
1424# qhasm: xmm1 &= xmm15
1425# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
1426# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
1427pand %xmm13,%xmm1
1428
1429# qhasm: xmm7 ^= xmm1
1430# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1431# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1432pxor %xmm1,%xmm7
1433
1434# qhasm: xmm1 ^= xmm11
1435# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
1436# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
1437pxor %xmm10,%xmm1
1438
1439# qhasm: xmm7 ^= xmm12
1440# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
1441# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
1442pxor %xmm8,%xmm7
1443
1444# qhasm: xmm4 ^= xmm12
1445# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
1446# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
1447pxor %xmm8,%xmm4
1448
1449# qhasm: xmm1 ^= xmm8
1450# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
1451# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
1452pxor %xmm9,%xmm1
1453
1454# qhasm: xmm2 ^= xmm8
1455# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
1456# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
1457pxor %xmm9,%xmm2
1458
1459# qhasm: xmm7 ^= xmm0
1460# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
1461# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
1462pxor %xmm0,%xmm7
1463
1464# qhasm: xmm1 ^= xmm6
1465# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
1466# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
1467pxor %xmm6,%xmm1
1468
1469# qhasm: xmm4 ^= xmm7
1470# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
1471# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
1472pxor %xmm7,%xmm4
1473
1474# qhasm: xmm6 ^= xmm0
1475# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
1476# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
1477pxor %xmm0,%xmm6
1478
1479# qhasm: xmm0 ^= xmm1
1480# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
1481# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
1482pxor %xmm1,%xmm0
1483
1484# qhasm: xmm1 ^= xmm5
1485# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
1486# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
1487pxor %xmm5,%xmm1
1488
1489# qhasm: xmm5 ^= xmm2
1490# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
1491# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
1492pxor %xmm2,%xmm5
1493
1494# qhasm: xmm4 ^= xmm5
1495# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
1496# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
1497pxor %xmm5,%xmm4
1498
1499# qhasm: xmm2 ^= xmm3
1500# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
1501# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
1502pxor %xmm3,%xmm2
1503
1504# qhasm: xmm3 ^= xmm5
1505# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
1506# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
1507pxor %xmm5,%xmm3
1508
1509# qhasm: xmm6 ^= xmm3
1510# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
1511# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
1512pxor %xmm3,%xmm6
1513
1514# qhasm: xmm0 ^= RCON
1515# asm 1: pxor RCON,<xmm0=int6464#1
1516# asm 2: pxor RCON,<xmm0=%xmm0
1517pxor RCON,%xmm0
1518
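# annotation (added): the RCON xor above appears to mix in the round constant;
# the EXPB0 byte shuffles that follow appear to rearrange bytes for the
# key-schedule expansion step.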
1519# qhasm: shuffle bytes of xmm0 by EXPB0
1520# asm 1: pshufb EXPB0,<xmm0=int6464#1
1521# asm 2: pshufb EXPB0,<xmm0=%xmm0
1522pshufb EXPB0,%xmm0
1523
1524# qhasm: shuffle bytes of xmm1 by EXPB0
1525# asm 1: pshufb EXPB0,<xmm1=int6464#2
1526# asm 2: pshufb EXPB0,<xmm1=%xmm1
1527pshufb EXPB0,%xmm1
1528
1529# qhasm: shuffle bytes of xmm4 by EXPB0
1530# asm 1: pshufb EXPB0,<xmm4=int6464#5
1531# asm 2: pshufb EXPB0,<xmm4=%xmm4
1532pshufb EXPB0,%xmm4
1533
1534# qhasm: shuffle bytes of xmm6 by EXPB0
1535# asm 1: pshufb EXPB0,<xmm6=int6464#7
1536# asm 2: pshufb EXPB0,<xmm6=%xmm6
1537pshufb EXPB0,%xmm6
1538
1539# qhasm: shuffle bytes of xmm3 by EXPB0
1540# asm 1: pshufb EXPB0,<xmm3=int6464#4
1541# asm 2: pshufb EXPB0,<xmm3=%xmm3
1542pshufb EXPB0,%xmm3
1543
1544# qhasm: shuffle bytes of xmm7 by EXPB0
1545# asm 1: pshufb EXPB0,<xmm7=int6464#8
1546# asm 2: pshufb EXPB0,<xmm7=%xmm7
1547pshufb EXPB0,%xmm7
1548
1549# qhasm: shuffle bytes of xmm2 by EXPB0
1550# asm 1: pshufb EXPB0,<xmm2=int6464#3
1551# asm 2: pshufb EXPB0,<xmm2=%xmm2
1552pshufb EXPB0,%xmm2
1553
1554# qhasm: shuffle bytes of xmm5 by EXPB0
1555# asm 1: pshufb EXPB0,<xmm5=int6464#6
1556# asm 2: pshufb EXPB0,<xmm5=%xmm5
1557pshufb EXPB0,%xmm5
1558
1559# qhasm: xmm8 = *(int128 *)(c + 0)
1560# asm 1: movdqa 0(<c=int64#1),>xmm8=int6464#9
1561# asm 2: movdqa 0(<c=%rdi),>xmm8=%xmm8
1562movdqa 0(%rdi),%xmm8
1563
1564# qhasm: xmm9 = *(int128 *)(c + 16)
1565# asm 1: movdqa 16(<c=int64#1),>xmm9=int6464#10
1566# asm 2: movdqa 16(<c=%rdi),>xmm9=%xmm9
1567movdqa 16(%rdi),%xmm9
1568
1569# qhasm: xmm10 = *(int128 *)(c + 32)
1570# asm 1: movdqa 32(<c=int64#1),>xmm10=int6464#11
1571# asm 2: movdqa 32(<c=%rdi),>xmm10=%xmm10
1572movdqa 32(%rdi),%xmm10
1573
1574# qhasm: xmm11 = *(int128 *)(c + 48)
1575# asm 1: movdqa 48(<c=int64#1),>xmm11=int6464#12
1576# asm 2: movdqa 48(<c=%rdi),>xmm11=%xmm11
1577movdqa 48(%rdi),%xmm11
1578
1579# qhasm: xmm12 = *(int128 *)(c + 64)
1580# asm 1: movdqa 64(<c=int64#1),>xmm12=int6464#13
1581# asm 2: movdqa 64(<c=%rdi),>xmm12=%xmm12
1582movdqa 64(%rdi),%xmm12
1583
1584# qhasm: xmm13 = *(int128 *)(c + 80)
1585# asm 1: movdqa 80(<c=int64#1),>xmm13=int6464#14
1586# asm 2: movdqa 80(<c=%rdi),>xmm13=%xmm13
1587movdqa 80(%rdi),%xmm13
1588
1589# qhasm: xmm14 = *(int128 *)(c + 96)
1590# asm 1: movdqa 96(<c=int64#1),>xmm14=int6464#15
1591# asm 2: movdqa 96(<c=%rdi),>xmm14=%xmm14
1592movdqa 96(%rdi),%xmm14
1593
1594# qhasm: xmm15 = *(int128 *)(c + 112)
1595# asm 1: movdqa 112(<c=int64#1),>xmm15=int6464#16
1596# asm 2: movdqa 112(<c=%rdi),>xmm15=%xmm15
1597movdqa 112(%rdi),%xmm15
1598
1599# qhasm: xmm0 ^= xmm8
1600# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1601# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1602pxor %xmm8,%xmm0
1603
1604# qhasm: xmm1 ^= xmm9
1605# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1606# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1607pxor %xmm9,%xmm1
1608
1609# qhasm: xmm4 ^= xmm10
1610# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1611# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1612pxor %xmm10,%xmm4
1613
1614# qhasm: xmm6 ^= xmm11
1615# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1616# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1617pxor %xmm11,%xmm6
1618
1619# qhasm: xmm3 ^= xmm12
1620# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1621# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1622pxor %xmm12,%xmm3
1623
1624# qhasm: xmm7 ^= xmm13
1625# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1626# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1627pxor %xmm13,%xmm7
1628
1629# qhasm: xmm2 ^= xmm14
1630# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1631# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1632pxor %xmm14,%xmm2
1633
1634# qhasm: xmm5 ^= xmm15
1635# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1636# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1637pxor %xmm15,%xmm5
1638
1639# qhasm: uint32323232 xmm8 >>= 8
1640# asm 1: psrld $8,<xmm8=int6464#9
1641# asm 2: psrld $8,<xmm8=%xmm8
1642psrld $8,%xmm8
1643
1644# qhasm: uint32323232 xmm9 >>= 8
1645# asm 1: psrld $8,<xmm9=int6464#10
1646# asm 2: psrld $8,<xmm9=%xmm9
1647psrld $8,%xmm9
1648
1649# qhasm: uint32323232 xmm10 >>= 8
1650# asm 1: psrld $8,<xmm10=int6464#11
1651# asm 2: psrld $8,<xmm10=%xmm10
1652psrld $8,%xmm10
1653
1654# qhasm: uint32323232 xmm11 >>= 8
1655# asm 1: psrld $8,<xmm11=int6464#12
1656# asm 2: psrld $8,<xmm11=%xmm11
1657psrld $8,%xmm11
1658
1659# qhasm: uint32323232 xmm12 >>= 8
1660# asm 1: psrld $8,<xmm12=int6464#13
1661# asm 2: psrld $8,<xmm12=%xmm12
1662psrld $8,%xmm12
1663
1664# qhasm: uint32323232 xmm13 >>= 8
1665# asm 1: psrld $8,<xmm13=int6464#14
1666# asm 2: psrld $8,<xmm13=%xmm13
1667psrld $8,%xmm13
1668
1669# qhasm: uint32323232 xmm14 >>= 8
1670# asm 1: psrld $8,<xmm14=int6464#15
1671# asm 2: psrld $8,<xmm14=%xmm14
1672psrld $8,%xmm14
1673
1674# qhasm: uint32323232 xmm15 >>= 8
1675# asm 1: psrld $8,<xmm15=int6464#16
1676# asm 2: psrld $8,<xmm15=%xmm15
1677psrld $8,%xmm15
1678
1679# qhasm: xmm0 ^= xmm8
1680# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1681# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1682pxor %xmm8,%xmm0
1683
1684# qhasm: xmm1 ^= xmm9
1685# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1686# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1687pxor %xmm9,%xmm1
1688
1689# qhasm: xmm4 ^= xmm10
1690# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1691# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1692pxor %xmm10,%xmm4
1693
1694# qhasm: xmm6 ^= xmm11
1695# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1696# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1697pxor %xmm11,%xmm6
1698
1699# qhasm: xmm3 ^= xmm12
1700# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1701# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1702pxor %xmm12,%xmm3
1703
1704# qhasm: xmm7 ^= xmm13
1705# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1706# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1707pxor %xmm13,%xmm7
1708
1709# qhasm: xmm2 ^= xmm14
1710# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1711# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1712pxor %xmm14,%xmm2
1713
1714# qhasm: xmm5 ^= xmm15
1715# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1716# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1717pxor %xmm15,%xmm5
1718
1719# qhasm: uint32323232 xmm8 >>= 8
1720# asm 1: psrld $8,<xmm8=int6464#9
1721# asm 2: psrld $8,<xmm8=%xmm8
1722psrld $8,%xmm8
1723
1724# qhasm: uint32323232 xmm9 >>= 8
1725# asm 1: psrld $8,<xmm9=int6464#10
1726# asm 2: psrld $8,<xmm9=%xmm9
1727psrld $8,%xmm9
1728
1729# qhasm: uint32323232 xmm10 >>= 8
1730# asm 1: psrld $8,<xmm10=int6464#11
1731# asm 2: psrld $8,<xmm10=%xmm10
1732psrld $8,%xmm10
1733
1734# qhasm: uint32323232 xmm11 >>= 8
1735# asm 1: psrld $8,<xmm11=int6464#12
1736# asm 2: psrld $8,<xmm11=%xmm11
1737psrld $8,%xmm11
1738
1739# qhasm: uint32323232 xmm12 >>= 8
1740# asm 1: psrld $8,<xmm12=int6464#13
1741# asm 2: psrld $8,<xmm12=%xmm12
1742psrld $8,%xmm12
1743
1744# qhasm: uint32323232 xmm13 >>= 8
1745# asm 1: psrld $8,<xmm13=int6464#14
1746# asm 2: psrld $8,<xmm13=%xmm13
1747psrld $8,%xmm13
1748
1749# qhasm: uint32323232 xmm14 >>= 8
1750# asm 1: psrld $8,<xmm14=int6464#15
1751# asm 2: psrld $8,<xmm14=%xmm14
1752psrld $8,%xmm14
1753
1754# qhasm: uint32323232 xmm15 >>= 8
1755# asm 1: psrld $8,<xmm15=int6464#16
1756# asm 2: psrld $8,<xmm15=%xmm15
1757psrld $8,%xmm15
1758
1759# qhasm: xmm0 ^= xmm8
1760# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1761# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1762pxor %xmm8,%xmm0
1763
1764# qhasm: xmm1 ^= xmm9
1765# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1766# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1767pxor %xmm9,%xmm1
1768
1769# qhasm: xmm4 ^= xmm10
1770# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1771# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1772pxor %xmm10,%xmm4
1773
1774# qhasm: xmm6 ^= xmm11
1775# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1776# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1777pxor %xmm11,%xmm6
1778
1779# qhasm: xmm3 ^= xmm12
1780# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1781# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1782pxor %xmm12,%xmm3
1783
1784# qhasm: xmm7 ^= xmm13
1785# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1786# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1787pxor %xmm13,%xmm7
1788
1789# qhasm: xmm2 ^= xmm14
1790# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1791# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1792pxor %xmm14,%xmm2
1793
1794# qhasm: xmm5 ^= xmm15
1795# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1796# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1797pxor %xmm15,%xmm5
1798
1799# qhasm: uint32323232 xmm8 >>= 8
1800# asm 1: psrld $8,<xmm8=int6464#9
1801# asm 2: psrld $8,<xmm8=%xmm8
1802psrld $8,%xmm8
1803
1804# qhasm: uint32323232 xmm9 >>= 8
1805# asm 1: psrld $8,<xmm9=int6464#10
1806# asm 2: psrld $8,<xmm9=%xmm9
1807psrld $8,%xmm9
1808
1809# qhasm: uint32323232 xmm10 >>= 8
1810# asm 1: psrld $8,<xmm10=int6464#11
1811# asm 2: psrld $8,<xmm10=%xmm10
1812psrld $8,%xmm10
1813
1814# qhasm: uint32323232 xmm11 >>= 8
1815# asm 1: psrld $8,<xmm11=int6464#12
1816# asm 2: psrld $8,<xmm11=%xmm11
1817psrld $8,%xmm11
1818
1819# qhasm: uint32323232 xmm12 >>= 8
1820# asm 1: psrld $8,<xmm12=int6464#13
1821# asm 2: psrld $8,<xmm12=%xmm12
1822psrld $8,%xmm12
1823
1824# qhasm: uint32323232 xmm13 >>= 8
1825# asm 1: psrld $8,<xmm13=int6464#14
1826# asm 2: psrld $8,<xmm13=%xmm13
1827psrld $8,%xmm13
1828
1829# qhasm: uint32323232 xmm14 >>= 8
1830# asm 1: psrld $8,<xmm14=int6464#15
1831# asm 2: psrld $8,<xmm14=%xmm14
1832psrld $8,%xmm14
1833
1834# qhasm: uint32323232 xmm15 >>= 8
1835# asm 1: psrld $8,<xmm15=int6464#16
1836# asm 2: psrld $8,<xmm15=%xmm15
1837psrld $8,%xmm15
1838
1839# qhasm: xmm0 ^= xmm8
1840# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1841# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1842pxor %xmm8,%xmm0
1843
1844# qhasm: xmm1 ^= xmm9
1845# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1846# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1847pxor %xmm9,%xmm1
1848
1849# qhasm: xmm4 ^= xmm10
1850# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1851# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1852pxor %xmm10,%xmm4
1853
1854# qhasm: xmm6 ^= xmm11
1855# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1856# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1857pxor %xmm11,%xmm6
1858
1859# qhasm: xmm3 ^= xmm12
1860# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1861# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1862pxor %xmm12,%xmm3
1863
1864# qhasm: xmm7 ^= xmm13
1865# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1866# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1867pxor %xmm13,%xmm7
1868
1869# qhasm: xmm2 ^= xmm14
1870# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1871# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1872pxor %xmm14,%xmm2
1873
1874# qhasm: xmm5 ^= xmm15
1875# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1876# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1877pxor %xmm15,%xmm5
1878
1879# qhasm: *(int128 *)(c + 128) = xmm0
1880# asm 1: movdqa <xmm0=int6464#1,128(<c=int64#1)
1881# asm 2: movdqa <xmm0=%xmm0,128(<c=%rdi)
1882movdqa %xmm0,128(%rdi)
1883
1884# qhasm: *(int128 *)(c + 144) = xmm1
1885# asm 1: movdqa <xmm1=int6464#2,144(<c=int64#1)
1886# asm 2: movdqa <xmm1=%xmm1,144(<c=%rdi)
1887movdqa %xmm1,144(%rdi)
1888
1889# qhasm: *(int128 *)(c + 160) = xmm4
1890# asm 1: movdqa <xmm4=int6464#5,160(<c=int64#1)
1891# asm 2: movdqa <xmm4=%xmm4,160(<c=%rdi)
1892movdqa %xmm4,160(%rdi)
1893
1894# qhasm: *(int128 *)(c + 176) = xmm6
1895# asm 1: movdqa <xmm6=int6464#7,176(<c=int64#1)
1896# asm 2: movdqa <xmm6=%xmm6,176(<c=%rdi)
1897movdqa %xmm6,176(%rdi)
1898
1899# qhasm: *(int128 *)(c + 192) = xmm3
1900# asm 1: movdqa <xmm3=int6464#4,192(<c=int64#1)
1901# asm 2: movdqa <xmm3=%xmm3,192(<c=%rdi)
1902movdqa %xmm3,192(%rdi)
1903
1904# qhasm: *(int128 *)(c + 208) = xmm7
1905# asm 1: movdqa <xmm7=int6464#8,208(<c=int64#1)
1906# asm 2: movdqa <xmm7=%xmm7,208(<c=%rdi)
1907movdqa %xmm7,208(%rdi)
1908
1909# qhasm: *(int128 *)(c + 224) = xmm2
1910# asm 1: movdqa <xmm2=int6464#3,224(<c=int64#1)
1911# asm 2: movdqa <xmm2=%xmm2,224(<c=%rdi)
1912movdqa %xmm2,224(%rdi)
1913
1914# qhasm: *(int128 *)(c + 240) = xmm5
1915# asm 1: movdqa <xmm5=int6464#6,240(<c=int64#1)
1916# asm 2: movdqa <xmm5=%xmm5,240(<c=%rdi)
1917movdqa %xmm5,240(%rdi)
1918
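# annotation (added): the eight stores above write the next bitsliced round-key
# block to c+128..c+240; the ONE xors and ROTB shuffles below appear to adjust
# the slice representation before the following expansion round.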
1919# qhasm: xmm0 ^= ONE
1920# asm 1: pxor ONE,<xmm0=int6464#1
1921# asm 2: pxor ONE,<xmm0=%xmm0
1922pxor ONE,%xmm0
1923
1924# qhasm: xmm1 ^= ONE
1925# asm 1: pxor ONE,<xmm1=int6464#2
1926# asm 2: pxor ONE,<xmm1=%xmm1
1927pxor ONE,%xmm1
1928
1929# qhasm: xmm7 ^= ONE
1930# asm 1: pxor ONE,<xmm7=int6464#8
1931# asm 2: pxor ONE,<xmm7=%xmm7
1932pxor ONE,%xmm7
1933
1934# qhasm: xmm2 ^= ONE
1935# asm 1: pxor ONE,<xmm2=int6464#3
1936# asm 2: pxor ONE,<xmm2=%xmm2
1937pxor ONE,%xmm2
1938
1939# qhasm: shuffle bytes of xmm0 by ROTB
1940# asm 1: pshufb ROTB,<xmm0=int6464#1
1941# asm 2: pshufb ROTB,<xmm0=%xmm0
1942pshufb ROTB,%xmm0
1943
1944# qhasm: shuffle bytes of xmm1 by ROTB
1945# asm 1: pshufb ROTB,<xmm1=int6464#2
1946# asm 2: pshufb ROTB,<xmm1=%xmm1
1947pshufb ROTB,%xmm1
1948
1949# qhasm: shuffle bytes of xmm4 by ROTB
1950# asm 1: pshufb ROTB,<xmm4=int6464#5
1951# asm 2: pshufb ROTB,<xmm4=%xmm4
1952pshufb ROTB,%xmm4
1953
1954# qhasm: shuffle bytes of xmm6 by ROTB
1955# asm 1: pshufb ROTB,<xmm6=int6464#7
1956# asm 2: pshufb ROTB,<xmm6=%xmm6
1957pshufb ROTB,%xmm6
1958
1959# qhasm: shuffle bytes of xmm3 by ROTB
1960# asm 1: pshufb ROTB,<xmm3=int6464#4
1961# asm 2: pshufb ROTB,<xmm3=%xmm3
1962pshufb ROTB,%xmm3
1963
1964# qhasm: shuffle bytes of xmm7 by ROTB
1965# asm 1: pshufb ROTB,<xmm7=int6464#8
1966# asm 2: pshufb ROTB,<xmm7=%xmm7
1967pshufb ROTB,%xmm7
1968
1969# qhasm: shuffle bytes of xmm2 by ROTB
1970# asm 1: pshufb ROTB,<xmm2=int6464#3
1971# asm 2: pshufb ROTB,<xmm2=%xmm2
1972pshufb ROTB,%xmm2
1973
1974# qhasm: shuffle bytes of xmm5 by ROTB
1975# asm 1: pshufb ROTB,<xmm5=int6464#6
1976# asm 2: pshufb ROTB,<xmm5=%xmm5
1977pshufb ROTB,%xmm5
1978
1979# qhasm: xmm7 ^= xmm2
1980# asm 1: pxor <xmm2=int6464#3,<xmm7=int6464#8
1981# asm 2: pxor <xmm2=%xmm2,<xmm7=%xmm7
1982pxor %xmm2,%xmm7
1983
1984# qhasm: xmm4 ^= xmm1
1985# asm 1: pxor <xmm1=int6464#2,<xmm4=int6464#5
1986# asm 2: pxor <xmm1=%xmm1,<xmm4=%xmm4
1987pxor %xmm1,%xmm4
1988
1989# qhasm: xmm7 ^= xmm0
1990# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
1991# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
1992pxor %xmm0,%xmm7
1993
1994# qhasm: xmm2 ^= xmm4
1995# asm 1: pxor <xmm4=int6464#5,<xmm2=int6464#3
1996# asm 2: pxor <xmm4=%xmm4,<xmm2=%xmm2
1997pxor %xmm4,%xmm2
1998
1999# qhasm: xmm6 ^= xmm0
2000# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
2001# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
2002pxor %xmm0,%xmm6
2003
2004# qhasm: xmm2 ^= xmm6
2005# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
2006# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
2007pxor %xmm6,%xmm2
2008
2009# qhasm: xmm6 ^= xmm5
2010# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
2011# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
2012pxor %xmm5,%xmm6
2013
2014# qhasm: xmm6 ^= xmm3
2015# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
2016# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
2017pxor %xmm3,%xmm6
2018
2019# qhasm: xmm5 ^= xmm7
2020# asm 1: pxor <xmm7=int6464#8,<xmm5=int6464#6
2021# asm 2: pxor <xmm7=%xmm7,<xmm5=%xmm5
2022pxor %xmm7,%xmm5
2023
2024# qhasm: xmm6 ^= xmm1
2025# asm 1: pxor <xmm1=int6464#2,<xmm6=int6464#7
2026# asm 2: pxor <xmm1=%xmm1,<xmm6=%xmm6
2027pxor %xmm1,%xmm6
2028
2029# qhasm: xmm3 ^= xmm7
2030# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
2031# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
2032pxor %xmm7,%xmm3
2033
2034# qhasm: xmm4 ^= xmm5
2035# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
2036# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
2037pxor %xmm5,%xmm4
2038
2039# qhasm: xmm1 ^= xmm7
2040# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
2041# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
2042pxor %xmm7,%xmm1
2043
2044# qhasm: xmm11 = xmm5
2045# asm 1: movdqa <xmm5=int6464#6,>xmm11=int6464#9
2046# asm 2: movdqa <xmm5=%xmm5,>xmm11=%xmm8
2047movdqa %xmm5,%xmm8
2048
2049# qhasm: xmm10 = xmm1
2050# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
2051# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
2052movdqa %xmm1,%xmm9
2053
2054# qhasm: xmm9 = xmm7
2055# asm 1: movdqa <xmm7=int6464#8,>xmm9=int6464#11
2056# asm 2: movdqa <xmm7=%xmm7,>xmm9=%xmm10
2057movdqa %xmm7,%xmm10
2058
2059# qhasm: xmm13 = xmm4
2060# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
2061# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
2062movdqa %xmm4,%xmm11
2063
2064# qhasm: xmm12 = xmm2
2065# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#13
2066# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm12
2067movdqa %xmm2,%xmm12
2068
2069# qhasm: xmm11 ^= xmm3
2070# asm 1: pxor <xmm3=int6464#4,<xmm11=int6464#9
2071# asm 2: pxor <xmm3=%xmm3,<xmm11=%xmm8
2072pxor %xmm3,%xmm8
2073
2074# qhasm: xmm10 ^= xmm4
2075# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#10
2076# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm9
2077pxor %xmm4,%xmm9
2078
2079# qhasm: xmm9 ^= xmm6
2080# asm 1: pxor <xmm6=int6464#7,<xmm9=int6464#11
2081# asm 2: pxor <xmm6=%xmm6,<xmm9=%xmm10
2082pxor %xmm6,%xmm10
2083
2084# qhasm: xmm13 ^= xmm3
2085# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#12
2086# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm11
2087pxor %xmm3,%xmm11
2088
2089# qhasm: xmm12 ^= xmm0
2090# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
2091# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
2092pxor %xmm0,%xmm12
2093
2094# qhasm: xmm14 = xmm11
2095# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
2096# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
2097movdqa %xmm8,%xmm13
2098
2099# qhasm: xmm8 = xmm10
2100# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
2101# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
2102movdqa %xmm9,%xmm14
2103
2104# qhasm: xmm15 = xmm11
2105# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
2106# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
2107movdqa %xmm8,%xmm15
2108
2109# qhasm: xmm10 |= xmm9
2110# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
2111# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
2112por %xmm10,%xmm9
2113
2114# qhasm: xmm11 |= xmm12
2115# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
2116# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
2117por %xmm12,%xmm8
2118
2119# qhasm: xmm15 ^= xmm8
2120# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
2121# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
2122pxor %xmm14,%xmm15
2123
2124# qhasm: xmm14 &= xmm12
2125# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
2126# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
2127pand %xmm12,%xmm13
2128
2129# qhasm: xmm8 &= xmm9
2130# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
2131# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
2132pand %xmm10,%xmm14
2133
2134# qhasm: xmm12 ^= xmm9
2135# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
2136# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
2137pxor %xmm10,%xmm12
2138
2139# qhasm: xmm15 &= xmm12
2140# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
2141# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
2142pand %xmm12,%xmm15
2143
2144# qhasm: xmm12 = xmm6
2145# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#11
2146# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm10
2147movdqa %xmm6,%xmm10
2148
2149# qhasm: xmm12 ^= xmm0
2150# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
2151# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
2152pxor %xmm0,%xmm10
2153
2154# qhasm: xmm13 &= xmm12
2155# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
2156# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
2157pand %xmm10,%xmm11
2158
2159# qhasm: xmm11 ^= xmm13
2160# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
2161# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
2162pxor %xmm11,%xmm8
2163
2164# qhasm: xmm10 ^= xmm13
2165# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
2166# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
2167pxor %xmm11,%xmm9
2168
2169# qhasm: xmm13 = xmm5
2170# asm 1: movdqa <xmm5=int6464#6,>xmm13=int6464#11
2171# asm 2: movdqa <xmm5=%xmm5,>xmm13=%xmm10
2172movdqa %xmm5,%xmm10
2173
2174# qhasm: xmm13 ^= xmm1
2175# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
2176# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
2177pxor %xmm1,%xmm10
2178
2179# qhasm: xmm12 = xmm7
2180# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#12
2181# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm11
2182movdqa %xmm7,%xmm11
2183
2184# qhasm: xmm9 = xmm13
2185# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
2186# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
2187movdqa %xmm10,%xmm12
2188
2189# qhasm: xmm12 ^= xmm2
2190# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#12
2191# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm11
2192pxor %xmm2,%xmm11
2193
2194# qhasm: xmm9 |= xmm12
2195# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
2196# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
2197por %xmm11,%xmm12
2198
2199# qhasm: xmm13 &= xmm12
2200# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
2201# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
2202pand %xmm11,%xmm10
2203
2204# qhasm: xmm8 ^= xmm13
2205# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
2206# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
2207pxor %xmm10,%xmm14
2208
2209# qhasm: xmm11 ^= xmm15
2210# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
2211# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
2212pxor %xmm15,%xmm8
2213
2214# qhasm: xmm10 ^= xmm14
2215# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
2216# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
2217pxor %xmm13,%xmm9
2218
2219# qhasm: xmm9 ^= xmm15
2220# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
2221# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
2222pxor %xmm15,%xmm12
2223
2224# qhasm: xmm8 ^= xmm14
2225# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
2226# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
2227pxor %xmm13,%xmm14
2228
2229# qhasm: xmm9 ^= xmm14
2230# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
2231# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
2232pxor %xmm13,%xmm12
2233
2234# qhasm: xmm12 = xmm4
2235# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#11
2236# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm10
2237movdqa %xmm4,%xmm10
2238
2239# qhasm: xmm13 = xmm3
2240# asm 1: movdqa <xmm3=int6464#4,>xmm13=int6464#12
2241# asm 2: movdqa <xmm3=%xmm3,>xmm13=%xmm11
2242movdqa %xmm3,%xmm11
2243
2244# qhasm: xmm14 = xmm1
2245# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
2246# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
2247movdqa %xmm1,%xmm13
2248
2249# qhasm: xmm15 = xmm5
2250# asm 1: movdqa <xmm5=int6464#6,>xmm15=int6464#16
2251# asm 2: movdqa <xmm5=%xmm5,>xmm15=%xmm15
2252movdqa %xmm5,%xmm15
2253
2254# qhasm: xmm12 &= xmm6
2255# asm 1: pand <xmm6=int6464#7,<xmm12=int6464#11
2256# asm 2: pand <xmm6=%xmm6,<xmm12=%xmm10
2257pand %xmm6,%xmm10
2258
2259# qhasm: xmm13 &= xmm0
2260# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
2261# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
2262pand %xmm0,%xmm11
2263
2264# qhasm: xmm14 &= xmm7
2265# asm 1: pand <xmm7=int6464#8,<xmm14=int6464#14
2266# asm 2: pand <xmm7=%xmm7,<xmm14=%xmm13
2267pand %xmm7,%xmm13
2268
2269# qhasm: xmm15 |= xmm2
2270# asm 1: por <xmm2=int6464#3,<xmm15=int6464#16
2271# asm 2: por <xmm2=%xmm2,<xmm15=%xmm15
2272por %xmm2,%xmm15
2273
2274# qhasm: xmm11 ^= xmm12
2275# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
2276# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
2277pxor %xmm10,%xmm8
2278
2279# qhasm: xmm10 ^= xmm13
2280# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
2281# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
2282pxor %xmm11,%xmm9
2283
2284# qhasm: xmm9 ^= xmm14
2285# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
2286# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
2287pxor %xmm13,%xmm12
2288
2289# qhasm: xmm8 ^= xmm15
2290# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
2291# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
2292pxor %xmm15,%xmm14
2293
2294# qhasm: xmm12 = xmm11
2295# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
2296# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
2297movdqa %xmm8,%xmm10
2298
2299# qhasm: xmm12 ^= xmm10
2300# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
2301# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
2302pxor %xmm9,%xmm10
2303
2304# qhasm: xmm11 &= xmm9
2305# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
2306# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
2307pand %xmm12,%xmm8
2308
2309# qhasm: xmm14 = xmm8
2310# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
2311# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
2312movdqa %xmm14,%xmm11
2313
2314# qhasm: xmm14 ^= xmm11
2315# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
2316# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
2317pxor %xmm8,%xmm11
2318
2319# qhasm: xmm15 = xmm12
2320# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
2321# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
2322movdqa %xmm10,%xmm13
2323
2324# qhasm: xmm15 &= xmm14
2325# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
2326# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
2327pand %xmm11,%xmm13
2328
2329# qhasm: xmm15 ^= xmm10
2330# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
2331# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
2332pxor %xmm9,%xmm13
2333
2334# qhasm: xmm13 = xmm9
2335# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
2336# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
2337movdqa %xmm12,%xmm15
2338
2339# qhasm: xmm13 ^= xmm8
2340# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
2341# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
2342pxor %xmm14,%xmm15
2343
2344# qhasm: xmm11 ^= xmm10
2345# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
2346# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
2347pxor %xmm9,%xmm8
2348
2349# qhasm: xmm13 &= xmm11
2350# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
2351# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
2352pand %xmm8,%xmm15
2353
2354# qhasm: xmm13 ^= xmm8
2355# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
2356# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
2357pxor %xmm14,%xmm15
2358
2359# qhasm: xmm9 ^= xmm13
2360# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
2361# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
2362pxor %xmm15,%xmm12
2363
2364# qhasm: xmm10 = xmm14
2365# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
2366# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
2367movdqa %xmm11,%xmm8
2368
2369# qhasm: xmm10 ^= xmm13
2370# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
2371# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
2372pxor %xmm15,%xmm8
2373
2374# qhasm: xmm10 &= xmm8
2375# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
2376# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
2377pand %xmm14,%xmm8
2378
2379# qhasm: xmm9 ^= xmm10
2380# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
2381# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
2382pxor %xmm8,%xmm12
2383
2384# qhasm: xmm14 ^= xmm10
2385# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
2386# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
2387pxor %xmm8,%xmm11
2388
2389# qhasm: xmm14 &= xmm15
2390# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
2391# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
2392pand %xmm13,%xmm11
2393
2394# qhasm: xmm14 ^= xmm12
2395# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
2396# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
2397pxor %xmm10,%xmm11
2398
2399# qhasm: xmm12 = xmm2
2400# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#9
2401# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm8
2402movdqa %xmm2,%xmm8
2403
2404# qhasm: xmm8 = xmm7
2405# asm 1: movdqa <xmm7=int6464#8,>xmm8=int6464#10
2406# asm 2: movdqa <xmm7=%xmm7,>xmm8=%xmm9
2407movdqa %xmm7,%xmm9
2408
2409# qhasm: xmm10 = xmm15
2410# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
2411# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
2412movdqa %xmm13,%xmm10
2413
2414# qhasm: xmm10 ^= xmm14
2415# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
2416# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
2417pxor %xmm11,%xmm10
2418
2419# qhasm: xmm10 &= xmm2
2420# asm 1: pand <xmm2=int6464#3,<xmm10=int6464#11
2421# asm 2: pand <xmm2=%xmm2,<xmm10=%xmm10
2422pand %xmm2,%xmm10
2423
2424# qhasm: xmm2 ^= xmm7
2425# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
2426# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
2427pxor %xmm7,%xmm2
2428
2429# qhasm: xmm2 &= xmm14
2430# asm 1: pand <xmm14=int6464#12,<xmm2=int6464#3
2431# asm 2: pand <xmm14=%xmm11,<xmm2=%xmm2
2432pand %xmm11,%xmm2
2433
2434# qhasm: xmm7 &= xmm15
2435# asm 1: pand <xmm15=int6464#14,<xmm7=int6464#8
2436# asm 2: pand <xmm15=%xmm13,<xmm7=%xmm7
2437pand %xmm13,%xmm7
2438
2439# qhasm: xmm2 ^= xmm7
2440# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
2441# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
2442pxor %xmm7,%xmm2
2443
2444# qhasm: xmm7 ^= xmm10
2445# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
2446# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
2447pxor %xmm10,%xmm7
2448
2449# qhasm: xmm12 ^= xmm0
2450# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
2451# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
2452pxor %xmm0,%xmm8
2453
2454# qhasm: xmm8 ^= xmm6
2455# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#10
2456# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm9
2457pxor %xmm6,%xmm9
2458
2459# qhasm: xmm15 ^= xmm13
2460# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
2461# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
2462pxor %xmm15,%xmm13
2463
2464# qhasm: xmm14 ^= xmm9
2465# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
2466# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
2467pxor %xmm12,%xmm11
2468
2469# qhasm: xmm11 = xmm15
2470# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
2471# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
2472movdqa %xmm13,%xmm10
2473
2474# qhasm: xmm11 ^= xmm14
2475# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
2476# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
2477pxor %xmm11,%xmm10
2478
2479# qhasm: xmm11 &= xmm12
2480# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
2481# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
2482pand %xmm8,%xmm10
2483
2484# qhasm: xmm12 ^= xmm8
2485# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
2486# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
2487pxor %xmm9,%xmm8
2488
2489# qhasm: xmm12 &= xmm14
2490# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
2491# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
2492pand %xmm11,%xmm8
2493
2494# qhasm: xmm8 &= xmm15
2495# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
2496# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
2497pand %xmm13,%xmm9
2498
2499# qhasm: xmm8 ^= xmm12
2500# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
2501# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
2502pxor %xmm8,%xmm9
2503
2504# qhasm: xmm12 ^= xmm11
2505# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
2506# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
2507pxor %xmm10,%xmm8
2508
2509# qhasm: xmm10 = xmm13
2510# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
2511# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
2512movdqa %xmm15,%xmm10
2513
2514# qhasm: xmm10 ^= xmm9
2515# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
2516# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
2517pxor %xmm12,%xmm10
2518
2519# qhasm: xmm10 &= xmm0
2520# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
2521# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
2522pand %xmm0,%xmm10
2523
2524# qhasm: xmm0 ^= xmm6
2525# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
2526# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
2527pxor %xmm6,%xmm0
2528
2529# qhasm: xmm0 &= xmm9
2530# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
2531# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
2532pand %xmm12,%xmm0
2533
2534# qhasm: xmm6 &= xmm13
2535# asm 1: pand <xmm13=int6464#16,<xmm6=int6464#7
2536# asm 2: pand <xmm13=%xmm15,<xmm6=%xmm6
2537pand %xmm15,%xmm6
2538
2539# qhasm: xmm0 ^= xmm6
2540# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
2541# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
2542pxor %xmm6,%xmm0
2543
2544# qhasm: xmm6 ^= xmm10
2545# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
2546# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
2547pxor %xmm10,%xmm6
2548
2549# qhasm: xmm2 ^= xmm12
2550# asm 1: pxor <xmm12=int6464#9,<xmm2=int6464#3
2551# asm 2: pxor <xmm12=%xmm8,<xmm2=%xmm2
2552pxor %xmm8,%xmm2
2553
2554# qhasm: xmm0 ^= xmm12
2555# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
2556# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
2557pxor %xmm8,%xmm0
2558
2559# qhasm: xmm7 ^= xmm8
2560# asm 1: pxor <xmm8=int6464#10,<xmm7=int6464#8
2561# asm 2: pxor <xmm8=%xmm9,<xmm7=%xmm7
2562pxor %xmm9,%xmm7
2563
2564# qhasm: xmm6 ^= xmm8
2565# asm 1: pxor <xmm8=int6464#10,<xmm6=int6464#7
2566# asm 2: pxor <xmm8=%xmm9,<xmm6=%xmm6
2567pxor %xmm9,%xmm6
2568
2569# qhasm: xmm12 = xmm5
2570# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#9
2571# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm8
2572movdqa %xmm5,%xmm8
2573
2574# qhasm: xmm8 = xmm1
2575# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
2576# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
2577movdqa %xmm1,%xmm9
2578
2579# qhasm: xmm12 ^= xmm3
2580# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#9
2581# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm8
2582pxor %xmm3,%xmm8
2583
2584# qhasm: xmm8 ^= xmm4
2585# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#10
2586# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm9
2587pxor %xmm4,%xmm9
2588
2589# qhasm: xmm11 = xmm15
2590# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
2591# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
2592movdqa %xmm13,%xmm10
2593
2594# qhasm: xmm11 ^= xmm14
2595# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
2596# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
2597pxor %xmm11,%xmm10
2598
2599# qhasm: xmm11 &= xmm12
2600# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
2601# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
2602pand %xmm8,%xmm10
2603
2604# qhasm: xmm12 ^= xmm8
2605# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
2606# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
2607pxor %xmm9,%xmm8
2608
2609# qhasm: xmm12 &= xmm14
2610# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
2611# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
2612pand %xmm11,%xmm8
2613
2614# qhasm: xmm8 &= xmm15
2615# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
2616# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
2617pand %xmm13,%xmm9
2618
2619# qhasm: xmm8 ^= xmm12
2620# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
2621# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
2622pxor %xmm8,%xmm9
2623
2624# qhasm: xmm12 ^= xmm11
2625# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
2626# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
2627pxor %xmm10,%xmm8
2628
2629# qhasm: xmm10 = xmm13
2630# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
2631# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
2632movdqa %xmm15,%xmm10
2633
2634# qhasm: xmm10 ^= xmm9
2635# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
2636# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
2637pxor %xmm12,%xmm10
2638
2639# qhasm: xmm10 &= xmm3
2640# asm 1: pand <xmm3=int6464#4,<xmm10=int6464#11
2641# asm 2: pand <xmm3=%xmm3,<xmm10=%xmm10
2642pand %xmm3,%xmm10
2643
2644# qhasm: xmm3 ^= xmm4
2645# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
2646# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
2647pxor %xmm4,%xmm3
2648
2649# qhasm: xmm3 &= xmm9
2650# asm 1: pand <xmm9=int6464#13,<xmm3=int6464#4
2651# asm 2: pand <xmm9=%xmm12,<xmm3=%xmm3
2652pand %xmm12,%xmm3
2653
2654# qhasm: xmm4 &= xmm13
2655# asm 1: pand <xmm13=int6464#16,<xmm4=int6464#5
2656# asm 2: pand <xmm13=%xmm15,<xmm4=%xmm4
2657pand %xmm15,%xmm4
2658
2659# qhasm: xmm3 ^= xmm4
2660# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
2661# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
2662pxor %xmm4,%xmm3
2663
2664# qhasm: xmm4 ^= xmm10
2665# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
2666# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
2667pxor %xmm10,%xmm4
2668
2669# qhasm: xmm15 ^= xmm13
2670# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
2671# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
2672pxor %xmm15,%xmm13
2673
2674# qhasm: xmm14 ^= xmm9
2675# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
2676# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
2677pxor %xmm12,%xmm11
2678
2679# qhasm: xmm11 = xmm15
2680# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
2681# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
2682movdqa %xmm13,%xmm10
2683
2684# qhasm: xmm11 ^= xmm14
2685# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
2686# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
2687pxor %xmm11,%xmm10
2688
2689# qhasm: xmm11 &= xmm5
2690# asm 1: pand <xmm5=int6464#6,<xmm11=int6464#11
2691# asm 2: pand <xmm5=%xmm5,<xmm11=%xmm10
2692pand %xmm5,%xmm10
2693
2694# qhasm: xmm5 ^= xmm1
2695# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
2696# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
2697pxor %xmm1,%xmm5
2698
2699# qhasm: xmm5 &= xmm14
2700# asm 1: pand <xmm14=int6464#12,<xmm5=int6464#6
2701# asm 2: pand <xmm14=%xmm11,<xmm5=%xmm5
2702pand %xmm11,%xmm5
2703
2704# qhasm: xmm1 &= xmm15
2705# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
2706# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
2707pand %xmm13,%xmm1
2708
2709# qhasm: xmm5 ^= xmm1
2710# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
2711# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
2712pxor %xmm1,%xmm5
2713
2714# qhasm: xmm1 ^= xmm11
2715# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
2716# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
2717pxor %xmm10,%xmm1
2718
2719# qhasm: xmm5 ^= xmm12
2720# asm 1: pxor <xmm12=int6464#9,<xmm5=int6464#6
2721# asm 2: pxor <xmm12=%xmm8,<xmm5=%xmm5
2722pxor %xmm8,%xmm5
2723
2724# qhasm: xmm3 ^= xmm12
2725# asm 1: pxor <xmm12=int6464#9,<xmm3=int6464#4
2726# asm 2: pxor <xmm12=%xmm8,<xmm3=%xmm3
2727pxor %xmm8,%xmm3
2728
2729# qhasm: xmm1 ^= xmm8
2730# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
2731# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
2732pxor %xmm9,%xmm1
2733
2734# qhasm: xmm4 ^= xmm8
2735# asm 1: pxor <xmm8=int6464#10,<xmm4=int6464#5
2736# asm 2: pxor <xmm8=%xmm9,<xmm4=%xmm4
2737pxor %xmm9,%xmm4
2738
2739# qhasm: xmm5 ^= xmm0
2740# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
2741# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
2742pxor %xmm0,%xmm5
2743
2744# qhasm: xmm1 ^= xmm2
2745# asm 1: pxor <xmm2=int6464#3,<xmm1=int6464#2
2746# asm 2: pxor <xmm2=%xmm2,<xmm1=%xmm1
2747pxor %xmm2,%xmm1
2748
2749# qhasm: xmm3 ^= xmm5
2750# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
2751# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
2752pxor %xmm5,%xmm3
2753
2754# qhasm: xmm2 ^= xmm0
2755# asm 1: pxor <xmm0=int6464#1,<xmm2=int6464#3
2756# asm 2: pxor <xmm0=%xmm0,<xmm2=%xmm2
2757pxor %xmm0,%xmm2
2758
2759# qhasm: xmm0 ^= xmm1
2760# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
2761# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
2762pxor %xmm1,%xmm0
2763
2764# qhasm: xmm1 ^= xmm7
2765# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
2766# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
2767pxor %xmm7,%xmm1
2768
2769# qhasm: xmm7 ^= xmm4
2770# asm 1: pxor <xmm4=int6464#5,<xmm7=int6464#8
2771# asm 2: pxor <xmm4=%xmm4,<xmm7=%xmm7
2772pxor %xmm4,%xmm7
2773
2774# qhasm: xmm3 ^= xmm7
2775# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
2776# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
2777pxor %xmm7,%xmm3
2778
2779# qhasm: xmm4 ^= xmm6
2780# asm 1: pxor <xmm6=int6464#7,<xmm4=int6464#5
2781# asm 2: pxor <xmm6=%xmm6,<xmm4=%xmm4
2782pxor %xmm6,%xmm4
2783
2784# qhasm: xmm6 ^= xmm7
2785# asm 1: pxor <xmm7=int6464#8,<xmm6=int6464#7
2786# asm 2: pxor <xmm7=%xmm7,<xmm6=%xmm6
2787pxor %xmm7,%xmm6
2788
2789# qhasm: xmm2 ^= xmm6
2790# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
2791# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
2792pxor %xmm6,%xmm2
2793
2794# qhasm: xmm1 ^= RCON
2795# asm 1: pxor RCON,<xmm1=int6464#2
2796# asm 2: pxor RCON,<xmm1=%xmm1
2797pxor RCON,%xmm1
2798
2799# qhasm: shuffle bytes of xmm0 by EXPB0
2800# asm 1: pshufb EXPB0,<xmm0=int6464#1
2801# asm 2: pshufb EXPB0,<xmm0=%xmm0
2802pshufb EXPB0,%xmm0
2803
2804# qhasm: shuffle bytes of xmm1 by EXPB0
2805# asm 1: pshufb EXPB0,<xmm1=int6464#2
2806# asm 2: pshufb EXPB0,<xmm1=%xmm1
2807pshufb EXPB0,%xmm1
2808
2809# qhasm: shuffle bytes of xmm3 by EXPB0
2810# asm 1: pshufb EXPB0,<xmm3=int6464#4
2811# asm 2: pshufb EXPB0,<xmm3=%xmm3
2812pshufb EXPB0,%xmm3
2813
2814# qhasm: shuffle bytes of xmm2 by EXPB0
2815# asm 1: pshufb EXPB0,<xmm2=int6464#3
2816# asm 2: pshufb EXPB0,<xmm2=%xmm2
2817pshufb EXPB0,%xmm2
2818
2819# qhasm: shuffle bytes of xmm6 by EXPB0
2820# asm 1: pshufb EXPB0,<xmm6=int6464#7
2821# asm 2: pshufb EXPB0,<xmm6=%xmm6
2822pshufb EXPB0,%xmm6
2823
2824# qhasm: shuffle bytes of xmm5 by EXPB0
2825# asm 1: pshufb EXPB0,<xmm5=int6464#6
2826# asm 2: pshufb EXPB0,<xmm5=%xmm5
2827pshufb EXPB0,%xmm5
2828
2829# qhasm: shuffle bytes of xmm4 by EXPB0
2830# asm 1: pshufb EXPB0,<xmm4=int6464#5
2831# asm 2: pshufb EXPB0,<xmm4=%xmm4
2832pshufb EXPB0,%xmm4
2833
2834# qhasm: shuffle bytes of xmm7 by EXPB0
2835# asm 1: pshufb EXPB0,<xmm7=int6464#8
2836# asm 2: pshufb EXPB0,<xmm7=%xmm7
2837pshufb EXPB0,%xmm7
2838
2839# qhasm: xmm8 = *(int128 *)(c + 128)
2840# asm 1: movdqa 128(<c=int64#1),>xmm8=int6464#9
2841# asm 2: movdqa 128(<c=%rdi),>xmm8=%xmm8
2842movdqa 128(%rdi),%xmm8
2843
2844# qhasm: xmm9 = *(int128 *)(c + 144)
2845# asm 1: movdqa 144(<c=int64#1),>xmm9=int6464#10
2846# asm 2: movdqa 144(<c=%rdi),>xmm9=%xmm9
2847movdqa 144(%rdi),%xmm9
2848
2849# qhasm: xmm10 = *(int128 *)(c + 160)
2850# asm 1: movdqa 160(<c=int64#1),>xmm10=int6464#11
2851# asm 2: movdqa 160(<c=%rdi),>xmm10=%xmm10
2852movdqa 160(%rdi),%xmm10
2853
2854# qhasm: xmm11 = *(int128 *)(c + 176)
2855# asm 1: movdqa 176(<c=int64#1),>xmm11=int6464#12
2856# asm 2: movdqa 176(<c=%rdi),>xmm11=%xmm11
2857movdqa 176(%rdi),%xmm11
2858
2859# qhasm: xmm12 = *(int128 *)(c + 192)
2860# asm 1: movdqa 192(<c=int64#1),>xmm12=int6464#13
2861# asm 2: movdqa 192(<c=%rdi),>xmm12=%xmm12
2862movdqa 192(%rdi),%xmm12
2863
2864# qhasm: xmm13 = *(int128 *)(c + 208)
2865# asm 1: movdqa 208(<c=int64#1),>xmm13=int6464#14
2866# asm 2: movdqa 208(<c=%rdi),>xmm13=%xmm13
2867movdqa 208(%rdi),%xmm13
2868
2869# qhasm: xmm14 = *(int128 *)(c + 224)
2870# asm 1: movdqa 224(<c=int64#1),>xmm14=int6464#15
2871# asm 2: movdqa 224(<c=%rdi),>xmm14=%xmm14
2872movdqa 224(%rdi),%xmm14
2873
2874# qhasm: xmm15 = *(int128 *)(c + 240)
2875# asm 1: movdqa 240(<c=int64#1),>xmm15=int6464#16
2876# asm 2: movdqa 240(<c=%rdi),>xmm15=%xmm15
2877movdqa 240(%rdi),%xmm15
2878
2879# qhasm: xmm8 ^= ONE
2880# asm 1: pxor ONE,<xmm8=int6464#9
2881# asm 2: pxor ONE,<xmm8=%xmm8
2882pxor ONE,%xmm8
2883
2884# qhasm: xmm9 ^= ONE
2885# asm 1: pxor ONE,<xmm9=int6464#10
2886# asm 2: pxor ONE,<xmm9=%xmm9
2887pxor ONE,%xmm9
2888
2889# qhasm: xmm13 ^= ONE
2890# asm 1: pxor ONE,<xmm13=int6464#14
2891# asm 2: pxor ONE,<xmm13=%xmm13
2892pxor ONE,%xmm13
2893
2894# qhasm: xmm14 ^= ONE
2895# asm 1: pxor ONE,<xmm14=int6464#15
2896# asm 2: pxor ONE,<xmm14=%xmm14
2897pxor ONE,%xmm14
2898
2899# qhasm: xmm0 ^= xmm8
2900# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
2901# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
2902pxor %xmm8,%xmm0
2903
2904# qhasm: xmm1 ^= xmm9
2905# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
2906# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
2907pxor %xmm9,%xmm1
2908
2909# qhasm: xmm3 ^= xmm10
2910# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
2911# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
2912pxor %xmm10,%xmm3
2913
2914# qhasm: xmm2 ^= xmm11
2915# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
2916# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
2917pxor %xmm11,%xmm2
2918
2919# qhasm: xmm6 ^= xmm12
2920# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
2921# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
2922pxor %xmm12,%xmm6
2923
2924# qhasm: xmm5 ^= xmm13
2925# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
2926# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
2927pxor %xmm13,%xmm5
2928
2929# qhasm: xmm4 ^= xmm14
2930# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
2931# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
2932pxor %xmm14,%xmm4
2933
2934# qhasm: xmm7 ^= xmm15
2935# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
2936# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
2937pxor %xmm15,%xmm7
2938
2939# qhasm: uint32323232 xmm8 >>= 8
2940# asm 1: psrld $8,<xmm8=int6464#9
2941# asm 2: psrld $8,<xmm8=%xmm8
2942psrld $8,%xmm8
2943
2944# qhasm: uint32323232 xmm9 >>= 8
2945# asm 1: psrld $8,<xmm9=int6464#10
2946# asm 2: psrld $8,<xmm9=%xmm9
2947psrld $8,%xmm9
2948
2949# qhasm: uint32323232 xmm10 >>= 8
2950# asm 1: psrld $8,<xmm10=int6464#11
2951# asm 2: psrld $8,<xmm10=%xmm10
2952psrld $8,%xmm10
2953
2954# qhasm: uint32323232 xmm11 >>= 8
2955# asm 1: psrld $8,<xmm11=int6464#12
2956# asm 2: psrld $8,<xmm11=%xmm11
2957psrld $8,%xmm11
2958
2959# qhasm: uint32323232 xmm12 >>= 8
2960# asm 1: psrld $8,<xmm12=int6464#13
2961# asm 2: psrld $8,<xmm12=%xmm12
2962psrld $8,%xmm12
2963
2964# qhasm: uint32323232 xmm13 >>= 8
2965# asm 1: psrld $8,<xmm13=int6464#14
2966# asm 2: psrld $8,<xmm13=%xmm13
2967psrld $8,%xmm13
2968
2969# qhasm: uint32323232 xmm14 >>= 8
2970# asm 1: psrld $8,<xmm14=int6464#15
2971# asm 2: psrld $8,<xmm14=%xmm14
2972psrld $8,%xmm14
2973
2974# qhasm: uint32323232 xmm15 >>= 8
2975# asm 1: psrld $8,<xmm15=int6464#16
2976# asm 2: psrld $8,<xmm15=%xmm15
2977psrld $8,%xmm15
2978
2979# qhasm: xmm0 ^= xmm8
2980# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
2981# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
2982pxor %xmm8,%xmm0
2983
2984# qhasm: xmm1 ^= xmm9
2985# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
2986# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
2987pxor %xmm9,%xmm1
2988
2989# qhasm: xmm3 ^= xmm10
2990# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
2991# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
2992pxor %xmm10,%xmm3
2993
2994# qhasm: xmm2 ^= xmm11
2995# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
2996# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
2997pxor %xmm11,%xmm2
2998
2999# qhasm: xmm6 ^= xmm12
3000# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
3001# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
3002pxor %xmm12,%xmm6
3003
3004# qhasm: xmm5 ^= xmm13
3005# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
3006# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
3007pxor %xmm13,%xmm5
3008
3009# qhasm: xmm4 ^= xmm14
3010# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
3011# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
3012pxor %xmm14,%xmm4
3013
3014# qhasm: xmm7 ^= xmm15
3015# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
3016# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
3017pxor %xmm15,%xmm7
3018
3019# qhasm: uint32323232 xmm8 >>= 8
3020# asm 1: psrld $8,<xmm8=int6464#9
3021# asm 2: psrld $8,<xmm8=%xmm8
3022psrld $8,%xmm8
3023
3024# qhasm: uint32323232 xmm9 >>= 8
3025# asm 1: psrld $8,<xmm9=int6464#10
3026# asm 2: psrld $8,<xmm9=%xmm9
3027psrld $8,%xmm9
3028
3029# qhasm: uint32323232 xmm10 >>= 8
3030# asm 1: psrld $8,<xmm10=int6464#11
3031# asm 2: psrld $8,<xmm10=%xmm10
3032psrld $8,%xmm10
3033
3034# qhasm: uint32323232 xmm11 >>= 8
3035# asm 1: psrld $8,<xmm11=int6464#12
3036# asm 2: psrld $8,<xmm11=%xmm11
3037psrld $8,%xmm11
3038
3039# qhasm: uint32323232 xmm12 >>= 8
3040# asm 1: psrld $8,<xmm12=int6464#13
3041# asm 2: psrld $8,<xmm12=%xmm12
3042psrld $8,%xmm12
3043
3044# qhasm: uint32323232 xmm13 >>= 8
3045# asm 1: psrld $8,<xmm13=int6464#14
3046# asm 2: psrld $8,<xmm13=%xmm13
3047psrld $8,%xmm13
3048
3049# qhasm: uint32323232 xmm14 >>= 8
3050# asm 1: psrld $8,<xmm14=int6464#15
3051# asm 2: psrld $8,<xmm14=%xmm14
3052psrld $8,%xmm14
3053
3054# qhasm: uint32323232 xmm15 >>= 8
3055# asm 1: psrld $8,<xmm15=int6464#16
3056# asm 2: psrld $8,<xmm15=%xmm15
3057psrld $8,%xmm15
3058
3059# qhasm: xmm0 ^= xmm8
3060# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
3061# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
3062pxor %xmm8,%xmm0
3063
3064# qhasm: xmm1 ^= xmm9
3065# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
3066# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
3067pxor %xmm9,%xmm1
3068
3069# qhasm: xmm3 ^= xmm10
3070# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
3071# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
3072pxor %xmm10,%xmm3
3073
3074# qhasm: xmm2 ^= xmm11
3075# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
3076# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
3077pxor %xmm11,%xmm2
3078
3079# qhasm: xmm6 ^= xmm12
3080# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
3081# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
3082pxor %xmm12,%xmm6
3083
3084# qhasm: xmm5 ^= xmm13
3085# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
3086# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
3087pxor %xmm13,%xmm5
3088
3089# qhasm: xmm4 ^= xmm14
3090# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
3091# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
3092pxor %xmm14,%xmm4
3093
3094# qhasm: xmm7 ^= xmm15
3095# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
3096# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
3097pxor %xmm15,%xmm7
3098
3099# qhasm: uint32323232 xmm8 >>= 8
3100# asm 1: psrld $8,<xmm8=int6464#9
3101# asm 2: psrld $8,<xmm8=%xmm8
3102psrld $8,%xmm8
3103
3104# qhasm: uint32323232 xmm9 >>= 8
3105# asm 1: psrld $8,<xmm9=int6464#10
3106# asm 2: psrld $8,<xmm9=%xmm9
3107psrld $8,%xmm9
3108
3109# qhasm: uint32323232 xmm10 >>= 8
3110# asm 1: psrld $8,<xmm10=int6464#11
3111# asm 2: psrld $8,<xmm10=%xmm10
3112psrld $8,%xmm10
3113
3114# qhasm: uint32323232 xmm11 >>= 8
3115# asm 1: psrld $8,<xmm11=int6464#12
3116# asm 2: psrld $8,<xmm11=%xmm11
3117psrld $8,%xmm11
3118
3119# qhasm: uint32323232 xmm12 >>= 8
3120# asm 1: psrld $8,<xmm12=int6464#13
3121# asm 2: psrld $8,<xmm12=%xmm12
3122psrld $8,%xmm12
3123
3124# qhasm: uint32323232 xmm13 >>= 8
3125# asm 1: psrld $8,<xmm13=int6464#14
3126# asm 2: psrld $8,<xmm13=%xmm13
3127psrld $8,%xmm13
3128
3129# qhasm: uint32323232 xmm14 >>= 8
3130# asm 1: psrld $8,<xmm14=int6464#15
3131# asm 2: psrld $8,<xmm14=%xmm14
3132psrld $8,%xmm14
3133
3134# qhasm: uint32323232 xmm15 >>= 8
3135# asm 1: psrld $8,<xmm15=int6464#16
3136# asm 2: psrld $8,<xmm15=%xmm15
3137psrld $8,%xmm15
3138
3139# qhasm: xmm0 ^= xmm8
3140# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
3141# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
3142pxor %xmm8,%xmm0
3143
3144# qhasm: xmm1 ^= xmm9
3145# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
3146# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
3147pxor %xmm9,%xmm1
3148
3149# qhasm: xmm3 ^= xmm10
3150# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
3151# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
3152pxor %xmm10,%xmm3
3153
3154# qhasm: xmm2 ^= xmm11
3155# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
3156# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
3157pxor %xmm11,%xmm2
3158
3159# qhasm: xmm6 ^= xmm12
3160# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
3161# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
3162pxor %xmm12,%xmm6
3163
3164# qhasm: xmm5 ^= xmm13
3165# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
3166# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
3167pxor %xmm13,%xmm5
3168
3169# qhasm: xmm4 ^= xmm14
3170# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
3171# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
3172pxor %xmm14,%xmm4
3173
3174# qhasm: xmm7 ^= xmm15
3175# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
3176# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
3177pxor %xmm15,%xmm7
3178
3179# qhasm: *(int128 *)(c + 256) = xmm0
3180# asm 1: movdqa <xmm0=int6464#1,256(<c=int64#1)
3181# asm 2: movdqa <xmm0=%xmm0,256(<c=%rdi)
3182movdqa %xmm0,256(%rdi)
3183
3184# qhasm: *(int128 *)(c + 272) = xmm1
3185# asm 1: movdqa <xmm1=int6464#2,272(<c=int64#1)
3186# asm 2: movdqa <xmm1=%xmm1,272(<c=%rdi)
3187movdqa %xmm1,272(%rdi)
3188
3189# qhasm: *(int128 *)(c + 288) = xmm3
3190# asm 1: movdqa <xmm3=int6464#4,288(<c=int64#1)
3191# asm 2: movdqa <xmm3=%xmm3,288(<c=%rdi)
3192movdqa %xmm3,288(%rdi)
3193
3194# qhasm: *(int128 *)(c + 304) = xmm2
3195# asm 1: movdqa <xmm2=int6464#3,304(<c=int64#1)
3196# asm 2: movdqa <xmm2=%xmm2,304(<c=%rdi)
3197movdqa %xmm2,304(%rdi)
3198
3199# qhasm: *(int128 *)(c + 320) = xmm6
3200# asm 1: movdqa <xmm6=int6464#7,320(<c=int64#1)
3201# asm 2: movdqa <xmm6=%xmm6,320(<c=%rdi)
3202movdqa %xmm6,320(%rdi)
3203
3204# qhasm: *(int128 *)(c + 336) = xmm5
3205# asm 1: movdqa <xmm5=int6464#6,336(<c=int64#1)
3206# asm 2: movdqa <xmm5=%xmm5,336(<c=%rdi)
3207movdqa %xmm5,336(%rdi)
3208
3209# qhasm: *(int128 *)(c + 352) = xmm4
3210# asm 1: movdqa <xmm4=int6464#5,352(<c=int64#1)
3211# asm 2: movdqa <xmm4=%xmm4,352(<c=%rdi)
3212movdqa %xmm4,352(%rdi)
3213
3214# qhasm: *(int128 *)(c + 368) = xmm7
3215# asm 1: movdqa <xmm7=int6464#8,368(<c=int64#1)
3216# asm 2: movdqa <xmm7=%xmm7,368(<c=%rdi)
3217movdqa %xmm7,368(%rdi)
3218
3219# qhasm: xmm0 ^= ONE
3220# asm 1: pxor ONE,<xmm0=int6464#1
3221# asm 2: pxor ONE,<xmm0=%xmm0
3222pxor ONE,%xmm0
3223
3224# qhasm: xmm1 ^= ONE
3225# asm 1: pxor ONE,<xmm1=int6464#2
3226# asm 2: pxor ONE,<xmm1=%xmm1
3227pxor ONE,%xmm1
3228
3229# qhasm: xmm5 ^= ONE
3230# asm 1: pxor ONE,<xmm5=int6464#6
3231# asm 2: pxor ONE,<xmm5=%xmm5
3232pxor ONE,%xmm5
3233
3234# qhasm: xmm4 ^= ONE
3235# asm 1: pxor ONE,<xmm4=int6464#5
3236# asm 2: pxor ONE,<xmm4=%xmm4
3237pxor ONE,%xmm4
3238
3239# qhasm: shuffle bytes of xmm0 by ROTB
3240# asm 1: pshufb ROTB,<xmm0=int6464#1
3241# asm 2: pshufb ROTB,<xmm0=%xmm0
3242pshufb ROTB,%xmm0
3243
3244# qhasm: shuffle bytes of xmm1 by ROTB
3245# asm 1: pshufb ROTB,<xmm1=int6464#2
3246# asm 2: pshufb ROTB,<xmm1=%xmm1
3247pshufb ROTB,%xmm1
3248
3249# qhasm: shuffle bytes of xmm3 by ROTB
3250# asm 1: pshufb ROTB,<xmm3=int6464#4
3251# asm 2: pshufb ROTB,<xmm3=%xmm3
3252pshufb ROTB,%xmm3
3253
3254# qhasm: shuffle bytes of xmm2 by ROTB
3255# asm 1: pshufb ROTB,<xmm2=int6464#3
3256# asm 2: pshufb ROTB,<xmm2=%xmm2
3257pshufb ROTB,%xmm2
3258
3259# qhasm: shuffle bytes of xmm6 by ROTB
3260# asm 1: pshufb ROTB,<xmm6=int6464#7
3261# asm 2: pshufb ROTB,<xmm6=%xmm6
3262pshufb ROTB,%xmm6
3263
3264# qhasm: shuffle bytes of xmm5 by ROTB
3265# asm 1: pshufb ROTB,<xmm5=int6464#6
3266# asm 2: pshufb ROTB,<xmm5=%xmm5
3267pshufb ROTB,%xmm5
3268
3269# qhasm: shuffle bytes of xmm4 by ROTB
3270# asm 1: pshufb ROTB,<xmm4=int6464#5
3271# asm 2: pshufb ROTB,<xmm4=%xmm4
3272pshufb ROTB,%xmm4
3273
3274# qhasm: shuffle bytes of xmm7 by ROTB
3275# asm 1: pshufb ROTB,<xmm7=int6464#8
3276# asm 2: pshufb ROTB,<xmm7=%xmm7
3277pshufb ROTB,%xmm7
3278
3279# qhasm: xmm5 ^= xmm4
3280# asm 1: pxor <xmm4=int6464#5,<xmm5=int6464#6
3281# asm 2: pxor <xmm4=%xmm4,<xmm5=%xmm5
3282pxor %xmm4,%xmm5
3283
3284# qhasm: xmm3 ^= xmm1
3285# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
3286# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
3287pxor %xmm1,%xmm3
3288
3289# qhasm: xmm5 ^= xmm0
3290# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
3291# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
3292pxor %xmm0,%xmm5
3293
3294# qhasm: xmm4 ^= xmm3
3295# asm 1: pxor <xmm3=int6464#4,<xmm4=int6464#5
3296# asm 2: pxor <xmm3=%xmm3,<xmm4=%xmm4
3297pxor %xmm3,%xmm4
3298
3299# qhasm: xmm2 ^= xmm0
3300# asm 1: pxor <xmm0=int6464#1,<xmm2=int6464#3
3301# asm 2: pxor <xmm0=%xmm0,<xmm2=%xmm2
3302pxor %xmm0,%xmm2
3303
3304# qhasm: xmm4 ^= xmm2
3305# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
3306# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
3307pxor %xmm2,%xmm4
3308
3309# qhasm: xmm2 ^= xmm7
3310# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
3311# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
3312pxor %xmm7,%xmm2
3313
3314# qhasm: xmm2 ^= xmm6
3315# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
3316# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
3317pxor %xmm6,%xmm2
3318
3319# qhasm: xmm7 ^= xmm5
3320# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
3321# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
3322pxor %xmm5,%xmm7
3323
3324# qhasm: xmm2 ^= xmm1
3325# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
3326# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
3327pxor %xmm1,%xmm2
3328
3329# qhasm: xmm6 ^= xmm5
3330# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
3331# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
3332pxor %xmm5,%xmm6
3333
3334# qhasm: xmm3 ^= xmm7
3335# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
3336# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
3337pxor %xmm7,%xmm3
3338
3339# qhasm: xmm1 ^= xmm5
3340# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
3341# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
3342pxor %xmm5,%xmm1
3343
3344# qhasm: xmm11 = xmm7
3345# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
3346# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
3347movdqa %xmm7,%xmm8
3348
3349# qhasm: xmm10 = xmm1
3350# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
3351# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
3352movdqa %xmm1,%xmm9
3353
3354# qhasm: xmm9 = xmm5
3355# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
3356# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
3357movdqa %xmm5,%xmm10
3358
3359# qhasm: xmm13 = xmm3
3360# asm 1: movdqa <xmm3=int6464#4,>xmm13=int6464#12
3361# asm 2: movdqa <xmm3=%xmm3,>xmm13=%xmm11
3362movdqa %xmm3,%xmm11
3363
3364# qhasm: xmm12 = xmm4
3365# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#13
3366# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm12
3367movdqa %xmm4,%xmm12
3368
3369# qhasm: xmm11 ^= xmm6
3370# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#9
3371# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm8
3372pxor %xmm6,%xmm8
3373
3374# qhasm: xmm10 ^= xmm3
3375# asm 1: pxor <xmm3=int6464#4,<xmm10=int6464#10
3376# asm 2: pxor <xmm3=%xmm3,<xmm10=%xmm9
3377pxor %xmm3,%xmm9
3378
3379# qhasm: xmm9 ^= xmm2
3380# asm 1: pxor <xmm2=int6464#3,<xmm9=int6464#11
3381# asm 2: pxor <xmm2=%xmm2,<xmm9=%xmm10
3382pxor %xmm2,%xmm10
3383
3384# qhasm: xmm13 ^= xmm6
3385# asm 1: pxor <xmm6=int6464#7,<xmm13=int6464#12
3386# asm 2: pxor <xmm6=%xmm6,<xmm13=%xmm11
3387pxor %xmm6,%xmm11
3388
3389# qhasm: xmm12 ^= xmm0
3390# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
3391# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
3392pxor %xmm0,%xmm12
3393
3394# qhasm: xmm14 = xmm11
3395# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
3396# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
3397movdqa %xmm8,%xmm13
3398
3399# qhasm: xmm8 = xmm10
3400# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
3401# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
3402movdqa %xmm9,%xmm14
3403
3404# qhasm: xmm15 = xmm11
3405# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
3406# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
3407movdqa %xmm8,%xmm15
3408
3409# qhasm: xmm10 |= xmm9
3410# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
3411# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
3412por %xmm10,%xmm9
3413
3414# qhasm: xmm11 |= xmm12
3415# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
3416# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
3417por %xmm12,%xmm8
3418
3419# qhasm: xmm15 ^= xmm8
3420# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
3421# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
3422pxor %xmm14,%xmm15
3423
3424# qhasm: xmm14 &= xmm12
3425# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
3426# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
3427pand %xmm12,%xmm13
3428
3429# qhasm: xmm8 &= xmm9
3430# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
3431# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
3432pand %xmm10,%xmm14
3433
3434# qhasm: xmm12 ^= xmm9
3435# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
3436# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
3437pxor %xmm10,%xmm12
3438
3439# qhasm: xmm15 &= xmm12
3440# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
3441# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
3442pand %xmm12,%xmm15
3443
3444# qhasm: xmm12 = xmm2
3445# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
3446# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
3447movdqa %xmm2,%xmm10
3448
3449# qhasm: xmm12 ^= xmm0
3450# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
3451# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
3452pxor %xmm0,%xmm10
3453
3454# qhasm: xmm13 &= xmm12
3455# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
3456# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
3457pand %xmm10,%xmm11
3458
3459# qhasm: xmm11 ^= xmm13
3460# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
3461# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
3462pxor %xmm11,%xmm8
3463
3464# qhasm: xmm10 ^= xmm13
3465# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3466# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3467pxor %xmm11,%xmm9
3468
3469# qhasm: xmm13 = xmm7
3470# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
3471# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
3472movdqa %xmm7,%xmm10
3473
3474# qhasm: xmm13 ^= xmm1
3475# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
3476# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
3477pxor %xmm1,%xmm10
3478
3479# qhasm: xmm12 = xmm5
3480# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
3481# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
3482movdqa %xmm5,%xmm11
3483
3484# qhasm: xmm9 = xmm13
3485# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
3486# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
3487movdqa %xmm10,%xmm12
3488
3489# qhasm: xmm12 ^= xmm4
3490# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#12
3491# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm11
3492pxor %xmm4,%xmm11
3493
3494# qhasm: xmm9 |= xmm12
3495# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
3496# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
3497por %xmm11,%xmm12
3498
3499# qhasm: xmm13 &= xmm12
3500# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
3501# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
3502pand %xmm11,%xmm10
3503
3504# qhasm: xmm8 ^= xmm13
3505# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
3506# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
3507pxor %xmm10,%xmm14
3508
3509# qhasm: xmm11 ^= xmm15
3510# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
3511# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
3512pxor %xmm15,%xmm8
3513
3514# qhasm: xmm10 ^= xmm14
3515# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
3516# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
3517pxor %xmm13,%xmm9
3518
3519# qhasm: xmm9 ^= xmm15
3520# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
3521# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
3522pxor %xmm15,%xmm12
3523
3524# qhasm: xmm8 ^= xmm14
3525# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
3526# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
3527pxor %xmm13,%xmm14
3528
3529# qhasm: xmm9 ^= xmm14
3530# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3531# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3532pxor %xmm13,%xmm12
3533
3534# qhasm: xmm12 = xmm3
3535# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
3536# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
3537movdqa %xmm3,%xmm10
3538
3539# qhasm: xmm13 = xmm6
3540# asm 1: movdqa <xmm6=int6464#7,>xmm13=int6464#12
3541# asm 2: movdqa <xmm6=%xmm6,>xmm13=%xmm11
3542movdqa %xmm6,%xmm11
3543
3544# qhasm: xmm14 = xmm1
3545# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
3546# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
3547movdqa %xmm1,%xmm13
3548
3549# qhasm: xmm15 = xmm7
3550# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
3551# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
3552movdqa %xmm7,%xmm15
3553
3554# qhasm: xmm12 &= xmm2
3555# asm 1: pand <xmm2=int6464#3,<xmm12=int6464#11
3556# asm 2: pand <xmm2=%xmm2,<xmm12=%xmm10
3557pand %xmm2,%xmm10
3558
3559# qhasm: xmm13 &= xmm0
3560# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
3561# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
3562pand %xmm0,%xmm11
3563
3564# qhasm: xmm14 &= xmm5
3565# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
3566# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
3567pand %xmm5,%xmm13
3568
3569# qhasm: xmm15 |= xmm4
3570# asm 1: por <xmm4=int6464#5,<xmm15=int6464#16
3571# asm 2: por <xmm4=%xmm4,<xmm15=%xmm15
3572por %xmm4,%xmm15
3573
3574# qhasm: xmm11 ^= xmm12
3575# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
3576# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
3577pxor %xmm10,%xmm8
3578
3579# qhasm: xmm10 ^= xmm13
3580# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3581# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3582pxor %xmm11,%xmm9
3583
3584# qhasm: xmm9 ^= xmm14
3585# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3586# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3587pxor %xmm13,%xmm12
3588
3589# qhasm: xmm8 ^= xmm15
3590# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
3591# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
3592pxor %xmm15,%xmm14
3593
3594# qhasm: xmm12 = xmm11
3595# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
3596# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
3597movdqa %xmm8,%xmm10
3598
3599# qhasm: xmm12 ^= xmm10
3600# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
3601# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
3602pxor %xmm9,%xmm10
3603
3604# qhasm: xmm11 &= xmm9
3605# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
3606# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
3607pand %xmm12,%xmm8
3608
3609# qhasm: xmm14 = xmm8
3610# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
3611# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
3612movdqa %xmm14,%xmm11
3613
3614# qhasm: xmm14 ^= xmm11
3615# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
3616# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
3617pxor %xmm8,%xmm11
3618
3619# qhasm: xmm15 = xmm12
3620# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
3621# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
3622movdqa %xmm10,%xmm13
3623
3624# qhasm: xmm15 &= xmm14
3625# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
3626# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
3627pand %xmm11,%xmm13
3628
3629# qhasm: xmm15 ^= xmm10
3630# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
3631# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
3632pxor %xmm9,%xmm13
3633
3634# qhasm: xmm13 = xmm9
3635# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
3636# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
3637movdqa %xmm12,%xmm15
3638
3639# qhasm: xmm13 ^= xmm8
3640# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3641# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3642pxor %xmm14,%xmm15
3643
3644# qhasm: xmm11 ^= xmm10
3645# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
3646# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
3647pxor %xmm9,%xmm8
3648
3649# qhasm: xmm13 &= xmm11
3650# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
3651# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
3652pand %xmm8,%xmm15
3653
3654# qhasm: xmm13 ^= xmm8
3655# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3656# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3657pxor %xmm14,%xmm15
3658
3659# qhasm: xmm9 ^= xmm13
3660# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
3661# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
3662pxor %xmm15,%xmm12
3663
3664# qhasm: xmm10 = xmm14
3665# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
3666# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
3667movdqa %xmm11,%xmm8
3668
3669# qhasm: xmm10 ^= xmm13
3670# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
3671# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
3672pxor %xmm15,%xmm8
3673
3674# qhasm: xmm10 &= xmm8
3675# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
3676# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
3677pand %xmm14,%xmm8
3678
3679# qhasm: xmm9 ^= xmm10
3680# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
3681# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
3682pxor %xmm8,%xmm12
3683
3684# qhasm: xmm14 ^= xmm10
3685# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
3686# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
3687pxor %xmm8,%xmm11
3688
3689# qhasm: xmm14 &= xmm15
3690# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
3691# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
3692pand %xmm13,%xmm11
3693
3694# qhasm: xmm14 ^= xmm12
3695# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
3696# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
3697pxor %xmm10,%xmm11
3698
3699# qhasm: xmm12 = xmm4
3700# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#9
3701# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm8
3702movdqa %xmm4,%xmm8
3703
3704# qhasm: xmm8 = xmm5
3705# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
3706# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
3707movdqa %xmm5,%xmm9
3708
3709# qhasm: xmm10 = xmm15
3710# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
3711# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
3712movdqa %xmm13,%xmm10
3713
3714# qhasm: xmm10 ^= xmm14
3715# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
3716# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
3717pxor %xmm11,%xmm10
3718
3719# qhasm: xmm10 &= xmm4
3720# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
3721# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
3722pand %xmm4,%xmm10
3723
3724# qhasm: xmm4 ^= xmm5
3725# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3726# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3727pxor %xmm5,%xmm4
3728
3729# qhasm: xmm4 &= xmm14
3730# asm 1: pand <xmm14=int6464#12,<xmm4=int6464#5
3731# asm 2: pand <xmm14=%xmm11,<xmm4=%xmm4
3732pand %xmm11,%xmm4
3733
3734# qhasm: xmm5 &= xmm15
3735# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
3736# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
3737pand %xmm13,%xmm5
3738
3739# qhasm: xmm4 ^= xmm5
3740# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3741# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3742pxor %xmm5,%xmm4
3743
3744# qhasm: xmm5 ^= xmm10
3745# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
3746# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
3747pxor %xmm10,%xmm5
3748
3749# qhasm: xmm12 ^= xmm0
3750# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
3751# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
3752pxor %xmm0,%xmm8
3753
3754# qhasm: xmm8 ^= xmm2
3755# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
3756# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
3757pxor %xmm2,%xmm9
3758
3759# qhasm: xmm15 ^= xmm13
3760# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3761# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3762pxor %xmm15,%xmm13
3763
3764# qhasm: xmm14 ^= xmm9
3765# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3766# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3767pxor %xmm12,%xmm11
3768
3769# qhasm: xmm11 = xmm15
3770# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3771# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3772movdqa %xmm13,%xmm10
3773
3774# qhasm: xmm11 ^= xmm14
3775# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3776# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3777pxor %xmm11,%xmm10
3778
3779# qhasm: xmm11 &= xmm12
3780# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3781# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3782pand %xmm8,%xmm10
3783
3784# qhasm: xmm12 ^= xmm8
3785# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3786# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3787pxor %xmm9,%xmm8
3788
3789# qhasm: xmm12 &= xmm14
3790# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3791# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3792pand %xmm11,%xmm8
3793
3794# qhasm: xmm8 &= xmm15
3795# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3796# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3797pand %xmm13,%xmm9
3798
3799# qhasm: xmm8 ^= xmm12
3800# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3801# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3802pxor %xmm8,%xmm9
3803
3804# qhasm: xmm12 ^= xmm11
3805# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3806# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3807pxor %xmm10,%xmm8
3808
3809# qhasm: xmm10 = xmm13
3810# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3811# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3812movdqa %xmm15,%xmm10
3813
3814# qhasm: xmm10 ^= xmm9
3815# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3816# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3817pxor %xmm12,%xmm10
3818
3819# qhasm: xmm10 &= xmm0
3820# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
3821# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
3822pand %xmm0,%xmm10
3823
3824# qhasm: xmm0 ^= xmm2
3825# asm 1: pxor <xmm2=int6464#3,<xmm0=int6464#1
3826# asm 2: pxor <xmm2=%xmm2,<xmm0=%xmm0
3827pxor %xmm2,%xmm0
3828
3829# qhasm: xmm0 &= xmm9
3830# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
3831# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
3832pand %xmm12,%xmm0
3833
3834# qhasm: xmm2 &= xmm13
3835# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
3836# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
3837pand %xmm15,%xmm2
3838
3839# qhasm: xmm0 ^= xmm2
3840# asm 1: pxor <xmm2=int6464#3,<xmm0=int6464#1
3841# asm 2: pxor <xmm2=%xmm2,<xmm0=%xmm0
3842pxor %xmm2,%xmm0
3843
3844# qhasm: xmm2 ^= xmm10
3845# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
3846# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
3847pxor %xmm10,%xmm2
3848
3849# qhasm: xmm4 ^= xmm12
3850# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
3851# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
3852pxor %xmm8,%xmm4
3853
3854# qhasm: xmm0 ^= xmm12
3855# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
3856# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
3857pxor %xmm8,%xmm0
3858
3859# qhasm: xmm5 ^= xmm8
3860# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
3861# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
3862pxor %xmm9,%xmm5
3863
3864# qhasm: xmm2 ^= xmm8
3865# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
3866# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
3867pxor %xmm9,%xmm2
3868
3869# qhasm: xmm12 = xmm7
3870# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
3871# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
3872movdqa %xmm7,%xmm8
3873
3874# qhasm: xmm8 = xmm1
3875# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
3876# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
3877movdqa %xmm1,%xmm9
3878
3879# qhasm: xmm12 ^= xmm6
3880# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#9
3881# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm8
3882pxor %xmm6,%xmm8
3883
3884# qhasm: xmm8 ^= xmm3
3885# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
3886# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
3887pxor %xmm3,%xmm9
3888
3889# qhasm: xmm11 = xmm15
3890# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3891# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3892movdqa %xmm13,%xmm10
3893
3894# qhasm: xmm11 ^= xmm14
3895# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3896# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3897pxor %xmm11,%xmm10
3898
3899# qhasm: xmm11 &= xmm12
3900# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3901# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3902pand %xmm8,%xmm10
3903
3904# qhasm: xmm12 ^= xmm8
3905# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3906# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3907pxor %xmm9,%xmm8
3908
3909# qhasm: xmm12 &= xmm14
3910# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3911# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3912pand %xmm11,%xmm8
3913
3914# qhasm: xmm8 &= xmm15
3915# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3916# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3917pand %xmm13,%xmm9
3918
3919# qhasm: xmm8 ^= xmm12
3920# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3921# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3922pxor %xmm8,%xmm9
3923
3924# qhasm: xmm12 ^= xmm11
3925# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3926# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3927pxor %xmm10,%xmm8
3928
3929# qhasm: xmm10 = xmm13
3930# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3931# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3932movdqa %xmm15,%xmm10
3933
3934# qhasm: xmm10 ^= xmm9
3935# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3936# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3937pxor %xmm12,%xmm10
3938
3939# qhasm: xmm10 &= xmm6
3940# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
3941# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
3942pand %xmm6,%xmm10
3943
3944# qhasm: xmm6 ^= xmm3
3945# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
3946# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
3947pxor %xmm3,%xmm6
3948
3949# qhasm: xmm6 &= xmm9
3950# asm 1: pand <xmm9=int6464#13,<xmm6=int6464#7
3951# asm 2: pand <xmm9=%xmm12,<xmm6=%xmm6
3952pand %xmm12,%xmm6
3953
3954# qhasm: xmm3 &= xmm13
3955# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
3956# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
3957pand %xmm15,%xmm3
3958
3959# qhasm: xmm6 ^= xmm3
3960# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
3961# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
3962pxor %xmm3,%xmm6
3963
3964# qhasm: xmm3 ^= xmm10
3965# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
3966# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
3967pxor %xmm10,%xmm3
3968
3969# qhasm: xmm15 ^= xmm13
3970# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3971# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3972pxor %xmm15,%xmm13
3973
3974# qhasm: xmm14 ^= xmm9
3975# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3976# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3977pxor %xmm12,%xmm11
3978
3979# qhasm: xmm11 = xmm15
3980# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3981# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3982movdqa %xmm13,%xmm10
3983
3984# qhasm: xmm11 ^= xmm14
3985# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3986# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3987pxor %xmm11,%xmm10
3988
3989# qhasm: xmm11 &= xmm7
3990# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
3991# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
3992pand %xmm7,%xmm10
3993
3994# qhasm: xmm7 ^= xmm1
3995# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
3996# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
3997pxor %xmm1,%xmm7
3998
3999# qhasm: xmm7 &= xmm14
4000# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
4001# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
4002pand %xmm11,%xmm7
4003
4004# qhasm: xmm1 &= xmm15
4005# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
4006# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
4007pand %xmm13,%xmm1
4008
4009# qhasm: xmm7 ^= xmm1
4010# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
4011# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
4012pxor %xmm1,%xmm7
4013
4014# qhasm: xmm1 ^= xmm11
4015# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
4016# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
4017pxor %xmm10,%xmm1
4018
4019# qhasm: xmm7 ^= xmm12
4020# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
4021# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
4022pxor %xmm8,%xmm7
4023
4024# qhasm: xmm6 ^= xmm12
4025# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
4026# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
4027pxor %xmm8,%xmm6
4028
4029# qhasm: xmm1 ^= xmm8
4030# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
4031# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
4032pxor %xmm9,%xmm1
4033
4034# qhasm: xmm3 ^= xmm8
4035# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
4036# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
4037pxor %xmm9,%xmm3
4038
4039# qhasm: xmm7 ^= xmm0
4040# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
4041# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
4042pxor %xmm0,%xmm7
4043
4044# qhasm: xmm1 ^= xmm4
4045# asm 1: pxor <xmm4=int6464#5,<xmm1=int6464#2
4046# asm 2: pxor <xmm4=%xmm4,<xmm1=%xmm1
4047pxor %xmm4,%xmm1
4048
4049# qhasm: xmm6 ^= xmm7
4050# asm 1: pxor <xmm7=int6464#8,<xmm6=int6464#7
4051# asm 2: pxor <xmm7=%xmm7,<xmm6=%xmm6
4052pxor %xmm7,%xmm6
4053
4054# qhasm: xmm4 ^= xmm0
4055# asm 1: pxor <xmm0=int6464#1,<xmm4=int6464#5
4056# asm 2: pxor <xmm0=%xmm0,<xmm4=%xmm4
4057pxor %xmm0,%xmm4
4058
4059# qhasm: xmm0 ^= xmm1
4060# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
4061# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
4062pxor %xmm1,%xmm0
4063
4064# qhasm: xmm1 ^= xmm5
4065# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
4066# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
4067pxor %xmm5,%xmm1
4068
4069# qhasm: xmm5 ^= xmm3
4070# asm 1: pxor <xmm3=int6464#4,<xmm5=int6464#6
4071# asm 2: pxor <xmm3=%xmm3,<xmm5=%xmm5
4072pxor %xmm3,%xmm5
4073
4074# qhasm: xmm6 ^= xmm5
4075# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
4076# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
4077pxor %xmm5,%xmm6
4078
4079# qhasm: xmm3 ^= xmm2
4080# asm 1: pxor <xmm2=int6464#3,<xmm3=int6464#4
4081# asm 2: pxor <xmm2=%xmm2,<xmm3=%xmm3
4082pxor %xmm2,%xmm3
4083
4084# qhasm: xmm2 ^= xmm5
4085# asm 1: pxor <xmm5=int6464#6,<xmm2=int6464#3
4086# asm 2: pxor <xmm5=%xmm5,<xmm2=%xmm2
4087pxor %xmm5,%xmm2
4088
4089# qhasm: xmm4 ^= xmm2
4090# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
4091# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
4092pxor %xmm2,%xmm4
4093
4094# qhasm: xmm6 ^= RCON
4095# asm 1: pxor RCON,<xmm6=int6464#7
4096# asm 2: pxor RCON,<xmm6=%xmm6
4097pxor RCON,%xmm6
4098
4099# qhasm: shuffle bytes of xmm0 by EXPB0
4100# asm 1: pshufb EXPB0,<xmm0=int6464#1
4101# asm 2: pshufb EXPB0,<xmm0=%xmm0
4102pshufb EXPB0,%xmm0
4103
4104# qhasm: shuffle bytes of xmm1 by EXPB0
4105# asm 1: pshufb EXPB0,<xmm1=int6464#2
4106# asm 2: pshufb EXPB0,<xmm1=%xmm1
4107pshufb EXPB0,%xmm1
4108
4109# qhasm: shuffle bytes of xmm6 by EXPB0
4110# asm 1: pshufb EXPB0,<xmm6=int6464#7
4111# asm 2: pshufb EXPB0,<xmm6=%xmm6
4112pshufb EXPB0,%xmm6
4113
4114# qhasm: shuffle bytes of xmm4 by EXPB0
4115# asm 1: pshufb EXPB0,<xmm4=int6464#5
4116# asm 2: pshufb EXPB0,<xmm4=%xmm4
4117pshufb EXPB0,%xmm4
4118
4119# qhasm: shuffle bytes of xmm2 by EXPB0
4120# asm 1: pshufb EXPB0,<xmm2=int6464#3
4121# asm 2: pshufb EXPB0,<xmm2=%xmm2
4122pshufb EXPB0,%xmm2
4123
4124# qhasm: shuffle bytes of xmm7 by EXPB0
4125# asm 1: pshufb EXPB0,<xmm7=int6464#8
4126# asm 2: pshufb EXPB0,<xmm7=%xmm7
4127pshufb EXPB0,%xmm7
4128
4129# qhasm: shuffle bytes of xmm3 by EXPB0
4130# asm 1: pshufb EXPB0,<xmm3=int6464#4
4131# asm 2: pshufb EXPB0,<xmm3=%xmm3
4132pshufb EXPB0,%xmm3
4133
4134# qhasm: shuffle bytes of xmm5 by EXPB0
4135# asm 1: pshufb EXPB0,<xmm5=int6464#6
4136# asm 2: pshufb EXPB0,<xmm5=%xmm5
4137pshufb EXPB0,%xmm5
4138
4139# qhasm: xmm8 = *(int128 *)(c + 256)
4140# asm 1: movdqa 256(<c=int64#1),>xmm8=int6464#9
4141# asm 2: movdqa 256(<c=%rdi),>xmm8=%xmm8
4142movdqa 256(%rdi),%xmm8
4143
4144# qhasm: xmm9 = *(int128 *)(c + 272)
4145# asm 1: movdqa 272(<c=int64#1),>xmm9=int6464#10
4146# asm 2: movdqa 272(<c=%rdi),>xmm9=%xmm9
4147movdqa 272(%rdi),%xmm9
4148
4149# qhasm: xmm10 = *(int128 *)(c + 288)
4150# asm 1: movdqa 288(<c=int64#1),>xmm10=int6464#11
4151# asm 2: movdqa 288(<c=%rdi),>xmm10=%xmm10
4152movdqa 288(%rdi),%xmm10
4153
4154# qhasm: xmm11 = *(int128 *)(c + 304)
4155# asm 1: movdqa 304(<c=int64#1),>xmm11=int6464#12
4156# asm 2: movdqa 304(<c=%rdi),>xmm11=%xmm11
4157movdqa 304(%rdi),%xmm11
4158
4159# qhasm: xmm12 = *(int128 *)(c + 320)
4160# asm 1: movdqa 320(<c=int64#1),>xmm12=int6464#13
4161# asm 2: movdqa 320(<c=%rdi),>xmm12=%xmm12
4162movdqa 320(%rdi),%xmm12
4163
4164# qhasm: xmm13 = *(int128 *)(c + 336)
4165# asm 1: movdqa 336(<c=int64#1),>xmm13=int6464#14
4166# asm 2: movdqa 336(<c=%rdi),>xmm13=%xmm13
4167movdqa 336(%rdi),%xmm13
4168
4169# qhasm: xmm14 = *(int128 *)(c + 352)
4170# asm 1: movdqa 352(<c=int64#1),>xmm14=int6464#15
4171# asm 2: movdqa 352(<c=%rdi),>xmm14=%xmm14
4172movdqa 352(%rdi),%xmm14
4173
4174# qhasm: xmm15 = *(int128 *)(c + 368)
4175# asm 1: movdqa 368(<c=int64#1),>xmm15=int6464#16
4176# asm 2: movdqa 368(<c=%rdi),>xmm15=%xmm15
4177movdqa 368(%rdi),%xmm15
4178
4179# qhasm: xmm8 ^= ONE
4180# asm 1: pxor ONE,<xmm8=int6464#9
4181# asm 2: pxor ONE,<xmm8=%xmm8
4182pxor ONE,%xmm8
4183
4184# qhasm: xmm9 ^= ONE
4185# asm 1: pxor ONE,<xmm9=int6464#10
4186# asm 2: pxor ONE,<xmm9=%xmm9
4187pxor ONE,%xmm9
4188
4189# qhasm: xmm13 ^= ONE
4190# asm 1: pxor ONE,<xmm13=int6464#14
4191# asm 2: pxor ONE,<xmm13=%xmm13
4192pxor ONE,%xmm13
4193
4194# qhasm: xmm14 ^= ONE
4195# asm 1: pxor ONE,<xmm14=int6464#15
4196# asm 2: pxor ONE,<xmm14=%xmm14
4197pxor ONE,%xmm14
4198
4199# qhasm: xmm0 ^= xmm8
4200# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
4201# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
4202pxor %xmm8,%xmm0
4203
4204# qhasm: xmm1 ^= xmm9
4205# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
4206# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
4207pxor %xmm9,%xmm1
4208
4209# qhasm: xmm6 ^= xmm10
4210# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
4211# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
4212pxor %xmm10,%xmm6
4213
4214# qhasm: xmm4 ^= xmm11
4215# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
4216# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
4217pxor %xmm11,%xmm4
4218
4219# qhasm: xmm2 ^= xmm12
4220# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
4221# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
4222pxor %xmm12,%xmm2
4223
4224# qhasm: xmm7 ^= xmm13
4225# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
4226# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
4227pxor %xmm13,%xmm7
4228
4229# qhasm: xmm3 ^= xmm14
4230# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
4231# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
4232pxor %xmm14,%xmm3
4233
4234# qhasm: xmm5 ^= xmm15
4235# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
4236# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
4237pxor %xmm15,%xmm5
4238
4239# qhasm: uint32323232 xmm8 >>= 8
4240# asm 1: psrld $8,<xmm8=int6464#9
4241# asm 2: psrld $8,<xmm8=%xmm8
4242psrld $8,%xmm8
4243
4244# qhasm: uint32323232 xmm9 >>= 8
4245# asm 1: psrld $8,<xmm9=int6464#10
4246# asm 2: psrld $8,<xmm9=%xmm9
4247psrld $8,%xmm9
4248
4249# qhasm: uint32323232 xmm10 >>= 8
4250# asm 1: psrld $8,<xmm10=int6464#11
4251# asm 2: psrld $8,<xmm10=%xmm10
4252psrld $8,%xmm10
4253
4254# qhasm: uint32323232 xmm11 >>= 8
4255# asm 1: psrld $8,<xmm11=int6464#12
4256# asm 2: psrld $8,<xmm11=%xmm11
4257psrld $8,%xmm11
4258
4259# qhasm: uint32323232 xmm12 >>= 8
4260# asm 1: psrld $8,<xmm12=int6464#13
4261# asm 2: psrld $8,<xmm12=%xmm12
4262psrld $8,%xmm12
4263
4264# qhasm: uint32323232 xmm13 >>= 8
4265# asm 1: psrld $8,<xmm13=int6464#14
4266# asm 2: psrld $8,<xmm13=%xmm13
4267psrld $8,%xmm13
4268
4269# qhasm: uint32323232 xmm14 >>= 8
4270# asm 1: psrld $8,<xmm14=int6464#15
4271# asm 2: psrld $8,<xmm14=%xmm14
4272psrld $8,%xmm14
4273
4274# qhasm: uint32323232 xmm15 >>= 8
4275# asm 1: psrld $8,<xmm15=int6464#16
4276# asm 2: psrld $8,<xmm15=%xmm15
4277psrld $8,%xmm15
4278
4279# qhasm: xmm0 ^= xmm8
4280# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
4281# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
4282pxor %xmm8,%xmm0
4283
4284# qhasm: xmm1 ^= xmm9
4285# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
4286# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
4287pxor %xmm9,%xmm1
4288
4289# qhasm: xmm6 ^= xmm10
4290# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
4291# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
4292pxor %xmm10,%xmm6
4293
4294# qhasm: xmm4 ^= xmm11
4295# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
4296# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
4297pxor %xmm11,%xmm4
4298
4299# qhasm: xmm2 ^= xmm12
4300# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
4301# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
4302pxor %xmm12,%xmm2
4303
4304# qhasm: xmm7 ^= xmm13
4305# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
4306# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
4307pxor %xmm13,%xmm7
4308
4309# qhasm: xmm3 ^= xmm14
4310# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
4311# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
4312pxor %xmm14,%xmm3
4313
4314# qhasm: xmm5 ^= xmm15
4315# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
4316# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
4317pxor %xmm15,%xmm5
4318
4319# qhasm: uint32323232 xmm8 >>= 8
4320# asm 1: psrld $8,<xmm8=int6464#9
4321# asm 2: psrld $8,<xmm8=%xmm8
4322psrld $8,%xmm8
4323
4324# qhasm: uint32323232 xmm9 >>= 8
4325# asm 1: psrld $8,<xmm9=int6464#10
4326# asm 2: psrld $8,<xmm9=%xmm9
4327psrld $8,%xmm9
4328
4329# qhasm: uint32323232 xmm10 >>= 8
4330# asm 1: psrld $8,<xmm10=int6464#11
4331# asm 2: psrld $8,<xmm10=%xmm10
4332psrld $8,%xmm10
4333
4334# qhasm: uint32323232 xmm11 >>= 8
4335# asm 1: psrld $8,<xmm11=int6464#12
4336# asm 2: psrld $8,<xmm11=%xmm11
4337psrld $8,%xmm11
4338
4339# qhasm: uint32323232 xmm12 >>= 8
4340# asm 1: psrld $8,<xmm12=int6464#13
4341# asm 2: psrld $8,<xmm12=%xmm12
4342psrld $8,%xmm12
4343
4344# qhasm: uint32323232 xmm13 >>= 8
4345# asm 1: psrld $8,<xmm13=int6464#14
4346# asm 2: psrld $8,<xmm13=%xmm13
4347psrld $8,%xmm13
4348
4349# qhasm: uint32323232 xmm14 >>= 8
4350# asm 1: psrld $8,<xmm14=int6464#15
4351# asm 2: psrld $8,<xmm14=%xmm14
4352psrld $8,%xmm14
4353
4354# qhasm: uint32323232 xmm15 >>= 8
4355# asm 1: psrld $8,<xmm15=int6464#16
4356# asm 2: psrld $8,<xmm15=%xmm15
4357psrld $8,%xmm15
4358
4359# qhasm: xmm0 ^= xmm8
4360# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
4361# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
4362pxor %xmm8,%xmm0
4363
4364# qhasm: xmm1 ^= xmm9
4365# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
4366# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
4367pxor %xmm9,%xmm1
4368
4369# qhasm: xmm6 ^= xmm10
4370# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
4371# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
4372pxor %xmm10,%xmm6
4373
4374# qhasm: xmm4 ^= xmm11
4375# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
4376# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
4377pxor %xmm11,%xmm4
4378
4379# qhasm: xmm2 ^= xmm12
4380# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
4381# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
4382pxor %xmm12,%xmm2
4383
4384# qhasm: xmm7 ^= xmm13
4385# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
4386# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
4387pxor %xmm13,%xmm7
4388
4389# qhasm: xmm3 ^= xmm14
4390# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
4391# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
4392pxor %xmm14,%xmm3
4393
4394# qhasm: xmm5 ^= xmm15
4395# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
4396# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
4397pxor %xmm15,%xmm5
4398
4399# qhasm: uint32323232 xmm8 >>= 8
4400# asm 1: psrld $8,<xmm8=int6464#9
4401# asm 2: psrld $8,<xmm8=%xmm8
4402psrld $8,%xmm8
4403
4404# qhasm: uint32323232 xmm9 >>= 8
4405# asm 1: psrld $8,<xmm9=int6464#10
4406# asm 2: psrld $8,<xmm9=%xmm9
4407psrld $8,%xmm9
4408
4409# qhasm: uint32323232 xmm10 >>= 8
4410# asm 1: psrld $8,<xmm10=int6464#11
4411# asm 2: psrld $8,<xmm10=%xmm10
4412psrld $8,%xmm10
4413
4414# qhasm: uint32323232 xmm11 >>= 8
4415# asm 1: psrld $8,<xmm11=int6464#12
4416# asm 2: psrld $8,<xmm11=%xmm11
4417psrld $8,%xmm11
4418
4419# qhasm: uint32323232 xmm12 >>= 8
4420# asm 1: psrld $8,<xmm12=int6464#13
4421# asm 2: psrld $8,<xmm12=%xmm12
4422psrld $8,%xmm12
4423
4424# qhasm: uint32323232 xmm13 >>= 8
4425# asm 1: psrld $8,<xmm13=int6464#14
4426# asm 2: psrld $8,<xmm13=%xmm13
4427psrld $8,%xmm13
4428
4429# qhasm: uint32323232 xmm14 >>= 8
4430# asm 1: psrld $8,<xmm14=int6464#15
4431# asm 2: psrld $8,<xmm14=%xmm14
4432psrld $8,%xmm14
4433
4434# qhasm: uint32323232 xmm15 >>= 8
4435# asm 1: psrld $8,<xmm15=int6464#16
4436# asm 2: psrld $8,<xmm15=%xmm15
4437psrld $8,%xmm15
4438
4439# qhasm: xmm0 ^= xmm8
4440# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
4441# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
4442pxor %xmm8,%xmm0
4443
4444# qhasm: xmm1 ^= xmm9
4445# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
4446# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
4447pxor %xmm9,%xmm1
4448
4449# qhasm: xmm6 ^= xmm10
4450# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
4451# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
4452pxor %xmm10,%xmm6
4453
4454# qhasm: xmm4 ^= xmm11
4455# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
4456# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
4457pxor %xmm11,%xmm4
4458
4459# qhasm: xmm2 ^= xmm12
4460# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
4461# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
4462pxor %xmm12,%xmm2
4463
4464# qhasm: xmm7 ^= xmm13
4465# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
4466# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
4467pxor %xmm13,%xmm7
4468
4469# qhasm: xmm3 ^= xmm14
4470# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
4471# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
4472pxor %xmm14,%xmm3
4473
4474# qhasm: xmm5 ^= xmm15
4475# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
4476# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
4477pxor %xmm15,%xmm5
4478
4479# qhasm: *(int128 *)(c + 384) = xmm0
4480# asm 1: movdqa <xmm0=int6464#1,384(<c=int64#1)
4481# asm 2: movdqa <xmm0=%xmm0,384(<c=%rdi)
4482movdqa %xmm0,384(%rdi)
4483
4484# qhasm: *(int128 *)(c + 400) = xmm1
4485# asm 1: movdqa <xmm1=int6464#2,400(<c=int64#1)
4486# asm 2: movdqa <xmm1=%xmm1,400(<c=%rdi)
4487movdqa %xmm1,400(%rdi)
4488
4489# qhasm: *(int128 *)(c + 416) = xmm6
4490# asm 1: movdqa <xmm6=int6464#7,416(<c=int64#1)
4491# asm 2: movdqa <xmm6=%xmm6,416(<c=%rdi)
4492movdqa %xmm6,416(%rdi)
4493
4494# qhasm: *(int128 *)(c + 432) = xmm4
4495# asm 1: movdqa <xmm4=int6464#5,432(<c=int64#1)
4496# asm 2: movdqa <xmm4=%xmm4,432(<c=%rdi)
4497movdqa %xmm4,432(%rdi)
4498
4499# qhasm: *(int128 *)(c + 448) = xmm2
4500# asm 1: movdqa <xmm2=int6464#3,448(<c=int64#1)
4501# asm 2: movdqa <xmm2=%xmm2,448(<c=%rdi)
4502movdqa %xmm2,448(%rdi)
4503
4504# qhasm: *(int128 *)(c + 464) = xmm7
4505# asm 1: movdqa <xmm7=int6464#8,464(<c=int64#1)
4506# asm 2: movdqa <xmm7=%xmm7,464(<c=%rdi)
4507movdqa %xmm7,464(%rdi)
4508
4509# qhasm: *(int128 *)(c + 480) = xmm3
4510# asm 1: movdqa <xmm3=int6464#4,480(<c=int64#1)
4511# asm 2: movdqa <xmm3=%xmm3,480(<c=%rdi)
4512movdqa %xmm3,480(%rdi)
4513
4514# qhasm: *(int128 *)(c + 496) = xmm5
4515# asm 1: movdqa <xmm5=int6464#6,496(<c=int64#1)
4516# asm 2: movdqa <xmm5=%xmm5,496(<c=%rdi)
4517movdqa %xmm5,496(%rdi)
4518
4519# qhasm: xmm0 ^= ONE
4520# asm 1: pxor ONE,<xmm0=int6464#1
4521# asm 2: pxor ONE,<xmm0=%xmm0
4522pxor ONE,%xmm0
4523
4524# qhasm: xmm1 ^= ONE
4525# asm 1: pxor ONE,<xmm1=int6464#2
4526# asm 2: pxor ONE,<xmm1=%xmm1
4527pxor ONE,%xmm1
4528
4529# qhasm: xmm7 ^= ONE
4530# asm 1: pxor ONE,<xmm7=int6464#8
4531# asm 2: pxor ONE,<xmm7=%xmm7
4532pxor ONE,%xmm7
4533
4534# qhasm: xmm3 ^= ONE
4535# asm 1: pxor ONE,<xmm3=int6464#4
4536# asm 2: pxor ONE,<xmm3=%xmm3
4537pxor ONE,%xmm3
4538
4539# qhasm: shuffle bytes of xmm0 by ROTB
4540# asm 1: pshufb ROTB,<xmm0=int6464#1
4541# asm 2: pshufb ROTB,<xmm0=%xmm0
4542pshufb ROTB,%xmm0
4543
4544# qhasm: shuffle bytes of xmm1 by ROTB
4545# asm 1: pshufb ROTB,<xmm1=int6464#2
4546# asm 2: pshufb ROTB,<xmm1=%xmm1
4547pshufb ROTB,%xmm1
4548
4549# qhasm: shuffle bytes of xmm6 by ROTB
4550# asm 1: pshufb ROTB,<xmm6=int6464#7
4551# asm 2: pshufb ROTB,<xmm6=%xmm6
4552pshufb ROTB,%xmm6
4553
4554# qhasm: shuffle bytes of xmm4 by ROTB
4555# asm 1: pshufb ROTB,<xmm4=int6464#5
4556# asm 2: pshufb ROTB,<xmm4=%xmm4
4557pshufb ROTB,%xmm4
4558
4559# qhasm: shuffle bytes of xmm2 by ROTB
4560# asm 1: pshufb ROTB,<xmm2=int6464#3
4561# asm 2: pshufb ROTB,<xmm2=%xmm2
4562pshufb ROTB,%xmm2
4563
4564# qhasm: shuffle bytes of xmm7 by ROTB
4565# asm 1: pshufb ROTB,<xmm7=int6464#8
4566# asm 2: pshufb ROTB,<xmm7=%xmm7
4567pshufb ROTB,%xmm7
4568
4569# qhasm: shuffle bytes of xmm3 by ROTB
4570# asm 1: pshufb ROTB,<xmm3=int6464#4
4571# asm 2: pshufb ROTB,<xmm3=%xmm3
4572pshufb ROTB,%xmm3
4573
4574# qhasm: shuffle bytes of xmm5 by ROTB
4575# asm 1: pshufb ROTB,<xmm5=int6464#6
4576# asm 2: pshufb ROTB,<xmm5=%xmm5
4577pshufb ROTB,%xmm5
4578
4579# qhasm: xmm7 ^= xmm3
4580# asm 1: pxor <xmm3=int6464#4,<xmm7=int6464#8
4581# asm 2: pxor <xmm3=%xmm3,<xmm7=%xmm7
4582pxor %xmm3,%xmm7
4583
4584# qhasm: xmm6 ^= xmm1
4585# asm 1: pxor <xmm1=int6464#2,<xmm6=int6464#7
4586# asm 2: pxor <xmm1=%xmm1,<xmm6=%xmm6
4587pxor %xmm1,%xmm6
4588
4589# qhasm: xmm7 ^= xmm0
4590# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
4591# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
4592pxor %xmm0,%xmm7
4593
4594# qhasm: xmm3 ^= xmm6
4595# asm 1: pxor <xmm6=int6464#7,<xmm3=int6464#4
4596# asm 2: pxor <xmm6=%xmm6,<xmm3=%xmm3
4597pxor %xmm6,%xmm3
4598
4599# qhasm: xmm4 ^= xmm0
4600# asm 1: pxor <xmm0=int6464#1,<xmm4=int6464#5
4601# asm 2: pxor <xmm0=%xmm0,<xmm4=%xmm4
4602pxor %xmm0,%xmm4
4603
4604# qhasm: xmm3 ^= xmm4
4605# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
4606# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
4607pxor %xmm4,%xmm3
4608
4609# qhasm: xmm4 ^= xmm5
4610# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
4611# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
4612pxor %xmm5,%xmm4
4613
4614# qhasm: xmm4 ^= xmm2
4615# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
4616# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
4617pxor %xmm2,%xmm4
4618
4619# qhasm: xmm5 ^= xmm7
4620# asm 1: pxor <xmm7=int6464#8,<xmm5=int6464#6
4621# asm 2: pxor <xmm7=%xmm7,<xmm5=%xmm5
4622pxor %xmm7,%xmm5
4623
4624# qhasm: xmm4 ^= xmm1
4625# asm 1: pxor <xmm1=int6464#2,<xmm4=int6464#5
4626# asm 2: pxor <xmm1=%xmm1,<xmm4=%xmm4
4627pxor %xmm1,%xmm4
4628
4629# qhasm: xmm2 ^= xmm7
4630# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
4631# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
4632pxor %xmm7,%xmm2
4633
4634# qhasm: xmm6 ^= xmm5
4635# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
4636# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
4637pxor %xmm5,%xmm6
4638
4639# qhasm: xmm1 ^= xmm7
4640# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
4641# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
4642pxor %xmm7,%xmm1
4643
4644# qhasm: xmm11 = xmm5
4645# asm 1: movdqa <xmm5=int6464#6,>xmm11=int6464#9
4646# asm 2: movdqa <xmm5=%xmm5,>xmm11=%xmm8
4647movdqa %xmm5,%xmm8
4648
4649# qhasm: xmm10 = xmm1
4650# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
4651# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
4652movdqa %xmm1,%xmm9
4653
4654# qhasm: xmm9 = xmm7
4655# asm 1: movdqa <xmm7=int6464#8,>xmm9=int6464#11
4656# asm 2: movdqa <xmm7=%xmm7,>xmm9=%xmm10
4657movdqa %xmm7,%xmm10
4658
4659# qhasm: xmm13 = xmm6
4660# asm 1: movdqa <xmm6=int6464#7,>xmm13=int6464#12
4661# asm 2: movdqa <xmm6=%xmm6,>xmm13=%xmm11
4662movdqa %xmm6,%xmm11
4663
4664# qhasm: xmm12 = xmm3
4665# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#13
4666# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm12
4667movdqa %xmm3,%xmm12
4668
4669# qhasm: xmm11 ^= xmm2
4670# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#9
4671# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm8
4672pxor %xmm2,%xmm8
4673
4674# qhasm: xmm10 ^= xmm6
4675# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#10
4676# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm9
4677pxor %xmm6,%xmm9
4678
4679# qhasm: xmm9 ^= xmm4
4680# asm 1: pxor <xmm4=int6464#5,<xmm9=int6464#11
4681# asm 2: pxor <xmm4=%xmm4,<xmm9=%xmm10
4682pxor %xmm4,%xmm10
4683
4684# qhasm: xmm13 ^= xmm2
4685# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#12
4686# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm11
4687pxor %xmm2,%xmm11
4688
4689# qhasm: xmm12 ^= xmm0
4690# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
4691# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
4692pxor %xmm0,%xmm12
4693
4694# qhasm: xmm14 = xmm11
4695# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
4696# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
4697movdqa %xmm8,%xmm13
4698
4699# qhasm: xmm8 = xmm10
4700# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
4701# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
4702movdqa %xmm9,%xmm14
4703
4704# qhasm: xmm15 = xmm11
4705# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
4706# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
4707movdqa %xmm8,%xmm15
4708
4709# qhasm: xmm10 |= xmm9
4710# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
4711# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
4712por %xmm10,%xmm9
4713
4714# qhasm: xmm11 |= xmm12
4715# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
4716# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
4717por %xmm12,%xmm8
4718
4719# qhasm: xmm15 ^= xmm8
4720# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
4721# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
4722pxor %xmm14,%xmm15
4723
4724# qhasm: xmm14 &= xmm12
4725# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
4726# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
4727pand %xmm12,%xmm13
4728
4729# qhasm: xmm8 &= xmm9
4730# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
4731# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
4732pand %xmm10,%xmm14
4733
4734# qhasm: xmm12 ^= xmm9
4735# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
4736# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
4737pxor %xmm10,%xmm12
4738
4739# qhasm: xmm15 &= xmm12
4740# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
4741# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
4742pand %xmm12,%xmm15
4743
4744# qhasm: xmm12 = xmm4
4745# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#11
4746# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm10
4747movdqa %xmm4,%xmm10
4748
4749# qhasm: xmm12 ^= xmm0
4750# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
4751# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
4752pxor %xmm0,%xmm10
4753
4754# qhasm: xmm13 &= xmm12
4755# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
4756# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
4757pand %xmm10,%xmm11
4758
4759# qhasm: xmm11 ^= xmm13
4760# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
4761# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
4762pxor %xmm11,%xmm8
4763
4764# qhasm: xmm10 ^= xmm13
4765# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
4766# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
4767pxor %xmm11,%xmm9
4768
4769# qhasm: xmm13 = xmm5
4770# asm 1: movdqa <xmm5=int6464#6,>xmm13=int6464#11
4771# asm 2: movdqa <xmm5=%xmm5,>xmm13=%xmm10
4772movdqa %xmm5,%xmm10
4773
4774# qhasm: xmm13 ^= xmm1
4775# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
4776# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
4777pxor %xmm1,%xmm10
4778
4779# qhasm: xmm12 = xmm7
4780# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#12
4781# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm11
4782movdqa %xmm7,%xmm11
4783
4784# qhasm: xmm9 = xmm13
4785# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
4786# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
4787movdqa %xmm10,%xmm12
4788
4789# qhasm: xmm12 ^= xmm3
4790# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#12
4791# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm11
4792pxor %xmm3,%xmm11
4793
4794# qhasm: xmm9 |= xmm12
4795# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
4796# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
4797por %xmm11,%xmm12
4798
4799# qhasm: xmm13 &= xmm12
4800# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
4801# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
4802pand %xmm11,%xmm10
4803
4804# qhasm: xmm8 ^= xmm13
4805# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
4806# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
4807pxor %xmm10,%xmm14
4808
4809# qhasm: xmm11 ^= xmm15
4810# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
4811# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
4812pxor %xmm15,%xmm8
4813
4814# qhasm: xmm10 ^= xmm14
4815# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
4816# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
4817pxor %xmm13,%xmm9
4818
4819# qhasm: xmm9 ^= xmm15
4820# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
4821# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
4822pxor %xmm15,%xmm12
4823
4824# qhasm: xmm8 ^= xmm14
4825# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
4826# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
4827pxor %xmm13,%xmm14
4828
4829# qhasm: xmm9 ^= xmm14
4830# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
4831# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
4832pxor %xmm13,%xmm12
4833
4834# qhasm: xmm12 = xmm6
4835# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#11
4836# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm10
4837movdqa %xmm6,%xmm10
4838
4839# qhasm: xmm13 = xmm2
4840# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
4841# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
4842movdqa %xmm2,%xmm11
4843
4844# qhasm: xmm14 = xmm1
4845# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
4846# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
4847movdqa %xmm1,%xmm13
4848
4849# qhasm: xmm15 = xmm5
4850# asm 1: movdqa <xmm5=int6464#6,>xmm15=int6464#16
4851# asm 2: movdqa <xmm5=%xmm5,>xmm15=%xmm15
4852movdqa %xmm5,%xmm15
4853
4854# qhasm: xmm12 &= xmm4
4855# asm 1: pand <xmm4=int6464#5,<xmm12=int6464#11
4856# asm 2: pand <xmm4=%xmm4,<xmm12=%xmm10
4857pand %xmm4,%xmm10
4858
4859# qhasm: xmm13 &= xmm0
4860# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
4861# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
4862pand %xmm0,%xmm11
4863
4864# qhasm: xmm14 &= xmm7
4865# asm 1: pand <xmm7=int6464#8,<xmm14=int6464#14
4866# asm 2: pand <xmm7=%xmm7,<xmm14=%xmm13
4867pand %xmm7,%xmm13
4868
4869# qhasm: xmm15 |= xmm3
4870# asm 1: por <xmm3=int6464#4,<xmm15=int6464#16
4871# asm 2: por <xmm3=%xmm3,<xmm15=%xmm15
4872por %xmm3,%xmm15
4873
4874# qhasm: xmm11 ^= xmm12
4875# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
4876# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
4877pxor %xmm10,%xmm8
4878
4879# qhasm: xmm10 ^= xmm13
4880# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
4881# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
4882pxor %xmm11,%xmm9
4883
4884# qhasm: xmm9 ^= xmm14
4885# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
4886# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
4887pxor %xmm13,%xmm12
4888
4889# qhasm: xmm8 ^= xmm15
4890# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
4891# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
4892pxor %xmm15,%xmm14
4893
4894# qhasm: xmm12 = xmm11
4895# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
4896# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
4897movdqa %xmm8,%xmm10
4898
4899# qhasm: xmm12 ^= xmm10
4900# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
4901# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
4902pxor %xmm9,%xmm10
4903
4904# qhasm: xmm11 &= xmm9
4905# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
4906# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
4907pand %xmm12,%xmm8
4908
4909# qhasm: xmm14 = xmm8
4910# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
4911# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
4912movdqa %xmm14,%xmm11
4913
4914# qhasm: xmm14 ^= xmm11
4915# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
4916# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
4917pxor %xmm8,%xmm11
4918
4919# qhasm: xmm15 = xmm12
4920# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
4921# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
4922movdqa %xmm10,%xmm13
4923
4924# qhasm: xmm15 &= xmm14
4925# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
4926# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
4927pand %xmm11,%xmm13
4928
4929# qhasm: xmm15 ^= xmm10
4930# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
4931# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
4932pxor %xmm9,%xmm13
4933
4934# qhasm: xmm13 = xmm9
4935# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
4936# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
4937movdqa %xmm12,%xmm15
4938
4939# qhasm: xmm13 ^= xmm8
4940# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
4941# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
4942pxor %xmm14,%xmm15
4943
4944# qhasm: xmm11 ^= xmm10
4945# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
4946# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
4947pxor %xmm9,%xmm8
4948
4949# qhasm: xmm13 &= xmm11
4950# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
4951# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
4952pand %xmm8,%xmm15
4953
4954# qhasm: xmm13 ^= xmm8
4955# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
4956# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
4957pxor %xmm14,%xmm15
4958
4959# qhasm: xmm9 ^= xmm13
4960# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
4961# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
4962pxor %xmm15,%xmm12
4963
4964# qhasm: xmm10 = xmm14
4965# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
4966# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
4967movdqa %xmm11,%xmm8
4968
4969# qhasm: xmm10 ^= xmm13
4970# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
4971# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
4972pxor %xmm15,%xmm8
4973
4974# qhasm: xmm10 &= xmm8
4975# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
4976# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
4977pand %xmm14,%xmm8
4978
4979# qhasm: xmm9 ^= xmm10
4980# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
4981# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
4982pxor %xmm8,%xmm12
4983
4984# qhasm: xmm14 ^= xmm10
4985# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
4986# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
4987pxor %xmm8,%xmm11
4988
4989# qhasm: xmm14 &= xmm15
4990# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
4991# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
4992pand %xmm13,%xmm11
4993
4994# qhasm: xmm14 ^= xmm12
4995# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
4996# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
4997pxor %xmm10,%xmm11
4998
4999# qhasm: xmm12 = xmm3
5000# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#9
5001# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm8
5002movdqa %xmm3,%xmm8
5003
5004# qhasm: xmm8 = xmm7
5005# asm 1: movdqa <xmm7=int6464#8,>xmm8=int6464#10
5006# asm 2: movdqa <xmm7=%xmm7,>xmm8=%xmm9
5007movdqa %xmm7,%xmm9
5008
5009# qhasm: xmm10 = xmm15
5010# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
5011# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
5012movdqa %xmm13,%xmm10
5013
5014# qhasm: xmm10 ^= xmm14
5015# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
5016# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
5017pxor %xmm11,%xmm10
5018
5019# qhasm: xmm10 &= xmm3
5020# asm 1: pand <xmm3=int6464#4,<xmm10=int6464#11
5021# asm 2: pand <xmm3=%xmm3,<xmm10=%xmm10
5022pand %xmm3,%xmm10
5023
5024# qhasm: xmm3 ^= xmm7
5025# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
5026# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
5027pxor %xmm7,%xmm3
5028
5029# qhasm: xmm3 &= xmm14
5030# asm 1: pand <xmm14=int6464#12,<xmm3=int6464#4
5031# asm 2: pand <xmm14=%xmm11,<xmm3=%xmm3
5032pand %xmm11,%xmm3
5033
5034# qhasm: xmm7 &= xmm15
5035# asm 1: pand <xmm15=int6464#14,<xmm7=int6464#8
5036# asm 2: pand <xmm15=%xmm13,<xmm7=%xmm7
5037pand %xmm13,%xmm7
5038
5039# qhasm: xmm3 ^= xmm7
5040# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
5041# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
5042pxor %xmm7,%xmm3
5043
5044# qhasm: xmm7 ^= xmm10
5045# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
5046# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
5047pxor %xmm10,%xmm7
5048
5049# qhasm: xmm12 ^= xmm0
5050# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
5051# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
5052pxor %xmm0,%xmm8
5053
5054# qhasm: xmm8 ^= xmm4
5055# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#10
5056# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm9
5057pxor %xmm4,%xmm9
5058
5059# qhasm: xmm15 ^= xmm13
5060# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5061# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5062pxor %xmm15,%xmm13
5063
5064# qhasm: xmm14 ^= xmm9
5065# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5066# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5067pxor %xmm12,%xmm11
5068
5069# qhasm: xmm11 = xmm15
5070# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5071# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5072movdqa %xmm13,%xmm10
5073
5074# qhasm: xmm11 ^= xmm14
5075# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5076# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5077pxor %xmm11,%xmm10
5078
5079# qhasm: xmm11 &= xmm12
5080# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5081# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5082pand %xmm8,%xmm10
5083
5084# qhasm: xmm12 ^= xmm8
5085# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5086# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5087pxor %xmm9,%xmm8
5088
5089# qhasm: xmm12 &= xmm14
5090# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5091# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5092pand %xmm11,%xmm8
5093
5094# qhasm: xmm8 &= xmm15
5095# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5096# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5097pand %xmm13,%xmm9
5098
5099# qhasm: xmm8 ^= xmm12
5100# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5101# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5102pxor %xmm8,%xmm9
5103
5104# qhasm: xmm12 ^= xmm11
5105# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5106# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5107pxor %xmm10,%xmm8
5108
5109# qhasm: xmm10 = xmm13
5110# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5111# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5112movdqa %xmm15,%xmm10
5113
5114# qhasm: xmm10 ^= xmm9
5115# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5116# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5117pxor %xmm12,%xmm10
5118
5119# qhasm: xmm10 &= xmm0
5120# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
5121# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
5122pand %xmm0,%xmm10
5123
5124# qhasm: xmm0 ^= xmm4
5125# asm 1: pxor <xmm4=int6464#5,<xmm0=int6464#1
5126# asm 2: pxor <xmm4=%xmm4,<xmm0=%xmm0
5127pxor %xmm4,%xmm0
5128
5129# qhasm: xmm0 &= xmm9
5130# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
5131# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
5132pand %xmm12,%xmm0
5133
5134# qhasm: xmm4 &= xmm13
5135# asm 1: pand <xmm13=int6464#16,<xmm4=int6464#5
5136# asm 2: pand <xmm13=%xmm15,<xmm4=%xmm4
5137pand %xmm15,%xmm4
5138
5139# qhasm: xmm0 ^= xmm4
5140# asm 1: pxor <xmm4=int6464#5,<xmm0=int6464#1
5141# asm 2: pxor <xmm4=%xmm4,<xmm0=%xmm0
5142pxor %xmm4,%xmm0
5143
5144# qhasm: xmm4 ^= xmm10
5145# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
5146# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
5147pxor %xmm10,%xmm4
5148
5149# qhasm: xmm3 ^= xmm12
5150# asm 1: pxor <xmm12=int6464#9,<xmm3=int6464#4
5151# asm 2: pxor <xmm12=%xmm8,<xmm3=%xmm3
5152pxor %xmm8,%xmm3
5153
5154# qhasm: xmm0 ^= xmm12
5155# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
5156# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
5157pxor %xmm8,%xmm0
5158
5159# qhasm: xmm7 ^= xmm8
5160# asm 1: pxor <xmm8=int6464#10,<xmm7=int6464#8
5161# asm 2: pxor <xmm8=%xmm9,<xmm7=%xmm7
5162pxor %xmm9,%xmm7
5163
5164# qhasm: xmm4 ^= xmm8
5165# asm 1: pxor <xmm8=int6464#10,<xmm4=int6464#5
5166# asm 2: pxor <xmm8=%xmm9,<xmm4=%xmm4
5167pxor %xmm9,%xmm4
5168
5169# qhasm: xmm12 = xmm5
5170# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#9
5171# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm8
5172movdqa %xmm5,%xmm8
5173
5174# qhasm: xmm8 = xmm1
5175# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
5176# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
5177movdqa %xmm1,%xmm9
5178
5179# qhasm: xmm12 ^= xmm2
5180# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#9
5181# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm8
5182pxor %xmm2,%xmm8
5183
5184# qhasm: xmm8 ^= xmm6
5185# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#10
5186# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm9
5187pxor %xmm6,%xmm9
5188
5189# qhasm: xmm11 = xmm15
5190# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5191# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5192movdqa %xmm13,%xmm10
5193
5194# qhasm: xmm11 ^= xmm14
5195# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5196# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5197pxor %xmm11,%xmm10
5198
5199# qhasm: xmm11 &= xmm12
5200# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5201# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5202pand %xmm8,%xmm10
5203
5204# qhasm: xmm12 ^= xmm8
5205# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5206# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5207pxor %xmm9,%xmm8
5208
5209# qhasm: xmm12 &= xmm14
5210# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5211# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5212pand %xmm11,%xmm8
5213
5214# qhasm: xmm8 &= xmm15
5215# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5216# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5217pand %xmm13,%xmm9
5218
5219# qhasm: xmm8 ^= xmm12
5220# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5221# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5222pxor %xmm8,%xmm9
5223
5224# qhasm: xmm12 ^= xmm11
5225# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5226# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5227pxor %xmm10,%xmm8
5228
5229# qhasm: xmm10 = xmm13
5230# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5231# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5232movdqa %xmm15,%xmm10
5233
5234# qhasm: xmm10 ^= xmm9
5235# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5236# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5237pxor %xmm12,%xmm10
5238
5239# qhasm: xmm10 &= xmm2
5240# asm 1: pand <xmm2=int6464#3,<xmm10=int6464#11
5241# asm 2: pand <xmm2=%xmm2,<xmm10=%xmm10
5242pand %xmm2,%xmm10
5243
5244# qhasm: xmm2 ^= xmm6
5245# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
5246# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
5247pxor %xmm6,%xmm2
5248
5249# qhasm: xmm2 &= xmm9
5250# asm 1: pand <xmm9=int6464#13,<xmm2=int6464#3
5251# asm 2: pand <xmm9=%xmm12,<xmm2=%xmm2
5252pand %xmm12,%xmm2
5253
5254# qhasm: xmm6 &= xmm13
5255# asm 1: pand <xmm13=int6464#16,<xmm6=int6464#7
5256# asm 2: pand <xmm13=%xmm15,<xmm6=%xmm6
5257pand %xmm15,%xmm6
5258
5259# qhasm: xmm2 ^= xmm6
5260# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
5261# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
5262pxor %xmm6,%xmm2
5263
5264# qhasm: xmm6 ^= xmm10
5265# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
5266# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
5267pxor %xmm10,%xmm6
5268
5269# qhasm: xmm15 ^= xmm13
5270# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5271# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5272pxor %xmm15,%xmm13
5273
5274# qhasm: xmm14 ^= xmm9
5275# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5276# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5277pxor %xmm12,%xmm11
5278
5279# qhasm: xmm11 = xmm15
5280# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5281# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5282movdqa %xmm13,%xmm10
5283
5284# qhasm: xmm11 ^= xmm14
5285# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5286# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5287pxor %xmm11,%xmm10
5288
5289# qhasm: xmm11 &= xmm5
5290# asm 1: pand <xmm5=int6464#6,<xmm11=int6464#11
5291# asm 2: pand <xmm5=%xmm5,<xmm11=%xmm10
5292pand %xmm5,%xmm10
5293
5294# qhasm: xmm5 ^= xmm1
5295# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
5296# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
5297pxor %xmm1,%xmm5
5298
5299# qhasm: xmm5 &= xmm14
5300# asm 1: pand <xmm14=int6464#12,<xmm5=int6464#6
5301# asm 2: pand <xmm14=%xmm11,<xmm5=%xmm5
5302pand %xmm11,%xmm5
5303
5304# qhasm: xmm1 &= xmm15
5305# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
5306# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
5307pand %xmm13,%xmm1
5308
5309# qhasm: xmm5 ^= xmm1
5310# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
5311# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
5312pxor %xmm1,%xmm5
5313
5314# qhasm: xmm1 ^= xmm11
5315# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
5316# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
5317pxor %xmm10,%xmm1
5318
5319# qhasm: xmm5 ^= xmm12
5320# asm 1: pxor <xmm12=int6464#9,<xmm5=int6464#6
5321# asm 2: pxor <xmm12=%xmm8,<xmm5=%xmm5
5322pxor %xmm8,%xmm5
5323
5324# qhasm: xmm2 ^= xmm12
5325# asm 1: pxor <xmm12=int6464#9,<xmm2=int6464#3
5326# asm 2: pxor <xmm12=%xmm8,<xmm2=%xmm2
5327pxor %xmm8,%xmm2
5328
5329# qhasm: xmm1 ^= xmm8
5330# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
5331# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
5332pxor %xmm9,%xmm1
5333
5334# qhasm: xmm6 ^= xmm8
5335# asm 1: pxor <xmm8=int6464#10,<xmm6=int6464#7
5336# asm 2: pxor <xmm8=%xmm9,<xmm6=%xmm6
5337pxor %xmm9,%xmm6
5338
5339# qhasm: xmm5 ^= xmm0
5340# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
5341# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
5342pxor %xmm0,%xmm5
5343
5344# qhasm: xmm1 ^= xmm3
5345# asm 1: pxor <xmm3=int6464#4,<xmm1=int6464#2
5346# asm 2: pxor <xmm3=%xmm3,<xmm1=%xmm1
5347pxor %xmm3,%xmm1
5348
5349# qhasm: xmm2 ^= xmm5
5350# asm 1: pxor <xmm5=int6464#6,<xmm2=int6464#3
5351# asm 2: pxor <xmm5=%xmm5,<xmm2=%xmm2
5352pxor %xmm5,%xmm2
5353
5354# qhasm: xmm3 ^= xmm0
5355# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
5356# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
5357pxor %xmm0,%xmm3
5358
5359# qhasm: xmm0 ^= xmm1
5360# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
5361# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
5362pxor %xmm1,%xmm0
5363
5364# qhasm: xmm1 ^= xmm7
5365# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
5366# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
5367pxor %xmm7,%xmm1
5368
5369# qhasm: xmm7 ^= xmm6
5370# asm 1: pxor <xmm6=int6464#7,<xmm7=int6464#8
5371# asm 2: pxor <xmm6=%xmm6,<xmm7=%xmm7
5372pxor %xmm6,%xmm7
5373
5374# qhasm: xmm2 ^= xmm7
5375# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
5376# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
5377pxor %xmm7,%xmm2
5378
5379# qhasm: xmm6 ^= xmm4
5380# asm 1: pxor <xmm4=int6464#5,<xmm6=int6464#7
5381# asm 2: pxor <xmm4=%xmm4,<xmm6=%xmm6
5382pxor %xmm4,%xmm6
5383
5384# qhasm: xmm4 ^= xmm7
5385# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
5386# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
5387pxor %xmm7,%xmm4
5388
5389# qhasm: xmm3 ^= xmm4
5390# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
5391# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
5392pxor %xmm4,%xmm3
5393
5394# qhasm: xmm3 ^= RCON
5395# asm 1: pxor RCON,<xmm3=int6464#4
5396# asm 2: pxor RCON,<xmm3=%xmm3
5397pxor RCON,%xmm3
5398
5399# qhasm: shuffle bytes of xmm0 by EXPB0
5400# asm 1: pshufb EXPB0,<xmm0=int6464#1
5401# asm 2: pshufb EXPB0,<xmm0=%xmm0
5402pshufb EXPB0,%xmm0
5403
5404# qhasm: shuffle bytes of xmm1 by EXPB0
5405# asm 1: pshufb EXPB0,<xmm1=int6464#2
5406# asm 2: pshufb EXPB0,<xmm1=%xmm1
5407pshufb EXPB0,%xmm1
5408
5409# qhasm: shuffle bytes of xmm2 by EXPB0
5410# asm 1: pshufb EXPB0,<xmm2=int6464#3
5411# asm 2: pshufb EXPB0,<xmm2=%xmm2
5412pshufb EXPB0,%xmm2
5413
5414# qhasm: shuffle bytes of xmm3 by EXPB0
5415# asm 1: pshufb EXPB0,<xmm3=int6464#4
5416# asm 2: pshufb EXPB0,<xmm3=%xmm3
5417pshufb EXPB0,%xmm3
5418
5419# qhasm: shuffle bytes of xmm4 by EXPB0
5420# asm 1: pshufb EXPB0,<xmm4=int6464#5
5421# asm 2: pshufb EXPB0,<xmm4=%xmm4
5422pshufb EXPB0,%xmm4
5423
5424# qhasm: shuffle bytes of xmm5 by EXPB0
5425# asm 1: pshufb EXPB0,<xmm5=int6464#6
5426# asm 2: pshufb EXPB0,<xmm5=%xmm5
5427pshufb EXPB0,%xmm5
5428
5429# qhasm: shuffle bytes of xmm6 by EXPB0
5430# asm 1: pshufb EXPB0,<xmm6=int6464#7
5431# asm 2: pshufb EXPB0,<xmm6=%xmm6
5432pshufb EXPB0,%xmm6
5433
5434# qhasm: shuffle bytes of xmm7 by EXPB0
5435# asm 1: pshufb EXPB0,<xmm7=int6464#8
5436# asm 2: pshufb EXPB0,<xmm7=%xmm7
5437pshufb EXPB0,%xmm7
5438
5439# qhasm: xmm8 = *(int128 *)(c + 384)
5440# asm 1: movdqa 384(<c=int64#1),>xmm8=int6464#9
5441# asm 2: movdqa 384(<c=%rdi),>xmm8=%xmm8
5442movdqa 384(%rdi),%xmm8
5443
5444# qhasm: xmm9 = *(int128 *)(c + 400)
5445# asm 1: movdqa 400(<c=int64#1),>xmm9=int6464#10
5446# asm 2: movdqa 400(<c=%rdi),>xmm9=%xmm9
5447movdqa 400(%rdi),%xmm9
5448
5449# qhasm: xmm10 = *(int128 *)(c + 416)
5450# asm 1: movdqa 416(<c=int64#1),>xmm10=int6464#11
5451# asm 2: movdqa 416(<c=%rdi),>xmm10=%xmm10
5452movdqa 416(%rdi),%xmm10
5453
5454# qhasm: xmm11 = *(int128 *)(c + 432)
5455# asm 1: movdqa 432(<c=int64#1),>xmm11=int6464#12
5456# asm 2: movdqa 432(<c=%rdi),>xmm11=%xmm11
5457movdqa 432(%rdi),%xmm11
5458
5459# qhasm: xmm12 = *(int128 *)(c + 448)
5460# asm 1: movdqa 448(<c=int64#1),>xmm12=int6464#13
5461# asm 2: movdqa 448(<c=%rdi),>xmm12=%xmm12
5462movdqa 448(%rdi),%xmm12
5463
5464# qhasm: xmm13 = *(int128 *)(c + 464)
5465# asm 1: movdqa 464(<c=int64#1),>xmm13=int6464#14
5466# asm 2: movdqa 464(<c=%rdi),>xmm13=%xmm13
5467movdqa 464(%rdi),%xmm13
5468
5469# qhasm: xmm14 = *(int128 *)(c + 480)
5470# asm 1: movdqa 480(<c=int64#1),>xmm14=int6464#15
5471# asm 2: movdqa 480(<c=%rdi),>xmm14=%xmm14
5472movdqa 480(%rdi),%xmm14
5473
5474# qhasm: xmm15 = *(int128 *)(c + 496)
5475# asm 1: movdqa 496(<c=int64#1),>xmm15=int6464#16
5476# asm 2: movdqa 496(<c=%rdi),>xmm15=%xmm15
5477movdqa 496(%rdi),%xmm15
5478
5479# qhasm: xmm8 ^= ONE
5480# asm 1: pxor ONE,<xmm8=int6464#9
5481# asm 2: pxor ONE,<xmm8=%xmm8
5482pxor ONE,%xmm8
5483
5484# qhasm: xmm9 ^= ONE
5485# asm 1: pxor ONE,<xmm9=int6464#10
5486# asm 2: pxor ONE,<xmm9=%xmm9
5487pxor ONE,%xmm9
5488
5489# qhasm: xmm13 ^= ONE
5490# asm 1: pxor ONE,<xmm13=int6464#14
5491# asm 2: pxor ONE,<xmm13=%xmm13
5492pxor ONE,%xmm13
5493
5494# qhasm: xmm14 ^= ONE
5495# asm 1: pxor ONE,<xmm14=int6464#15
5496# asm 2: pxor ONE,<xmm14=%xmm14
5497pxor ONE,%xmm14
5498
5499# qhasm: xmm0 ^= xmm8
5500# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5501# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5502pxor %xmm8,%xmm0
5503
5504# qhasm: xmm1 ^= xmm9
5505# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5506# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5507pxor %xmm9,%xmm1
5508
5509# qhasm: xmm2 ^= xmm10
5510# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5511# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5512pxor %xmm10,%xmm2
5513
5514# qhasm: xmm3 ^= xmm11
5515# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
5516# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
5517pxor %xmm11,%xmm3
5518
5519# qhasm: xmm4 ^= xmm12
5520# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
5521# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
5522pxor %xmm12,%xmm4
5523
5524# qhasm: xmm5 ^= xmm13
5525# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
5526# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
5527pxor %xmm13,%xmm5
5528
5529# qhasm: xmm6 ^= xmm14
5530# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
5531# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
5532pxor %xmm14,%xmm6
5533
5534# qhasm: xmm7 ^= xmm15
5535# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
5536# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
5537pxor %xmm15,%xmm7
5538
5539# qhasm: uint32323232 xmm8 >>= 8
5540# asm 1: psrld $8,<xmm8=int6464#9
5541# asm 2: psrld $8,<xmm8=%xmm8
5542psrld $8,%xmm8
5543
5544# qhasm: uint32323232 xmm9 >>= 8
5545# asm 1: psrld $8,<xmm9=int6464#10
5546# asm 2: psrld $8,<xmm9=%xmm9
5547psrld $8,%xmm9
5548
5549# qhasm: uint32323232 xmm10 >>= 8
5550# asm 1: psrld $8,<xmm10=int6464#11
5551# asm 2: psrld $8,<xmm10=%xmm10
5552psrld $8,%xmm10
5553
5554# qhasm: uint32323232 xmm11 >>= 8
5555# asm 1: psrld $8,<xmm11=int6464#12
5556# asm 2: psrld $8,<xmm11=%xmm11
5557psrld $8,%xmm11
5558
5559# qhasm: uint32323232 xmm12 >>= 8
5560# asm 1: psrld $8,<xmm12=int6464#13
5561# asm 2: psrld $8,<xmm12=%xmm12
5562psrld $8,%xmm12
5563
5564# qhasm: uint32323232 xmm13 >>= 8
5565# asm 1: psrld $8,<xmm13=int6464#14
5566# asm 2: psrld $8,<xmm13=%xmm13
5567psrld $8,%xmm13
5568
5569# qhasm: uint32323232 xmm14 >>= 8
5570# asm 1: psrld $8,<xmm14=int6464#15
5571# asm 2: psrld $8,<xmm14=%xmm14
5572psrld $8,%xmm14
5573
5574# qhasm: uint32323232 xmm15 >>= 8
5575# asm 1: psrld $8,<xmm15=int6464#16
5576# asm 2: psrld $8,<xmm15=%xmm15
5577psrld $8,%xmm15
5578
5579# qhasm: xmm0 ^= xmm8
5580# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5581# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5582pxor %xmm8,%xmm0
5583
5584# qhasm: xmm1 ^= xmm9
5585# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5586# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5587pxor %xmm9,%xmm1
5588
5589# qhasm: xmm2 ^= xmm10
5590# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5591# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5592pxor %xmm10,%xmm2
5593
5594# qhasm: xmm3 ^= xmm11
5595# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
5596# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
5597pxor %xmm11,%xmm3
5598
5599# qhasm: xmm4 ^= xmm12
5600# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
5601# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
5602pxor %xmm12,%xmm4
5603
5604# qhasm: xmm5 ^= xmm13
5605# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
5606# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
5607pxor %xmm13,%xmm5
5608
5609# qhasm: xmm6 ^= xmm14
5610# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
5611# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
5612pxor %xmm14,%xmm6
5613
5614# qhasm: xmm7 ^= xmm15
5615# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
5616# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
5617pxor %xmm15,%xmm7
5618
5619# qhasm: uint32323232 xmm8 >>= 8
5620# asm 1: psrld $8,<xmm8=int6464#9
5621# asm 2: psrld $8,<xmm8=%xmm8
5622psrld $8,%xmm8
5623
5624# qhasm: uint32323232 xmm9 >>= 8
5625# asm 1: psrld $8,<xmm9=int6464#10
5626# asm 2: psrld $8,<xmm9=%xmm9
5627psrld $8,%xmm9
5628
5629# qhasm: uint32323232 xmm10 >>= 8
5630# asm 1: psrld $8,<xmm10=int6464#11
5631# asm 2: psrld $8,<xmm10=%xmm10
5632psrld $8,%xmm10
5633
5634# qhasm: uint32323232 xmm11 >>= 8
5635# asm 1: psrld $8,<xmm11=int6464#12
5636# asm 2: psrld $8,<xmm11=%xmm11
5637psrld $8,%xmm11
5638
5639# qhasm: uint32323232 xmm12 >>= 8
5640# asm 1: psrld $8,<xmm12=int6464#13
5641# asm 2: psrld $8,<xmm12=%xmm12
5642psrld $8,%xmm12
5643
5644# qhasm: uint32323232 xmm13 >>= 8
5645# asm 1: psrld $8,<xmm13=int6464#14
5646# asm 2: psrld $8,<xmm13=%xmm13
5647psrld $8,%xmm13
5648
5649# qhasm: uint32323232 xmm14 >>= 8
5650# asm 1: psrld $8,<xmm14=int6464#15
5651# asm 2: psrld $8,<xmm14=%xmm14
5652psrld $8,%xmm14
5653
5654# qhasm: uint32323232 xmm15 >>= 8
5655# asm 1: psrld $8,<xmm15=int6464#16
5656# asm 2: psrld $8,<xmm15=%xmm15
5657psrld $8,%xmm15
5658
5659# qhasm: xmm0 ^= xmm8
5660# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5661# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5662pxor %xmm8,%xmm0
5663
5664# qhasm: xmm1 ^= xmm9
5665# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5666# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5667pxor %xmm9,%xmm1
5668
5669# qhasm: xmm2 ^= xmm10
5670# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5671# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5672pxor %xmm10,%xmm2
5673
5674# qhasm: xmm3 ^= xmm11
5675# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
5676# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
5677pxor %xmm11,%xmm3
5678
5679# qhasm: xmm4 ^= xmm12
5680# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
5681# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
5682pxor %xmm12,%xmm4
5683
5684# qhasm: xmm5 ^= xmm13
5685# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
5686# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
5687pxor %xmm13,%xmm5
5688
5689# qhasm: xmm6 ^= xmm14
5690# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
5691# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
5692pxor %xmm14,%xmm6
5693
5694# qhasm: xmm7 ^= xmm15
5695# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
5696# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
5697pxor %xmm15,%xmm7
5698
5699# qhasm: uint32323232 xmm8 >>= 8
5700# asm 1: psrld $8,<xmm8=int6464#9
5701# asm 2: psrld $8,<xmm8=%xmm8
5702psrld $8,%xmm8
5703
5704# qhasm: uint32323232 xmm9 >>= 8
5705# asm 1: psrld $8,<xmm9=int6464#10
5706# asm 2: psrld $8,<xmm9=%xmm9
5707psrld $8,%xmm9
5708
5709# qhasm: uint32323232 xmm10 >>= 8
5710# asm 1: psrld $8,<xmm10=int6464#11
5711# asm 2: psrld $8,<xmm10=%xmm10
5712psrld $8,%xmm10
5713
5714# qhasm: uint32323232 xmm11 >>= 8
5715# asm 1: psrld $8,<xmm11=int6464#12
5716# asm 2: psrld $8,<xmm11=%xmm11
5717psrld $8,%xmm11
5718
5719# qhasm: uint32323232 xmm12 >>= 8
5720# asm 1: psrld $8,<xmm12=int6464#13
5721# asm 2: psrld $8,<xmm12=%xmm12
5722psrld $8,%xmm12
5723
5724# qhasm: uint32323232 xmm13 >>= 8
5725# asm 1: psrld $8,<xmm13=int6464#14
5726# asm 2: psrld $8,<xmm13=%xmm13
5727psrld $8,%xmm13
5728
5729# qhasm: uint32323232 xmm14 >>= 8
5730# asm 1: psrld $8,<xmm14=int6464#15
5731# asm 2: psrld $8,<xmm14=%xmm14
5732psrld $8,%xmm14
5733
5734# qhasm: uint32323232 xmm15 >>= 8
5735# asm 1: psrld $8,<xmm15=int6464#16
5736# asm 2: psrld $8,<xmm15=%xmm15
5737psrld $8,%xmm15
5738
5739# qhasm: xmm0 ^= xmm8
5740# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5741# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5742pxor %xmm8,%xmm0
5743
5744# qhasm: xmm1 ^= xmm9
5745# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5746# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5747pxor %xmm9,%xmm1
5748
5749# qhasm: xmm2 ^= xmm10
5750# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5751# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5752pxor %xmm10,%xmm2
5753
5754# qhasm: xmm3 ^= xmm11
5755# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
5756# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
5757pxor %xmm11,%xmm3
5758
5759# qhasm: xmm4 ^= xmm12
5760# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
5761# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
5762pxor %xmm12,%xmm4
5763
5764# qhasm: xmm5 ^= xmm13
5765# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
5766# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
5767pxor %xmm13,%xmm5
5768
5769# qhasm: xmm6 ^= xmm14
5770# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
5771# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
5772pxor %xmm14,%xmm6
5773
5774# qhasm: xmm7 ^= xmm15
5775# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
5776# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
5777pxor %xmm15,%xmm7
5778
5779# qhasm: *(int128 *)(c + 512) = xmm0
5780# asm 1: movdqa <xmm0=int6464#1,512(<c=int64#1)
5781# asm 2: movdqa <xmm0=%xmm0,512(<c=%rdi)
5782movdqa %xmm0,512(%rdi)
5783
5784# qhasm: *(int128 *)(c + 528) = xmm1
5785# asm 1: movdqa <xmm1=int6464#2,528(<c=int64#1)
5786# asm 2: movdqa <xmm1=%xmm1,528(<c=%rdi)
5787movdqa %xmm1,528(%rdi)
5788
5789# qhasm: *(int128 *)(c + 544) = xmm2
5790# asm 1: movdqa <xmm2=int6464#3,544(<c=int64#1)
5791# asm 2: movdqa <xmm2=%xmm2,544(<c=%rdi)
5792movdqa %xmm2,544(%rdi)
5793
5794# qhasm: *(int128 *)(c + 560) = xmm3
5795# asm 1: movdqa <xmm3=int6464#4,560(<c=int64#1)
5796# asm 2: movdqa <xmm3=%xmm3,560(<c=%rdi)
5797movdqa %xmm3,560(%rdi)
5798
5799# qhasm: *(int128 *)(c + 576) = xmm4
5800# asm 1: movdqa <xmm4=int6464#5,576(<c=int64#1)
5801# asm 2: movdqa <xmm4=%xmm4,576(<c=%rdi)
5802movdqa %xmm4,576(%rdi)
5803
5804# qhasm: *(int128 *)(c + 592) = xmm5
5805# asm 1: movdqa <xmm5=int6464#6,592(<c=int64#1)
5806# asm 2: movdqa <xmm5=%xmm5,592(<c=%rdi)
5807movdqa %xmm5,592(%rdi)
5808
5809# qhasm: *(int128 *)(c + 608) = xmm6
5810# asm 1: movdqa <xmm6=int6464#7,608(<c=int64#1)
5811# asm 2: movdqa <xmm6=%xmm6,608(<c=%rdi)
5812movdqa %xmm6,608(%rdi)
5813
5814# qhasm: *(int128 *)(c + 624) = xmm7
5815# asm 1: movdqa <xmm7=int6464#8,624(<c=int64#1)
5816# asm 2: movdqa <xmm7=%xmm7,624(<c=%rdi)
5817movdqa %xmm7,624(%rdi)
5818
5819# qhasm: xmm0 ^= ONE
5820# asm 1: pxor ONE,<xmm0=int6464#1
5821# asm 2: pxor ONE,<xmm0=%xmm0
5822pxor ONE,%xmm0
5823
5824# qhasm: xmm1 ^= ONE
5825# asm 1: pxor ONE,<xmm1=int6464#2
5826# asm 2: pxor ONE,<xmm1=%xmm1
5827pxor ONE,%xmm1
5828
5829# qhasm: xmm5 ^= ONE
5830# asm 1: pxor ONE,<xmm5=int6464#6
5831# asm 2: pxor ONE,<xmm5=%xmm5
5832pxor ONE,%xmm5
5833
5834# qhasm: xmm6 ^= ONE
5835# asm 1: pxor ONE,<xmm6=int6464#7
5836# asm 2: pxor ONE,<xmm6=%xmm6
5837pxor ONE,%xmm6
5838
5839# qhasm: shuffle bytes of xmm0 by ROTB
5840# asm 1: pshufb ROTB,<xmm0=int6464#1
5841# asm 2: pshufb ROTB,<xmm0=%xmm0
5842pshufb ROTB,%xmm0
5843
5844# qhasm: shuffle bytes of xmm1 by ROTB
5845# asm 1: pshufb ROTB,<xmm1=int6464#2
5846# asm 2: pshufb ROTB,<xmm1=%xmm1
5847pshufb ROTB,%xmm1
5848
5849# qhasm: shuffle bytes of xmm2 by ROTB
5850# asm 1: pshufb ROTB,<xmm2=int6464#3
5851# asm 2: pshufb ROTB,<xmm2=%xmm2
5852pshufb ROTB,%xmm2
5853
5854# qhasm: shuffle bytes of xmm3 by ROTB
5855# asm 1: pshufb ROTB,<xmm3=int6464#4
5856# asm 2: pshufb ROTB,<xmm3=%xmm3
5857pshufb ROTB,%xmm3
5858
5859# qhasm: shuffle bytes of xmm4 by ROTB
5860# asm 1: pshufb ROTB,<xmm4=int6464#5
5861# asm 2: pshufb ROTB,<xmm4=%xmm4
5862pshufb ROTB,%xmm4
5863
5864# qhasm: shuffle bytes of xmm5 by ROTB
5865# asm 1: pshufb ROTB,<xmm5=int6464#6
5866# asm 2: pshufb ROTB,<xmm5=%xmm5
5867pshufb ROTB,%xmm5
5868
5869# qhasm: shuffle bytes of xmm6 by ROTB
5870# asm 1: pshufb ROTB,<xmm6=int6464#7
5871# asm 2: pshufb ROTB,<xmm6=%xmm6
5872pshufb ROTB,%xmm6
5873
5874# qhasm: shuffle bytes of xmm7 by ROTB
5875# asm 1: pshufb ROTB,<xmm7=int6464#8
5876# asm 2: pshufb ROTB,<xmm7=%xmm7
5877pshufb ROTB,%xmm7
5878
5879# qhasm: xmm5 ^= xmm6
5880# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
5881# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
5882pxor %xmm6,%xmm5
5883
5884# qhasm: xmm2 ^= xmm1
5885# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
5886# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
5887pxor %xmm1,%xmm2
5888
5889# qhasm: xmm5 ^= xmm0
5890# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
5891# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
5892pxor %xmm0,%xmm5
5893
5894# qhasm: xmm6 ^= xmm2
5895# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
5896# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
5897pxor %xmm2,%xmm6
5898
5899# qhasm: xmm3 ^= xmm0
5900# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
5901# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
5902pxor %xmm0,%xmm3
5903
5904# qhasm: xmm6 ^= xmm3
5905# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
5906# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
5907pxor %xmm3,%xmm6
5908
5909# qhasm: xmm3 ^= xmm7
5910# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
5911# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
5912pxor %xmm7,%xmm3
5913
5914# qhasm: xmm3 ^= xmm4
5915# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
5916# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
5917pxor %xmm4,%xmm3
5918
5919# qhasm: xmm7 ^= xmm5
5920# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
5921# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
5922pxor %xmm5,%xmm7
5923
5924# qhasm: xmm3 ^= xmm1
5925# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
5926# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
5927pxor %xmm1,%xmm3
5928
5929# qhasm: xmm4 ^= xmm5
5930# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
5931# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
5932pxor %xmm5,%xmm4
5933
5934# qhasm: xmm2 ^= xmm7
5935# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
5936# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
5937pxor %xmm7,%xmm2
5938
5939# qhasm: xmm1 ^= xmm5
5940# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
5941# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
5942pxor %xmm5,%xmm1
5943
5944# qhasm: xmm11 = xmm7
5945# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
5946# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
5947movdqa %xmm7,%xmm8
5948
5949# qhasm: xmm10 = xmm1
5950# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
5951# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
5952movdqa %xmm1,%xmm9
5953
5954# qhasm: xmm9 = xmm5
5955# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
5956# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
5957movdqa %xmm5,%xmm10
5958
5959# qhasm: xmm13 = xmm2
5960# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
5961# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
5962movdqa %xmm2,%xmm11
5963
5964# qhasm: xmm12 = xmm6
5965# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
5966# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
5967movdqa %xmm6,%xmm12
5968
5969# qhasm: xmm11 ^= xmm4
5970# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
5971# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
5972pxor %xmm4,%xmm8
5973
5974# qhasm: xmm10 ^= xmm2
5975# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
5976# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
5977pxor %xmm2,%xmm9
5978
5979# qhasm: xmm9 ^= xmm3
5980# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
5981# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
5982pxor %xmm3,%xmm10
5983
5984# qhasm: xmm13 ^= xmm4
5985# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
5986# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
5987pxor %xmm4,%xmm11
5988
5989# qhasm: xmm12 ^= xmm0
5990# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
5991# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
5992pxor %xmm0,%xmm12
5993
5994# qhasm: xmm14 = xmm11
5995# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
5996# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
5997movdqa %xmm8,%xmm13
5998
5999# qhasm: xmm8 = xmm10
6000# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
6001# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
6002movdqa %xmm9,%xmm14
6003
6004# qhasm: xmm15 = xmm11
6005# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
6006# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
6007movdqa %xmm8,%xmm15
6008
6009# qhasm: xmm10 |= xmm9
6010# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
6011# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
6012por %xmm10,%xmm9
6013
6014# qhasm: xmm11 |= xmm12
6015# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
6016# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
6017por %xmm12,%xmm8
6018
6019# qhasm: xmm15 ^= xmm8
6020# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
6021# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
6022pxor %xmm14,%xmm15
6023
6024# qhasm: xmm14 &= xmm12
6025# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
6026# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
6027pand %xmm12,%xmm13
6028
6029# qhasm: xmm8 &= xmm9
6030# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
6031# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
6032pand %xmm10,%xmm14
6033
6034# qhasm: xmm12 ^= xmm9
6035# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
6036# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
6037pxor %xmm10,%xmm12
6038
6039# qhasm: xmm15 &= xmm12
6040# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
6041# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
6042pand %xmm12,%xmm15
6043
6044# qhasm: xmm12 = xmm3
6045# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
6046# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
6047movdqa %xmm3,%xmm10
6048
6049# qhasm: xmm12 ^= xmm0
6050# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
6051# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
6052pxor %xmm0,%xmm10
6053
6054# qhasm: xmm13 &= xmm12
6055# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
6056# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
6057pand %xmm10,%xmm11
6058
6059# qhasm: xmm11 ^= xmm13
6060# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
6061# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
6062pxor %xmm11,%xmm8
6063
6064# qhasm: xmm10 ^= xmm13
6065# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
6066# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
6067pxor %xmm11,%xmm9
6068
6069# qhasm: xmm13 = xmm7
6070# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
6071# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
6072movdqa %xmm7,%xmm10
6073
6074# qhasm: xmm13 ^= xmm1
6075# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
6076# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
6077pxor %xmm1,%xmm10
6078
6079# qhasm: xmm12 = xmm5
6080# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
6081# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
6082movdqa %xmm5,%xmm11
6083
6084# qhasm: xmm9 = xmm13
6085# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
6086# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
6087movdqa %xmm10,%xmm12
6088
6089# qhasm: xmm12 ^= xmm6
6090# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
6091# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
6092pxor %xmm6,%xmm11
6093
6094# qhasm: xmm9 |= xmm12
6095# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
6096# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
6097por %xmm11,%xmm12
6098
6099# qhasm: xmm13 &= xmm12
6100# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
6101# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
6102pand %xmm11,%xmm10
6103
6104# qhasm: xmm8 ^= xmm13
6105# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
6106# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
6107pxor %xmm10,%xmm14
6108
6109# qhasm: xmm11 ^= xmm15
6110# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
6111# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
6112pxor %xmm15,%xmm8
6113
6114# qhasm: xmm10 ^= xmm14
6115# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
6116# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
6117pxor %xmm13,%xmm9
6118
6119# qhasm: xmm9 ^= xmm15
6120# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
6121# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
6122pxor %xmm15,%xmm12
6123
6124# qhasm: xmm8 ^= xmm14
6125# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
6126# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
6127pxor %xmm13,%xmm14
6128
6129# qhasm: xmm9 ^= xmm14
6130# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
6131# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
6132pxor %xmm13,%xmm12
6133
6134# qhasm: xmm12 = xmm2
6135# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
6136# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
6137movdqa %xmm2,%xmm10
6138
6139# qhasm: xmm13 = xmm4
6140# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
6141# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
6142movdqa %xmm4,%xmm11
6143
6144# qhasm: xmm14 = xmm1
6145# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
6146# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
6147movdqa %xmm1,%xmm13
6148
6149# qhasm: xmm15 = xmm7
6150# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
6151# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
6152movdqa %xmm7,%xmm15
6153
6154# qhasm: xmm12 &= xmm3
6155# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
6156# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
6157pand %xmm3,%xmm10
6158
6159# qhasm: xmm13 &= xmm0
6160# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
6161# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
6162pand %xmm0,%xmm11
6163
6164# qhasm: xmm14 &= xmm5
6165# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
6166# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
6167pand %xmm5,%xmm13
6168
6169# qhasm: xmm15 |= xmm6
6170# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
6171# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
6172por %xmm6,%xmm15
6173
6174# qhasm: xmm11 ^= xmm12
6175# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
6176# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
6177pxor %xmm10,%xmm8
6178
6179# qhasm: xmm10 ^= xmm13
6180# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
6181# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
6182pxor %xmm11,%xmm9
6183
6184# qhasm: xmm9 ^= xmm14
6185# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
6186# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
6187pxor %xmm13,%xmm12
6188
6189# qhasm: xmm8 ^= xmm15
6190# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
6191# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
6192pxor %xmm15,%xmm14
6193
6194# qhasm: xmm12 = xmm11
6195# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
6196# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
6197movdqa %xmm8,%xmm10
6198
6199# qhasm: xmm12 ^= xmm10
6200# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
6201# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
6202pxor %xmm9,%xmm10
6203
6204# qhasm: xmm11 &= xmm9
6205# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
6206# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
6207pand %xmm12,%xmm8
6208
6209# qhasm: xmm14 = xmm8
6210# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
6211# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
6212movdqa %xmm14,%xmm11
6213
6214# qhasm: xmm14 ^= xmm11
6215# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
6216# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
6217pxor %xmm8,%xmm11
6218
6219# qhasm: xmm15 = xmm12
6220# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
6221# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
6222movdqa %xmm10,%xmm13
6223
6224# qhasm: xmm15 &= xmm14
6225# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
6226# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
6227pand %xmm11,%xmm13
6228
6229# qhasm: xmm15 ^= xmm10
6230# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
6231# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
6232pxor %xmm9,%xmm13
6233
6234# qhasm: xmm13 = xmm9
6235# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
6236# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
6237movdqa %xmm12,%xmm15
6238
6239# qhasm: xmm13 ^= xmm8
6240# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
6241# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
6242pxor %xmm14,%xmm15
6243
6244# qhasm: xmm11 ^= xmm10
6245# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
6246# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
6247pxor %xmm9,%xmm8
6248
6249# qhasm: xmm13 &= xmm11
6250# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
6251# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
6252pand %xmm8,%xmm15
6253
6254# qhasm: xmm13 ^= xmm8
6255# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
6256# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
6257pxor %xmm14,%xmm15
6258
6259# qhasm: xmm9 ^= xmm13
6260# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
6261# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
6262pxor %xmm15,%xmm12
6263
6264# qhasm: xmm10 = xmm14
6265# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
6266# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
6267movdqa %xmm11,%xmm8
6268
6269# qhasm: xmm10 ^= xmm13
6270# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
6271# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
6272pxor %xmm15,%xmm8
6273
6274# qhasm: xmm10 &= xmm8
6275# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
6276# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
6277pand %xmm14,%xmm8
6278
6279# qhasm: xmm9 ^= xmm10
6280# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
6281# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
6282pxor %xmm8,%xmm12
6283
6284# qhasm: xmm14 ^= xmm10
6285# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
6286# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
6287pxor %xmm8,%xmm11
6288
6289# qhasm: xmm14 &= xmm15
6290# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
6291# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
6292pand %xmm13,%xmm11
6293
6294# qhasm: xmm14 ^= xmm12
6295# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
6296# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
6297pxor %xmm10,%xmm11
6298
6299# qhasm: xmm12 = xmm6
6300# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
6301# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
6302movdqa %xmm6,%xmm8
6303
6304# qhasm: xmm8 = xmm5
6305# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
6306# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
6307movdqa %xmm5,%xmm9
6308
6309# qhasm: xmm10 = xmm15
6310# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
6311# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
6312movdqa %xmm13,%xmm10
6313
6314# qhasm: xmm10 ^= xmm14
6315# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
6316# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
6317pxor %xmm11,%xmm10
6318
6319# qhasm: xmm10 &= xmm6
6320# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
6321# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
6322pand %xmm6,%xmm10
6323
6324# qhasm: xmm6 ^= xmm5
6325# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
6326# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
6327pxor %xmm5,%xmm6
6328
6329# qhasm: xmm6 &= xmm14
6330# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
6331# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
6332pand %xmm11,%xmm6
6333
6334# qhasm: xmm5 &= xmm15
6335# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
6336# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
6337pand %xmm13,%xmm5
6338
6339# qhasm: xmm6 ^= xmm5
6340# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
6341# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
6342pxor %xmm5,%xmm6
6343
6344# qhasm: xmm5 ^= xmm10
6345# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
6346# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
6347pxor %xmm10,%xmm5
6348
6349# qhasm: xmm12 ^= xmm0
6350# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
6351# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
6352pxor %xmm0,%xmm8
6353
6354# qhasm: xmm8 ^= xmm3
6355# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
6356# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
6357pxor %xmm3,%xmm9
6358
6359# qhasm: xmm15 ^= xmm13
6360# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
6361# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
6362pxor %xmm15,%xmm13
6363
6364# qhasm: xmm14 ^= xmm9
6365# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
6366# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
6367pxor %xmm12,%xmm11
6368
6369# qhasm: xmm11 = xmm15
6370# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
6371# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
6372movdqa %xmm13,%xmm10
6373
6374# qhasm: xmm11 ^= xmm14
6375# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
6376# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
6377pxor %xmm11,%xmm10
6378
6379# qhasm: xmm11 &= xmm12
6380# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
6381# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
6382pand %xmm8,%xmm10
6383
6384# qhasm: xmm12 ^= xmm8
6385# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
6386# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
6387pxor %xmm9,%xmm8
6388
6389# qhasm: xmm12 &= xmm14
6390# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
6391# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
6392pand %xmm11,%xmm8
6393
6394# qhasm: xmm8 &= xmm15
6395# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
6396# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
6397pand %xmm13,%xmm9
6398
6399# qhasm: xmm8 ^= xmm12
6400# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
6401# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
6402pxor %xmm8,%xmm9
6403
6404# qhasm: xmm12 ^= xmm11
6405# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
6406# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
6407pxor %xmm10,%xmm8
6408
6409# qhasm: xmm10 = xmm13
6410# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
6411# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
6412movdqa %xmm15,%xmm10
6413
6414# qhasm: xmm10 ^= xmm9
6415# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
6416# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
6417pxor %xmm12,%xmm10
6418
6419# qhasm: xmm10 &= xmm0
6420# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
6421# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
6422pand %xmm0,%xmm10
6423
6424# qhasm: xmm0 ^= xmm3
6425# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
6426# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
6427pxor %xmm3,%xmm0
6428
6429# qhasm: xmm0 &= xmm9
6430# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
6431# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
6432pand %xmm12,%xmm0
6433
6434# qhasm: xmm3 &= xmm13
6435# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
6436# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
6437pand %xmm15,%xmm3
6438
6439# qhasm: xmm0 ^= xmm3
6440# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
6441# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
6442pxor %xmm3,%xmm0
6443
6444# qhasm: xmm3 ^= xmm10
6445# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
6446# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
6447pxor %xmm10,%xmm3
6448
6449# qhasm: xmm6 ^= xmm12
6450# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
6451# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
6452pxor %xmm8,%xmm6
6453
6454# qhasm: xmm0 ^= xmm12
6455# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
6456# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
6457pxor %xmm8,%xmm0
6458
6459# qhasm: xmm5 ^= xmm8
6460# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
6461# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
6462pxor %xmm9,%xmm5
6463
6464# qhasm: xmm3 ^= xmm8
6465# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
6466# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
6467pxor %xmm9,%xmm3
6468
6469# qhasm: xmm12 = xmm7
6470# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
6471# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
6472movdqa %xmm7,%xmm8
6473
6474# qhasm: xmm8 = xmm1
6475# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
6476# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
6477movdqa %xmm1,%xmm9
6478
6479# qhasm: xmm12 ^= xmm4
6480# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
6481# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
6482pxor %xmm4,%xmm8
6483
6484# qhasm: xmm8 ^= xmm2
6485# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
6486# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
6487pxor %xmm2,%xmm9
6488
6489# qhasm: xmm11 = xmm15
6490# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
6491# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
6492movdqa %xmm13,%xmm10
6493
6494# qhasm: xmm11 ^= xmm14
6495# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
6496# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
6497pxor %xmm11,%xmm10
6498
6499# qhasm: xmm11 &= xmm12
6500# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
6501# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
6502pand %xmm8,%xmm10
6503
6504# qhasm: xmm12 ^= xmm8
6505# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
6506# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
6507pxor %xmm9,%xmm8
6508
6509# qhasm: xmm12 &= xmm14
6510# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
6511# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
6512pand %xmm11,%xmm8
6513
6514# qhasm: xmm8 &= xmm15
6515# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
6516# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
6517pand %xmm13,%xmm9
6518
6519# qhasm: xmm8 ^= xmm12
6520# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
6521# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
6522pxor %xmm8,%xmm9
6523
6524# qhasm: xmm12 ^= xmm11
6525# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
6526# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
6527pxor %xmm10,%xmm8
6528
6529# qhasm: xmm10 = xmm13
6530# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
6531# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
6532movdqa %xmm15,%xmm10
6533
6534# qhasm: xmm10 ^= xmm9
6535# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
6536# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
6537pxor %xmm12,%xmm10
6538
6539# qhasm: xmm10 &= xmm4
6540# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
6541# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
6542pand %xmm4,%xmm10
6543
6544# qhasm: xmm4 ^= xmm2
6545# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
6546# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
6547pxor %xmm2,%xmm4
6548
6549# qhasm: xmm4 &= xmm9
6550# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
6551# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
6552pand %xmm12,%xmm4
6553
6554# qhasm: xmm2 &= xmm13
6555# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
6556# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
6557pand %xmm15,%xmm2
6558
6559# qhasm: xmm4 ^= xmm2
6560# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
6561# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
6562pxor %xmm2,%xmm4
6563
6564# qhasm: xmm2 ^= xmm10
6565# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
6566# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
6567pxor %xmm10,%xmm2
6568
6569# qhasm: xmm15 ^= xmm13
6570# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
6571# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
6572pxor %xmm15,%xmm13
6573
6574# qhasm: xmm14 ^= xmm9
6575# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
6576# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
6577pxor %xmm12,%xmm11
6578
6579# qhasm: xmm11 = xmm15
6580# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
6581# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
6582movdqa %xmm13,%xmm10
6583
6584# qhasm: xmm11 ^= xmm14
6585# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
6586# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
6587pxor %xmm11,%xmm10
6588
6589# qhasm: xmm11 &= xmm7
6590# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
6591# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
6592pand %xmm7,%xmm10
6593
6594# qhasm: xmm7 ^= xmm1
6595# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
6596# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
6597pxor %xmm1,%xmm7
6598
6599# qhasm: xmm7 &= xmm14
6600# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
6601# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
6602pand %xmm11,%xmm7
6603
6604# qhasm: xmm1 &= xmm15
6605# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
6606# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
6607pand %xmm13,%xmm1
6608
6609# qhasm: xmm7 ^= xmm1
6610# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
6611# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
6612pxor %xmm1,%xmm7
6613
6614# qhasm: xmm1 ^= xmm11
6615# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
6616# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
6617pxor %xmm10,%xmm1
6618
6619# qhasm: xmm7 ^= xmm12
6620# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
6621# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
6622pxor %xmm8,%xmm7
6623
6624# qhasm: xmm4 ^= xmm12
6625# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
6626# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
6627pxor %xmm8,%xmm4
6628
6629# qhasm: xmm1 ^= xmm8
6630# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
6631# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
6632pxor %xmm9,%xmm1
6633
6634# qhasm: xmm2 ^= xmm8
6635# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
6636# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
6637pxor %xmm9,%xmm2
6638
6639# qhasm: xmm7 ^= xmm0
6640# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
6641# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
6642pxor %xmm0,%xmm7
6643
6644# qhasm: xmm1 ^= xmm6
6645# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
6646# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
6647pxor %xmm6,%xmm1
6648
6649# qhasm: xmm4 ^= xmm7
6650# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
6651# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
6652pxor %xmm7,%xmm4
6653
6654# qhasm: xmm6 ^= xmm0
6655# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
6656# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
6657pxor %xmm0,%xmm6
6658
6659# qhasm: xmm0 ^= xmm1
6660# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
6661# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
6662pxor %xmm1,%xmm0
6663
6664# qhasm: xmm1 ^= xmm5
6665# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
6666# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
6667pxor %xmm5,%xmm1
6668
6669# qhasm: xmm5 ^= xmm2
6670# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
6671# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
6672pxor %xmm2,%xmm5
6673
6674# qhasm: xmm4 ^= xmm5
6675# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
6676# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
6677pxor %xmm5,%xmm4
6678
6679# qhasm: xmm2 ^= xmm3
6680# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
6681# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
6682pxor %xmm3,%xmm2
6683
6684# qhasm: xmm3 ^= xmm5
6685# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
6686# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
6687pxor %xmm5,%xmm3
6688
6689# qhasm: xmm6 ^= xmm3
6690# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
6691# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
6692pxor %xmm3,%xmm6
6693
6694# qhasm: xmm3 ^= RCON
6695# asm 1: pxor RCON,<xmm3=int6464#4
6696# asm 2: pxor RCON,<xmm3=%xmm3
6697pxor RCON,%xmm3
6698
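# Note: the RCON xor above appears to fold the AES round constant into one
# bit slice; the EXPB0 byte shuffles that follow appear to select and rotate
# the key word that feeds the next round of the (bitsliced) key schedule.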
6699# qhasm: shuffle bytes of xmm0 by EXPB0
6700# asm 1: pshufb EXPB0,<xmm0=int6464#1
6701# asm 2: pshufb EXPB0,<xmm0=%xmm0
6702pshufb EXPB0,%xmm0
6703
6704# qhasm: shuffle bytes of xmm1 by EXPB0
6705# asm 1: pshufb EXPB0,<xmm1=int6464#2
6706# asm 2: pshufb EXPB0,<xmm1=%xmm1
6707pshufb EXPB0,%xmm1
6708
6709# qhasm: shuffle bytes of xmm4 by EXPB0
6710# asm 1: pshufb EXPB0,<xmm4=int6464#5
6711# asm 2: pshufb EXPB0,<xmm4=%xmm4
6712pshufb EXPB0,%xmm4
6713
6714# qhasm: shuffle bytes of xmm6 by EXPB0
6715# asm 1: pshufb EXPB0,<xmm6=int6464#7
6716# asm 2: pshufb EXPB0,<xmm6=%xmm6
6717pshufb EXPB0,%xmm6
6718
6719# qhasm: shuffle bytes of xmm3 by EXPB0
6720# asm 1: pshufb EXPB0,<xmm3=int6464#4
6721# asm 2: pshufb EXPB0,<xmm3=%xmm3
6722pshufb EXPB0,%xmm3
6723
6724# qhasm: shuffle bytes of xmm7 by EXPB0
6725# asm 1: pshufb EXPB0,<xmm7=int6464#8
6726# asm 2: pshufb EXPB0,<xmm7=%xmm7
6727pshufb EXPB0,%xmm7
6728
6729# qhasm: shuffle bytes of xmm2 by EXPB0
6730# asm 1: pshufb EXPB0,<xmm2=int6464#3
6731# asm 2: pshufb EXPB0,<xmm2=%xmm2
6732pshufb EXPB0,%xmm2
6733
6734# qhasm: shuffle bytes of xmm5 by EXPB0
6735# asm 1: pshufb EXPB0,<xmm5=int6464#6
6736# asm 2: pshufb EXPB0,<xmm5=%xmm5
6737pshufb EXPB0,%xmm5
6738
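# Note: the loads below appear to fetch the previous bitsliced round key
# (stored at c + 512 .. c + 624); the ONE xors likely re-complement the
# slices that this implementation keeps in inverted form around the S-box.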
6739# qhasm: xmm8 = *(int128 *)(c + 512)
6740# asm 1: movdqa 512(<c=int64#1),>xmm8=int6464#9
6741# asm 2: movdqa 512(<c=%rdi),>xmm8=%xmm8
6742movdqa 512(%rdi),%xmm8
6743
6744# qhasm: xmm9 = *(int128 *)(c + 528)
6745# asm 1: movdqa 528(<c=int64#1),>xmm9=int6464#10
6746# asm 2: movdqa 528(<c=%rdi),>xmm9=%xmm9
6747movdqa 528(%rdi),%xmm9
6748
6749# qhasm: xmm10 = *(int128 *)(c + 544)
6750# asm 1: movdqa 544(<c=int64#1),>xmm10=int6464#11
6751# asm 2: movdqa 544(<c=%rdi),>xmm10=%xmm10
6752movdqa 544(%rdi),%xmm10
6753
6754# qhasm: xmm11 = *(int128 *)(c + 560)
6755# asm 1: movdqa 560(<c=int64#1),>xmm11=int6464#12
6756# asm 2: movdqa 560(<c=%rdi),>xmm11=%xmm11
6757movdqa 560(%rdi),%xmm11
6758
6759# qhasm: xmm12 = *(int128 *)(c + 576)
6760# asm 1: movdqa 576(<c=int64#1),>xmm12=int6464#13
6761# asm 2: movdqa 576(<c=%rdi),>xmm12=%xmm12
6762movdqa 576(%rdi),%xmm12
6763
6764# qhasm: xmm13 = *(int128 *)(c + 592)
6765# asm 1: movdqa 592(<c=int64#1),>xmm13=int6464#14
6766# asm 2: movdqa 592(<c=%rdi),>xmm13=%xmm13
6767movdqa 592(%rdi),%xmm13
6768
6769# qhasm: xmm14 = *(int128 *)(c + 608)
6770# asm 1: movdqa 608(<c=int64#1),>xmm14=int6464#15
6771# asm 2: movdqa 608(<c=%rdi),>xmm14=%xmm14
6772movdqa 608(%rdi),%xmm14
6773
6774# qhasm: xmm15 = *(int128 *)(c + 624)
6775# asm 1: movdqa 624(<c=int64#1),>xmm15=int6464#16
6776# asm 2: movdqa 624(<c=%rdi),>xmm15=%xmm15
6777movdqa 624(%rdi),%xmm15
6778
6779# qhasm: xmm8 ^= ONE
6780# asm 1: pxor ONE,<xmm8=int6464#9
6781# asm 2: pxor ONE,<xmm8=%xmm8
6782pxor ONE,%xmm8
6783
6784# qhasm: xmm9 ^= ONE
6785# asm 1: pxor ONE,<xmm9=int6464#10
6786# asm 2: pxor ONE,<xmm9=%xmm9
6787pxor ONE,%xmm9
6788
6789# qhasm: xmm13 ^= ONE
6790# asm 1: pxor ONE,<xmm13=int6464#14
6791# asm 2: pxor ONE,<xmm13=%xmm13
6792pxor ONE,%xmm13
6793
6794# qhasm: xmm14 ^= ONE
6795# asm 1: pxor ONE,<xmm14=int6464#15
6796# asm 2: pxor ONE,<xmm14=%xmm14
6797pxor ONE,%xmm14
6798
6799# qhasm: xmm0 ^= xmm8
6800# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
6801# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
6802pxor %xmm8,%xmm0
6803
6804# qhasm: xmm1 ^= xmm9
6805# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
6806# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
6807pxor %xmm9,%xmm1
6808
6809# qhasm: xmm4 ^= xmm10
6810# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
6811# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
6812pxor %xmm10,%xmm4
6813
6814# qhasm: xmm6 ^= xmm11
6815# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
6816# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
6817pxor %xmm11,%xmm6
6818
6819# qhasm: xmm3 ^= xmm12
6820# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
6821# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
6822pxor %xmm12,%xmm3
6823
6824# qhasm: xmm7 ^= xmm13
6825# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
6826# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
6827pxor %xmm13,%xmm7
6828
6829# qhasm: xmm2 ^= xmm14
6830# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
6831# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
6832pxor %xmm14,%xmm2
6833
6834# qhasm: xmm5 ^= xmm15
6835# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
6836# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
6837pxor %xmm15,%xmm5
6838
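# Note: the repeated "xor, then shift each 32-bit lane right by 8" cascade
# that follows appears to propagate the key-schedule xor chain
# (w[i] = w[i-1] ^ w_old[i]) across the four words of the new round key
# while everything stays in the sliced representation.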
6839# qhasm: uint32323232 xmm8 >>= 8
6840# asm 1: psrld $8,<xmm8=int6464#9
6841# asm 2: psrld $8,<xmm8=%xmm8
6842psrld $8,%xmm8
6843
6844# qhasm: uint32323232 xmm9 >>= 8
6845# asm 1: psrld $8,<xmm9=int6464#10
6846# asm 2: psrld $8,<xmm9=%xmm9
6847psrld $8,%xmm9
6848
6849# qhasm: uint32323232 xmm10 >>= 8
6850# asm 1: psrld $8,<xmm10=int6464#11
6851# asm 2: psrld $8,<xmm10=%xmm10
6852psrld $8,%xmm10
6853
6854# qhasm: uint32323232 xmm11 >>= 8
6855# asm 1: psrld $8,<xmm11=int6464#12
6856# asm 2: psrld $8,<xmm11=%xmm11
6857psrld $8,%xmm11
6858
6859# qhasm: uint32323232 xmm12 >>= 8
6860# asm 1: psrld $8,<xmm12=int6464#13
6861# asm 2: psrld $8,<xmm12=%xmm12
6862psrld $8,%xmm12
6863
6864# qhasm: uint32323232 xmm13 >>= 8
6865# asm 1: psrld $8,<xmm13=int6464#14
6866# asm 2: psrld $8,<xmm13=%xmm13
6867psrld $8,%xmm13
6868
6869# qhasm: uint32323232 xmm14 >>= 8
6870# asm 1: psrld $8,<xmm14=int6464#15
6871# asm 2: psrld $8,<xmm14=%xmm14
6872psrld $8,%xmm14
6873
6874# qhasm: uint32323232 xmm15 >>= 8
6875# asm 1: psrld $8,<xmm15=int6464#16
6876# asm 2: psrld $8,<xmm15=%xmm15
6877psrld $8,%xmm15
6878
6879# qhasm: xmm0 ^= xmm8
6880# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
6881# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
6882pxor %xmm8,%xmm0
6883
6884# qhasm: xmm1 ^= xmm9
6885# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
6886# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
6887pxor %xmm9,%xmm1
6888
6889# qhasm: xmm4 ^= xmm10
6890# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
6891# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
6892pxor %xmm10,%xmm4
6893
6894# qhasm: xmm6 ^= xmm11
6895# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
6896# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
6897pxor %xmm11,%xmm6
6898
6899# qhasm: xmm3 ^= xmm12
6900# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
6901# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
6902pxor %xmm12,%xmm3
6903
6904# qhasm: xmm7 ^= xmm13
6905# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
6906# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
6907pxor %xmm13,%xmm7
6908
6909# qhasm: xmm2 ^= xmm14
6910# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
6911# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
6912pxor %xmm14,%xmm2
6913
6914# qhasm: xmm5 ^= xmm15
6915# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
6916# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
6917pxor %xmm15,%xmm5
6918
6919# qhasm: uint32323232 xmm8 >>= 8
6920# asm 1: psrld $8,<xmm8=int6464#9
6921# asm 2: psrld $8,<xmm8=%xmm8
6922psrld $8,%xmm8
6923
6924# qhasm: uint32323232 xmm9 >>= 8
6925# asm 1: psrld $8,<xmm9=int6464#10
6926# asm 2: psrld $8,<xmm9=%xmm9
6927psrld $8,%xmm9
6928
6929# qhasm: uint32323232 xmm10 >>= 8
6930# asm 1: psrld $8,<xmm10=int6464#11
6931# asm 2: psrld $8,<xmm10=%xmm10
6932psrld $8,%xmm10
6933
6934# qhasm: uint32323232 xmm11 >>= 8
6935# asm 1: psrld $8,<xmm11=int6464#12
6936# asm 2: psrld $8,<xmm11=%xmm11
6937psrld $8,%xmm11
6938
6939# qhasm: uint32323232 xmm12 >>= 8
6940# asm 1: psrld $8,<xmm12=int6464#13
6941# asm 2: psrld $8,<xmm12=%xmm12
6942psrld $8,%xmm12
6943
6944# qhasm: uint32323232 xmm13 >>= 8
6945# asm 1: psrld $8,<xmm13=int6464#14
6946# asm 2: psrld $8,<xmm13=%xmm13
6947psrld $8,%xmm13
6948
6949# qhasm: uint32323232 xmm14 >>= 8
6950# asm 1: psrld $8,<xmm14=int6464#15
6951# asm 2: psrld $8,<xmm14=%xmm14
6952psrld $8,%xmm14
6953
6954# qhasm: uint32323232 xmm15 >>= 8
6955# asm 1: psrld $8,<xmm15=int6464#16
6956# asm 2: psrld $8,<xmm15=%xmm15
6957psrld $8,%xmm15
6958
6959# qhasm: xmm0 ^= xmm8
6960# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
6961# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
6962pxor %xmm8,%xmm0
6963
6964# qhasm: xmm1 ^= xmm9
6965# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
6966# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
6967pxor %xmm9,%xmm1
6968
6969# qhasm: xmm4 ^= xmm10
6970# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
6971# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
6972pxor %xmm10,%xmm4
6973
6974# qhasm: xmm6 ^= xmm11
6975# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
6976# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
6977pxor %xmm11,%xmm6
6978
6979# qhasm: xmm3 ^= xmm12
6980# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
6981# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
6982pxor %xmm12,%xmm3
6983
6984# qhasm: xmm7 ^= xmm13
6985# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
6986# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
6987pxor %xmm13,%xmm7
6988
6989# qhasm: xmm2 ^= xmm14
6990# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
6991# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
6992pxor %xmm14,%xmm2
6993
6994# qhasm: xmm5 ^= xmm15
6995# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
6996# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
6997pxor %xmm15,%xmm5
6998
6999# qhasm: uint32323232 xmm8 >>= 8
7000# asm 1: psrld $8,<xmm8=int6464#9
7001# asm 2: psrld $8,<xmm8=%xmm8
7002psrld $8,%xmm8
7003
7004# qhasm: uint32323232 xmm9 >>= 8
7005# asm 1: psrld $8,<xmm9=int6464#10
7006# asm 2: psrld $8,<xmm9=%xmm9
7007psrld $8,%xmm9
7008
7009# qhasm: uint32323232 xmm10 >>= 8
7010# asm 1: psrld $8,<xmm10=int6464#11
7011# asm 2: psrld $8,<xmm10=%xmm10
7012psrld $8,%xmm10
7013
7014# qhasm: uint32323232 xmm11 >>= 8
7015# asm 1: psrld $8,<xmm11=int6464#12
7016# asm 2: psrld $8,<xmm11=%xmm11
7017psrld $8,%xmm11
7018
7019# qhasm: uint32323232 xmm12 >>= 8
7020# asm 1: psrld $8,<xmm12=int6464#13
7021# asm 2: psrld $8,<xmm12=%xmm12
7022psrld $8,%xmm12
7023
7024# qhasm: uint32323232 xmm13 >>= 8
7025# asm 1: psrld $8,<xmm13=int6464#14
7026# asm 2: psrld $8,<xmm13=%xmm13
7027psrld $8,%xmm13
7028
7029# qhasm: uint32323232 xmm14 >>= 8
7030# asm 1: psrld $8,<xmm14=int6464#15
7031# asm 2: psrld $8,<xmm14=%xmm14
7032psrld $8,%xmm14
7033
7034# qhasm: uint32323232 xmm15 >>= 8
7035# asm 1: psrld $8,<xmm15=int6464#16
7036# asm 2: psrld $8,<xmm15=%xmm15
7037psrld $8,%xmm15
7038
7039# qhasm: xmm0 ^= xmm8
7040# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
7041# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
7042pxor %xmm8,%xmm0
7043
7044# qhasm: xmm1 ^= xmm9
7045# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
7046# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
7047pxor %xmm9,%xmm1
7048
7049# qhasm: xmm4 ^= xmm10
7050# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
7051# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
7052pxor %xmm10,%xmm4
7053
7054# qhasm: xmm6 ^= xmm11
7055# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
7056# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
7057pxor %xmm11,%xmm6
7058
7059# qhasm: xmm3 ^= xmm12
7060# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
7061# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
7062pxor %xmm12,%xmm3
7063
7064# qhasm: xmm7 ^= xmm13
7065# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
7066# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
7067pxor %xmm13,%xmm7
7068
7069# qhasm: xmm2 ^= xmm14
7070# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
7071# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
7072pxor %xmm14,%xmm2
7073
7074# qhasm: xmm5 ^= xmm15
7075# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
7076# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
7077pxor %xmm15,%xmm5
7078
7079# qhasm: *(int128 *)(c + 640) = xmm0
7080# asm 1: movdqa <xmm0=int6464#1,640(<c=int64#1)
7081# asm 2: movdqa <xmm0=%xmm0,640(<c=%rdi)
7082movdqa %xmm0,640(%rdi)
7083
7084# qhasm: *(int128 *)(c + 656) = xmm1
7085# asm 1: movdqa <xmm1=int6464#2,656(<c=int64#1)
7086# asm 2: movdqa <xmm1=%xmm1,656(<c=%rdi)
7087movdqa %xmm1,656(%rdi)
7088
7089# qhasm: *(int128 *)(c + 672) = xmm4
7090# asm 1: movdqa <xmm4=int6464#5,672(<c=int64#1)
7091# asm 2: movdqa <xmm4=%xmm4,672(<c=%rdi)
7092movdqa %xmm4,672(%rdi)
7093
7094# qhasm: *(int128 *)(c + 688) = xmm6
7095# asm 1: movdqa <xmm6=int6464#7,688(<c=int64#1)
7096# asm 2: movdqa <xmm6=%xmm6,688(<c=%rdi)
7097movdqa %xmm6,688(%rdi)
7098
7099# qhasm: *(int128 *)(c + 704) = xmm3
7100# asm 1: movdqa <xmm3=int6464#4,704(<c=int64#1)
7101# asm 2: movdqa <xmm3=%xmm3,704(<c=%rdi)
7102movdqa %xmm3,704(%rdi)
7103
7104# qhasm: *(int128 *)(c + 720) = xmm7
7105# asm 1: movdqa <xmm7=int6464#8,720(<c=int64#1)
7106# asm 2: movdqa <xmm7=%xmm7,720(<c=%rdi)
7107movdqa %xmm7,720(%rdi)
7108
7109# qhasm: *(int128 *)(c + 736) = xmm2
7110# asm 1: movdqa <xmm2=int6464#3,736(<c=int64#1)
7111# asm 2: movdqa <xmm2=%xmm2,736(<c=%rdi)
7112movdqa %xmm2,736(%rdi)
7113
7114# qhasm: *(int128 *)(c + 752) = xmm5
7115# asm 1: movdqa <xmm5=int6464#6,752(<c=int64#1)
7116# asm 2: movdqa <xmm5=%xmm5,752(<c=%rdi)
7117movdqa %xmm5,752(%rdi)
7118
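# Note: the stores above appear to complete one 128-byte bitsliced round key
# (at c + 640 .. c + 752); the ONE xors and ROTB shuffles below seem to
# re-normalize the slices before the next key-expansion round begins.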
7119# qhasm: xmm0 ^= ONE
7120# asm 1: pxor ONE,<xmm0=int6464#1
7121# asm 2: pxor ONE,<xmm0=%xmm0
7122pxor ONE,%xmm0
7123
7124# qhasm: xmm1 ^= ONE
7125# asm 1: pxor ONE,<xmm1=int6464#2
7126# asm 2: pxor ONE,<xmm1=%xmm1
7127pxor ONE,%xmm1
7128
7129# qhasm: xmm7 ^= ONE
7130# asm 1: pxor ONE,<xmm7=int6464#8
7131# asm 2: pxor ONE,<xmm7=%xmm7
7132pxor ONE,%xmm7
7133
7134# qhasm: xmm2 ^= ONE
7135# asm 1: pxor ONE,<xmm2=int6464#3
7136# asm 2: pxor ONE,<xmm2=%xmm2
7137pxor ONE,%xmm2
7138
7139# qhasm: shuffle bytes of xmm0 by ROTB
7140# asm 1: pshufb ROTB,<xmm0=int6464#1
7141# asm 2: pshufb ROTB,<xmm0=%xmm0
7142pshufb ROTB,%xmm0
7143
7144# qhasm: shuffle bytes of xmm1 by ROTB
7145# asm 1: pshufb ROTB,<xmm1=int6464#2
7146# asm 2: pshufb ROTB,<xmm1=%xmm1
7147pshufb ROTB,%xmm1
7148
7149# qhasm: shuffle bytes of xmm4 by ROTB
7150# asm 1: pshufb ROTB,<xmm4=int6464#5
7151# asm 2: pshufb ROTB,<xmm4=%xmm4
7152pshufb ROTB,%xmm4
7153
7154# qhasm: shuffle bytes of xmm6 by ROTB
7155# asm 1: pshufb ROTB,<xmm6=int6464#7
7156# asm 2: pshufb ROTB,<xmm6=%xmm6
7157pshufb ROTB,%xmm6
7158
7159# qhasm: shuffle bytes of xmm3 by ROTB
7160# asm 1: pshufb ROTB,<xmm3=int6464#4
7161# asm 2: pshufb ROTB,<xmm3=%xmm3
7162pshufb ROTB,%xmm3
7163
7164# qhasm: shuffle bytes of xmm7 by ROTB
7165# asm 1: pshufb ROTB,<xmm7=int6464#8
7166# asm 2: pshufb ROTB,<xmm7=%xmm7
7167pshufb ROTB,%xmm7
7168
7169# qhasm: shuffle bytes of xmm2 by ROTB
7170# asm 1: pshufb ROTB,<xmm2=int6464#3
7171# asm 2: pshufb ROTB,<xmm2=%xmm2
7172pshufb ROTB,%xmm2
7173
7174# qhasm: shuffle bytes of xmm5 by ROTB
7175# asm 1: pshufb ROTB,<xmm5=int6464#6
7176# asm 2: pshufb ROTB,<xmm5=%xmm5
7177pshufb ROTB,%xmm5
7178
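# Note: the long pxor/pand/por/movdqa sequence that follows appears to be the
# bitsliced AES S-box (a branch-free boolean circuit for SubBytes) applied to
# the shuffled key words for the next round of the schedule.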
7179# qhasm: xmm7 ^= xmm2
7180# asm 1: pxor <xmm2=int6464#3,<xmm7=int6464#8
7181# asm 2: pxor <xmm2=%xmm2,<xmm7=%xmm7
7182pxor %xmm2,%xmm7
7183
7184# qhasm: xmm4 ^= xmm1
7185# asm 1: pxor <xmm1=int6464#2,<xmm4=int6464#5
7186# asm 2: pxor <xmm1=%xmm1,<xmm4=%xmm4
7187pxor %xmm1,%xmm4
7188
7189# qhasm: xmm7 ^= xmm0
7190# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
7191# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
7192pxor %xmm0,%xmm7
7193
7194# qhasm: xmm2 ^= xmm4
7195# asm 1: pxor <xmm4=int6464#5,<xmm2=int6464#3
7196# asm 2: pxor <xmm4=%xmm4,<xmm2=%xmm2
7197pxor %xmm4,%xmm2
7198
7199# qhasm: xmm6 ^= xmm0
7200# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
7201# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
7202pxor %xmm0,%xmm6
7203
7204# qhasm: xmm2 ^= xmm6
7205# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
7206# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
7207pxor %xmm6,%xmm2
7208
7209# qhasm: xmm6 ^= xmm5
7210# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
7211# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
7212pxor %xmm5,%xmm6
7213
7214# qhasm: xmm6 ^= xmm3
7215# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
7216# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
7217pxor %xmm3,%xmm6
7218
7219# qhasm: xmm5 ^= xmm7
7220# asm 1: pxor <xmm7=int6464#8,<xmm5=int6464#6
7221# asm 2: pxor <xmm7=%xmm7,<xmm5=%xmm5
7222pxor %xmm7,%xmm5
7223
7224# qhasm: xmm6 ^= xmm1
7225# asm 1: pxor <xmm1=int6464#2,<xmm6=int6464#7
7226# asm 2: pxor <xmm1=%xmm1,<xmm6=%xmm6
7227pxor %xmm1,%xmm6
7228
7229# qhasm: xmm3 ^= xmm7
7230# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
7231# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
7232pxor %xmm7,%xmm3
7233
7234# qhasm: xmm4 ^= xmm5
7235# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
7236# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
7237pxor %xmm5,%xmm4
7238
7239# qhasm: xmm1 ^= xmm7
7240# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
7241# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
7242pxor %xmm7,%xmm1
7243
7244# qhasm: xmm11 = xmm5
7245# asm 1: movdqa <xmm5=int6464#6,>xmm11=int6464#9
7246# asm 2: movdqa <xmm5=%xmm5,>xmm11=%xmm8
7247movdqa %xmm5,%xmm8
7248
7249# qhasm: xmm10 = xmm1
7250# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
7251# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
7252movdqa %xmm1,%xmm9
7253
7254# qhasm: xmm9 = xmm7
7255# asm 1: movdqa <xmm7=int6464#8,>xmm9=int6464#11
7256# asm 2: movdqa <xmm7=%xmm7,>xmm9=%xmm10
7257movdqa %xmm7,%xmm10
7258
7259# qhasm: xmm13 = xmm4
7260# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
7261# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
7262movdqa %xmm4,%xmm11
7263
7264# qhasm: xmm12 = xmm2
7265# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#13
7266# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm12
7267movdqa %xmm2,%xmm12
7268
7269# qhasm: xmm11 ^= xmm3
7270# asm 1: pxor <xmm3=int6464#4,<xmm11=int6464#9
7271# asm 2: pxor <xmm3=%xmm3,<xmm11=%xmm8
7272pxor %xmm3,%xmm8
7273
7274# qhasm: xmm10 ^= xmm4
7275# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#10
7276# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm9
7277pxor %xmm4,%xmm9
7278
7279# qhasm: xmm9 ^= xmm6
7280# asm 1: pxor <xmm6=int6464#7,<xmm9=int6464#11
7281# asm 2: pxor <xmm6=%xmm6,<xmm9=%xmm10
7282pxor %xmm6,%xmm10
7283
7284# qhasm: xmm13 ^= xmm3
7285# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#12
7286# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm11
7287pxor %xmm3,%xmm11
7288
7289# qhasm: xmm12 ^= xmm0
7290# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
7291# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
7292pxor %xmm0,%xmm12
7293
7294# qhasm: xmm14 = xmm11
7295# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
7296# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
7297movdqa %xmm8,%xmm13
7298
7299# qhasm: xmm8 = xmm10
7300# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
7301# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
7302movdqa %xmm9,%xmm14
7303
7304# qhasm: xmm15 = xmm11
7305# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
7306# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
7307movdqa %xmm8,%xmm15
7308
7309# qhasm: xmm10 |= xmm9
7310# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
7311# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
7312por %xmm10,%xmm9
7313
7314# qhasm: xmm11 |= xmm12
7315# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
7316# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
7317por %xmm12,%xmm8
7318
7319# qhasm: xmm15 ^= xmm8
7320# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
7321# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
7322pxor %xmm14,%xmm15
7323
7324# qhasm: xmm14 &= xmm12
7325# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
7326# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
7327pand %xmm12,%xmm13
7328
7329# qhasm: xmm8 &= xmm9
7330# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
7331# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
7332pand %xmm10,%xmm14
7333
7334# qhasm: xmm12 ^= xmm9
7335# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
7336# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
7337pxor %xmm10,%xmm12
7338
7339# qhasm: xmm15 &= xmm12
7340# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
7341# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
7342pand %xmm12,%xmm15
7343
7344# qhasm: xmm12 = xmm6
7345# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#11
7346# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm10
7347movdqa %xmm6,%xmm10
7348
7349# qhasm: xmm12 ^= xmm0
7350# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
7351# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
7352pxor %xmm0,%xmm10
7353
7354# qhasm: xmm13 &= xmm12
7355# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
7356# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
7357pand %xmm10,%xmm11
7358
7359# qhasm: xmm11 ^= xmm13
7360# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
7361# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
7362pxor %xmm11,%xmm8
7363
7364# qhasm: xmm10 ^= xmm13
7365# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7366# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7367pxor %xmm11,%xmm9
7368
7369# qhasm: xmm13 = xmm5
7370# asm 1: movdqa <xmm5=int6464#6,>xmm13=int6464#11
7371# asm 2: movdqa <xmm5=%xmm5,>xmm13=%xmm10
7372movdqa %xmm5,%xmm10
7373
7374# qhasm: xmm13 ^= xmm1
7375# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
7376# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
7377pxor %xmm1,%xmm10
7378
7379# qhasm: xmm12 = xmm7
7380# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#12
7381# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm11
7382movdqa %xmm7,%xmm11
7383
7384# qhasm: xmm9 = xmm13
7385# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
7386# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
7387movdqa %xmm10,%xmm12
7388
7389# qhasm: xmm12 ^= xmm2
7390# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#12
7391# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm11
7392pxor %xmm2,%xmm11
7393
7394# qhasm: xmm9 |= xmm12
7395# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
7396# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
7397por %xmm11,%xmm12
7398
7399# qhasm: xmm13 &= xmm12
7400# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
7401# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
7402pand %xmm11,%xmm10
7403
7404# qhasm: xmm8 ^= xmm13
7405# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
7406# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
7407pxor %xmm10,%xmm14
7408
7409# qhasm: xmm11 ^= xmm15
7410# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
7411# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
7412pxor %xmm15,%xmm8
7413
7414# qhasm: xmm10 ^= xmm14
7415# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
7416# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
7417pxor %xmm13,%xmm9
7418
7419# qhasm: xmm9 ^= xmm15
7420# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
7421# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
7422pxor %xmm15,%xmm12
7423
7424# qhasm: xmm8 ^= xmm14
7425# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
7426# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
7427pxor %xmm13,%xmm14
7428
7429# qhasm: xmm9 ^= xmm14
7430# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7431# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7432pxor %xmm13,%xmm12
7433
7434# qhasm: xmm12 = xmm4
7435# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#11
7436# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm10
7437movdqa %xmm4,%xmm10
7438
7439# qhasm: xmm13 = xmm3
7440# asm 1: movdqa <xmm3=int6464#4,>xmm13=int6464#12
7441# asm 2: movdqa <xmm3=%xmm3,>xmm13=%xmm11
7442movdqa %xmm3,%xmm11
7443
7444# qhasm: xmm14 = xmm1
7445# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
7446# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
7447movdqa %xmm1,%xmm13
7448
7449# qhasm: xmm15 = xmm5
7450# asm 1: movdqa <xmm5=int6464#6,>xmm15=int6464#16
7451# asm 2: movdqa <xmm5=%xmm5,>xmm15=%xmm15
7452movdqa %xmm5,%xmm15
7453
7454# qhasm: xmm12 &= xmm6
7455# asm 1: pand <xmm6=int6464#7,<xmm12=int6464#11
7456# asm 2: pand <xmm6=%xmm6,<xmm12=%xmm10
7457pand %xmm6,%xmm10
7458
7459# qhasm: xmm13 &= xmm0
7460# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
7461# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
7462pand %xmm0,%xmm11
7463
7464# qhasm: xmm14 &= xmm7
7465# asm 1: pand <xmm7=int6464#8,<xmm14=int6464#14
7466# asm 2: pand <xmm7=%xmm7,<xmm14=%xmm13
7467pand %xmm7,%xmm13
7468
7469# qhasm: xmm15 |= xmm2
7470# asm 1: por <xmm2=int6464#3,<xmm15=int6464#16
7471# asm 2: por <xmm2=%xmm2,<xmm15=%xmm15
7472por %xmm2,%xmm15
7473
7474# qhasm: xmm11 ^= xmm12
7475# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
7476# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
7477pxor %xmm10,%xmm8
7478
7479# qhasm: xmm10 ^= xmm13
7480# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7481# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7482pxor %xmm11,%xmm9
7483
7484# qhasm: xmm9 ^= xmm14
7485# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7486# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7487pxor %xmm13,%xmm12
7488
7489# qhasm: xmm8 ^= xmm15
7490# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
7491# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
7492pxor %xmm15,%xmm14
7493
7494# qhasm: xmm12 = xmm11
7495# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
7496# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
7497movdqa %xmm8,%xmm10
7498
7499# qhasm: xmm12 ^= xmm10
7500# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
7501# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
7502pxor %xmm9,%xmm10
7503
7504# qhasm: xmm11 &= xmm9
7505# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
7506# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
7507pand %xmm12,%xmm8
7508
7509# qhasm: xmm14 = xmm8
7510# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
7511# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
7512movdqa %xmm14,%xmm11
7513
7514# qhasm: xmm14 ^= xmm11
7515# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
7516# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
7517pxor %xmm8,%xmm11
7518
7519# qhasm: xmm15 = xmm12
7520# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
7521# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
7522movdqa %xmm10,%xmm13
7523
7524# qhasm: xmm15 &= xmm14
7525# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
7526# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
7527pand %xmm11,%xmm13
7528
7529# qhasm: xmm15 ^= xmm10
7530# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
7531# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
7532pxor %xmm9,%xmm13
7533
7534# qhasm: xmm13 = xmm9
7535# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
7536# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
7537movdqa %xmm12,%xmm15
7538
7539# qhasm: xmm13 ^= xmm8
7540# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7541# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7542pxor %xmm14,%xmm15
7543
7544# qhasm: xmm11 ^= xmm10
7545# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
7546# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
7547pxor %xmm9,%xmm8
7548
7549# qhasm: xmm13 &= xmm11
7550# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
7551# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
7552pand %xmm8,%xmm15
7553
7554# qhasm: xmm13 ^= xmm8
7555# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7556# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7557pxor %xmm14,%xmm15
7558
7559# qhasm: xmm9 ^= xmm13
7560# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
7561# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
7562pxor %xmm15,%xmm12
7563
7564# qhasm: xmm10 = xmm14
7565# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
7566# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
7567movdqa %xmm11,%xmm8
7568
7569# qhasm: xmm10 ^= xmm13
7570# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
7571# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
7572pxor %xmm15,%xmm8
7573
7574# qhasm: xmm10 &= xmm8
7575# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
7576# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
7577pand %xmm14,%xmm8
7578
7579# qhasm: xmm9 ^= xmm10
7580# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
7581# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
7582pxor %xmm8,%xmm12
7583
7584# qhasm: xmm14 ^= xmm10
7585# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
7586# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
7587pxor %xmm8,%xmm11
7588
7589# qhasm: xmm14 &= xmm15
7590# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
7591# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
7592pand %xmm13,%xmm11
7593
7594# qhasm: xmm14 ^= xmm12
7595# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
7596# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
7597pxor %xmm10,%xmm11
7598
7599# qhasm: xmm12 = xmm2
7600# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#9
7601# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm8
7602movdqa %xmm2,%xmm8
7603
7604# qhasm: xmm8 = xmm7
7605# asm 1: movdqa <xmm7=int6464#8,>xmm8=int6464#10
7606# asm 2: movdqa <xmm7=%xmm7,>xmm8=%xmm9
7607movdqa %xmm7,%xmm9
7608
7609# qhasm: xmm10 = xmm15
7610# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
7611# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
7612movdqa %xmm13,%xmm10
7613
7614# qhasm: xmm10 ^= xmm14
7615# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
7616# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
7617pxor %xmm11,%xmm10
7618
7619# qhasm: xmm10 &= xmm2
7620# asm 1: pand <xmm2=int6464#3,<xmm10=int6464#11
7621# asm 2: pand <xmm2=%xmm2,<xmm10=%xmm10
7622pand %xmm2,%xmm10
7623
7624# qhasm: xmm2 ^= xmm7
7625# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
7626# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
7627pxor %xmm7,%xmm2
7628
7629# qhasm: xmm2 &= xmm14
7630# asm 1: pand <xmm14=int6464#12,<xmm2=int6464#3
7631# asm 2: pand <xmm14=%xmm11,<xmm2=%xmm2
7632pand %xmm11,%xmm2
7633
7634# qhasm: xmm7 &= xmm15
7635# asm 1: pand <xmm15=int6464#14,<xmm7=int6464#8
7636# asm 2: pand <xmm15=%xmm13,<xmm7=%xmm7
7637pand %xmm13,%xmm7
7638
7639# qhasm: xmm2 ^= xmm7
7640# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
7641# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
7642pxor %xmm7,%xmm2
7643
7644# qhasm: xmm7 ^= xmm10
7645# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
7646# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
7647pxor %xmm10,%xmm7
7648
7649# qhasm: xmm12 ^= xmm0
7650# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
7651# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
7652pxor %xmm0,%xmm8
7653
7654# qhasm: xmm8 ^= xmm6
7655# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#10
7656# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm9
7657pxor %xmm6,%xmm9
7658
7659# qhasm: xmm15 ^= xmm13
7660# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
7661# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
7662pxor %xmm15,%xmm13
7663
7664# qhasm: xmm14 ^= xmm9
7665# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
7666# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
7667pxor %xmm12,%xmm11
7668
7669# qhasm: xmm11 = xmm15
7670# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
7671# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
7672movdqa %xmm13,%xmm10
7673
7674# qhasm: xmm11 ^= xmm14
7675# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
7676# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
7677pxor %xmm11,%xmm10
7678
7679# qhasm: xmm11 &= xmm12
7680# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
7681# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
7682pand %xmm8,%xmm10
7683
7684# qhasm: xmm12 ^= xmm8
7685# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
7686# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
7687pxor %xmm9,%xmm8
7688
7689# qhasm: xmm12 &= xmm14
7690# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
7691# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
7692pand %xmm11,%xmm8
7693
7694# qhasm: xmm8 &= xmm15
7695# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
7696# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
7697pand %xmm13,%xmm9
7698
7699# qhasm: xmm8 ^= xmm12
7700# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
7701# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
7702pxor %xmm8,%xmm9
7703
7704# qhasm: xmm12 ^= xmm11
7705# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
7706# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
7707pxor %xmm10,%xmm8
7708
7709# qhasm: xmm10 = xmm13
7710# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
7711# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
7712movdqa %xmm15,%xmm10
7713
7714# qhasm: xmm10 ^= xmm9
7715# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
7716# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
7717pxor %xmm12,%xmm10
7718
7719# qhasm: xmm10 &= xmm0
7720# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
7721# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
7722pand %xmm0,%xmm10
7723
7724# qhasm: xmm0 ^= xmm6
7725# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
7726# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
7727pxor %xmm6,%xmm0
7728
7729# qhasm: xmm0 &= xmm9
7730# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
7731# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
7732pand %xmm12,%xmm0
7733
7734# qhasm: xmm6 &= xmm13
7735# asm 1: pand <xmm13=int6464#16,<xmm6=int6464#7
7736# asm 2: pand <xmm13=%xmm15,<xmm6=%xmm6
7737pand %xmm15,%xmm6
7738
7739# qhasm: xmm0 ^= xmm6
7740# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
7741# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
7742pxor %xmm6,%xmm0
7743
7744# qhasm: xmm6 ^= xmm10
7745# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
7746# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
7747pxor %xmm10,%xmm6
7748
7749# qhasm: xmm2 ^= xmm12
7750# asm 1: pxor <xmm12=int6464#9,<xmm2=int6464#3
7751# asm 2: pxor <xmm12=%xmm8,<xmm2=%xmm2
7752pxor %xmm8,%xmm2
7753
7754# qhasm: xmm0 ^= xmm12
7755# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
7756# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
7757pxor %xmm8,%xmm0
7758
7759# qhasm: xmm7 ^= xmm8
7760# asm 1: pxor <xmm8=int6464#10,<xmm7=int6464#8
7761# asm 2: pxor <xmm8=%xmm9,<xmm7=%xmm7
7762pxor %xmm9,%xmm7
7763
7764# qhasm: xmm6 ^= xmm8
7765# asm 1: pxor <xmm8=int6464#10,<xmm6=int6464#7
7766# asm 2: pxor <xmm8=%xmm9,<xmm6=%xmm6
7767pxor %xmm9,%xmm6
7768
7769# qhasm: xmm12 = xmm5
7770# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#9
7771# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm8
7772movdqa %xmm5,%xmm8
7773
7774# qhasm: xmm8 = xmm1
7775# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
7776# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
7777movdqa %xmm1,%xmm9
7778
7779# qhasm: xmm12 ^= xmm3
7780# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#9
7781# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm8
7782pxor %xmm3,%xmm8
7783
7784# qhasm: xmm8 ^= xmm4
7785# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#10
7786# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm9
7787pxor %xmm4,%xmm9
7788
7789# qhasm: xmm11 = xmm15
7790# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
7791# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
7792movdqa %xmm13,%xmm10
7793
7794# qhasm: xmm11 ^= xmm14
7795# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
7796# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
7797pxor %xmm11,%xmm10
7798
7799# qhasm: xmm11 &= xmm12
7800# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
7801# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
7802pand %xmm8,%xmm10
7803
7804# qhasm: xmm12 ^= xmm8
7805# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
7806# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
7807pxor %xmm9,%xmm8
7808
7809# qhasm: xmm12 &= xmm14
7810# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
7811# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
7812pand %xmm11,%xmm8
7813
7814# qhasm: xmm8 &= xmm15
7815# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
7816# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
7817pand %xmm13,%xmm9
7818
7819# qhasm: xmm8 ^= xmm12
7820# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
7821# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
7822pxor %xmm8,%xmm9
7823
7824# qhasm: xmm12 ^= xmm11
7825# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
7826# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
7827pxor %xmm10,%xmm8
7828
7829# qhasm: xmm10 = xmm13
7830# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
7831# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
7832movdqa %xmm15,%xmm10
7833
7834# qhasm: xmm10 ^= xmm9
7835# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
7836# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
7837pxor %xmm12,%xmm10
7838
7839# qhasm: xmm10 &= xmm3
7840# asm 1: pand <xmm3=int6464#4,<xmm10=int6464#11
7841# asm 2: pand <xmm3=%xmm3,<xmm10=%xmm10
7842pand %xmm3,%xmm10
7843
7844# qhasm: xmm3 ^= xmm4
7845# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
7846# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
7847pxor %xmm4,%xmm3
7848
7849# qhasm: xmm3 &= xmm9
7850# asm 1: pand <xmm9=int6464#13,<xmm3=int6464#4
7851# asm 2: pand <xmm9=%xmm12,<xmm3=%xmm3
7852pand %xmm12,%xmm3
7853
7854# qhasm: xmm4 &= xmm13
7855# asm 1: pand <xmm13=int6464#16,<xmm4=int6464#5
7856# asm 2: pand <xmm13=%xmm15,<xmm4=%xmm4
7857pand %xmm15,%xmm4
7858
7859# qhasm: xmm3 ^= xmm4
7860# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
7861# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
7862pxor %xmm4,%xmm3
7863
7864# qhasm: xmm4 ^= xmm10
7865# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
7866# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
7867pxor %xmm10,%xmm4
7868
7869# qhasm: xmm15 ^= xmm13
7870# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
7871# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
7872pxor %xmm15,%xmm13
7873
7874# qhasm: xmm14 ^= xmm9
7875# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
7876# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
7877pxor %xmm12,%xmm11
7878
7879# qhasm: xmm11 = xmm15
7880# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
7881# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
7882movdqa %xmm13,%xmm10
7883
7884# qhasm: xmm11 ^= xmm14
7885# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
7886# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
7887pxor %xmm11,%xmm10
7888
7889# qhasm: xmm11 &= xmm5
7890# asm 1: pand <xmm5=int6464#6,<xmm11=int6464#11
7891# asm 2: pand <xmm5=%xmm5,<xmm11=%xmm10
7892pand %xmm5,%xmm10
7893
7894# qhasm: xmm5 ^= xmm1
7895# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
7896# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
7897pxor %xmm1,%xmm5
7898
7899# qhasm: xmm5 &= xmm14
7900# asm 1: pand <xmm14=int6464#12,<xmm5=int6464#6
7901# asm 2: pand <xmm14=%xmm11,<xmm5=%xmm5
7902pand %xmm11,%xmm5
7903
7904# qhasm: xmm1 &= xmm15
7905# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
7906# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
7907pand %xmm13,%xmm1
7908
7909# qhasm: xmm5 ^= xmm1
7910# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
7911# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
7912pxor %xmm1,%xmm5
7913
7914# qhasm: xmm1 ^= xmm11
7915# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
7916# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
7917pxor %xmm10,%xmm1
7918
7919# qhasm: xmm5 ^= xmm12
7920# asm 1: pxor <xmm12=int6464#9,<xmm5=int6464#6
7921# asm 2: pxor <xmm12=%xmm8,<xmm5=%xmm5
7922pxor %xmm8,%xmm5
7923
7924# qhasm: xmm3 ^= xmm12
7925# asm 1: pxor <xmm12=int6464#9,<xmm3=int6464#4
7926# asm 2: pxor <xmm12=%xmm8,<xmm3=%xmm3
7927pxor %xmm8,%xmm3
7928
7929# qhasm: xmm1 ^= xmm8
7930# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
7931# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
7932pxor %xmm9,%xmm1
7933
7934# qhasm: xmm4 ^= xmm8
7935# asm 1: pxor <xmm8=int6464#10,<xmm4=int6464#5
7936# asm 2: pxor <xmm8=%xmm9,<xmm4=%xmm4
7937pxor %xmm9,%xmm4
7938
7939# qhasm: xmm5 ^= xmm0
7940# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
7941# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
7942pxor %xmm0,%xmm5
7943
7944# qhasm: xmm1 ^= xmm2
7945# asm 1: pxor <xmm2=int6464#3,<xmm1=int6464#2
7946# asm 2: pxor <xmm2=%xmm2,<xmm1=%xmm1
7947pxor %xmm2,%xmm1
7948
7949# qhasm: xmm3 ^= xmm5
7950# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
7951# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
7952pxor %xmm5,%xmm3
7953
7954# qhasm: xmm2 ^= xmm0
7955# asm 1: pxor <xmm0=int6464#1,<xmm2=int6464#3
7956# asm 2: pxor <xmm0=%xmm0,<xmm2=%xmm2
7957pxor %xmm0,%xmm2
7958
7959# qhasm: xmm0 ^= xmm1
7960# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
7961# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
7962pxor %xmm1,%xmm0
7963
7964# qhasm: xmm1 ^= xmm7
7965# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
7966# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
7967pxor %xmm7,%xmm1
7968
7969# qhasm: xmm7 ^= xmm4
7970# asm 1: pxor <xmm4=int6464#5,<xmm7=int6464#8
7971# asm 2: pxor <xmm4=%xmm4,<xmm7=%xmm7
7972pxor %xmm4,%xmm7
7973
7974# qhasm: xmm3 ^= xmm7
7975# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
7976# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
7977pxor %xmm7,%xmm3
7978
7979# qhasm: xmm4 ^= xmm6
7980# asm 1: pxor <xmm6=int6464#7,<xmm4=int6464#5
7981# asm 2: pxor <xmm6=%xmm6,<xmm4=%xmm4
7982pxor %xmm6,%xmm4
7983
7984# qhasm: xmm6 ^= xmm7
7985# asm 1: pxor <xmm7=int6464#8,<xmm6=int6464#7
7986# asm 2: pxor <xmm7=%xmm7,<xmm6=%xmm6
7987pxor %xmm7,%xmm6
7988
7989# qhasm: xmm2 ^= xmm6
7990# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
7991# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
7992pxor %xmm6,%xmm2
7993
7994# qhasm: xmm5 ^= RCON
7995# asm 1: pxor RCON,<xmm5=int6464#6
7996# asm 2: pxor RCON,<xmm5=%xmm5
7997pxor RCON,%xmm5
7998
7999# qhasm: shuffle bytes of xmm0 by EXPB0
8000# asm 1: pshufb EXPB0,<xmm0=int6464#1
8001# asm 2: pshufb EXPB0,<xmm0=%xmm0
8002pshufb EXPB0,%xmm0
8003
8004# qhasm: shuffle bytes of xmm1 by EXPB0
8005# asm 1: pshufb EXPB0,<xmm1=int6464#2
8006# asm 2: pshufb EXPB0,<xmm1=%xmm1
8007pshufb EXPB0,%xmm1
8008
8009# qhasm: shuffle bytes of xmm3 by EXPB0
8010# asm 1: pshufb EXPB0,<xmm3=int6464#4
8011# asm 2: pshufb EXPB0,<xmm3=%xmm3
8012pshufb EXPB0,%xmm3
8013
8014# qhasm: shuffle bytes of xmm2 by EXPB0
8015# asm 1: pshufb EXPB0,<xmm2=int6464#3
8016# asm 2: pshufb EXPB0,<xmm2=%xmm2
8017pshufb EXPB0,%xmm2
8018
8019# qhasm: shuffle bytes of xmm6 by EXPB0
8020# asm 1: pshufb EXPB0,<xmm6=int6464#7
8021# asm 2: pshufb EXPB0,<xmm6=%xmm6
8022pshufb EXPB0,%xmm6
8023
8024# qhasm: shuffle bytes of xmm5 by EXPB0
8025# asm 1: pshufb EXPB0,<xmm5=int6464#6
8026# asm 2: pshufb EXPB0,<xmm5=%xmm5
8027pshufb EXPB0,%xmm5
8028
8029# qhasm: shuffle bytes of xmm4 by EXPB0
8030# asm 1: pshufb EXPB0,<xmm4=int6464#5
8031# asm 2: pshufb EXPB0,<xmm4=%xmm4
8032pshufb EXPB0,%xmm4
8033
8034# qhasm: shuffle bytes of xmm7 by EXPB0
8035# asm 1: pshufb EXPB0,<xmm7=int6464#8
8036# asm 2: pshufb EXPB0,<xmm7=%xmm7
8037pshufb EXPB0,%xmm7
8038
8039# qhasm: xmm8 = *(int128 *)(c + 640)
8040# asm 1: movdqa 640(<c=int64#1),>xmm8=int6464#9
8041# asm 2: movdqa 640(<c=%rdi),>xmm8=%xmm8
8042movdqa 640(%rdi),%xmm8
8043
8044# qhasm: xmm9 = *(int128 *)(c + 656)
8045# asm 1: movdqa 656(<c=int64#1),>xmm9=int6464#10
8046# asm 2: movdqa 656(<c=%rdi),>xmm9=%xmm9
8047movdqa 656(%rdi),%xmm9
8048
8049# qhasm: xmm10 = *(int128 *)(c + 672)
8050# asm 1: movdqa 672(<c=int64#1),>xmm10=int6464#11
8051# asm 2: movdqa 672(<c=%rdi),>xmm10=%xmm10
8052movdqa 672(%rdi),%xmm10
8053
8054# qhasm: xmm11 = *(int128 *)(c + 688)
8055# asm 1: movdqa 688(<c=int64#1),>xmm11=int6464#12
8056# asm 2: movdqa 688(<c=%rdi),>xmm11=%xmm11
8057movdqa 688(%rdi),%xmm11
8058
8059# qhasm: xmm12 = *(int128 *)(c + 704)
8060# asm 1: movdqa 704(<c=int64#1),>xmm12=int6464#13
8061# asm 2: movdqa 704(<c=%rdi),>xmm12=%xmm12
8062movdqa 704(%rdi),%xmm12
8063
8064# qhasm: xmm13 = *(int128 *)(c + 720)
8065# asm 1: movdqa 720(<c=int64#1),>xmm13=int6464#14
8066# asm 2: movdqa 720(<c=%rdi),>xmm13=%xmm13
8067movdqa 720(%rdi),%xmm13
8068
8069# qhasm: xmm14 = *(int128 *)(c + 736)
8070# asm 1: movdqa 736(<c=int64#1),>xmm14=int6464#15
8071# asm 2: movdqa 736(<c=%rdi),>xmm14=%xmm14
8072movdqa 736(%rdi),%xmm14
8073
8074# qhasm: xmm15 = *(int128 *)(c + 752)
8075# asm 1: movdqa 752(<c=int64#1),>xmm15=int6464#16
8076# asm 2: movdqa 752(<c=%rdi),>xmm15=%xmm15
8077movdqa 752(%rdi),%xmm15
8078
8079# qhasm: xmm8 ^= ONE
8080# asm 1: pxor ONE,<xmm8=int6464#9
8081# asm 2: pxor ONE,<xmm8=%xmm8
8082pxor ONE,%xmm8
8083
8084# qhasm: xmm9 ^= ONE
8085# asm 1: pxor ONE,<xmm9=int6464#10
8086# asm 2: pxor ONE,<xmm9=%xmm9
8087pxor ONE,%xmm9
8088
8089# qhasm: xmm13 ^= ONE
8090# asm 1: pxor ONE,<xmm13=int6464#14
8091# asm 2: pxor ONE,<xmm13=%xmm13
8092pxor ONE,%xmm13
8093
8094# qhasm: xmm14 ^= ONE
8095# asm 1: pxor ONE,<xmm14=int6464#15
8096# asm 2: pxor ONE,<xmm14=%xmm14
8097pxor ONE,%xmm14
8098
8099# qhasm: xmm0 ^= xmm8
8100# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8101# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8102pxor %xmm8,%xmm0
8103
8104# qhasm: xmm1 ^= xmm9
8105# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8106# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8107pxor %xmm9,%xmm1
8108
8109# qhasm: xmm3 ^= xmm10
8110# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
8111# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
8112pxor %xmm10,%xmm3
8113
8114# qhasm: xmm2 ^= xmm11
8115# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
8116# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
8117pxor %xmm11,%xmm2
8118
8119# qhasm: xmm6 ^= xmm12
8120# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
8121# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
8122pxor %xmm12,%xmm6
8123
8124# qhasm: xmm5 ^= xmm13
8125# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
8126# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
8127pxor %xmm13,%xmm5
8128
8129# qhasm: xmm4 ^= xmm14
8130# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
8131# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
8132pxor %xmm14,%xmm4
8133
8134# qhasm: xmm7 ^= xmm15
8135# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
8136# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
8137pxor %xmm15,%xmm7
8138
8139# qhasm: uint32323232 xmm8 >>= 8
8140# asm 1: psrld $8,<xmm8=int6464#9
8141# asm 2: psrld $8,<xmm8=%xmm8
8142psrld $8,%xmm8
8143
8144# qhasm: uint32323232 xmm9 >>= 8
8145# asm 1: psrld $8,<xmm9=int6464#10
8146# asm 2: psrld $8,<xmm9=%xmm9
8147psrld $8,%xmm9
8148
8149# qhasm: uint32323232 xmm10 >>= 8
8150# asm 1: psrld $8,<xmm10=int6464#11
8151# asm 2: psrld $8,<xmm10=%xmm10
8152psrld $8,%xmm10
8153
8154# qhasm: uint32323232 xmm11 >>= 8
8155# asm 1: psrld $8,<xmm11=int6464#12
8156# asm 2: psrld $8,<xmm11=%xmm11
8157psrld $8,%xmm11
8158
8159# qhasm: uint32323232 xmm12 >>= 8
8160# asm 1: psrld $8,<xmm12=int6464#13
8161# asm 2: psrld $8,<xmm12=%xmm12
8162psrld $8,%xmm12
8163
8164# qhasm: uint32323232 xmm13 >>= 8
8165# asm 1: psrld $8,<xmm13=int6464#14
8166# asm 2: psrld $8,<xmm13=%xmm13
8167psrld $8,%xmm13
8168
8169# qhasm: uint32323232 xmm14 >>= 8
8170# asm 1: psrld $8,<xmm14=int6464#15
8171# asm 2: psrld $8,<xmm14=%xmm14
8172psrld $8,%xmm14
8173
8174# qhasm: uint32323232 xmm15 >>= 8
8175# asm 1: psrld $8,<xmm15=int6464#16
8176# asm 2: psrld $8,<xmm15=%xmm15
8177psrld $8,%xmm15
8178
8179# qhasm: xmm0 ^= xmm8
8180# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8181# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8182pxor %xmm8,%xmm0
8183
8184# qhasm: xmm1 ^= xmm9
8185# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8186# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8187pxor %xmm9,%xmm1
8188
8189# qhasm: xmm3 ^= xmm10
8190# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
8191# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
8192pxor %xmm10,%xmm3
8193
8194# qhasm: xmm2 ^= xmm11
8195# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
8196# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
8197pxor %xmm11,%xmm2
8198
8199# qhasm: xmm6 ^= xmm12
8200# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
8201# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
8202pxor %xmm12,%xmm6
8203
8204# qhasm: xmm5 ^= xmm13
8205# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
8206# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
8207pxor %xmm13,%xmm5
8208
8209# qhasm: xmm4 ^= xmm14
8210# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
8211# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
8212pxor %xmm14,%xmm4
8213
8214# qhasm: xmm7 ^= xmm15
8215# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
8216# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
8217pxor %xmm15,%xmm7
8218
8219# qhasm: uint32323232 xmm8 >>= 8
8220# asm 1: psrld $8,<xmm8=int6464#9
8221# asm 2: psrld $8,<xmm8=%xmm8
8222psrld $8,%xmm8
8223
8224# qhasm: uint32323232 xmm9 >>= 8
8225# asm 1: psrld $8,<xmm9=int6464#10
8226# asm 2: psrld $8,<xmm9=%xmm9
8227psrld $8,%xmm9
8228
8229# qhasm: uint32323232 xmm10 >>= 8
8230# asm 1: psrld $8,<xmm10=int6464#11
8231# asm 2: psrld $8,<xmm10=%xmm10
8232psrld $8,%xmm10
8233
8234# qhasm: uint32323232 xmm11 >>= 8
8235# asm 1: psrld $8,<xmm11=int6464#12
8236# asm 2: psrld $8,<xmm11=%xmm11
8237psrld $8,%xmm11
8238
8239# qhasm: uint32323232 xmm12 >>= 8
8240# asm 1: psrld $8,<xmm12=int6464#13
8241# asm 2: psrld $8,<xmm12=%xmm12
8242psrld $8,%xmm12
8243
8244# qhasm: uint32323232 xmm13 >>= 8
8245# asm 1: psrld $8,<xmm13=int6464#14
8246# asm 2: psrld $8,<xmm13=%xmm13
8247psrld $8,%xmm13
8248
8249# qhasm: uint32323232 xmm14 >>= 8
8250# asm 1: psrld $8,<xmm14=int6464#15
8251# asm 2: psrld $8,<xmm14=%xmm14
8252psrld $8,%xmm14
8253
8254# qhasm: uint32323232 xmm15 >>= 8
8255# asm 1: psrld $8,<xmm15=int6464#16
8256# asm 2: psrld $8,<xmm15=%xmm15
8257psrld $8,%xmm15
8258
8259# qhasm: xmm0 ^= xmm8
8260# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8261# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8262pxor %xmm8,%xmm0
8263
8264# qhasm: xmm1 ^= xmm9
8265# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8266# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8267pxor %xmm9,%xmm1
8268
8269# qhasm: xmm3 ^= xmm10
8270# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
8271# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
8272pxor %xmm10,%xmm3
8273
8274# qhasm: xmm2 ^= xmm11
8275# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
8276# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
8277pxor %xmm11,%xmm2
8278
8279# qhasm: xmm6 ^= xmm12
8280# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
8281# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
8282pxor %xmm12,%xmm6
8283
8284# qhasm: xmm5 ^= xmm13
8285# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
8286# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
8287pxor %xmm13,%xmm5
8288
8289# qhasm: xmm4 ^= xmm14
8290# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
8291# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
8292pxor %xmm14,%xmm4
8293
8294# qhasm: xmm7 ^= xmm15
8295# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
8296# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
8297pxor %xmm15,%xmm7
8298
8299# qhasm: uint32323232 xmm8 >>= 8
8300# asm 1: psrld $8,<xmm8=int6464#9
8301# asm 2: psrld $8,<xmm8=%xmm8
8302psrld $8,%xmm8
8303
8304# qhasm: uint32323232 xmm9 >>= 8
8305# asm 1: psrld $8,<xmm9=int6464#10
8306# asm 2: psrld $8,<xmm9=%xmm9
8307psrld $8,%xmm9
8308
8309# qhasm: uint32323232 xmm10 >>= 8
8310# asm 1: psrld $8,<xmm10=int6464#11
8311# asm 2: psrld $8,<xmm10=%xmm10
8312psrld $8,%xmm10
8313
8314# qhasm: uint32323232 xmm11 >>= 8
8315# asm 1: psrld $8,<xmm11=int6464#12
8316# asm 2: psrld $8,<xmm11=%xmm11
8317psrld $8,%xmm11
8318
8319# qhasm: uint32323232 xmm12 >>= 8
8320# asm 1: psrld $8,<xmm12=int6464#13
8321# asm 2: psrld $8,<xmm12=%xmm12
8322psrld $8,%xmm12
8323
8324# qhasm: uint32323232 xmm13 >>= 8
8325# asm 1: psrld $8,<xmm13=int6464#14
8326# asm 2: psrld $8,<xmm13=%xmm13
8327psrld $8,%xmm13
8328
8329# qhasm: uint32323232 xmm14 >>= 8
8330# asm 1: psrld $8,<xmm14=int6464#15
8331# asm 2: psrld $8,<xmm14=%xmm14
8332psrld $8,%xmm14
8333
8334# qhasm: uint32323232 xmm15 >>= 8
8335# asm 1: psrld $8,<xmm15=int6464#16
8336# asm 2: psrld $8,<xmm15=%xmm15
8337psrld $8,%xmm15
8338
8339# qhasm: xmm0 ^= xmm8
8340# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8341# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8342pxor %xmm8,%xmm0
8343
8344# qhasm: xmm1 ^= xmm9
8345# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8346# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8347pxor %xmm9,%xmm1
8348
8349# qhasm: xmm3 ^= xmm10
8350# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
8351# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
8352pxor %xmm10,%xmm3
8353
8354# qhasm: xmm2 ^= xmm11
8355# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
8356# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
8357pxor %xmm11,%xmm2
8358
8359# qhasm: xmm6 ^= xmm12
8360# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
8361# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
8362pxor %xmm12,%xmm6
8363
8364# qhasm: xmm5 ^= xmm13
8365# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
8366# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
8367pxor %xmm13,%xmm5
8368
8369# qhasm: xmm4 ^= xmm14
8370# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
8371# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
8372pxor %xmm14,%xmm4
8373
8374# qhasm: xmm7 ^= xmm15
8375# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
8376# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
8377pxor %xmm15,%xmm7
8378
8379# qhasm: *(int128 *)(c + 768) = xmm0
8380# asm 1: movdqa <xmm0=int6464#1,768(<c=int64#1)
8381# asm 2: movdqa <xmm0=%xmm0,768(<c=%rdi)
8382movdqa %xmm0,768(%rdi)
8383
8384# qhasm: *(int128 *)(c + 784) = xmm1
8385# asm 1: movdqa <xmm1=int6464#2,784(<c=int64#1)
8386# asm 2: movdqa <xmm1=%xmm1,784(<c=%rdi)
8387movdqa %xmm1,784(%rdi)
8388
8389# qhasm: *(int128 *)(c + 800) = xmm3
8390# asm 1: movdqa <xmm3=int6464#4,800(<c=int64#1)
8391# asm 2: movdqa <xmm3=%xmm3,800(<c=%rdi)
8392movdqa %xmm3,800(%rdi)
8393
8394# qhasm: *(int128 *)(c + 816) = xmm2
8395# asm 1: movdqa <xmm2=int6464#3,816(<c=int64#1)
8396# asm 2: movdqa <xmm2=%xmm2,816(<c=%rdi)
8397movdqa %xmm2,816(%rdi)
8398
8399# qhasm: *(int128 *)(c + 832) = xmm6
8400# asm 1: movdqa <xmm6=int6464#7,832(<c=int64#1)
8401# asm 2: movdqa <xmm6=%xmm6,832(<c=%rdi)
8402movdqa %xmm6,832(%rdi)
8403
8404# qhasm: *(int128 *)(c + 848) = xmm5
8405# asm 1: movdqa <xmm5=int6464#6,848(<c=int64#1)
8406# asm 2: movdqa <xmm5=%xmm5,848(<c=%rdi)
8407movdqa %xmm5,848(%rdi)
8408
8409# qhasm: *(int128 *)(c + 864) = xmm4
8410# asm 1: movdqa <xmm4=int6464#5,864(<c=int64#1)
8411# asm 2: movdqa <xmm4=%xmm4,864(<c=%rdi)
8412movdqa %xmm4,864(%rdi)
8413
8414# qhasm: *(int128 *)(c + 880) = xmm7
8415# asm 1: movdqa <xmm7=int6464#8,880(<c=int64#1)
8416# asm 2: movdqa <xmm7=%xmm7,880(<c=%rdi)
8417movdqa %xmm7,880(%rdi)
8418
8419# qhasm: xmm0 ^= ONE
8420# asm 1: pxor ONE,<xmm0=int6464#1
8421# asm 2: pxor ONE,<xmm0=%xmm0
8422pxor ONE,%xmm0
8423
8424# qhasm: xmm1 ^= ONE
8425# asm 1: pxor ONE,<xmm1=int6464#2
8426# asm 2: pxor ONE,<xmm1=%xmm1
8427pxor ONE,%xmm1
8428
8429# qhasm: xmm5 ^= ONE
8430# asm 1: pxor ONE,<xmm5=int6464#6
8431# asm 2: pxor ONE,<xmm5=%xmm5
8432pxor ONE,%xmm5
8433
8434# qhasm: xmm4 ^= ONE
8435# asm 1: pxor ONE,<xmm4=int6464#5
8436# asm 2: pxor ONE,<xmm4=%xmm4
8437pxor ONE,%xmm4
8438
8439# qhasm: shuffle bytes of xmm0 by ROTB
8440# asm 1: pshufb ROTB,<xmm0=int6464#1
8441# asm 2: pshufb ROTB,<xmm0=%xmm0
8442pshufb ROTB,%xmm0
8443
8444# qhasm: shuffle bytes of xmm1 by ROTB
8445# asm 1: pshufb ROTB,<xmm1=int6464#2
8446# asm 2: pshufb ROTB,<xmm1=%xmm1
8447pshufb ROTB,%xmm1
8448
8449# qhasm: shuffle bytes of xmm3 by ROTB
8450# asm 1: pshufb ROTB,<xmm3=int6464#4
8451# asm 2: pshufb ROTB,<xmm3=%xmm3
8452pshufb ROTB,%xmm3
8453
8454# qhasm: shuffle bytes of xmm2 by ROTB
8455# asm 1: pshufb ROTB,<xmm2=int6464#3
8456# asm 2: pshufb ROTB,<xmm2=%xmm2
8457pshufb ROTB,%xmm2
8458
8459# qhasm: shuffle bytes of xmm6 by ROTB
8460# asm 1: pshufb ROTB,<xmm6=int6464#7
8461# asm 2: pshufb ROTB,<xmm6=%xmm6
8462pshufb ROTB,%xmm6
8463
8464# qhasm: shuffle bytes of xmm5 by ROTB
8465# asm 1: pshufb ROTB,<xmm5=int6464#6
8466# asm 2: pshufb ROTB,<xmm5=%xmm5
8467pshufb ROTB,%xmm5
8468
8469# qhasm: shuffle bytes of xmm4 by ROTB
8470# asm 1: pshufb ROTB,<xmm4=int6464#5
8471# asm 2: pshufb ROTB,<xmm4=%xmm4
8472pshufb ROTB,%xmm4
8473
8474# qhasm: shuffle bytes of xmm7 by ROTB
8475# asm 1: pshufb ROTB,<xmm7=int6464#8
8476# asm 2: pshufb ROTB,<xmm7=%xmm7
8477pshufb ROTB,%xmm7
8478
8479# qhasm: xmm5 ^= xmm4
8480# asm 1: pxor <xmm4=int6464#5,<xmm5=int6464#6
8481# asm 2: pxor <xmm4=%xmm4,<xmm5=%xmm5
8482pxor %xmm4,%xmm5
8483
8484# qhasm: xmm3 ^= xmm1
8485# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
8486# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
8487pxor %xmm1,%xmm3
8488
8489# qhasm: xmm5 ^= xmm0
8490# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
8491# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
8492pxor %xmm0,%xmm5
8493
8494# qhasm: xmm4 ^= xmm3
8495# asm 1: pxor <xmm3=int6464#4,<xmm4=int6464#5
8496# asm 2: pxor <xmm3=%xmm3,<xmm4=%xmm4
8497pxor %xmm3,%xmm4
8498
8499# qhasm: xmm2 ^= xmm0
8500# asm 1: pxor <xmm0=int6464#1,<xmm2=int6464#3
8501# asm 2: pxor <xmm0=%xmm0,<xmm2=%xmm2
8502pxor %xmm0,%xmm2
8503
8504# qhasm: xmm4 ^= xmm2
8505# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
8506# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
8507pxor %xmm2,%xmm4
8508
8509# qhasm: xmm2 ^= xmm7
8510# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
8511# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
8512pxor %xmm7,%xmm2
8513
8514# qhasm: xmm2 ^= xmm6
8515# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
8516# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
8517pxor %xmm6,%xmm2
8518
8519# qhasm: xmm7 ^= xmm5
8520# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
8521# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
8522pxor %xmm5,%xmm7
8523
8524# qhasm: xmm2 ^= xmm1
8525# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
8526# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
8527pxor %xmm1,%xmm2
8528
8529# qhasm: xmm6 ^= xmm5
8530# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
8531# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
8532pxor %xmm5,%xmm6
8533
8534# qhasm: xmm3 ^= xmm7
8535# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
8536# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
8537pxor %xmm7,%xmm3
8538
8539# qhasm: xmm1 ^= xmm5
8540# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
8541# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
8542pxor %xmm5,%xmm1
8543
8544# qhasm: xmm11 = xmm7
8545# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
8546# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
8547movdqa %xmm7,%xmm8
8548
8549# qhasm: xmm10 = xmm1
8550# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
8551# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
8552movdqa %xmm1,%xmm9
8553
8554# qhasm: xmm9 = xmm5
8555# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
8556# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
8557movdqa %xmm5,%xmm10
8558
8559# qhasm: xmm13 = xmm3
8560# asm 1: movdqa <xmm3=int6464#4,>xmm13=int6464#12
8561# asm 2: movdqa <xmm3=%xmm3,>xmm13=%xmm11
8562movdqa %xmm3,%xmm11
8563
8564# qhasm: xmm12 = xmm4
8565# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#13
8566# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm12
8567movdqa %xmm4,%xmm12
8568
8569# qhasm: xmm11 ^= xmm6
8570# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#9
8571# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm8
8572pxor %xmm6,%xmm8
8573
8574# qhasm: xmm10 ^= xmm3
8575# asm 1: pxor <xmm3=int6464#4,<xmm10=int6464#10
8576# asm 2: pxor <xmm3=%xmm3,<xmm10=%xmm9
8577pxor %xmm3,%xmm9
8578
8579# qhasm: xmm9 ^= xmm2
8580# asm 1: pxor <xmm2=int6464#3,<xmm9=int6464#11
8581# asm 2: pxor <xmm2=%xmm2,<xmm9=%xmm10
8582pxor %xmm2,%xmm10
8583
8584# qhasm: xmm13 ^= xmm6
8585# asm 1: pxor <xmm6=int6464#7,<xmm13=int6464#12
8586# asm 2: pxor <xmm6=%xmm6,<xmm13=%xmm11
8587pxor %xmm6,%xmm11
8588
8589# qhasm: xmm12 ^= xmm0
8590# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
8591# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
8592pxor %xmm0,%xmm12
8593
8594# qhasm: xmm14 = xmm11
8595# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
8596# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
8597movdqa %xmm8,%xmm13
8598
8599# qhasm: xmm8 = xmm10
8600# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
8601# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
8602movdqa %xmm9,%xmm14
8603
8604# qhasm: xmm15 = xmm11
8605# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
8606# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
8607movdqa %xmm8,%xmm15
8608
8609# qhasm: xmm10 |= xmm9
8610# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
8611# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
8612por %xmm10,%xmm9
8613
8614# qhasm: xmm11 |= xmm12
8615# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
8616# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
8617por %xmm12,%xmm8
8618
8619# qhasm: xmm15 ^= xmm8
8620# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
8621# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
8622pxor %xmm14,%xmm15
8623
8624# qhasm: xmm14 &= xmm12
8625# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
8626# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
8627pand %xmm12,%xmm13
8628
8629# qhasm: xmm8 &= xmm9
8630# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
8631# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
8632pand %xmm10,%xmm14
8633
8634# qhasm: xmm12 ^= xmm9
8635# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
8636# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
8637pxor %xmm10,%xmm12
8638
8639# qhasm: xmm15 &= xmm12
8640# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
8641# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
8642pand %xmm12,%xmm15
8643
8644# qhasm: xmm12 = xmm2
8645# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
8646# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
8647movdqa %xmm2,%xmm10
8648
8649# qhasm: xmm12 ^= xmm0
8650# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
8651# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
8652pxor %xmm0,%xmm10
8653
8654# qhasm: xmm13 &= xmm12
8655# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
8656# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
8657pand %xmm10,%xmm11
8658
8659# qhasm: xmm11 ^= xmm13
8660# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
8661# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
8662pxor %xmm11,%xmm8
8663
8664# qhasm: xmm10 ^= xmm13
8665# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
8666# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
8667pxor %xmm11,%xmm9
8668
8669# qhasm: xmm13 = xmm7
8670# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
8671# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
8672movdqa %xmm7,%xmm10
8673
8674# qhasm: xmm13 ^= xmm1
8675# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
8676# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
8677pxor %xmm1,%xmm10
8678
8679# qhasm: xmm12 = xmm5
8680# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
8681# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
8682movdqa %xmm5,%xmm11
8683
8684# qhasm: xmm9 = xmm13
8685# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
8686# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
8687movdqa %xmm10,%xmm12
8688
8689# qhasm: xmm12 ^= xmm4
8690# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#12
8691# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm11
8692pxor %xmm4,%xmm11
8693
8694# qhasm: xmm9 |= xmm12
8695# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
8696# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
8697por %xmm11,%xmm12
8698
8699# qhasm: xmm13 &= xmm12
8700# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
8701# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
8702pand %xmm11,%xmm10
8703
8704# qhasm: xmm8 ^= xmm13
8705# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
8706# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
8707pxor %xmm10,%xmm14
8708
8709# qhasm: xmm11 ^= xmm15
8710# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
8711# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
8712pxor %xmm15,%xmm8
8713
8714# qhasm: xmm10 ^= xmm14
8715# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
8716# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
8717pxor %xmm13,%xmm9
8718
8719# qhasm: xmm9 ^= xmm15
8720# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
8721# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
8722pxor %xmm15,%xmm12
8723
8724# qhasm: xmm8 ^= xmm14
8725# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
8726# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
8727pxor %xmm13,%xmm14
8728
8729# qhasm: xmm9 ^= xmm14
8730# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
8731# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
8732pxor %xmm13,%xmm12
8733
8734# qhasm: xmm12 = xmm3
8735# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
8736# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
8737movdqa %xmm3,%xmm10
8738
8739# qhasm: xmm13 = xmm6
8740# asm 1: movdqa <xmm6=int6464#7,>xmm13=int6464#12
8741# asm 2: movdqa <xmm6=%xmm6,>xmm13=%xmm11
8742movdqa %xmm6,%xmm11
8743
8744# qhasm: xmm14 = xmm1
8745# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
8746# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
8747movdqa %xmm1,%xmm13
8748
8749# qhasm: xmm15 = xmm7
8750# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
8751# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
8752movdqa %xmm7,%xmm15
8753
8754# qhasm: xmm12 &= xmm2
8755# asm 1: pand <xmm2=int6464#3,<xmm12=int6464#11
8756# asm 2: pand <xmm2=%xmm2,<xmm12=%xmm10
8757pand %xmm2,%xmm10
8758
8759# qhasm: xmm13 &= xmm0
8760# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
8761# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
8762pand %xmm0,%xmm11
8763
8764# qhasm: xmm14 &= xmm5
8765# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
8766# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
8767pand %xmm5,%xmm13
8768
8769# qhasm: xmm15 |= xmm4
8770# asm 1: por <xmm4=int6464#5,<xmm15=int6464#16
8771# asm 2: por <xmm4=%xmm4,<xmm15=%xmm15
8772por %xmm4,%xmm15
8773
8774# qhasm: xmm11 ^= xmm12
8775# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
8776# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
8777pxor %xmm10,%xmm8
8778
8779# qhasm: xmm10 ^= xmm13
8780# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
8781# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
8782pxor %xmm11,%xmm9
8783
8784# qhasm: xmm9 ^= xmm14
8785# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
8786# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
8787pxor %xmm13,%xmm12
8788
8789# qhasm: xmm8 ^= xmm15
8790# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
8791# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
8792pxor %xmm15,%xmm14
8793
8794# qhasm: xmm12 = xmm11
8795# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
8796# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
8797movdqa %xmm8,%xmm10
8798
8799# qhasm: xmm12 ^= xmm10
8800# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
8801# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
8802pxor %xmm9,%xmm10
8803
8804# qhasm: xmm11 &= xmm9
8805# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
8806# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
8807pand %xmm12,%xmm8
8808
8809# qhasm: xmm14 = xmm8
8810# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
8811# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
8812movdqa %xmm14,%xmm11
8813
8814# qhasm: xmm14 ^= xmm11
8815# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
8816# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
8817pxor %xmm8,%xmm11
8818
8819# qhasm: xmm15 = xmm12
8820# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
8821# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
8822movdqa %xmm10,%xmm13
8823
8824# qhasm: xmm15 &= xmm14
8825# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
8826# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
8827pand %xmm11,%xmm13
8828
8829# qhasm: xmm15 ^= xmm10
8830# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
8831# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
8832pxor %xmm9,%xmm13
8833
8834# qhasm: xmm13 = xmm9
8835# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
8836# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
8837movdqa %xmm12,%xmm15
8838
8839# qhasm: xmm13 ^= xmm8
8840# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
8841# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
8842pxor %xmm14,%xmm15
8843
8844# qhasm: xmm11 ^= xmm10
8845# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
8846# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
8847pxor %xmm9,%xmm8
8848
8849# qhasm: xmm13 &= xmm11
8850# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
8851# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
8852pand %xmm8,%xmm15
8853
8854# qhasm: xmm13 ^= xmm8
8855# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
8856# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
8857pxor %xmm14,%xmm15
8858
8859# qhasm: xmm9 ^= xmm13
8860# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
8861# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
8862pxor %xmm15,%xmm12
8863
8864# qhasm: xmm10 = xmm14
8865# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
8866# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
8867movdqa %xmm11,%xmm8
8868
8869# qhasm: xmm10 ^= xmm13
8870# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
8871# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
8872pxor %xmm15,%xmm8
8873
8874# qhasm: xmm10 &= xmm8
8875# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
8876# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
8877pand %xmm14,%xmm8
8878
8879# qhasm: xmm9 ^= xmm10
8880# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
8881# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
8882pxor %xmm8,%xmm12
8883
8884# qhasm: xmm14 ^= xmm10
8885# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
8886# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
8887pxor %xmm8,%xmm11
8888
8889# qhasm: xmm14 &= xmm15
8890# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
8891# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
8892pand %xmm13,%xmm11
8893
8894# qhasm: xmm14 ^= xmm12
8895# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
8896# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
8897pxor %xmm10,%xmm11
8898
8899# qhasm: xmm12 = xmm4
8900# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#9
8901# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm8
8902movdqa %xmm4,%xmm8
8903
8904# qhasm: xmm8 = xmm5
8905# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
8906# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
8907movdqa %xmm5,%xmm9
8908
8909# qhasm: xmm10 = xmm15
8910# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
8911# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
8912movdqa %xmm13,%xmm10
8913
8914# qhasm: xmm10 ^= xmm14
8915# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
8916# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
8917pxor %xmm11,%xmm10
8918
8919# qhasm: xmm10 &= xmm4
8920# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
8921# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
8922pand %xmm4,%xmm10
8923
8924# qhasm: xmm4 ^= xmm5
8925# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
8926# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
8927pxor %xmm5,%xmm4
8928
8929# qhasm: xmm4 &= xmm14
8930# asm 1: pand <xmm14=int6464#12,<xmm4=int6464#5
8931# asm 2: pand <xmm14=%xmm11,<xmm4=%xmm4
8932pand %xmm11,%xmm4
8933
8934# qhasm: xmm5 &= xmm15
8935# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
8936# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
8937pand %xmm13,%xmm5
8938
8939# qhasm: xmm4 ^= xmm5
8940# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
8941# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
8942pxor %xmm5,%xmm4
8943
8944# qhasm: xmm5 ^= xmm10
8945# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
8946# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
8947pxor %xmm10,%xmm5
8948
8949# qhasm: xmm12 ^= xmm0
8950# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
8951# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
8952pxor %xmm0,%xmm8
8953
8954# qhasm: xmm8 ^= xmm2
8955# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
8956# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
8957pxor %xmm2,%xmm9
8958
8959# qhasm: xmm15 ^= xmm13
8960# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
8961# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
8962pxor %xmm15,%xmm13
8963
8964# qhasm: xmm14 ^= xmm9
8965# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
8966# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
8967pxor %xmm12,%xmm11
8968
8969# qhasm: xmm11 = xmm15
8970# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
8971# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
8972movdqa %xmm13,%xmm10
8973
8974# qhasm: xmm11 ^= xmm14
8975# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
8976# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
8977pxor %xmm11,%xmm10
8978
8979# qhasm: xmm11 &= xmm12
8980# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
8981# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
8982pand %xmm8,%xmm10
8983
8984# qhasm: xmm12 ^= xmm8
8985# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
8986# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
8987pxor %xmm9,%xmm8
8988
8989# qhasm: xmm12 &= xmm14
8990# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
8991# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
8992pand %xmm11,%xmm8
8993
8994# qhasm: xmm8 &= xmm15
8995# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
8996# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
8997pand %xmm13,%xmm9
8998
8999# qhasm: xmm8 ^= xmm12
9000# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
9001# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
9002pxor %xmm8,%xmm9
9003
9004# qhasm: xmm12 ^= xmm11
9005# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
9006# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
9007pxor %xmm10,%xmm8
9008
9009# qhasm: xmm10 = xmm13
9010# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
9011# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
9012movdqa %xmm15,%xmm10
9013
9014# qhasm: xmm10 ^= xmm9
9015# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
9016# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
9017pxor %xmm12,%xmm10
9018
9019# qhasm: xmm10 &= xmm0
9020# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
9021# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
9022pand %xmm0,%xmm10
9023
9024# qhasm: xmm0 ^= xmm2
9025# asm 1: pxor <xmm2=int6464#3,<xmm0=int6464#1
9026# asm 2: pxor <xmm2=%xmm2,<xmm0=%xmm0
9027pxor %xmm2,%xmm0
9028
9029# qhasm: xmm0 &= xmm9
9030# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
9031# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
9032pand %xmm12,%xmm0
9033
9034# qhasm: xmm2 &= xmm13
9035# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
9036# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
9037pand %xmm15,%xmm2
9038
9039# qhasm: xmm0 ^= xmm2
9040# asm 1: pxor <xmm2=int6464#3,<xmm0=int6464#1
9041# asm 2: pxor <xmm2=%xmm2,<xmm0=%xmm0
9042pxor %xmm2,%xmm0
9043
9044# qhasm: xmm2 ^= xmm10
9045# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
9046# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
9047pxor %xmm10,%xmm2
9048
9049# qhasm: xmm4 ^= xmm12
9050# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
9051# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
9052pxor %xmm8,%xmm4
9053
9054# qhasm: xmm0 ^= xmm12
9055# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
9056# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
9057pxor %xmm8,%xmm0
9058
9059# qhasm: xmm5 ^= xmm8
9060# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
9061# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
9062pxor %xmm9,%xmm5
9063
9064# qhasm: xmm2 ^= xmm8
9065# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
9066# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
9067pxor %xmm9,%xmm2
9068
9069# qhasm: xmm12 = xmm7
9070# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
9071# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
9072movdqa %xmm7,%xmm8
9073
9074# qhasm: xmm8 = xmm1
9075# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
9076# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
9077movdqa %xmm1,%xmm9
9078
9079# qhasm: xmm12 ^= xmm6
9080# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#9
9081# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm8
9082pxor %xmm6,%xmm8
9083
9084# qhasm: xmm8 ^= xmm3
9085# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
9086# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
9087pxor %xmm3,%xmm9
9088
9089# qhasm: xmm11 = xmm15
9090# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
9091# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
9092movdqa %xmm13,%xmm10
9093
9094# qhasm: xmm11 ^= xmm14
9095# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
9096# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
9097pxor %xmm11,%xmm10
9098
9099# qhasm: xmm11 &= xmm12
9100# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
9101# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
9102pand %xmm8,%xmm10
9103
9104# qhasm: xmm12 ^= xmm8
9105# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
9106# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
9107pxor %xmm9,%xmm8
9108
9109# qhasm: xmm12 &= xmm14
9110# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
9111# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
9112pand %xmm11,%xmm8
9113
9114# qhasm: xmm8 &= xmm15
9115# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
9116# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
9117pand %xmm13,%xmm9
9118
9119# qhasm: xmm8 ^= xmm12
9120# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
9121# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
9122pxor %xmm8,%xmm9
9123
9124# qhasm: xmm12 ^= xmm11
9125# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
9126# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
9127pxor %xmm10,%xmm8
9128
9129# qhasm: xmm10 = xmm13
9130# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
9131# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
9132movdqa %xmm15,%xmm10
9133
9134# qhasm: xmm10 ^= xmm9
9135# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
9136# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
9137pxor %xmm12,%xmm10
9138
9139# qhasm: xmm10 &= xmm6
9140# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
9141# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
9142pand %xmm6,%xmm10
9143
9144# qhasm: xmm6 ^= xmm3
9145# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
9146# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
9147pxor %xmm3,%xmm6
9148
9149# qhasm: xmm6 &= xmm9
9150# asm 1: pand <xmm9=int6464#13,<xmm6=int6464#7
9151# asm 2: pand <xmm9=%xmm12,<xmm6=%xmm6
9152pand %xmm12,%xmm6
9153
9154# qhasm: xmm3 &= xmm13
9155# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
9156# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
9157pand %xmm15,%xmm3
9158
9159# qhasm: xmm6 ^= xmm3
9160# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
9161# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
9162pxor %xmm3,%xmm6
9163
9164# qhasm: xmm3 ^= xmm10
9165# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
9166# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
9167pxor %xmm10,%xmm3
9168
9169# qhasm: xmm15 ^= xmm13
9170# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
9171# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
9172pxor %xmm15,%xmm13
9173
9174# qhasm: xmm14 ^= xmm9
9175# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
9176# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
9177pxor %xmm12,%xmm11
9178
9179# qhasm: xmm11 = xmm15
9180# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
9181# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
9182movdqa %xmm13,%xmm10
9183
9184# qhasm: xmm11 ^= xmm14
9185# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
9186# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
9187pxor %xmm11,%xmm10
9188
9189# qhasm: xmm11 &= xmm7
9190# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
9191# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
9192pand %xmm7,%xmm10
9193
9194# qhasm: xmm7 ^= xmm1
9195# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
9196# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
9197pxor %xmm1,%xmm7
9198
9199# qhasm: xmm7 &= xmm14
9200# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
9201# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
9202pand %xmm11,%xmm7
9203
9204# qhasm: xmm1 &= xmm15
9205# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
9206# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
9207pand %xmm13,%xmm1
9208
9209# qhasm: xmm7 ^= xmm1
9210# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
9211# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
9212pxor %xmm1,%xmm7
9213
9214# qhasm: xmm1 ^= xmm11
9215# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
9216# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
9217pxor %xmm10,%xmm1
9218
9219# qhasm: xmm7 ^= xmm12
9220# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
9221# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
9222pxor %xmm8,%xmm7
9223
9224# qhasm: xmm6 ^= xmm12
9225# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
9226# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
9227pxor %xmm8,%xmm6
9228
9229# qhasm: xmm1 ^= xmm8
9230# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
9231# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
9232pxor %xmm9,%xmm1
9233
9234# qhasm: xmm3 ^= xmm8
9235# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
9236# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
9237pxor %xmm9,%xmm3
9238
9239# qhasm: xmm7 ^= xmm0
9240# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
9241# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
9242pxor %xmm0,%xmm7
9243
9244# qhasm: xmm1 ^= xmm4
9245# asm 1: pxor <xmm4=int6464#5,<xmm1=int6464#2
9246# asm 2: pxor <xmm4=%xmm4,<xmm1=%xmm1
9247pxor %xmm4,%xmm1
9248
9249# qhasm: xmm6 ^= xmm7
9250# asm 1: pxor <xmm7=int6464#8,<xmm6=int6464#7
9251# asm 2: pxor <xmm7=%xmm7,<xmm6=%xmm6
9252pxor %xmm7,%xmm6
9253
9254# qhasm: xmm4 ^= xmm0
9255# asm 1: pxor <xmm0=int6464#1,<xmm4=int6464#5
9256# asm 2: pxor <xmm0=%xmm0,<xmm4=%xmm4
9257pxor %xmm0,%xmm4
9258
9259# qhasm: xmm0 ^= xmm1
9260# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
9261# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
9262pxor %xmm1,%xmm0
9263
9264# qhasm: xmm1 ^= xmm5
9265# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
9266# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
9267pxor %xmm5,%xmm1
9268
9269# qhasm: xmm5 ^= xmm3
9270# asm 1: pxor <xmm3=int6464#4,<xmm5=int6464#6
9271# asm 2: pxor <xmm3=%xmm3,<xmm5=%xmm5
9272pxor %xmm3,%xmm5
9273
9274# qhasm: xmm6 ^= xmm5
9275# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
9276# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
9277pxor %xmm5,%xmm6
9278
9279# qhasm: xmm3 ^= xmm2
9280# asm 1: pxor <xmm2=int6464#3,<xmm3=int6464#4
9281# asm 2: pxor <xmm2=%xmm2,<xmm3=%xmm3
9282pxor %xmm2,%xmm3
9283
9284# qhasm: xmm2 ^= xmm5
9285# asm 1: pxor <xmm5=int6464#6,<xmm2=int6464#3
9286# asm 2: pxor <xmm5=%xmm5,<xmm2=%xmm2
9287pxor %xmm5,%xmm2
9288
9289# qhasm: xmm4 ^= xmm2
9290# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
9291# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
9292pxor %xmm2,%xmm4
9293
9294# qhasm: xmm3 ^= RCON
9295# asm 1: pxor RCON,<xmm3=int6464#4
9296# asm 2: pxor RCON,<xmm3=%xmm3
9297pxor RCON,%xmm3
9298
9299# qhasm: shuffle bytes of xmm0 by EXPB0
9300# asm 1: pshufb EXPB0,<xmm0=int6464#1
9301# asm 2: pshufb EXPB0,<xmm0=%xmm0
9302pshufb EXPB0,%xmm0
9303
9304# qhasm: shuffle bytes of xmm1 by EXPB0
9305# asm 1: pshufb EXPB0,<xmm1=int6464#2
9306# asm 2: pshufb EXPB0,<xmm1=%xmm1
9307pshufb EXPB0,%xmm1
9308
9309# qhasm: shuffle bytes of xmm6 by EXPB0
9310# asm 1: pshufb EXPB0,<xmm6=int6464#7
9311# asm 2: pshufb EXPB0,<xmm6=%xmm6
9312pshufb EXPB0,%xmm6
9313
9314# qhasm: shuffle bytes of xmm4 by EXPB0
9315# asm 1: pshufb EXPB0,<xmm4=int6464#5
9316# asm 2: pshufb EXPB0,<xmm4=%xmm4
9317pshufb EXPB0,%xmm4
9318
9319# qhasm: shuffle bytes of xmm2 by EXPB0
9320# asm 1: pshufb EXPB0,<xmm2=int6464#3
9321# asm 2: pshufb EXPB0,<xmm2=%xmm2
9322pshufb EXPB0,%xmm2
9323
9324# qhasm: shuffle bytes of xmm7 by EXPB0
9325# asm 1: pshufb EXPB0,<xmm7=int6464#8
9326# asm 2: pshufb EXPB0,<xmm7=%xmm7
9327pshufb EXPB0,%xmm7
9328
9329# qhasm: shuffle bytes of xmm3 by EXPB0
9330# asm 1: pshufb EXPB0,<xmm3=int6464#4
9331# asm 2: pshufb EXPB0,<xmm3=%xmm3
9332pshufb EXPB0,%xmm3
9333
9334# qhasm: shuffle bytes of xmm5 by EXPB0
9335# asm 1: pshufb EXPB0,<xmm5=int6464#6
9336# asm 2: pshufb EXPB0,<xmm5=%xmm5
9337pshufb EXPB0,%xmm5
9338
9339# qhasm: xmm8 = *(int128 *)(c + 768)
9340# asm 1: movdqa 768(<c=int64#1),>xmm8=int6464#9
9341# asm 2: movdqa 768(<c=%rdi),>xmm8=%xmm8
9342movdqa 768(%rdi),%xmm8
9343
9344# qhasm: xmm9 = *(int128 *)(c + 784)
9345# asm 1: movdqa 784(<c=int64#1),>xmm9=int6464#10
9346# asm 2: movdqa 784(<c=%rdi),>xmm9=%xmm9
9347movdqa 784(%rdi),%xmm9
9348
9349# qhasm: xmm10 = *(int128 *)(c + 800)
9350# asm 1: movdqa 800(<c=int64#1),>xmm10=int6464#11
9351# asm 2: movdqa 800(<c=%rdi),>xmm10=%xmm10
9352movdqa 800(%rdi),%xmm10
9353
9354# qhasm: xmm11 = *(int128 *)(c + 816)
9355# asm 1: movdqa 816(<c=int64#1),>xmm11=int6464#12
9356# asm 2: movdqa 816(<c=%rdi),>xmm11=%xmm11
9357movdqa 816(%rdi),%xmm11
9358
9359# qhasm: xmm12 = *(int128 *)(c + 832)
9360# asm 1: movdqa 832(<c=int64#1),>xmm12=int6464#13
9361# asm 2: movdqa 832(<c=%rdi),>xmm12=%xmm12
9362movdqa 832(%rdi),%xmm12
9363
9364# qhasm: xmm13 = *(int128 *)(c + 848)
9365# asm 1: movdqa 848(<c=int64#1),>xmm13=int6464#14
9366# asm 2: movdqa 848(<c=%rdi),>xmm13=%xmm13
9367movdqa 848(%rdi),%xmm13
9368
9369# qhasm: xmm14 = *(int128 *)(c + 864)
9370# asm 1: movdqa 864(<c=int64#1),>xmm14=int6464#15
9371# asm 2: movdqa 864(<c=%rdi),>xmm14=%xmm14
9372movdqa 864(%rdi),%xmm14
9373
9374# qhasm: xmm15 = *(int128 *)(c + 880)
9375# asm 1: movdqa 880(<c=int64#1),>xmm15=int6464#16
9376# asm 2: movdqa 880(<c=%rdi),>xmm15=%xmm15
9377movdqa 880(%rdi),%xmm15
9378
9379# qhasm: xmm8 ^= ONE
9380# asm 1: pxor ONE,<xmm8=int6464#9
9381# asm 2: pxor ONE,<xmm8=%xmm8
9382pxor ONE,%xmm8
9383
9384# qhasm: xmm9 ^= ONE
9385# asm 1: pxor ONE,<xmm9=int6464#10
9386# asm 2: pxor ONE,<xmm9=%xmm9
9387pxor ONE,%xmm9
9388
9389# qhasm: xmm13 ^= ONE
9390# asm 1: pxor ONE,<xmm13=int6464#14
9391# asm 2: pxor ONE,<xmm13=%xmm13
9392pxor ONE,%xmm13
9393
9394# qhasm: xmm14 ^= ONE
9395# asm 1: pxor ONE,<xmm14=int6464#15
9396# asm 2: pxor ONE,<xmm14=%xmm14
9397pxor ONE,%xmm14
9398
9399# qhasm: xmm0 ^= xmm8
9400# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9401# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9402pxor %xmm8,%xmm0
9403
9404# qhasm: xmm1 ^= xmm9
9405# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9406# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9407pxor %xmm9,%xmm1
9408
9409# qhasm: xmm6 ^= xmm10
9410# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9411# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9412pxor %xmm10,%xmm6
9413
9414# qhasm: xmm4 ^= xmm11
9415# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9416# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9417pxor %xmm11,%xmm4
9418
9419# qhasm: xmm2 ^= xmm12
9420# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9421# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9422pxor %xmm12,%xmm2
9423
9424# qhasm: xmm7 ^= xmm13
9425# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9426# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9427pxor %xmm13,%xmm7
9428
9429# qhasm: xmm3 ^= xmm14
9430# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9431# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9432pxor %xmm14,%xmm3
9433
9434# qhasm: xmm5 ^= xmm15
9435# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9436# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9437pxor %xmm15,%xmm5
9438
9439# qhasm: uint32323232 xmm8 >>= 8
9440# asm 1: psrld $8,<xmm8=int6464#9
9441# asm 2: psrld $8,<xmm8=%xmm8
9442psrld $8,%xmm8
9443
9444# qhasm: uint32323232 xmm9 >>= 8
9445# asm 1: psrld $8,<xmm9=int6464#10
9446# asm 2: psrld $8,<xmm9=%xmm9
9447psrld $8,%xmm9
9448
9449# qhasm: uint32323232 xmm10 >>= 8
9450# asm 1: psrld $8,<xmm10=int6464#11
9451# asm 2: psrld $8,<xmm10=%xmm10
9452psrld $8,%xmm10
9453
9454# qhasm: uint32323232 xmm11 >>= 8
9455# asm 1: psrld $8,<xmm11=int6464#12
9456# asm 2: psrld $8,<xmm11=%xmm11
9457psrld $8,%xmm11
9458
9459# qhasm: uint32323232 xmm12 >>= 8
9460# asm 1: psrld $8,<xmm12=int6464#13
9461# asm 2: psrld $8,<xmm12=%xmm12
9462psrld $8,%xmm12
9463
9464# qhasm: uint32323232 xmm13 >>= 8
9465# asm 1: psrld $8,<xmm13=int6464#14
9466# asm 2: psrld $8,<xmm13=%xmm13
9467psrld $8,%xmm13
9468
9469# qhasm: uint32323232 xmm14 >>= 8
9470# asm 1: psrld $8,<xmm14=int6464#15
9471# asm 2: psrld $8,<xmm14=%xmm14
9472psrld $8,%xmm14
9473
9474# qhasm: uint32323232 xmm15 >>= 8
9475# asm 1: psrld $8,<xmm15=int6464#16
9476# asm 2: psrld $8,<xmm15=%xmm15
9477psrld $8,%xmm15
9478
9479# qhasm: xmm0 ^= xmm8
9480# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9481# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9482pxor %xmm8,%xmm0
9483
9484# qhasm: xmm1 ^= xmm9
9485# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9486# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9487pxor %xmm9,%xmm1
9488
9489# qhasm: xmm6 ^= xmm10
9490# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9491# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9492pxor %xmm10,%xmm6
9493
9494# qhasm: xmm4 ^= xmm11
9495# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9496# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9497pxor %xmm11,%xmm4
9498
9499# qhasm: xmm2 ^= xmm12
9500# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9501# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9502pxor %xmm12,%xmm2
9503
9504# qhasm: xmm7 ^= xmm13
9505# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9506# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9507pxor %xmm13,%xmm7
9508
9509# qhasm: xmm3 ^= xmm14
9510# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9511# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9512pxor %xmm14,%xmm3
9513
9514# qhasm: xmm5 ^= xmm15
9515# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9516# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9517pxor %xmm15,%xmm5
9518
9519# qhasm: uint32323232 xmm8 >>= 8
9520# asm 1: psrld $8,<xmm8=int6464#9
9521# asm 2: psrld $8,<xmm8=%xmm8
9522psrld $8,%xmm8
9523
9524# qhasm: uint32323232 xmm9 >>= 8
9525# asm 1: psrld $8,<xmm9=int6464#10
9526# asm 2: psrld $8,<xmm9=%xmm9
9527psrld $8,%xmm9
9528
9529# qhasm: uint32323232 xmm10 >>= 8
9530# asm 1: psrld $8,<xmm10=int6464#11
9531# asm 2: psrld $8,<xmm10=%xmm10
9532psrld $8,%xmm10
9533
9534# qhasm: uint32323232 xmm11 >>= 8
9535# asm 1: psrld $8,<xmm11=int6464#12
9536# asm 2: psrld $8,<xmm11=%xmm11
9537psrld $8,%xmm11
9538
9539# qhasm: uint32323232 xmm12 >>= 8
9540# asm 1: psrld $8,<xmm12=int6464#13
9541# asm 2: psrld $8,<xmm12=%xmm12
9542psrld $8,%xmm12
9543
9544# qhasm: uint32323232 xmm13 >>= 8
9545# asm 1: psrld $8,<xmm13=int6464#14
9546# asm 2: psrld $8,<xmm13=%xmm13
9547psrld $8,%xmm13
9548
9549# qhasm: uint32323232 xmm14 >>= 8
9550# asm 1: psrld $8,<xmm14=int6464#15
9551# asm 2: psrld $8,<xmm14=%xmm14
9552psrld $8,%xmm14
9553
9554# qhasm: uint32323232 xmm15 >>= 8
9555# asm 1: psrld $8,<xmm15=int6464#16
9556# asm 2: psrld $8,<xmm15=%xmm15
9557psrld $8,%xmm15
9558
9559# qhasm: xmm0 ^= xmm8
9560# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9561# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9562pxor %xmm8,%xmm0
9563
9564# qhasm: xmm1 ^= xmm9
9565# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9566# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9567pxor %xmm9,%xmm1
9568
9569# qhasm: xmm6 ^= xmm10
9570# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9571# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9572pxor %xmm10,%xmm6
9573
9574# qhasm: xmm4 ^= xmm11
9575# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9576# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9577pxor %xmm11,%xmm4
9578
9579# qhasm: xmm2 ^= xmm12
9580# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9581# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9582pxor %xmm12,%xmm2
9583
9584# qhasm: xmm7 ^= xmm13
9585# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9586# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9587pxor %xmm13,%xmm7
9588
9589# qhasm: xmm3 ^= xmm14
9590# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9591# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9592pxor %xmm14,%xmm3
9593
9594# qhasm: xmm5 ^= xmm15
9595# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9596# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9597pxor %xmm15,%xmm5
9598
9599# qhasm: uint32323232 xmm8 >>= 8
9600# asm 1: psrld $8,<xmm8=int6464#9
9601# asm 2: psrld $8,<xmm8=%xmm8
9602psrld $8,%xmm8
9603
9604# qhasm: uint32323232 xmm9 >>= 8
9605# asm 1: psrld $8,<xmm9=int6464#10
9606# asm 2: psrld $8,<xmm9=%xmm9
9607psrld $8,%xmm9
9608
9609# qhasm: uint32323232 xmm10 >>= 8
9610# asm 1: psrld $8,<xmm10=int6464#11
9611# asm 2: psrld $8,<xmm10=%xmm10
9612psrld $8,%xmm10
9613
9614# qhasm: uint32323232 xmm11 >>= 8
9615# asm 1: psrld $8,<xmm11=int6464#12
9616# asm 2: psrld $8,<xmm11=%xmm11
9617psrld $8,%xmm11
9618
9619# qhasm: uint32323232 xmm12 >>= 8
9620# asm 1: psrld $8,<xmm12=int6464#13
9621# asm 2: psrld $8,<xmm12=%xmm12
9622psrld $8,%xmm12
9623
9624# qhasm: uint32323232 xmm13 >>= 8
9625# asm 1: psrld $8,<xmm13=int6464#14
9626# asm 2: psrld $8,<xmm13=%xmm13
9627psrld $8,%xmm13
9628
9629# qhasm: uint32323232 xmm14 >>= 8
9630# asm 1: psrld $8,<xmm14=int6464#15
9631# asm 2: psrld $8,<xmm14=%xmm14
9632psrld $8,%xmm14
9633
9634# qhasm: uint32323232 xmm15 >>= 8
9635# asm 1: psrld $8,<xmm15=int6464#16
9636# asm 2: psrld $8,<xmm15=%xmm15
9637psrld $8,%xmm15
9638
9639# qhasm: xmm0 ^= xmm8
9640# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9641# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9642pxor %xmm8,%xmm0
9643
9644# qhasm: xmm1 ^= xmm9
9645# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9646# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9647pxor %xmm9,%xmm1
9648
9649# qhasm: xmm6 ^= xmm10
9650# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9651# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9652pxor %xmm10,%xmm6
9653
9654# qhasm: xmm4 ^= xmm11
9655# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9656# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9657pxor %xmm11,%xmm4
9658
9659# qhasm: xmm2 ^= xmm12
9660# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9661# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9662pxor %xmm12,%xmm2
9663
9664# qhasm: xmm7 ^= xmm13
9665# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9666# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9667pxor %xmm13,%xmm7
9668
9669# qhasm: xmm3 ^= xmm14
9670# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9671# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9672pxor %xmm14,%xmm3
9673
9674# qhasm: xmm5 ^= xmm15
9675# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9676# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9677pxor %xmm15,%xmm5
9678
9679# qhasm: *(int128 *)(c + 896) = xmm0
9680# asm 1: movdqa <xmm0=int6464#1,896(<c=int64#1)
9681# asm 2: movdqa <xmm0=%xmm0,896(<c=%rdi)
9682movdqa %xmm0,896(%rdi)
9683
9684# qhasm: *(int128 *)(c + 912) = xmm1
9685# asm 1: movdqa <xmm1=int6464#2,912(<c=int64#1)
9686# asm 2: movdqa <xmm1=%xmm1,912(<c=%rdi)
9687movdqa %xmm1,912(%rdi)
9688
9689# qhasm: *(int128 *)(c + 928) = xmm6
9690# asm 1: movdqa <xmm6=int6464#7,928(<c=int64#1)
9691# asm 2: movdqa <xmm6=%xmm6,928(<c=%rdi)
9692movdqa %xmm6,928(%rdi)
9693
9694# qhasm: *(int128 *)(c + 944) = xmm4
9695# asm 1: movdqa <xmm4=int6464#5,944(<c=int64#1)
9696# asm 2: movdqa <xmm4=%xmm4,944(<c=%rdi)
9697movdqa %xmm4,944(%rdi)
9698
9699# qhasm: *(int128 *)(c + 960) = xmm2
9700# asm 1: movdqa <xmm2=int6464#3,960(<c=int64#1)
9701# asm 2: movdqa <xmm2=%xmm2,960(<c=%rdi)
9702movdqa %xmm2,960(%rdi)
9703
9704# qhasm: *(int128 *)(c + 976) = xmm7
9705# asm 1: movdqa <xmm7=int6464#8,976(<c=int64#1)
9706# asm 2: movdqa <xmm7=%xmm7,976(<c=%rdi)
9707movdqa %xmm7,976(%rdi)
9708
9709# qhasm: *(int128 *)(c + 992) = xmm3
9710# asm 1: movdqa <xmm3=int6464#4,992(<c=int64#1)
9711# asm 2: movdqa <xmm3=%xmm3,992(<c=%rdi)
9712movdqa %xmm3,992(%rdi)
9713
9714# qhasm: *(int128 *)(c + 1008) = xmm5
9715# asm 1: movdqa <xmm5=int6464#6,1008(<c=int64#1)
9716# asm 2: movdqa <xmm5=%xmm5,1008(<c=%rdi)
9717movdqa %xmm5,1008(%rdi)
9718
9719# qhasm: xmm0 ^= ONE
9720# asm 1: pxor ONE,<xmm0=int6464#1
9721# asm 2: pxor ONE,<xmm0=%xmm0
9722pxor ONE,%xmm0
9723
9724# qhasm: xmm1 ^= ONE
9725# asm 1: pxor ONE,<xmm1=int6464#2
9726# asm 2: pxor ONE,<xmm1=%xmm1
9727pxor ONE,%xmm1
9728
9729# qhasm: xmm7 ^= ONE
9730# asm 1: pxor ONE,<xmm7=int6464#8
9731# asm 2: pxor ONE,<xmm7=%xmm7
9732pxor ONE,%xmm7
9733
9734# qhasm: xmm3 ^= ONE
9735# asm 1: pxor ONE,<xmm3=int6464#4
9736# asm 2: pxor ONE,<xmm3=%xmm3
9737pxor ONE,%xmm3
9738
9739# qhasm: shuffle bytes of xmm0 by ROTB
9740# asm 1: pshufb ROTB,<xmm0=int6464#1
9741# asm 2: pshufb ROTB,<xmm0=%xmm0
9742pshufb ROTB,%xmm0
9743
9744# qhasm: shuffle bytes of xmm1 by ROTB
9745# asm 1: pshufb ROTB,<xmm1=int6464#2
9746# asm 2: pshufb ROTB,<xmm1=%xmm1
9747pshufb ROTB,%xmm1
9748
9749# qhasm: shuffle bytes of xmm6 by ROTB
9750# asm 1: pshufb ROTB,<xmm6=int6464#7
9751# asm 2: pshufb ROTB,<xmm6=%xmm6
9752pshufb ROTB,%xmm6
9753
9754# qhasm: shuffle bytes of xmm4 by ROTB
9755# asm 1: pshufb ROTB,<xmm4=int6464#5
9756# asm 2: pshufb ROTB,<xmm4=%xmm4
9757pshufb ROTB,%xmm4
9758
9759# qhasm: shuffle bytes of xmm2 by ROTB
9760# asm 1: pshufb ROTB,<xmm2=int6464#3
9761# asm 2: pshufb ROTB,<xmm2=%xmm2
9762pshufb ROTB,%xmm2
9763
9764# qhasm: shuffle bytes of xmm7 by ROTB
9765# asm 1: pshufb ROTB,<xmm7=int6464#8
9766# asm 2: pshufb ROTB,<xmm7=%xmm7
9767pshufb ROTB,%xmm7
9768
9769# qhasm: shuffle bytes of xmm3 by ROTB
9770# asm 1: pshufb ROTB,<xmm3=int6464#4
9771# asm 2: pshufb ROTB,<xmm3=%xmm3
9772pshufb ROTB,%xmm3
9773
9774# qhasm: shuffle bytes of xmm5 by ROTB
9775# asm 1: pshufb ROTB,<xmm5=int6464#6
9776# asm 2: pshufb ROTB,<xmm5=%xmm5
9777pshufb ROTB,%xmm5
9778
9779# qhasm: xmm7 ^= xmm3
9780# asm 1: pxor <xmm3=int6464#4,<xmm7=int6464#8
9781# asm 2: pxor <xmm3=%xmm3,<xmm7=%xmm7
9782pxor %xmm3,%xmm7
9783
9784# qhasm: xmm6 ^= xmm1
9785# asm 1: pxor <xmm1=int6464#2,<xmm6=int6464#7
9786# asm 2: pxor <xmm1=%xmm1,<xmm6=%xmm6
9787pxor %xmm1,%xmm6
9788
9789# qhasm: xmm7 ^= xmm0
9790# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
9791# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
9792pxor %xmm0,%xmm7
9793
9794# qhasm: xmm3 ^= xmm6
9795# asm 1: pxor <xmm6=int6464#7,<xmm3=int6464#4
9796# asm 2: pxor <xmm6=%xmm6,<xmm3=%xmm3
9797pxor %xmm6,%xmm3
9798
9799# qhasm: xmm4 ^= xmm0
9800# asm 1: pxor <xmm0=int6464#1,<xmm4=int6464#5
9801# asm 2: pxor <xmm0=%xmm0,<xmm4=%xmm4
9802pxor %xmm0,%xmm4
9803
9804# qhasm: xmm3 ^= xmm4
9805# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
9806# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
9807pxor %xmm4,%xmm3
9808
9809# qhasm: xmm4 ^= xmm5
9810# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
9811# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
9812pxor %xmm5,%xmm4
9813
9814# qhasm: xmm4 ^= xmm2
9815# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
9816# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
9817pxor %xmm2,%xmm4
9818
9819# qhasm: xmm5 ^= xmm7
9820# asm 1: pxor <xmm7=int6464#8,<xmm5=int6464#6
9821# asm 2: pxor <xmm7=%xmm7,<xmm5=%xmm5
9822pxor %xmm7,%xmm5
9823
9824# qhasm: xmm4 ^= xmm1
9825# asm 1: pxor <xmm1=int6464#2,<xmm4=int6464#5
9826# asm 2: pxor <xmm1=%xmm1,<xmm4=%xmm4
9827pxor %xmm1,%xmm4
9828
9829# qhasm: xmm2 ^= xmm7
9830# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
9831# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
9832pxor %xmm7,%xmm2
9833
9834# qhasm: xmm6 ^= xmm5
9835# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
9836# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
9837pxor %xmm5,%xmm6
9838
9839# qhasm: xmm1 ^= xmm7
9840# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
9841# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
9842pxor %xmm7,%xmm1
9843
9844# qhasm: xmm11 = xmm5
9845# asm 1: movdqa <xmm5=int6464#6,>xmm11=int6464#9
9846# asm 2: movdqa <xmm5=%xmm5,>xmm11=%xmm8
9847movdqa %xmm5,%xmm8
9848
9849# qhasm: xmm10 = xmm1
9850# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
9851# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
9852movdqa %xmm1,%xmm9
9853
9854# qhasm: xmm9 = xmm7
9855# asm 1: movdqa <xmm7=int6464#8,>xmm9=int6464#11
9856# asm 2: movdqa <xmm7=%xmm7,>xmm9=%xmm10
9857movdqa %xmm7,%xmm10
9858
9859# qhasm: xmm13 = xmm6
9860# asm 1: movdqa <xmm6=int6464#7,>xmm13=int6464#12
9861# asm 2: movdqa <xmm6=%xmm6,>xmm13=%xmm11
9862movdqa %xmm6,%xmm11
9863
9864# qhasm: xmm12 = xmm3
9865# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#13
9866# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm12
9867movdqa %xmm3,%xmm12
9868
9869# qhasm: xmm11 ^= xmm2
9870# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#9
9871# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm8
9872pxor %xmm2,%xmm8
9873
9874# qhasm: xmm10 ^= xmm6
9875# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#10
9876# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm9
9877pxor %xmm6,%xmm9
9878
9879# qhasm: xmm9 ^= xmm4
9880# asm 1: pxor <xmm4=int6464#5,<xmm9=int6464#11
9881# asm 2: pxor <xmm4=%xmm4,<xmm9=%xmm10
9882pxor %xmm4,%xmm10
9883
9884# qhasm: xmm13 ^= xmm2
9885# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#12
9886# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm11
9887pxor %xmm2,%xmm11
9888
9889# qhasm: xmm12 ^= xmm0
9890# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
9891# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
9892pxor %xmm0,%xmm12
9893
9894# qhasm: xmm14 = xmm11
9895# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
9896# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
9897movdqa %xmm8,%xmm13
9898
9899# qhasm: xmm8 = xmm10
9900# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
9901# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
9902movdqa %xmm9,%xmm14
9903
9904# qhasm: xmm15 = xmm11
9905# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
9906# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
9907movdqa %xmm8,%xmm15
9908
9909# qhasm: xmm10 |= xmm9
9910# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
9911# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
9912por %xmm10,%xmm9
9913
9914# qhasm: xmm11 |= xmm12
9915# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
9916# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
9917por %xmm12,%xmm8
9918
9919# qhasm: xmm15 ^= xmm8
9920# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
9921# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
9922pxor %xmm14,%xmm15
9923
9924# qhasm: xmm14 &= xmm12
9925# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
9926# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
9927pand %xmm12,%xmm13
9928
9929# qhasm: xmm8 &= xmm9
9930# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
9931# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
9932pand %xmm10,%xmm14
9933
9934# qhasm: xmm12 ^= xmm9
9935# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
9936# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
9937pxor %xmm10,%xmm12
9938
9939# qhasm: xmm15 &= xmm12
9940# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
9941# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
9942pand %xmm12,%xmm15
9943
9944# qhasm: xmm12 = xmm4
9945# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#11
9946# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm10
9947movdqa %xmm4,%xmm10
9948
9949# qhasm: xmm12 ^= xmm0
9950# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
9951# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
9952pxor %xmm0,%xmm10
9953
9954# qhasm: xmm13 &= xmm12
9955# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
9956# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
9957pand %xmm10,%xmm11
9958
9959# qhasm: xmm11 ^= xmm13
9960# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
9961# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
9962pxor %xmm11,%xmm8
9963
9964# qhasm: xmm10 ^= xmm13
9965# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
9966# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
9967pxor %xmm11,%xmm9
9968
9969# qhasm: xmm13 = xmm5
9970# asm 1: movdqa <xmm5=int6464#6,>xmm13=int6464#11
9971# asm 2: movdqa <xmm5=%xmm5,>xmm13=%xmm10
9972movdqa %xmm5,%xmm10
9973
9974# qhasm: xmm13 ^= xmm1
9975# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
9976# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
9977pxor %xmm1,%xmm10
9978
9979# qhasm: xmm12 = xmm7
9980# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#12
9981# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm11
9982movdqa %xmm7,%xmm11
9983
9984# qhasm: xmm9 = xmm13
9985# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
9986# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
9987movdqa %xmm10,%xmm12
9988
9989# qhasm: xmm12 ^= xmm3
9990# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#12
9991# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm11
9992pxor %xmm3,%xmm11
9993
9994# qhasm: xmm9 |= xmm12
9995# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
9996# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
9997por %xmm11,%xmm12
9998
9999# qhasm: xmm13 &= xmm12
10000# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
10001# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
10002pand %xmm11,%xmm10
10003
10004# qhasm: xmm8 ^= xmm13
10005# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
10006# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
10007pxor %xmm10,%xmm14
10008
10009# qhasm: xmm11 ^= xmm15
10010# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
10011# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
10012pxor %xmm15,%xmm8
10013
10014# qhasm: xmm10 ^= xmm14
10015# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
10016# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
10017pxor %xmm13,%xmm9
10018
10019# qhasm: xmm9 ^= xmm15
10020# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
10021# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
10022pxor %xmm15,%xmm12
10023
10024# qhasm: xmm8 ^= xmm14
10025# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
10026# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
10027pxor %xmm13,%xmm14
10028
10029# qhasm: xmm9 ^= xmm14
10030# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
10031# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
10032pxor %xmm13,%xmm12
10033
10034# qhasm: xmm12 = xmm6
10035# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#11
10036# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm10
10037movdqa %xmm6,%xmm10
10038
10039# qhasm: xmm13 = xmm2
10040# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
10041# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
10042movdqa %xmm2,%xmm11
10043
10044# qhasm: xmm14 = xmm1
10045# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
10046# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
10047movdqa %xmm1,%xmm13
10048
10049# qhasm: xmm15 = xmm5
10050# asm 1: movdqa <xmm5=int6464#6,>xmm15=int6464#16
10051# asm 2: movdqa <xmm5=%xmm5,>xmm15=%xmm15
10052movdqa %xmm5,%xmm15
10053
10054# qhasm: xmm12 &= xmm4
10055# asm 1: pand <xmm4=int6464#5,<xmm12=int6464#11
10056# asm 2: pand <xmm4=%xmm4,<xmm12=%xmm10
10057pand %xmm4,%xmm10
10058
10059# qhasm: xmm13 &= xmm0
10060# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
10061# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
10062pand %xmm0,%xmm11
10063
10064# qhasm: xmm14 &= xmm7
10065# asm 1: pand <xmm7=int6464#8,<xmm14=int6464#14
10066# asm 2: pand <xmm7=%xmm7,<xmm14=%xmm13
10067pand %xmm7,%xmm13
10068
10069# qhasm: xmm15 |= xmm3
10070# asm 1: por <xmm3=int6464#4,<xmm15=int6464#16
10071# asm 2: por <xmm3=%xmm3,<xmm15=%xmm15
10072por %xmm3,%xmm15
10073
10074# qhasm: xmm11 ^= xmm12
10075# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
10076# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
10077pxor %xmm10,%xmm8
10078
10079# qhasm: xmm10 ^= xmm13
10080# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
10081# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
10082pxor %xmm11,%xmm9
10083
10084# qhasm: xmm9 ^= xmm14
10085# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
10086# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
10087pxor %xmm13,%xmm12
10088
10089# qhasm: xmm8 ^= xmm15
10090# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
10091# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
10092pxor %xmm15,%xmm14
10093
10094# qhasm: xmm12 = xmm11
10095# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
10096# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
10097movdqa %xmm8,%xmm10
10098
10099# qhasm: xmm12 ^= xmm10
10100# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
10101# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
10102pxor %xmm9,%xmm10
10103
10104# qhasm: xmm11 &= xmm9
10105# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
10106# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
10107pand %xmm12,%xmm8
10108
10109# qhasm: xmm14 = xmm8
10110# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
10111# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
10112movdqa %xmm14,%xmm11
10113
10114# qhasm: xmm14 ^= xmm11
10115# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
10116# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
10117pxor %xmm8,%xmm11
10118
10119# qhasm: xmm15 = xmm12
10120# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
10121# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
10122movdqa %xmm10,%xmm13
10123
10124# qhasm: xmm15 &= xmm14
10125# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
10126# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
10127pand %xmm11,%xmm13
10128
10129# qhasm: xmm15 ^= xmm10
10130# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
10131# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
10132pxor %xmm9,%xmm13
10133
10134# qhasm: xmm13 = xmm9
10135# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
10136# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
10137movdqa %xmm12,%xmm15
10138
10139# qhasm: xmm13 ^= xmm8
10140# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
10141# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
10142pxor %xmm14,%xmm15
10143
10144# qhasm: xmm11 ^= xmm10
10145# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
10146# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
10147pxor %xmm9,%xmm8
10148
10149# qhasm: xmm13 &= xmm11
10150# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
10151# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
10152pand %xmm8,%xmm15
10153
10154# qhasm: xmm13 ^= xmm8
10155# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
10156# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
10157pxor %xmm14,%xmm15
10158
10159# qhasm: xmm9 ^= xmm13
10160# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
10161# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
10162pxor %xmm15,%xmm12
10163
10164# qhasm: xmm10 = xmm14
10165# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
10166# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
10167movdqa %xmm11,%xmm8
10168
10169# qhasm: xmm10 ^= xmm13
10170# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
10171# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
10172pxor %xmm15,%xmm8
10173
10174# qhasm: xmm10 &= xmm8
10175# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
10176# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
10177pand %xmm14,%xmm8
10178
10179# qhasm: xmm9 ^= xmm10
10180# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
10181# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
10182pxor %xmm8,%xmm12
10183
10184# qhasm: xmm14 ^= xmm10
10185# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
10186# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
10187pxor %xmm8,%xmm11
10188
10189# qhasm: xmm14 &= xmm15
10190# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
10191# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
10192pand %xmm13,%xmm11
10193
10194# qhasm: xmm14 ^= xmm12
10195# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
10196# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
10197pxor %xmm10,%xmm11
10198
10199# qhasm: xmm12 = xmm3
10200# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#9
10201# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm8
10202movdqa %xmm3,%xmm8
10203
10204# qhasm: xmm8 = xmm7
10205# asm 1: movdqa <xmm7=int6464#8,>xmm8=int6464#10
10206# asm 2: movdqa <xmm7=%xmm7,>xmm8=%xmm9
10207movdqa %xmm7,%xmm9
10208
10209# qhasm: xmm10 = xmm15
10210# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
10211# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
10212movdqa %xmm13,%xmm10
10213
10214# qhasm: xmm10 ^= xmm14
10215# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
10216# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
10217pxor %xmm11,%xmm10
10218
10219# qhasm: xmm10 &= xmm3
10220# asm 1: pand <xmm3=int6464#4,<xmm10=int6464#11
10221# asm 2: pand <xmm3=%xmm3,<xmm10=%xmm10
10222pand %xmm3,%xmm10
10223
10224# qhasm: xmm3 ^= xmm7
10225# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
10226# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
10227pxor %xmm7,%xmm3
10228
10229# qhasm: xmm3 &= xmm14
10230# asm 1: pand <xmm14=int6464#12,<xmm3=int6464#4
10231# asm 2: pand <xmm14=%xmm11,<xmm3=%xmm3
10232pand %xmm11,%xmm3
10233
10234# qhasm: xmm7 &= xmm15
10235# asm 1: pand <xmm15=int6464#14,<xmm7=int6464#8
10236# asm 2: pand <xmm15=%xmm13,<xmm7=%xmm7
10237pand %xmm13,%xmm7
10238
10239# qhasm: xmm3 ^= xmm7
10240# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
10241# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
10242pxor %xmm7,%xmm3
10243
10244# qhasm: xmm7 ^= xmm10
10245# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
10246# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
10247pxor %xmm10,%xmm7
10248
10249# qhasm: xmm12 ^= xmm0
10250# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
10251# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
10252pxor %xmm0,%xmm8
10253
10254# qhasm: xmm8 ^= xmm4
10255# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#10
10256# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm9
10257pxor %xmm4,%xmm9
10258
10259# qhasm: xmm15 ^= xmm13
10260# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10261# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10262pxor %xmm15,%xmm13
10263
10264# qhasm: xmm14 ^= xmm9
10265# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10266# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10267pxor %xmm12,%xmm11
10268
10269# qhasm: xmm11 = xmm15
10270# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10271# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10272movdqa %xmm13,%xmm10
10273
10274# qhasm: xmm11 ^= xmm14
10275# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10276# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10277pxor %xmm11,%xmm10
10278
10279# qhasm: xmm11 &= xmm12
10280# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10281# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10282pand %xmm8,%xmm10
10283
10284# qhasm: xmm12 ^= xmm8
10285# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10286# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10287pxor %xmm9,%xmm8
10288
10289# qhasm: xmm12 &= xmm14
10290# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10291# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10292pand %xmm11,%xmm8
10293
10294# qhasm: xmm8 &= xmm15
10295# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10296# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10297pand %xmm13,%xmm9
10298
10299# qhasm: xmm8 ^= xmm12
10300# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10301# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10302pxor %xmm8,%xmm9
10303
10304# qhasm: xmm12 ^= xmm11
10305# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10306# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10307pxor %xmm10,%xmm8
10308
10309# qhasm: xmm10 = xmm13
10310# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10311# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10312movdqa %xmm15,%xmm10
10313
10314# qhasm: xmm10 ^= xmm9
10315# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10316# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10317pxor %xmm12,%xmm10
10318
10319# qhasm: xmm10 &= xmm0
10320# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
10321# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
10322pand %xmm0,%xmm10
10323
10324# qhasm: xmm0 ^= xmm4
10325# asm 1: pxor <xmm4=int6464#5,<xmm0=int6464#1
10326# asm 2: pxor <xmm4=%xmm4,<xmm0=%xmm0
10327pxor %xmm4,%xmm0
10328
10329# qhasm: xmm0 &= xmm9
10330# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
10331# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
10332pand %xmm12,%xmm0
10333
10334# qhasm: xmm4 &= xmm13
10335# asm 1: pand <xmm13=int6464#16,<xmm4=int6464#5
10336# asm 2: pand <xmm13=%xmm15,<xmm4=%xmm4
10337pand %xmm15,%xmm4
10338
10339# qhasm: xmm0 ^= xmm4
10340# asm 1: pxor <xmm4=int6464#5,<xmm0=int6464#1
10341# asm 2: pxor <xmm4=%xmm4,<xmm0=%xmm0
10342pxor %xmm4,%xmm0
10343
10344# qhasm: xmm4 ^= xmm10
10345# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
10346# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
10347pxor %xmm10,%xmm4
10348
10349# qhasm: xmm3 ^= xmm12
10350# asm 1: pxor <xmm12=int6464#9,<xmm3=int6464#4
10351# asm 2: pxor <xmm12=%xmm8,<xmm3=%xmm3
10352pxor %xmm8,%xmm3
10353
10354# qhasm: xmm0 ^= xmm12
10355# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
10356# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
10357pxor %xmm8,%xmm0
10358
10359# qhasm: xmm7 ^= xmm8
10360# asm 1: pxor <xmm8=int6464#10,<xmm7=int6464#8
10361# asm 2: pxor <xmm8=%xmm9,<xmm7=%xmm7
10362pxor %xmm9,%xmm7
10363
10364# qhasm: xmm4 ^= xmm8
10365# asm 1: pxor <xmm8=int6464#10,<xmm4=int6464#5
10366# asm 2: pxor <xmm8=%xmm9,<xmm4=%xmm4
10367pxor %xmm9,%xmm4
10368
10369# qhasm: xmm12 = xmm5
10370# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#9
10371# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm8
10372movdqa %xmm5,%xmm8
10373
10374# qhasm: xmm8 = xmm1
10375# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
10376# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
10377movdqa %xmm1,%xmm9
10378
10379# qhasm: xmm12 ^= xmm2
10380# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#9
10381# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm8
10382pxor %xmm2,%xmm8
10383
10384# qhasm: xmm8 ^= xmm6
10385# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#10
10386# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm9
10387pxor %xmm6,%xmm9
10388
10389# qhasm: xmm11 = xmm15
10390# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10391# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10392movdqa %xmm13,%xmm10
10393
10394# qhasm: xmm11 ^= xmm14
10395# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10396# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10397pxor %xmm11,%xmm10
10398
10399# qhasm: xmm11 &= xmm12
10400# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10401# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10402pand %xmm8,%xmm10
10403
10404# qhasm: xmm12 ^= xmm8
10405# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10406# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10407pxor %xmm9,%xmm8
10408
10409# qhasm: xmm12 &= xmm14
10410# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10411# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10412pand %xmm11,%xmm8
10413
10414# qhasm: xmm8 &= xmm15
10415# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10416# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10417pand %xmm13,%xmm9
10418
10419# qhasm: xmm8 ^= xmm12
10420# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10421# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10422pxor %xmm8,%xmm9
10423
10424# qhasm: xmm12 ^= xmm11
10425# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10426# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10427pxor %xmm10,%xmm8
10428
10429# qhasm: xmm10 = xmm13
10430# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10431# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10432movdqa %xmm15,%xmm10
10433
10434# qhasm: xmm10 ^= xmm9
10435# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10436# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10437pxor %xmm12,%xmm10
10438
10439# qhasm: xmm10 &= xmm2
10440# asm 1: pand <xmm2=int6464#3,<xmm10=int6464#11
10441# asm 2: pand <xmm2=%xmm2,<xmm10=%xmm10
10442pand %xmm2,%xmm10
10443
10444# qhasm: xmm2 ^= xmm6
10445# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
10446# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
10447pxor %xmm6,%xmm2
10448
10449# qhasm: xmm2 &= xmm9
10450# asm 1: pand <xmm9=int6464#13,<xmm2=int6464#3
10451# asm 2: pand <xmm9=%xmm12,<xmm2=%xmm2
10452pand %xmm12,%xmm2
10453
10454# qhasm: xmm6 &= xmm13
10455# asm 1: pand <xmm13=int6464#16,<xmm6=int6464#7
10456# asm 2: pand <xmm13=%xmm15,<xmm6=%xmm6
10457pand %xmm15,%xmm6
10458
10459# qhasm: xmm2 ^= xmm6
10460# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
10461# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
10462pxor %xmm6,%xmm2
10463
10464# qhasm: xmm6 ^= xmm10
10465# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
10466# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
10467pxor %xmm10,%xmm6
10468
10469# qhasm: xmm15 ^= xmm13
10470# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10471# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10472pxor %xmm15,%xmm13
10473
10474# qhasm: xmm14 ^= xmm9
10475# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10476# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10477pxor %xmm12,%xmm11
10478
10479# qhasm: xmm11 = xmm15
10480# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10481# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10482movdqa %xmm13,%xmm10
10483
10484# qhasm: xmm11 ^= xmm14
10485# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10486# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10487pxor %xmm11,%xmm10
10488
10489# qhasm: xmm11 &= xmm5
10490# asm 1: pand <xmm5=int6464#6,<xmm11=int6464#11
10491# asm 2: pand <xmm5=%xmm5,<xmm11=%xmm10
10492pand %xmm5,%xmm10
10493
10494# qhasm: xmm5 ^= xmm1
10495# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
10496# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
10497pxor %xmm1,%xmm5
10498
10499# qhasm: xmm5 &= xmm14
10500# asm 1: pand <xmm14=int6464#12,<xmm5=int6464#6
10501# asm 2: pand <xmm14=%xmm11,<xmm5=%xmm5
10502pand %xmm11,%xmm5
10503
10504# qhasm: xmm1 &= xmm15
10505# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
10506# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
10507pand %xmm13,%xmm1
10508
10509# qhasm: xmm5 ^= xmm1
10510# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
10511# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
10512pxor %xmm1,%xmm5
10513
10514# qhasm: xmm1 ^= xmm11
10515# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
10516# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
10517pxor %xmm10,%xmm1
10518
10519# qhasm: xmm5 ^= xmm12
10520# asm 1: pxor <xmm12=int6464#9,<xmm5=int6464#6
10521# asm 2: pxor <xmm12=%xmm8,<xmm5=%xmm5
10522pxor %xmm8,%xmm5
10523
10524# qhasm: xmm2 ^= xmm12
10525# asm 1: pxor <xmm12=int6464#9,<xmm2=int6464#3
10526# asm 2: pxor <xmm12=%xmm8,<xmm2=%xmm2
10527pxor %xmm8,%xmm2
10528
10529# qhasm: xmm1 ^= xmm8
10530# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
10531# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
10532pxor %xmm9,%xmm1
10533
10534# qhasm: xmm6 ^= xmm8
10535# asm 1: pxor <xmm8=int6464#10,<xmm6=int6464#7
10536# asm 2: pxor <xmm8=%xmm9,<xmm6=%xmm6
10537pxor %xmm9,%xmm6
10538
10539# qhasm: xmm5 ^= xmm0
10540# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
10541# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
10542pxor %xmm0,%xmm5
10543
10544# qhasm: xmm1 ^= xmm3
10545# asm 1: pxor <xmm3=int6464#4,<xmm1=int6464#2
10546# asm 2: pxor <xmm3=%xmm3,<xmm1=%xmm1
10547pxor %xmm3,%xmm1
10548
10549# qhasm: xmm2 ^= xmm5
10550# asm 1: pxor <xmm5=int6464#6,<xmm2=int6464#3
10551# asm 2: pxor <xmm5=%xmm5,<xmm2=%xmm2
10552pxor %xmm5,%xmm2
10553
10554# qhasm: xmm3 ^= xmm0
10555# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
10556# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
10557pxor %xmm0,%xmm3
10558
10559# qhasm: xmm0 ^= xmm1
10560# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
10561# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
10562pxor %xmm1,%xmm0
10563
10564# qhasm: xmm1 ^= xmm7
10565# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
10566# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
10567pxor %xmm7,%xmm1
10568
10569# qhasm: xmm7 ^= xmm6
10570# asm 1: pxor <xmm6=int6464#7,<xmm7=int6464#8
10571# asm 2: pxor <xmm6=%xmm6,<xmm7=%xmm7
10572pxor %xmm6,%xmm7
10573
10574# qhasm: xmm2 ^= xmm7
10575# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
10576# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
10577pxor %xmm7,%xmm2
10578
10579# qhasm: xmm6 ^= xmm4
10580# asm 1: pxor <xmm4=int6464#5,<xmm6=int6464#7
10581# asm 2: pxor <xmm4=%xmm4,<xmm6=%xmm6
10582pxor %xmm4,%xmm6
10583
10584# qhasm: xmm4 ^= xmm7
10585# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
10586# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
10587pxor %xmm7,%xmm4
10588
10589# qhasm: xmm3 ^= xmm4
10590# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
10591# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
10592pxor %xmm4,%xmm3
10593
10594# qhasm: xmm7 ^= RCON
10595# asm 1: pxor RCON,<xmm7=int6464#8
10596# asm 2: pxor RCON,<xmm7=%xmm7
10597pxor RCON,%xmm7
10598
10599# qhasm: shuffle bytes of xmm0 by EXPB0
10600# asm 1: pshufb EXPB0,<xmm0=int6464#1
10601# asm 2: pshufb EXPB0,<xmm0=%xmm0
10602pshufb EXPB0,%xmm0
10603
10604# qhasm: shuffle bytes of xmm1 by EXPB0
10605# asm 1: pshufb EXPB0,<xmm1=int6464#2
10606# asm 2: pshufb EXPB0,<xmm1=%xmm1
10607pshufb EXPB0,%xmm1
10608
10609# qhasm: shuffle bytes of xmm2 by EXPB0
10610# asm 1: pshufb EXPB0,<xmm2=int6464#3
10611# asm 2: pshufb EXPB0,<xmm2=%xmm2
10612pshufb EXPB0,%xmm2
10613
10614# qhasm: shuffle bytes of xmm3 by EXPB0
10615# asm 1: pshufb EXPB0,<xmm3=int6464#4
10616# asm 2: pshufb EXPB0,<xmm3=%xmm3
10617pshufb EXPB0,%xmm3
10618
10619# qhasm: shuffle bytes of xmm4 by EXPB0
10620# asm 1: pshufb EXPB0,<xmm4=int6464#5
10621# asm 2: pshufb EXPB0,<xmm4=%xmm4
10622pshufb EXPB0,%xmm4
10623
10624# qhasm: shuffle bytes of xmm5 by EXPB0
10625# asm 1: pshufb EXPB0,<xmm5=int6464#6
10626# asm 2: pshufb EXPB0,<xmm5=%xmm5
10627pshufb EXPB0,%xmm5
10628
10629# qhasm: shuffle bytes of xmm6 by EXPB0
10630# asm 1: pshufb EXPB0,<xmm6=int6464#7
10631# asm 2: pshufb EXPB0,<xmm6=%xmm6
10632pshufb EXPB0,%xmm6
10633
10634# qhasm: shuffle bytes of xmm7 by EXPB0
10635# asm 1: pshufb EXPB0,<xmm7=int6464#8
10636# asm 2: pshufb EXPB0,<xmm7=%xmm7
10637pshufb EXPB0,%xmm7
10638
10639# qhasm: xmm8 = *(int128 *)(c + 896)
10640# asm 1: movdqa 896(<c=int64#1),>xmm8=int6464#9
10641# asm 2: movdqa 896(<c=%rdi),>xmm8=%xmm8
10642movdqa 896(%rdi),%xmm8
10643
10644# qhasm: xmm9 = *(int128 *)(c + 912)
10645# asm 1: movdqa 912(<c=int64#1),>xmm9=int6464#10
10646# asm 2: movdqa 912(<c=%rdi),>xmm9=%xmm9
10647movdqa 912(%rdi),%xmm9
10648
10649# qhasm: xmm10 = *(int128 *)(c + 928)
10650# asm 1: movdqa 928(<c=int64#1),>xmm10=int6464#11
10651# asm 2: movdqa 928(<c=%rdi),>xmm10=%xmm10
10652movdqa 928(%rdi),%xmm10
10653
10654# qhasm: xmm11 = *(int128 *)(c + 944)
10655# asm 1: movdqa 944(<c=int64#1),>xmm11=int6464#12
10656# asm 2: movdqa 944(<c=%rdi),>xmm11=%xmm11
10657movdqa 944(%rdi),%xmm11
10658
10659# qhasm: xmm12 = *(int128 *)(c + 960)
10660# asm 1: movdqa 960(<c=int64#1),>xmm12=int6464#13
10661# asm 2: movdqa 960(<c=%rdi),>xmm12=%xmm12
10662movdqa 960(%rdi),%xmm12
10663
10664# qhasm: xmm13 = *(int128 *)(c + 976)
10665# asm 1: movdqa 976(<c=int64#1),>xmm13=int6464#14
10666# asm 2: movdqa 976(<c=%rdi),>xmm13=%xmm13
10667movdqa 976(%rdi),%xmm13
10668
10669# qhasm: xmm14 = *(int128 *)(c + 992)
10670# asm 1: movdqa 992(<c=int64#1),>xmm14=int6464#15
10671# asm 2: movdqa 992(<c=%rdi),>xmm14=%xmm14
10672movdqa 992(%rdi),%xmm14
10673
10674# qhasm: xmm15 = *(int128 *)(c + 1008)
10675# asm 1: movdqa 1008(<c=int64#1),>xmm15=int6464#16
10676# asm 2: movdqa 1008(<c=%rdi),>xmm15=%xmm15
10677movdqa 1008(%rdi),%xmm15
10678
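# reload of the 128-byte key block at c + 896..1008 is complete; the code
# below flips xmm8, xmm9, xmm13, xmm14 with ONE, then folds the block into
# xmm0..xmm7 with one direct xor followed by three psrld $8 / pxor passes
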
10679# qhasm: xmm8 ^= ONE
10680# asm 1: pxor ONE,<xmm8=int6464#9
10681# asm 2: pxor ONE,<xmm8=%xmm8
10682pxor ONE,%xmm8
10683
10684# qhasm: xmm9 ^= ONE
10685# asm 1: pxor ONE,<xmm9=int6464#10
10686# asm 2: pxor ONE,<xmm9=%xmm9
10687pxor ONE,%xmm9
10688
10689# qhasm: xmm13 ^= ONE
10690# asm 1: pxor ONE,<xmm13=int6464#14
10691# asm 2: pxor ONE,<xmm13=%xmm13
10692pxor ONE,%xmm13
10693
10694# qhasm: xmm14 ^= ONE
10695# asm 1: pxor ONE,<xmm14=int6464#15
10696# asm 2: pxor ONE,<xmm14=%xmm14
10697pxor ONE,%xmm14
10698
10699# qhasm: xmm0 ^= xmm8
10700# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10701# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10702pxor %xmm8,%xmm0
10703
10704# qhasm: xmm1 ^= xmm9
10705# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10706# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10707pxor %xmm9,%xmm1
10708
10709# qhasm: xmm2 ^= xmm10
10710# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10711# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10712pxor %xmm10,%xmm2
10713
10714# qhasm: xmm3 ^= xmm11
10715# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
10716# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
10717pxor %xmm11,%xmm3
10718
10719# qhasm: xmm4 ^= xmm12
10720# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
10721# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
10722pxor %xmm12,%xmm4
10723
10724# qhasm: xmm5 ^= xmm13
10725# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
10726# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
10727pxor %xmm13,%xmm5
10728
10729# qhasm: xmm6 ^= xmm14
10730# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
10731# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
10732pxor %xmm14,%xmm6
10733
10734# qhasm: xmm7 ^= xmm15
10735# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
10736# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
10737pxor %xmm15,%xmm7
10738
10739# qhasm: uint32323232 xmm8 >>= 8
10740# asm 1: psrld $8,<xmm8=int6464#9
10741# asm 2: psrld $8,<xmm8=%xmm8
10742psrld $8,%xmm8
10743
10744# qhasm: uint32323232 xmm9 >>= 8
10745# asm 1: psrld $8,<xmm9=int6464#10
10746# asm 2: psrld $8,<xmm9=%xmm9
10747psrld $8,%xmm9
10748
10749# qhasm: uint32323232 xmm10 >>= 8
10750# asm 1: psrld $8,<xmm10=int6464#11
10751# asm 2: psrld $8,<xmm10=%xmm10
10752psrld $8,%xmm10
10753
10754# qhasm: uint32323232 xmm11 >>= 8
10755# asm 1: psrld $8,<xmm11=int6464#12
10756# asm 2: psrld $8,<xmm11=%xmm11
10757psrld $8,%xmm11
10758
10759# qhasm: uint32323232 xmm12 >>= 8
10760# asm 1: psrld $8,<xmm12=int6464#13
10761# asm 2: psrld $8,<xmm12=%xmm12
10762psrld $8,%xmm12
10763
10764# qhasm: uint32323232 xmm13 >>= 8
10765# asm 1: psrld $8,<xmm13=int6464#14
10766# asm 2: psrld $8,<xmm13=%xmm13
10767psrld $8,%xmm13
10768
10769# qhasm: uint32323232 xmm14 >>= 8
10770# asm 1: psrld $8,<xmm14=int6464#15
10771# asm 2: psrld $8,<xmm14=%xmm14
10772psrld $8,%xmm14
10773
10774# qhasm: uint32323232 xmm15 >>= 8
10775# asm 1: psrld $8,<xmm15=int6464#16
10776# asm 2: psrld $8,<xmm15=%xmm15
10777psrld $8,%xmm15
10778
10779# qhasm: xmm0 ^= xmm8
10780# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10781# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10782pxor %xmm8,%xmm0
10783
10784# qhasm: xmm1 ^= xmm9
10785# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10786# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10787pxor %xmm9,%xmm1
10788
10789# qhasm: xmm2 ^= xmm10
10790# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10791# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10792pxor %xmm10,%xmm2
10793
10794# qhasm: xmm3 ^= xmm11
10795# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
10796# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
10797pxor %xmm11,%xmm3
10798
10799# qhasm: xmm4 ^= xmm12
10800# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
10801# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
10802pxor %xmm12,%xmm4
10803
10804# qhasm: xmm5 ^= xmm13
10805# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
10806# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
10807pxor %xmm13,%xmm5
10808
10809# qhasm: xmm6 ^= xmm14
10810# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
10811# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
10812pxor %xmm14,%xmm6
10813
10814# qhasm: xmm7 ^= xmm15
10815# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
10816# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
10817pxor %xmm15,%xmm7
10818
10819# qhasm: uint32323232 xmm8 >>= 8
10820# asm 1: psrld $8,<xmm8=int6464#9
10821# asm 2: psrld $8,<xmm8=%xmm8
10822psrld $8,%xmm8
10823
10824# qhasm: uint32323232 xmm9 >>= 8
10825# asm 1: psrld $8,<xmm9=int6464#10
10826# asm 2: psrld $8,<xmm9=%xmm9
10827psrld $8,%xmm9
10828
10829# qhasm: uint32323232 xmm10 >>= 8
10830# asm 1: psrld $8,<xmm10=int6464#11
10831# asm 2: psrld $8,<xmm10=%xmm10
10832psrld $8,%xmm10
10833
10834# qhasm: uint32323232 xmm11 >>= 8
10835# asm 1: psrld $8,<xmm11=int6464#12
10836# asm 2: psrld $8,<xmm11=%xmm11
10837psrld $8,%xmm11
10838
10839# qhasm: uint32323232 xmm12 >>= 8
10840# asm 1: psrld $8,<xmm12=int6464#13
10841# asm 2: psrld $8,<xmm12=%xmm12
10842psrld $8,%xmm12
10843
10844# qhasm: uint32323232 xmm13 >>= 8
10845# asm 1: psrld $8,<xmm13=int6464#14
10846# asm 2: psrld $8,<xmm13=%xmm13
10847psrld $8,%xmm13
10848
10849# qhasm: uint32323232 xmm14 >>= 8
10850# asm 1: psrld $8,<xmm14=int6464#15
10851# asm 2: psrld $8,<xmm14=%xmm14
10852psrld $8,%xmm14
10853
10854# qhasm: uint32323232 xmm15 >>= 8
10855# asm 1: psrld $8,<xmm15=int6464#16
10856# asm 2: psrld $8,<xmm15=%xmm15
10857psrld $8,%xmm15
10858
10859# qhasm: xmm0 ^= xmm8
10860# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10861# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10862pxor %xmm8,%xmm0
10863
10864# qhasm: xmm1 ^= xmm9
10865# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10866# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10867pxor %xmm9,%xmm1
10868
10869# qhasm: xmm2 ^= xmm10
10870# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10871# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10872pxor %xmm10,%xmm2
10873
10874# qhasm: xmm3 ^= xmm11
10875# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
10876# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
10877pxor %xmm11,%xmm3
10878
10879# qhasm: xmm4 ^= xmm12
10880# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
10881# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
10882pxor %xmm12,%xmm4
10883
10884# qhasm: xmm5 ^= xmm13
10885# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
10886# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
10887pxor %xmm13,%xmm5
10888
10889# qhasm: xmm6 ^= xmm14
10890# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
10891# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
10892pxor %xmm14,%xmm6
10893
10894# qhasm: xmm7 ^= xmm15
10895# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
10896# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
10897pxor %xmm15,%xmm7
10898
10899# qhasm: uint32323232 xmm8 >>= 8
10900# asm 1: psrld $8,<xmm8=int6464#9
10901# asm 2: psrld $8,<xmm8=%xmm8
10902psrld $8,%xmm8
10903
10904# qhasm: uint32323232 xmm9 >>= 8
10905# asm 1: psrld $8,<xmm9=int6464#10
10906# asm 2: psrld $8,<xmm9=%xmm9
10907psrld $8,%xmm9
10908
10909# qhasm: uint32323232 xmm10 >>= 8
10910# asm 1: psrld $8,<xmm10=int6464#11
10911# asm 2: psrld $8,<xmm10=%xmm10
10912psrld $8,%xmm10
10913
10914# qhasm: uint32323232 xmm11 >>= 8
10915# asm 1: psrld $8,<xmm11=int6464#12
10916# asm 2: psrld $8,<xmm11=%xmm11
10917psrld $8,%xmm11
10918
10919# qhasm: uint32323232 xmm12 >>= 8
10920# asm 1: psrld $8,<xmm12=int6464#13
10921# asm 2: psrld $8,<xmm12=%xmm12
10922psrld $8,%xmm12
10923
10924# qhasm: uint32323232 xmm13 >>= 8
10925# asm 1: psrld $8,<xmm13=int6464#14
10926# asm 2: psrld $8,<xmm13=%xmm13
10927psrld $8,%xmm13
10928
10929# qhasm: uint32323232 xmm14 >>= 8
10930# asm 1: psrld $8,<xmm14=int6464#15
10931# asm 2: psrld $8,<xmm14=%xmm14
10932psrld $8,%xmm14
10933
10934# qhasm: uint32323232 xmm15 >>= 8
10935# asm 1: psrld $8,<xmm15=int6464#16
10936# asm 2: psrld $8,<xmm15=%xmm15
10937psrld $8,%xmm15
10938
10939# qhasm: xmm0 ^= xmm8
10940# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10941# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10942pxor %xmm8,%xmm0
10943
10944# qhasm: xmm1 ^= xmm9
10945# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10946# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10947pxor %xmm9,%xmm1
10948
10949# qhasm: xmm2 ^= xmm10
10950# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10951# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10952pxor %xmm10,%xmm2
10953
10954# qhasm: xmm3 ^= xmm11
10955# asm 1: pxor <xmm11=int6464#12,<xmm3=int6464#4
10956# asm 2: pxor <xmm11=%xmm11,<xmm3=%xmm3
10957pxor %xmm11,%xmm3
10958
10959# qhasm: xmm4 ^= xmm12
10960# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#5
10961# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm4
10962pxor %xmm12,%xmm4
10963
10964# qhasm: xmm5 ^= xmm13
10965# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
10966# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
10967pxor %xmm13,%xmm5
10968
10969# qhasm: xmm6 ^= xmm14
10970# asm 1: pxor <xmm14=int6464#15,<xmm6=int6464#7
10971# asm 2: pxor <xmm14=%xmm14,<xmm6=%xmm6
10972pxor %xmm14,%xmm6
10973
10974# qhasm: xmm7 ^= xmm15
10975# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
10976# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
10977pxor %xmm15,%xmm7
10978
10979# qhasm: *(int128 *)(c + 1024) = xmm0
10980# asm 1: movdqa <xmm0=int6464#1,1024(<c=int64#1)
10981# asm 2: movdqa <xmm0=%xmm0,1024(<c=%rdi)
10982movdqa %xmm0,1024(%rdi)
10983
10984# qhasm: *(int128 *)(c + 1040) = xmm1
10985# asm 1: movdqa <xmm1=int6464#2,1040(<c=int64#1)
10986# asm 2: movdqa <xmm1=%xmm1,1040(<c=%rdi)
10987movdqa %xmm1,1040(%rdi)
10988
10989# qhasm: *(int128 *)(c + 1056) = xmm2
10990# asm 1: movdqa <xmm2=int6464#3,1056(<c=int64#1)
10991# asm 2: movdqa <xmm2=%xmm2,1056(<c=%rdi)
10992movdqa %xmm2,1056(%rdi)
10993
10994# qhasm: *(int128 *)(c + 1072) = xmm3
10995# asm 1: movdqa <xmm3=int6464#4,1072(<c=int64#1)
10996# asm 2: movdqa <xmm3=%xmm3,1072(<c=%rdi)
10997movdqa %xmm3,1072(%rdi)
10998
10999# qhasm: *(int128 *)(c + 1088) = xmm4
11000# asm 1: movdqa <xmm4=int6464#5,1088(<c=int64#1)
11001# asm 2: movdqa <xmm4=%xmm4,1088(<c=%rdi)
11002movdqa %xmm4,1088(%rdi)
11003
11004# qhasm: *(int128 *)(c + 1104) = xmm5
11005# asm 1: movdqa <xmm5=int6464#6,1104(<c=int64#1)
11006# asm 2: movdqa <xmm5=%xmm5,1104(<c=%rdi)
11007movdqa %xmm5,1104(%rdi)
11008
11009# qhasm: *(int128 *)(c + 1120) = xmm6
11010# asm 1: movdqa <xmm6=int6464#7,1120(<c=int64#1)
11011# asm 2: movdqa <xmm6=%xmm6,1120(<c=%rdi)
11012movdqa %xmm6,1120(%rdi)
11013
11014# qhasm: *(int128 *)(c + 1136) = xmm7
11015# asm 1: movdqa <xmm7=int6464#8,1136(<c=int64#1)
11016# asm 2: movdqa <xmm7=%xmm7,1136(<c=%rdi)
11017movdqa %xmm7,1136(%rdi)
11018
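# round-key block stored at c + 1024..1136; next step: re-apply ONE to
# xmm0, xmm1, xmm5, xmm6 and shuffle all eight registers by ROTB before
# the next long pxor/pand/por sequence begins
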
11019# qhasm: xmm0 ^= ONE
11020# asm 1: pxor ONE,<xmm0=int6464#1
11021# asm 2: pxor ONE,<xmm0=%xmm0
11022pxor ONE,%xmm0
11023
11024# qhasm: xmm1 ^= ONE
11025# asm 1: pxor ONE,<xmm1=int6464#2
11026# asm 2: pxor ONE,<xmm1=%xmm1
11027pxor ONE,%xmm1
11028
11029# qhasm: xmm5 ^= ONE
11030# asm 1: pxor ONE,<xmm5=int6464#6
11031# asm 2: pxor ONE,<xmm5=%xmm5
11032pxor ONE,%xmm5
11033
11034# qhasm: xmm6 ^= ONE
11035# asm 1: pxor ONE,<xmm6=int6464#7
11036# asm 2: pxor ONE,<xmm6=%xmm6
11037pxor ONE,%xmm6
11038
11039# qhasm: shuffle bytes of xmm0 by ROTB
11040# asm 1: pshufb ROTB,<xmm0=int6464#1
11041# asm 2: pshufb ROTB,<xmm0=%xmm0
11042pshufb ROTB,%xmm0
11043
11044# qhasm: shuffle bytes of xmm1 by ROTB
11045# asm 1: pshufb ROTB,<xmm1=int6464#2
11046# asm 2: pshufb ROTB,<xmm1=%xmm1
11047pshufb ROTB,%xmm1
11048
11049# qhasm: shuffle bytes of xmm2 by ROTB
11050# asm 1: pshufb ROTB,<xmm2=int6464#3
11051# asm 2: pshufb ROTB,<xmm2=%xmm2
11052pshufb ROTB,%xmm2
11053
11054# qhasm: shuffle bytes of xmm3 by ROTB
11055# asm 1: pshufb ROTB,<xmm3=int6464#4
11056# asm 2: pshufb ROTB,<xmm3=%xmm3
11057pshufb ROTB,%xmm3
11058
11059# qhasm: shuffle bytes of xmm4 by ROTB
11060# asm 1: pshufb ROTB,<xmm4=int6464#5
11061# asm 2: pshufb ROTB,<xmm4=%xmm4
11062pshufb ROTB,%xmm4
11063
11064# qhasm: shuffle bytes of xmm5 by ROTB
11065# asm 1: pshufb ROTB,<xmm5=int6464#6
11066# asm 2: pshufb ROTB,<xmm5=%xmm5
11067pshufb ROTB,%xmm5
11068
11069# qhasm: shuffle bytes of xmm6 by ROTB
11070# asm 1: pshufb ROTB,<xmm6=int6464#7
11071# asm 2: pshufb ROTB,<xmm6=%xmm6
11072pshufb ROTB,%xmm6
11073
11074# qhasm: shuffle bytes of xmm7 by ROTB
11075# asm 1: pshufb ROTB,<xmm7=int6464#8
11076# asm 2: pshufb ROTB,<xmm7=%xmm7
11077pshufb ROTB,%xmm7
11078
11079# qhasm: xmm5 ^= xmm6
11080# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
11081# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
11082pxor %xmm6,%xmm5
11083
11084# qhasm: xmm2 ^= xmm1
11085# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
11086# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
11087pxor %xmm1,%xmm2
11088
11089# qhasm: xmm5 ^= xmm0
11090# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
11091# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
11092pxor %xmm0,%xmm5
11093
11094# qhasm: xmm6 ^= xmm2
11095# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
11096# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
11097pxor %xmm2,%xmm6
11098
11099# qhasm: xmm3 ^= xmm0
11100# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
11101# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
11102pxor %xmm0,%xmm3
11103
11104# qhasm: xmm6 ^= xmm3
11105# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
11106# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
11107pxor %xmm3,%xmm6
11108
11109# qhasm: xmm3 ^= xmm7
11110# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
11111# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
11112pxor %xmm7,%xmm3
11113
11114# qhasm: xmm3 ^= xmm4
11115# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
11116# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
11117pxor %xmm4,%xmm3
11118
11119# qhasm: xmm7 ^= xmm5
11120# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
11121# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
11122pxor %xmm5,%xmm7
11123
11124# qhasm: xmm3 ^= xmm1
11125# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
11126# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
11127pxor %xmm1,%xmm3
11128
11129# qhasm: xmm4 ^= xmm5
11130# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
11131# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
11132pxor %xmm5,%xmm4
11133
11134# qhasm: xmm2 ^= xmm7
11135# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
11136# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
11137pxor %xmm7,%xmm2
11138
11139# qhasm: xmm1 ^= xmm5
11140# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
11141# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
11142pxor %xmm5,%xmm1
11143
11144# qhasm: xmm11 = xmm7
11145# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
11146# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
11147movdqa %xmm7,%xmm8
11148
11149# qhasm: xmm10 = xmm1
11150# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
11151# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
11152movdqa %xmm1,%xmm9
11153
11154# qhasm: xmm9 = xmm5
11155# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
11156# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
11157movdqa %xmm5,%xmm10
11158
11159# qhasm: xmm13 = xmm2
11160# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
11161# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
11162movdqa %xmm2,%xmm11
11163
11164# qhasm: xmm12 = xmm6
11165# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
11166# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
11167movdqa %xmm6,%xmm12
11168
11169# qhasm: xmm11 ^= xmm4
11170# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
11171# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
11172pxor %xmm4,%xmm8
11173
11174# qhasm: xmm10 ^= xmm2
11175# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
11176# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
11177pxor %xmm2,%xmm9
11178
11179# qhasm: xmm9 ^= xmm3
11180# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
11181# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
11182pxor %xmm3,%xmm10
11183
11184# qhasm: xmm13 ^= xmm4
11185# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
11186# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
11187pxor %xmm4,%xmm11
11188
11189# qhasm: xmm12 ^= xmm0
11190# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11191# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11192pxor %xmm0,%xmm12
11193
11194# qhasm: xmm14 = xmm11
11195# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
11196# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
11197movdqa %xmm8,%xmm13
11198
11199# qhasm: xmm8 = xmm10
11200# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
11201# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
11202movdqa %xmm9,%xmm14
11203
11204# qhasm: xmm15 = xmm11
11205# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
11206# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
11207movdqa %xmm8,%xmm15
11208
11209# qhasm: xmm10 |= xmm9
11210# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
11211# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
11212por %xmm10,%xmm9
11213
11214# qhasm: xmm11 |= xmm12
11215# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
11216# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
11217por %xmm12,%xmm8
11218
11219# qhasm: xmm15 ^= xmm8
11220# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
11221# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
11222pxor %xmm14,%xmm15
11223
11224# qhasm: xmm14 &= xmm12
11225# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
11226# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
11227pand %xmm12,%xmm13
11228
11229# qhasm: xmm8 &= xmm9
11230# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
11231# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
11232pand %xmm10,%xmm14
11233
11234# qhasm: xmm12 ^= xmm9
11235# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
11236# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
11237pxor %xmm10,%xmm12
11238
11239# qhasm: xmm15 &= xmm12
11240# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
11241# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
11242pand %xmm12,%xmm15
11243
11244# qhasm: xmm12 = xmm3
11245# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
11246# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
11247movdqa %xmm3,%xmm10
11248
11249# qhasm: xmm12 ^= xmm0
11250# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
11251# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
11252pxor %xmm0,%xmm10
11253
11254# qhasm: xmm13 &= xmm12
11255# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
11256# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
11257pand %xmm10,%xmm11
11258
11259# qhasm: xmm11 ^= xmm13
11260# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
11261# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
11262pxor %xmm11,%xmm8
11263
11264# qhasm: xmm10 ^= xmm13
11265# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
11266# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
11267pxor %xmm11,%xmm9
11268
11269# qhasm: xmm13 = xmm7
11270# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
11271# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
11272movdqa %xmm7,%xmm10
11273
11274# qhasm: xmm13 ^= xmm1
11275# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
11276# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
11277pxor %xmm1,%xmm10
11278
11279# qhasm: xmm12 = xmm5
11280# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
11281# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
11282movdqa %xmm5,%xmm11
11283
11284# qhasm: xmm9 = xmm13
11285# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
11286# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
11287movdqa %xmm10,%xmm12
11288
11289# qhasm: xmm12 ^= xmm6
11290# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
11291# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
11292pxor %xmm6,%xmm11
11293
11294# qhasm: xmm9 |= xmm12
11295# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
11296# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
11297por %xmm11,%xmm12
11298
11299# qhasm: xmm13 &= xmm12
11300# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
11301# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
11302pand %xmm11,%xmm10
11303
11304# qhasm: xmm8 ^= xmm13
11305# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
11306# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
11307pxor %xmm10,%xmm14
11308
11309# qhasm: xmm11 ^= xmm15
11310# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
11311# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
11312pxor %xmm15,%xmm8
11313
11314# qhasm: xmm10 ^= xmm14
11315# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
11316# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
11317pxor %xmm13,%xmm9
11318
11319# qhasm: xmm9 ^= xmm15
11320# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
11321# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
11322pxor %xmm15,%xmm12
11323
11324# qhasm: xmm8 ^= xmm14
11325# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
11326# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
11327pxor %xmm13,%xmm14
11328
11329# qhasm: xmm9 ^= xmm14
11330# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
11331# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
11332pxor %xmm13,%xmm12
11333
11334# qhasm: xmm12 = xmm2
11335# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
11336# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
11337movdqa %xmm2,%xmm10
11338
11339# qhasm: xmm13 = xmm4
11340# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
11341# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
11342movdqa %xmm4,%xmm11
11343
11344# qhasm: xmm14 = xmm1
11345# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
11346# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
11347movdqa %xmm1,%xmm13
11348
11349# qhasm: xmm15 = xmm7
11350# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
11351# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
11352movdqa %xmm7,%xmm15
11353
11354# qhasm: xmm12 &= xmm3
11355# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
11356# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
11357pand %xmm3,%xmm10
11358
11359# qhasm: xmm13 &= xmm0
11360# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
11361# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
11362pand %xmm0,%xmm11
11363
11364# qhasm: xmm14 &= xmm5
11365# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
11366# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
11367pand %xmm5,%xmm13
11368
11369# qhasm: xmm15 |= xmm6
11370# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
11371# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
11372por %xmm6,%xmm15
11373
11374# qhasm: xmm11 ^= xmm12
11375# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
11376# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
11377pxor %xmm10,%xmm8
11378
11379# qhasm: xmm10 ^= xmm13
11380# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
11381# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
11382pxor %xmm11,%xmm9
11383
11384# qhasm: xmm9 ^= xmm14
11385# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
11386# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
11387pxor %xmm13,%xmm12
11388
11389# qhasm: xmm8 ^= xmm15
11390# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
11391# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
11392pxor %xmm15,%xmm14
11393
11394# qhasm: xmm12 = xmm11
11395# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
11396# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
11397movdqa %xmm8,%xmm10
11398
11399# qhasm: xmm12 ^= xmm10
11400# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
11401# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
11402pxor %xmm9,%xmm10
11403
11404# qhasm: xmm11 &= xmm9
11405# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
11406# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
11407pand %xmm12,%xmm8
11408
11409# qhasm: xmm14 = xmm8
11410# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
11411# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
11412movdqa %xmm14,%xmm11
11413
11414# qhasm: xmm14 ^= xmm11
11415# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
11416# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
11417pxor %xmm8,%xmm11
11418
11419# qhasm: xmm15 = xmm12
11420# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
11421# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
11422movdqa %xmm10,%xmm13
11423
11424# qhasm: xmm15 &= xmm14
11425# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
11426# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
11427pand %xmm11,%xmm13
11428
11429# qhasm: xmm15 ^= xmm10
11430# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
11431# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
11432pxor %xmm9,%xmm13
11433
11434# qhasm: xmm13 = xmm9
11435# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
11436# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
11437movdqa %xmm12,%xmm15
11438
11439# qhasm: xmm13 ^= xmm8
11440# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
11441# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
11442pxor %xmm14,%xmm15
11443
11444# qhasm: xmm11 ^= xmm10
11445# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
11446# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
11447pxor %xmm9,%xmm8
11448
11449# qhasm: xmm13 &= xmm11
11450# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
11451# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
11452pand %xmm8,%xmm15
11453
11454# qhasm: xmm13 ^= xmm8
11455# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
11456# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
11457pxor %xmm14,%xmm15
11458
11459# qhasm: xmm9 ^= xmm13
11460# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
11461# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
11462pxor %xmm15,%xmm12
11463
11464# qhasm: xmm10 = xmm14
11465# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
11466# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
11467movdqa %xmm11,%xmm8
11468
11469# qhasm: xmm10 ^= xmm13
11470# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
11471# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
11472pxor %xmm15,%xmm8
11473
11474# qhasm: xmm10 &= xmm8
11475# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
11476# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
11477pand %xmm14,%xmm8
11478
11479# qhasm: xmm9 ^= xmm10
11480# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
11481# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
11482pxor %xmm8,%xmm12
11483
11484# qhasm: xmm14 ^= xmm10
11485# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
11486# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
11487pxor %xmm8,%xmm11
11488
11489# qhasm: xmm14 &= xmm15
11490# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
11491# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
11492pand %xmm13,%xmm11
11493
11494# qhasm: xmm14 ^= xmm12
11495# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
11496# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
11497pxor %xmm10,%xmm11
11498
11499# qhasm: xmm12 = xmm6
11500# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
11501# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
11502movdqa %xmm6,%xmm8
11503
11504# qhasm: xmm8 = xmm5
11505# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
11506# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
11507movdqa %xmm5,%xmm9
11508
11509# qhasm: xmm10 = xmm15
11510# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
11511# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
11512movdqa %xmm13,%xmm10
11513
11514# qhasm: xmm10 ^= xmm14
11515# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
11516# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
11517pxor %xmm11,%xmm10
11518
11519# qhasm: xmm10 &= xmm6
11520# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
11521# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
11522pand %xmm6,%xmm10
11523
11524# qhasm: xmm6 ^= xmm5
11525# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
11526# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
11527pxor %xmm5,%xmm6
11528
11529# qhasm: xmm6 &= xmm14
11530# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
11531# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
11532pand %xmm11,%xmm6
11533
11534# qhasm: xmm5 &= xmm15
11535# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
11536# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
11537pand %xmm13,%xmm5
11538
11539# qhasm: xmm6 ^= xmm5
11540# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
11541# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
11542pxor %xmm5,%xmm6
11543
11544# qhasm: xmm5 ^= xmm10
11545# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
11546# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
11547pxor %xmm10,%xmm5
11548
11549# qhasm: xmm12 ^= xmm0
11550# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
11551# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
11552pxor %xmm0,%xmm8
11553
11554# qhasm: xmm8 ^= xmm3
11555# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
11556# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
11557pxor %xmm3,%xmm9
11558
11559# qhasm: xmm15 ^= xmm13
11560# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
11561# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
11562pxor %xmm15,%xmm13
11563
11564# qhasm: xmm14 ^= xmm9
11565# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
11566# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
11567pxor %xmm12,%xmm11
11568
11569# qhasm: xmm11 = xmm15
11570# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
11571# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
11572movdqa %xmm13,%xmm10
11573
11574# qhasm: xmm11 ^= xmm14
11575# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
11576# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
11577pxor %xmm11,%xmm10
11578
11579# qhasm: xmm11 &= xmm12
11580# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
11581# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
11582pand %xmm8,%xmm10
11583
11584# qhasm: xmm12 ^= xmm8
11585# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
11586# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
11587pxor %xmm9,%xmm8
11588
11589# qhasm: xmm12 &= xmm14
11590# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
11591# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
11592pand %xmm11,%xmm8
11593
11594# qhasm: xmm8 &= xmm15
11595# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
11596# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
11597pand %xmm13,%xmm9
11598
11599# qhasm: xmm8 ^= xmm12
11600# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
11601# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
11602pxor %xmm8,%xmm9
11603
11604# qhasm: xmm12 ^= xmm11
11605# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
11606# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
11607pxor %xmm10,%xmm8
11608
11609# qhasm: xmm10 = xmm13
11610# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
11611# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
11612movdqa %xmm15,%xmm10
11613
11614# qhasm: xmm10 ^= xmm9
11615# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
11616# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
11617pxor %xmm12,%xmm10
11618
11619# qhasm: xmm10 &= xmm0
11620# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
11621# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
11622pand %xmm0,%xmm10
11623
11624# qhasm: xmm0 ^= xmm3
11625# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
11626# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
11627pxor %xmm3,%xmm0
11628
11629# qhasm: xmm0 &= xmm9
11630# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
11631# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
11632pand %xmm12,%xmm0
11633
11634# qhasm: xmm3 &= xmm13
11635# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
11636# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
11637pand %xmm15,%xmm3
11638
11639# qhasm: xmm0 ^= xmm3
11640# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
11641# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
11642pxor %xmm3,%xmm0
11643
11644# qhasm: xmm3 ^= xmm10
11645# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
11646# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
11647pxor %xmm10,%xmm3
11648
11649# qhasm: xmm6 ^= xmm12
11650# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
11651# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
11652pxor %xmm8,%xmm6
11653
11654# qhasm: xmm0 ^= xmm12
11655# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
11656# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
11657pxor %xmm8,%xmm0
11658
11659# qhasm: xmm5 ^= xmm8
11660# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
11661# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
11662pxor %xmm9,%xmm5
11663
11664# qhasm: xmm3 ^= xmm8
11665# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
11666# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
11667pxor %xmm9,%xmm3
11668
11669# qhasm: xmm12 = xmm7
11670# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
11671# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
11672movdqa %xmm7,%xmm8
11673
11674# qhasm: xmm8 = xmm1
11675# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
11676# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
11677movdqa %xmm1,%xmm9
11678
11679# qhasm: xmm12 ^= xmm4
11680# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
11681# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
11682pxor %xmm4,%xmm8
11683
11684# qhasm: xmm8 ^= xmm2
11685# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
11686# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
11687pxor %xmm2,%xmm9
11688
11689# qhasm: xmm11 = xmm15
11690# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
11691# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
11692movdqa %xmm13,%xmm10
11693
11694# qhasm: xmm11 ^= xmm14
11695# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
11696# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
11697pxor %xmm11,%xmm10
11698
11699# qhasm: xmm11 &= xmm12
11700# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
11701# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
11702pand %xmm8,%xmm10
11703
11704# qhasm: xmm12 ^= xmm8
11705# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
11706# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
11707pxor %xmm9,%xmm8
11708
11709# qhasm: xmm12 &= xmm14
11710# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
11711# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
11712pand %xmm11,%xmm8
11713
11714# qhasm: xmm8 &= xmm15
11715# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
11716# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
11717pand %xmm13,%xmm9
11718
11719# qhasm: xmm8 ^= xmm12
11720# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
11721# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
11722pxor %xmm8,%xmm9
11723
11724# qhasm: xmm12 ^= xmm11
11725# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
11726# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
11727pxor %xmm10,%xmm8
11728
11729# qhasm: xmm10 = xmm13
11730# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
11731# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
11732movdqa %xmm15,%xmm10
11733
11734# qhasm: xmm10 ^= xmm9
11735# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
11736# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
11737pxor %xmm12,%xmm10
11738
11739# qhasm: xmm10 &= xmm4
11740# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
11741# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
11742pand %xmm4,%xmm10
11743
11744# qhasm: xmm4 ^= xmm2
11745# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
11746# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
11747pxor %xmm2,%xmm4
11748
11749# qhasm: xmm4 &= xmm9
11750# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
11751# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
11752pand %xmm12,%xmm4
11753
11754# qhasm: xmm2 &= xmm13
11755# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
11756# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
11757pand %xmm15,%xmm2
11758
11759# qhasm: xmm4 ^= xmm2
11760# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
11761# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
11762pxor %xmm2,%xmm4
11763
11764# qhasm: xmm2 ^= xmm10
11765# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
11766# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
11767pxor %xmm10,%xmm2
11768
11769# qhasm: xmm15 ^= xmm13
11770# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
11771# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
11772pxor %xmm15,%xmm13
11773
11774# qhasm: xmm14 ^= xmm9
11775# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
11776# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
11777pxor %xmm12,%xmm11
11778
11779# qhasm: xmm11 = xmm15
11780# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
11781# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
11782movdqa %xmm13,%xmm10
11783
11784# qhasm: xmm11 ^= xmm14
11785# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
11786# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
11787pxor %xmm11,%xmm10
11788
11789# qhasm: xmm11 &= xmm7
11790# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
11791# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
11792pand %xmm7,%xmm10
11793
11794# qhasm: xmm7 ^= xmm1
11795# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
11796# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
11797pxor %xmm1,%xmm7
11798
11799# qhasm: xmm7 &= xmm14
11800# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
11801# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
11802pand %xmm11,%xmm7
11803
11804# qhasm: xmm1 &= xmm15
11805# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
11806# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
11807pand %xmm13,%xmm1
11808
11809# qhasm: xmm7 ^= xmm1
11810# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
11811# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
11812pxor %xmm1,%xmm7
11813
11814# qhasm: xmm1 ^= xmm11
11815# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
11816# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
11817pxor %xmm10,%xmm1
11818
11819# qhasm: xmm7 ^= xmm12
11820# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
11821# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
11822pxor %xmm8,%xmm7
11823
11824# qhasm: xmm4 ^= xmm12
11825# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
11826# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
11827pxor %xmm8,%xmm4
11828
11829# qhasm: xmm1 ^= xmm8
11830# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
11831# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
11832pxor %xmm9,%xmm1
11833
11834# qhasm: xmm2 ^= xmm8
11835# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
11836# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
11837pxor %xmm9,%xmm2
11838
11839# qhasm: xmm7 ^= xmm0
11840# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
11841# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
11842pxor %xmm0,%xmm7
11843
11844# qhasm: xmm1 ^= xmm6
11845# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
11846# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
11847pxor %xmm6,%xmm1
11848
11849# qhasm: xmm4 ^= xmm7
11850# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
11851# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
11852pxor %xmm7,%xmm4
11853
11854# qhasm: xmm6 ^= xmm0
11855# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
11856# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
11857pxor %xmm0,%xmm6
11858
11859# qhasm: xmm0 ^= xmm1
11860# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
11861# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
11862pxor %xmm1,%xmm0
11863
11864# qhasm: xmm1 ^= xmm5
11865# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
11866# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
11867pxor %xmm5,%xmm1
11868
11869# qhasm: xmm5 ^= xmm2
11870# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
11871# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
11872pxor %xmm2,%xmm5
11873
11874# qhasm: xmm4 ^= xmm5
11875# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
11876# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
11877pxor %xmm5,%xmm4
11878
11879# qhasm: xmm2 ^= xmm3
11880# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
11881# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
11882pxor %xmm3,%xmm2
11883
11884# qhasm: xmm3 ^= xmm5
11885# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
11886# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
11887pxor %xmm5,%xmm3
11888
11889# qhasm: xmm6 ^= xmm3
11890# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
11891# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
11892pxor %xmm3,%xmm6
11893
11894# qhasm: xmm0 ^= RCON
11895# asm 1: pxor RCON,<xmm0=int6464#1
11896# asm 2: pxor RCON,<xmm0=%xmm0
11897pxor RCON,%xmm0
11898
11899# qhasm: xmm1 ^= RCON
11900# asm 1: pxor RCON,<xmm1=int6464#2
11901# asm 2: pxor RCON,<xmm1=%xmm1
11902pxor RCON,%xmm1
11903
11904# qhasm: xmm6 ^= RCON
11905# asm 1: pxor RCON,<xmm6=int6464#7
11906# asm 2: pxor RCON,<xmm6=%xmm6
11907pxor RCON,%xmm6
11908
11909# qhasm: xmm3 ^= RCON
11910# asm 1: pxor RCON,<xmm3=int6464#4
11911# asm 2: pxor RCON,<xmm3=%xmm3
11912pxor RCON,%xmm3
11913
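# in this step the round constant was folded into four registers
# (xmm0, xmm1, xmm6, xmm3) rather than a single one; the EXPB0 byte
# shuffle and the reload of the block at c + 1024..1136 follow
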
11914# qhasm: shuffle bytes of xmm0 by EXPB0
11915# asm 1: pshufb EXPB0,<xmm0=int6464#1
11916# asm 2: pshufb EXPB0,<xmm0=%xmm0
11917pshufb EXPB0,%xmm0
11918
11919# qhasm: shuffle bytes of xmm1 by EXPB0
11920# asm 1: pshufb EXPB0,<xmm1=int6464#2
11921# asm 2: pshufb EXPB0,<xmm1=%xmm1
11922pshufb EXPB0,%xmm1
11923
11924# qhasm: shuffle bytes of xmm4 by EXPB0
11925# asm 1: pshufb EXPB0,<xmm4=int6464#5
11926# asm 2: pshufb EXPB0,<xmm4=%xmm4
11927pshufb EXPB0,%xmm4
11928
11929# qhasm: shuffle bytes of xmm6 by EXPB0
11930# asm 1: pshufb EXPB0,<xmm6=int6464#7
11931# asm 2: pshufb EXPB0,<xmm6=%xmm6
11932pshufb EXPB0,%xmm6
11933
11934# qhasm: shuffle bytes of xmm3 by EXPB0
11935# asm 1: pshufb EXPB0,<xmm3=int6464#4
11936# asm 2: pshufb EXPB0,<xmm3=%xmm3
11937pshufb EXPB0,%xmm3
11938
11939# qhasm: shuffle bytes of xmm7 by EXPB0
11940# asm 1: pshufb EXPB0,<xmm7=int6464#8
11941# asm 2: pshufb EXPB0,<xmm7=%xmm7
11942pshufb EXPB0,%xmm7
11943
11944# qhasm: shuffle bytes of xmm2 by EXPB0
11945# asm 1: pshufb EXPB0,<xmm2=int6464#3
11946# asm 2: pshufb EXPB0,<xmm2=%xmm2
11947pshufb EXPB0,%xmm2
11948
11949# qhasm: shuffle bytes of xmm5 by EXPB0
11950# asm 1: pshufb EXPB0,<xmm5=int6464#6
11951# asm 2: pshufb EXPB0,<xmm5=%xmm5
11952pshufb EXPB0,%xmm5
11953
11954# qhasm: xmm8 = *(int128 *)(c + 1024)
11955# asm 1: movdqa 1024(<c=int64#1),>xmm8=int6464#9
11956# asm 2: movdqa 1024(<c=%rdi),>xmm8=%xmm8
11957movdqa 1024(%rdi),%xmm8
11958
11959# qhasm: xmm9 = *(int128 *)(c + 1040)
11960# asm 1: movdqa 1040(<c=int64#1),>xmm9=int6464#10
11961# asm 2: movdqa 1040(<c=%rdi),>xmm9=%xmm9
11962movdqa 1040(%rdi),%xmm9
11963
11964# qhasm: xmm10 = *(int128 *)(c + 1056)
11965# asm 1: movdqa 1056(<c=int64#1),>xmm10=int6464#11
11966# asm 2: movdqa 1056(<c=%rdi),>xmm10=%xmm10
11967movdqa 1056(%rdi),%xmm10
11968
11969# qhasm: xmm11 = *(int128 *)(c + 1072)
11970# asm 1: movdqa 1072(<c=int64#1),>xmm11=int6464#12
11971# asm 2: movdqa 1072(<c=%rdi),>xmm11=%xmm11
11972movdqa 1072(%rdi),%xmm11
11973
11974# qhasm: xmm12 = *(int128 *)(c + 1088)
11975# asm 1: movdqa 1088(<c=int64#1),>xmm12=int6464#13
11976# asm 2: movdqa 1088(<c=%rdi),>xmm12=%xmm12
11977movdqa 1088(%rdi),%xmm12
11978
11979# qhasm: xmm13 = *(int128 *)(c + 1104)
11980# asm 1: movdqa 1104(<c=int64#1),>xmm13=int6464#14
11981# asm 2: movdqa 1104(<c=%rdi),>xmm13=%xmm13
11982movdqa 1104(%rdi),%xmm13
11983
11984# qhasm: xmm14 = *(int128 *)(c + 1120)
11985# asm 1: movdqa 1120(<c=int64#1),>xmm14=int6464#15
11986# asm 2: movdqa 1120(<c=%rdi),>xmm14=%xmm14
11987movdqa 1120(%rdi),%xmm14
11988
11989# qhasm: xmm15 = *(int128 *)(c + 1136)
11990# asm 1: movdqa 1136(<c=int64#1),>xmm15=int6464#16
11991# asm 2: movdqa 1136(<c=%rdi),>xmm15=%xmm15
11992movdqa 1136(%rdi),%xmm15
11993
11994# qhasm: xmm8 ^= ONE
11995# asm 1: pxor ONE,<xmm8=int6464#9
11996# asm 2: pxor ONE,<xmm8=%xmm8
11997pxor ONE,%xmm8
11998
11999# qhasm: xmm9 ^= ONE
12000# asm 1: pxor ONE,<xmm9=int6464#10
12001# asm 2: pxor ONE,<xmm9=%xmm9
12002pxor ONE,%xmm9
12003
12004# qhasm: xmm13 ^= ONE
12005# asm 1: pxor ONE,<xmm13=int6464#14
12006# asm 2: pxor ONE,<xmm13=%xmm13
12007pxor ONE,%xmm13
12008
12009# qhasm: xmm14 ^= ONE
12010# asm 1: pxor ONE,<xmm14=int6464#15
12011# asm 2: pxor ONE,<xmm14=%xmm14
12012pxor ONE,%xmm14
12013
12014# qhasm: xmm0 ^= xmm8
12015# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
12016# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
12017pxor %xmm8,%xmm0
12018
12019# qhasm: xmm1 ^= xmm9
12020# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
12021# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
12022pxor %xmm9,%xmm1
12023
12024# qhasm: xmm4 ^= xmm10
12025# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
12026# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
12027pxor %xmm10,%xmm4
12028
12029# qhasm: xmm6 ^= xmm11
12030# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
12031# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
12032pxor %xmm11,%xmm6
12033
12034# qhasm: xmm3 ^= xmm12
12035# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
12036# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
12037pxor %xmm12,%xmm3
12038
12039# qhasm: xmm7 ^= xmm13
12040# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
12041# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
12042pxor %xmm13,%xmm7
12043
12044# qhasm: xmm2 ^= xmm14
12045# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
12046# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
12047pxor %xmm14,%xmm2
12048
12049# qhasm: xmm5 ^= xmm15
12050# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
12051# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
12052pxor %xmm15,%xmm5
12053
12054# qhasm: uint32323232 xmm8 >>= 8
12055# asm 1: psrld $8,<xmm8=int6464#9
12056# asm 2: psrld $8,<xmm8=%xmm8
12057psrld $8,%xmm8
12058
12059# qhasm: uint32323232 xmm9 >>= 8
12060# asm 1: psrld $8,<xmm9=int6464#10
12061# asm 2: psrld $8,<xmm9=%xmm9
12062psrld $8,%xmm9
12063
12064# qhasm: uint32323232 xmm10 >>= 8
12065# asm 1: psrld $8,<xmm10=int6464#11
12066# asm 2: psrld $8,<xmm10=%xmm10
12067psrld $8,%xmm10
12068
12069# qhasm: uint32323232 xmm11 >>= 8
12070# asm 1: psrld $8,<xmm11=int6464#12
12071# asm 2: psrld $8,<xmm11=%xmm11
12072psrld $8,%xmm11
12073
12074# qhasm: uint32323232 xmm12 >>= 8
12075# asm 1: psrld $8,<xmm12=int6464#13
12076# asm 2: psrld $8,<xmm12=%xmm12
12077psrld $8,%xmm12
12078
12079# qhasm: uint32323232 xmm13 >>= 8
12080# asm 1: psrld $8,<xmm13=int6464#14
12081# asm 2: psrld $8,<xmm13=%xmm13
12082psrld $8,%xmm13
12083
12084# qhasm: uint32323232 xmm14 >>= 8
12085# asm 1: psrld $8,<xmm14=int6464#15
12086# asm 2: psrld $8,<xmm14=%xmm14
12087psrld $8,%xmm14
12088
12089# qhasm: uint32323232 xmm15 >>= 8
12090# asm 1: psrld $8,<xmm15=int6464#16
12091# asm 2: psrld $8,<xmm15=%xmm15
12092psrld $8,%xmm15
12093
12094# qhasm: xmm0 ^= xmm8
12095# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
12096# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
12097pxor %xmm8,%xmm0
12098
12099# qhasm: xmm1 ^= xmm9
12100# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
12101# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
12102pxor %xmm9,%xmm1
12103
12104# qhasm: xmm4 ^= xmm10
12105# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
12106# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
12107pxor %xmm10,%xmm4
12108
12109# qhasm: xmm6 ^= xmm11
12110# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
12111# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
12112pxor %xmm11,%xmm6
12113
12114# qhasm: xmm3 ^= xmm12
12115# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
12116# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
12117pxor %xmm12,%xmm3
12118
12119# qhasm: xmm7 ^= xmm13
12120# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
12121# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
12122pxor %xmm13,%xmm7
12123
12124# qhasm: xmm2 ^= xmm14
12125# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
12126# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
12127pxor %xmm14,%xmm2
12128
12129# qhasm: xmm5 ^= xmm15
12130# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
12131# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
12132pxor %xmm15,%xmm5
12133
12134# qhasm: uint32323232 xmm8 >>= 8
12135# asm 1: psrld $8,<xmm8=int6464#9
12136# asm 2: psrld $8,<xmm8=%xmm8
12137psrld $8,%xmm8
12138
12139# qhasm: uint32323232 xmm9 >>= 8
12140# asm 1: psrld $8,<xmm9=int6464#10
12141# asm 2: psrld $8,<xmm9=%xmm9
12142psrld $8,%xmm9
12143
12144# qhasm: uint32323232 xmm10 >>= 8
12145# asm 1: psrld $8,<xmm10=int6464#11
12146# asm 2: psrld $8,<xmm10=%xmm10
12147psrld $8,%xmm10
12148
12149# qhasm: uint32323232 xmm11 >>= 8
12150# asm 1: psrld $8,<xmm11=int6464#12
12151# asm 2: psrld $8,<xmm11=%xmm11
12152psrld $8,%xmm11
12153
12154# qhasm: uint32323232 xmm12 >>= 8
12155# asm 1: psrld $8,<xmm12=int6464#13
12156# asm 2: psrld $8,<xmm12=%xmm12
12157psrld $8,%xmm12
12158
12159# qhasm: uint32323232 xmm13 >>= 8
12160# asm 1: psrld $8,<xmm13=int6464#14
12161# asm 2: psrld $8,<xmm13=%xmm13
12162psrld $8,%xmm13
12163
12164# qhasm: uint32323232 xmm14 >>= 8
12165# asm 1: psrld $8,<xmm14=int6464#15
12166# asm 2: psrld $8,<xmm14=%xmm14
12167psrld $8,%xmm14
12168
12169# qhasm: uint32323232 xmm15 >>= 8
12170# asm 1: psrld $8,<xmm15=int6464#16
12171# asm 2: psrld $8,<xmm15=%xmm15
12172psrld $8,%xmm15
12173
12174# qhasm: xmm0 ^= xmm8
12175# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
12176# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
12177pxor %xmm8,%xmm0
12178
12179# qhasm: xmm1 ^= xmm9
12180# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
12181# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
12182pxor %xmm9,%xmm1
12183
12184# qhasm: xmm4 ^= xmm10
12185# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
12186# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
12187pxor %xmm10,%xmm4
12188
12189# qhasm: xmm6 ^= xmm11
12190# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
12191# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
12192pxor %xmm11,%xmm6
12193
12194# qhasm: xmm3 ^= xmm12
12195# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
12196# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
12197pxor %xmm12,%xmm3
12198
12199# qhasm: xmm7 ^= xmm13
12200# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
12201# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
12202pxor %xmm13,%xmm7
12203
12204# qhasm: xmm2 ^= xmm14
12205# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
12206# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
12207pxor %xmm14,%xmm2
12208
12209# qhasm: xmm5 ^= xmm15
12210# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
12211# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
12212pxor %xmm15,%xmm5
12213
12214# qhasm: uint32323232 xmm8 >>= 8
12215# asm 1: psrld $8,<xmm8=int6464#9
12216# asm 2: psrld $8,<xmm8=%xmm8
12217psrld $8,%xmm8
12218
12219# qhasm: uint32323232 xmm9 >>= 8
12220# asm 1: psrld $8,<xmm9=int6464#10
12221# asm 2: psrld $8,<xmm9=%xmm9
12222psrld $8,%xmm9
12223
12224# qhasm: uint32323232 xmm10 >>= 8
12225# asm 1: psrld $8,<xmm10=int6464#11
12226# asm 2: psrld $8,<xmm10=%xmm10
12227psrld $8,%xmm10
12228
12229# qhasm: uint32323232 xmm11 >>= 8
12230# asm 1: psrld $8,<xmm11=int6464#12
12231# asm 2: psrld $8,<xmm11=%xmm11
12232psrld $8,%xmm11
12233
12234# qhasm: uint32323232 xmm12 >>= 8
12235# asm 1: psrld $8,<xmm12=int6464#13
12236# asm 2: psrld $8,<xmm12=%xmm12
12237psrld $8,%xmm12
12238
12239# qhasm: uint32323232 xmm13 >>= 8
12240# asm 1: psrld $8,<xmm13=int6464#14
12241# asm 2: psrld $8,<xmm13=%xmm13
12242psrld $8,%xmm13
12243
12244# qhasm: uint32323232 xmm14 >>= 8
12245# asm 1: psrld $8,<xmm14=int6464#15
12246# asm 2: psrld $8,<xmm14=%xmm14
12247psrld $8,%xmm14
12248
12249# qhasm: uint32323232 xmm15 >>= 8
12250# asm 1: psrld $8,<xmm15=int6464#16
12251# asm 2: psrld $8,<xmm15=%xmm15
12252psrld $8,%xmm15
12253
12254# qhasm: xmm0 ^= xmm8
12255# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
12256# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
12257pxor %xmm8,%xmm0
12258
12259# qhasm: xmm1 ^= xmm9
12260# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
12261# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
12262pxor %xmm9,%xmm1
12263
12264# qhasm: xmm4 ^= xmm10
12265# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
12266# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
12267pxor %xmm10,%xmm4
12268
12269# qhasm: xmm6 ^= xmm11
12270# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
12271# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
12272pxor %xmm11,%xmm6
12273
12274# qhasm: xmm3 ^= xmm12
12275# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
12276# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
12277pxor %xmm12,%xmm3
12278
12279# qhasm: xmm7 ^= xmm13
12280# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
12281# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
12282pxor %xmm13,%xmm7
12283
12284# qhasm: xmm2 ^= xmm14
12285# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
12286# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
12287pxor %xmm14,%xmm2
12288
12289# qhasm: xmm5 ^= xmm15
12290# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
12291# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
12292pxor %xmm15,%xmm5
12293
12294# qhasm: *(int128 *)(c + 1152) = xmm0
12295# asm 1: movdqa <xmm0=int6464#1,1152(<c=int64#1)
12296# asm 2: movdqa <xmm0=%xmm0,1152(<c=%rdi)
12297movdqa %xmm0,1152(%rdi)
12298
12299# qhasm: *(int128 *)(c + 1168) = xmm1
12300# asm 1: movdqa <xmm1=int6464#2,1168(<c=int64#1)
12301# asm 2: movdqa <xmm1=%xmm1,1168(<c=%rdi)
12302movdqa %xmm1,1168(%rdi)
12303
12304# qhasm: *(int128 *)(c + 1184) = xmm4
12305# asm 1: movdqa <xmm4=int6464#5,1184(<c=int64#1)
12306# asm 2: movdqa <xmm4=%xmm4,1184(<c=%rdi)
12307movdqa %xmm4,1184(%rdi)
12308
12309# qhasm: *(int128 *)(c + 1200) = xmm6
12310# asm 1: movdqa <xmm6=int6464#7,1200(<c=int64#1)
12311# asm 2: movdqa <xmm6=%xmm6,1200(<c=%rdi)
12312movdqa %xmm6,1200(%rdi)
12313
12314# qhasm: *(int128 *)(c + 1216) = xmm3
12315# asm 1: movdqa <xmm3=int6464#4,1216(<c=int64#1)
12316# asm 2: movdqa <xmm3=%xmm3,1216(<c=%rdi)
12317movdqa %xmm3,1216(%rdi)
12318
12319# qhasm: *(int128 *)(c + 1232) = xmm7
12320# asm 1: movdqa <xmm7=int6464#8,1232(<c=int64#1)
12321# asm 2: movdqa <xmm7=%xmm7,1232(<c=%rdi)
12322movdqa %xmm7,1232(%rdi)
12323
12324# qhasm: *(int128 *)(c + 1248) = xmm2
12325# asm 1: movdqa <xmm2=int6464#3,1248(<c=int64#1)
12326# asm 2: movdqa <xmm2=%xmm2,1248(<c=%rdi)
12327movdqa %xmm2,1248(%rdi)
12328
12329# qhasm: *(int128 *)(c + 1264) = xmm5
12330# asm 1: movdqa <xmm5=int6464#6,1264(<c=int64#1)
12331# asm 2: movdqa <xmm5=%xmm5,1264(<c=%rdi)
12332movdqa %xmm5,1264(%rdi)
12333
12334# qhasm: xmm0 ^= ONE
12335# asm 1: pxor ONE,<xmm0=int6464#1
12336# asm 2: pxor ONE,<xmm0=%xmm0
12337pxor ONE,%xmm0
12338
12339# qhasm: xmm1 ^= ONE
12340# asm 1: pxor ONE,<xmm1=int6464#2
12341# asm 2: pxor ONE,<xmm1=%xmm1
12342pxor ONE,%xmm1
12343
12344# qhasm: xmm7 ^= ONE
12345# asm 1: pxor ONE,<xmm7=int6464#8
12346# asm 2: pxor ONE,<xmm7=%xmm7
12347pxor ONE,%xmm7
12348
12349# qhasm: xmm2 ^= ONE
12350# asm 1: pxor ONE,<xmm2=int6464#3
12351# asm 2: pxor ONE,<xmm2=%xmm2
12352pxor ONE,%xmm2
12353
12354# qhasm: shuffle bytes of xmm0 by ROTB
12355# asm 1: pshufb ROTB,<xmm0=int6464#1
12356# asm 2: pshufb ROTB,<xmm0=%xmm0
12357pshufb ROTB,%xmm0
12358
12359# qhasm: shuffle bytes of xmm1 by ROTB
12360# asm 1: pshufb ROTB,<xmm1=int6464#2
12361# asm 2: pshufb ROTB,<xmm1=%xmm1
12362pshufb ROTB,%xmm1
12363
12364# qhasm: shuffle bytes of xmm4 by ROTB
12365# asm 1: pshufb ROTB,<xmm4=int6464#5
12366# asm 2: pshufb ROTB,<xmm4=%xmm4
12367pshufb ROTB,%xmm4
12368
12369# qhasm: shuffle bytes of xmm6 by ROTB
12370# asm 1: pshufb ROTB,<xmm6=int6464#7
12371# asm 2: pshufb ROTB,<xmm6=%xmm6
12372pshufb ROTB,%xmm6
12373
12374# qhasm: shuffle bytes of xmm3 by ROTB
12375# asm 1: pshufb ROTB,<xmm3=int6464#4
12376# asm 2: pshufb ROTB,<xmm3=%xmm3
12377pshufb ROTB,%xmm3
12378
12379# qhasm: shuffle bytes of xmm7 by ROTB
12380# asm 1: pshufb ROTB,<xmm7=int6464#8
12381# asm 2: pshufb ROTB,<xmm7=%xmm7
12382pshufb ROTB,%xmm7
12383
12384# qhasm: shuffle bytes of xmm2 by ROTB
12385# asm 1: pshufb ROTB,<xmm2=int6464#3
12386# asm 2: pshufb ROTB,<xmm2=%xmm2
12387pshufb ROTB,%xmm2
12388
12389# qhasm: shuffle bytes of xmm5 by ROTB
12390# asm 1: pshufb ROTB,<xmm5=int6464#6
12391# asm 2: pshufb ROTB,<xmm5=%xmm5
12392pshufb ROTB,%xmm5
12393
12394# qhasm: xmm7 ^= xmm2
12395# asm 1: pxor <xmm2=int6464#3,<xmm7=int6464#8
12396# asm 2: pxor <xmm2=%xmm2,<xmm7=%xmm7
12397pxor %xmm2,%xmm7
12398
12399# qhasm: xmm4 ^= xmm1
12400# asm 1: pxor <xmm1=int6464#2,<xmm4=int6464#5
12401# asm 2: pxor <xmm1=%xmm1,<xmm4=%xmm4
12402pxor %xmm1,%xmm4
12403
12404# qhasm: xmm7 ^= xmm0
12405# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
12406# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
12407pxor %xmm0,%xmm7
12408
12409# qhasm: xmm2 ^= xmm4
12410# asm 1: pxor <xmm4=int6464#5,<xmm2=int6464#3
12411# asm 2: pxor <xmm4=%xmm4,<xmm2=%xmm2
12412pxor %xmm4,%xmm2
12413
12414# qhasm: xmm6 ^= xmm0
12415# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
12416# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
12417pxor %xmm0,%xmm6
12418
12419# qhasm: xmm2 ^= xmm6
12420# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
12421# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
12422pxor %xmm6,%xmm2
12423
12424# qhasm: xmm6 ^= xmm5
12425# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
12426# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
12427pxor %xmm5,%xmm6
12428
12429# qhasm: xmm6 ^= xmm3
12430# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
12431# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
12432pxor %xmm3,%xmm6
12433
12434# qhasm: xmm5 ^= xmm7
12435# asm 1: pxor <xmm7=int6464#8,<xmm5=int6464#6
12436# asm 2: pxor <xmm7=%xmm7,<xmm5=%xmm5
12437pxor %xmm7,%xmm5
12438
12439# qhasm: xmm6 ^= xmm1
12440# asm 1: pxor <xmm1=int6464#2,<xmm6=int6464#7
12441# asm 2: pxor <xmm1=%xmm1,<xmm6=%xmm6
12442pxor %xmm1,%xmm6
12443
12444# qhasm: xmm3 ^= xmm7
12445# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
12446# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
12447pxor %xmm7,%xmm3
12448
12449# qhasm: xmm4 ^= xmm5
12450# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
12451# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
12452pxor %xmm5,%xmm4
12453
12454# qhasm: xmm1 ^= xmm7
12455# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
12456# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
12457pxor %xmm7,%xmm1
12458
12459# qhasm: xmm11 = xmm5
12460# asm 1: movdqa <xmm5=int6464#6,>xmm11=int6464#9
12461# asm 2: movdqa <xmm5=%xmm5,>xmm11=%xmm8
12462movdqa %xmm5,%xmm8
12463
12464# qhasm: xmm10 = xmm1
12465# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
12466# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
12467movdqa %xmm1,%xmm9
12468
12469# qhasm: xmm9 = xmm7
12470# asm 1: movdqa <xmm7=int6464#8,>xmm9=int6464#11
12471# asm 2: movdqa <xmm7=%xmm7,>xmm9=%xmm10
12472movdqa %xmm7,%xmm10
12473
12474# qhasm: xmm13 = xmm4
12475# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
12476# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
12477movdqa %xmm4,%xmm11
12478
12479# qhasm: xmm12 = xmm2
12480# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#13
12481# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm12
12482movdqa %xmm2,%xmm12
12483
12484# qhasm: xmm11 ^= xmm3
12485# asm 1: pxor <xmm3=int6464#4,<xmm11=int6464#9
12486# asm 2: pxor <xmm3=%xmm3,<xmm11=%xmm8
12487pxor %xmm3,%xmm8
12488
12489# qhasm: xmm10 ^= xmm4
12490# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#10
12491# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm9
12492pxor %xmm4,%xmm9
12493
12494# qhasm: xmm9 ^= xmm6
12495# asm 1: pxor <xmm6=int6464#7,<xmm9=int6464#11
12496# asm 2: pxor <xmm6=%xmm6,<xmm9=%xmm10
12497pxor %xmm6,%xmm10
12498
12499# qhasm: xmm13 ^= xmm3
12500# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#12
12501# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm11
12502pxor %xmm3,%xmm11
12503
12504# qhasm: xmm12 ^= xmm0
12505# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
12506# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
12507pxor %xmm0,%xmm12
12508
12509# qhasm: xmm14 = xmm11
12510# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
12511# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
12512movdqa %xmm8,%xmm13
12513
12514# qhasm: xmm8 = xmm10
12515# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
12516# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
12517movdqa %xmm9,%xmm14
12518
12519# qhasm: xmm15 = xmm11
12520# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
12521# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
12522movdqa %xmm8,%xmm15
12523
12524# qhasm: xmm10 |= xmm9
12525# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
12526# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
12527por %xmm10,%xmm9
12528
12529# qhasm: xmm11 |= xmm12
12530# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
12531# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
12532por %xmm12,%xmm8
12533
12534# qhasm: xmm15 ^= xmm8
12535# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
12536# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
12537pxor %xmm14,%xmm15
12538
12539# qhasm: xmm14 &= xmm12
12540# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
12541# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
12542pand %xmm12,%xmm13
12543
12544# qhasm: xmm8 &= xmm9
12545# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
12546# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
12547pand %xmm10,%xmm14
12548
12549# qhasm: xmm12 ^= xmm9
12550# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
12551# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
12552pxor %xmm10,%xmm12
12553
12554# qhasm: xmm15 &= xmm12
12555# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
12556# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
12557pand %xmm12,%xmm15
12558
12559# qhasm: xmm12 = xmm6
12560# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#11
12561# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm10
12562movdqa %xmm6,%xmm10
12563
12564# qhasm: xmm12 ^= xmm0
12565# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
12566# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
12567pxor %xmm0,%xmm10
12568
12569# qhasm: xmm13 &= xmm12
12570# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
12571# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
12572pand %xmm10,%xmm11
12573
12574# qhasm: xmm11 ^= xmm13
12575# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
12576# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
12577pxor %xmm11,%xmm8
12578
12579# qhasm: xmm10 ^= xmm13
12580# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
12581# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
12582pxor %xmm11,%xmm9
12583
12584# qhasm: xmm13 = xmm5
12585# asm 1: movdqa <xmm5=int6464#6,>xmm13=int6464#11
12586# asm 2: movdqa <xmm5=%xmm5,>xmm13=%xmm10
12587movdqa %xmm5,%xmm10
12588
12589# qhasm: xmm13 ^= xmm1
12590# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
12591# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
12592pxor %xmm1,%xmm10
12593
12594# qhasm: xmm12 = xmm7
12595# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#12
12596# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm11
12597movdqa %xmm7,%xmm11
12598
12599# qhasm: xmm9 = xmm13
12600# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
12601# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
12602movdqa %xmm10,%xmm12
12603
12604# qhasm: xmm12 ^= xmm2
12605# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#12
12606# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm11
12607pxor %xmm2,%xmm11
12608
12609# qhasm: xmm9 |= xmm12
12610# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
12611# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
12612por %xmm11,%xmm12
12613
12614# qhasm: xmm13 &= xmm12
12615# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
12616# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
12617pand %xmm11,%xmm10
12618
12619# qhasm: xmm8 ^= xmm13
12620# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
12621# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
12622pxor %xmm10,%xmm14
12623
12624# qhasm: xmm11 ^= xmm15
12625# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
12626# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
12627pxor %xmm15,%xmm8
12628
12629# qhasm: xmm10 ^= xmm14
12630# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
12631# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
12632pxor %xmm13,%xmm9
12633
12634# qhasm: xmm9 ^= xmm15
12635# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
12636# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
12637pxor %xmm15,%xmm12
12638
12639# qhasm: xmm8 ^= xmm14
12640# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
12641# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
12642pxor %xmm13,%xmm14
12643
12644# qhasm: xmm9 ^= xmm14
12645# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
12646# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
12647pxor %xmm13,%xmm12
12648
12649# qhasm: xmm12 = xmm4
12650# asm 1: movdqa <xmm4=int6464#5,>xmm12=int6464#11
12651# asm 2: movdqa <xmm4=%xmm4,>xmm12=%xmm10
12652movdqa %xmm4,%xmm10
12653
12654# qhasm: xmm13 = xmm3
12655# asm 1: movdqa <xmm3=int6464#4,>xmm13=int6464#12
12656# asm 2: movdqa <xmm3=%xmm3,>xmm13=%xmm11
12657movdqa %xmm3,%xmm11
12658
12659# qhasm: xmm14 = xmm1
12660# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
12661# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
12662movdqa %xmm1,%xmm13
12663
12664# qhasm: xmm15 = xmm5
12665# asm 1: movdqa <xmm5=int6464#6,>xmm15=int6464#16
12666# asm 2: movdqa <xmm5=%xmm5,>xmm15=%xmm15
12667movdqa %xmm5,%xmm15
12668
12669# qhasm: xmm12 &= xmm6
12670# asm 1: pand <xmm6=int6464#7,<xmm12=int6464#11
12671# asm 2: pand <xmm6=%xmm6,<xmm12=%xmm10
12672pand %xmm6,%xmm10
12673
12674# qhasm: xmm13 &= xmm0
12675# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
12676# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
12677pand %xmm0,%xmm11
12678
12679# qhasm: xmm14 &= xmm7
12680# asm 1: pand <xmm7=int6464#8,<xmm14=int6464#14
12681# asm 2: pand <xmm7=%xmm7,<xmm14=%xmm13
12682pand %xmm7,%xmm13
12683
12684# qhasm: xmm15 |= xmm2
12685# asm 1: por <xmm2=int6464#3,<xmm15=int6464#16
12686# asm 2: por <xmm2=%xmm2,<xmm15=%xmm15
12687por %xmm2,%xmm15
12688
12689# qhasm: xmm11 ^= xmm12
12690# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
12691# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
12692pxor %xmm10,%xmm8
12693
12694# qhasm: xmm10 ^= xmm13
12695# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
12696# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
12697pxor %xmm11,%xmm9
12698
12699# qhasm: xmm9 ^= xmm14
12700# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
12701# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
12702pxor %xmm13,%xmm12
12703
12704# qhasm: xmm8 ^= xmm15
12705# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
12706# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
12707pxor %xmm15,%xmm14
12708
12709# qhasm: xmm12 = xmm11
12710# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
12711# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
12712movdqa %xmm8,%xmm10
12713
12714# qhasm: xmm12 ^= xmm10
12715# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
12716# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
12717pxor %xmm9,%xmm10
12718
12719# qhasm: xmm11 &= xmm9
12720# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
12721# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
12722pand %xmm12,%xmm8
12723
12724# qhasm: xmm14 = xmm8
12725# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
12726# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
12727movdqa %xmm14,%xmm11
12728
12729# qhasm: xmm14 ^= xmm11
12730# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
12731# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
12732pxor %xmm8,%xmm11
12733
12734# qhasm: xmm15 = xmm12
12735# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
12736# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
12737movdqa %xmm10,%xmm13
12738
12739# qhasm: xmm15 &= xmm14
12740# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
12741# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
12742pand %xmm11,%xmm13
12743
12744# qhasm: xmm15 ^= xmm10
12745# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
12746# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
12747pxor %xmm9,%xmm13
12748
12749# qhasm: xmm13 = xmm9
12750# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
12751# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
12752movdqa %xmm12,%xmm15
12753
12754# qhasm: xmm13 ^= xmm8
12755# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
12756# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
12757pxor %xmm14,%xmm15
12758
12759# qhasm: xmm11 ^= xmm10
12760# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
12761# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
12762pxor %xmm9,%xmm8
12763
12764# qhasm: xmm13 &= xmm11
12765# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
12766# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
12767pand %xmm8,%xmm15
12768
12769# qhasm: xmm13 ^= xmm8
12770# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
12771# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
12772pxor %xmm14,%xmm15
12773
12774# qhasm: xmm9 ^= xmm13
12775# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
12776# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
12777pxor %xmm15,%xmm12
12778
12779# qhasm: xmm10 = xmm14
12780# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
12781# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
12782movdqa %xmm11,%xmm8
12783
12784# qhasm: xmm10 ^= xmm13
12785# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
12786# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
12787pxor %xmm15,%xmm8
12788
12789# qhasm: xmm10 &= xmm8
12790# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
12791# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
12792pand %xmm14,%xmm8
12793
12794# qhasm: xmm9 ^= xmm10
12795# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
12796# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
12797pxor %xmm8,%xmm12
12798
12799# qhasm: xmm14 ^= xmm10
12800# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
12801# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
12802pxor %xmm8,%xmm11
12803
12804# qhasm: xmm14 &= xmm15
12805# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
12806# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
12807pand %xmm13,%xmm11
12808
12809# qhasm: xmm14 ^= xmm12
12810# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
12811# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
12812pxor %xmm10,%xmm11
12813
12814# qhasm: xmm12 = xmm2
12815# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#9
12816# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm8
12817movdqa %xmm2,%xmm8
12818
12819# qhasm: xmm8 = xmm7
12820# asm 1: movdqa <xmm7=int6464#8,>xmm8=int6464#10
12821# asm 2: movdqa <xmm7=%xmm7,>xmm8=%xmm9
12822movdqa %xmm7,%xmm9
12823
12824# qhasm: xmm10 = xmm15
12825# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
12826# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
12827movdqa %xmm13,%xmm10
12828
12829# qhasm: xmm10 ^= xmm14
12830# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
12831# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
12832pxor %xmm11,%xmm10
12833
12834# qhasm: xmm10 &= xmm2
12835# asm 1: pand <xmm2=int6464#3,<xmm10=int6464#11
12836# asm 2: pand <xmm2=%xmm2,<xmm10=%xmm10
12837pand %xmm2,%xmm10
12838
12839# qhasm: xmm2 ^= xmm7
12840# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
12841# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
12842pxor %xmm7,%xmm2
12843
12844# qhasm: xmm2 &= xmm14
12845# asm 1: pand <xmm14=int6464#12,<xmm2=int6464#3
12846# asm 2: pand <xmm14=%xmm11,<xmm2=%xmm2
12847pand %xmm11,%xmm2
12848
12849# qhasm: xmm7 &= xmm15
12850# asm 1: pand <xmm15=int6464#14,<xmm7=int6464#8
12851# asm 2: pand <xmm15=%xmm13,<xmm7=%xmm7
12852pand %xmm13,%xmm7
12853
12854# qhasm: xmm2 ^= xmm7
12855# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
12856# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
12857pxor %xmm7,%xmm2
12858
12859# qhasm: xmm7 ^= xmm10
12860# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
12861# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
12862pxor %xmm10,%xmm7
12863
12864# qhasm: xmm12 ^= xmm0
12865# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
12866# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
12867pxor %xmm0,%xmm8
12868
12869# qhasm: xmm8 ^= xmm6
12870# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#10
12871# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm9
12872pxor %xmm6,%xmm9
12873
12874# qhasm: xmm15 ^= xmm13
12875# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
12876# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
12877pxor %xmm15,%xmm13
12878
12879# qhasm: xmm14 ^= xmm9
12880# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
12881# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
12882pxor %xmm12,%xmm11
12883
12884# qhasm: xmm11 = xmm15
12885# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
12886# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
12887movdqa %xmm13,%xmm10
12888
12889# qhasm: xmm11 ^= xmm14
12890# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
12891# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
12892pxor %xmm11,%xmm10
12893
12894# qhasm: xmm11 &= xmm12
12895# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
12896# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
12897pand %xmm8,%xmm10
12898
12899# qhasm: xmm12 ^= xmm8
12900# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
12901# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
12902pxor %xmm9,%xmm8
12903
12904# qhasm: xmm12 &= xmm14
12905# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
12906# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
12907pand %xmm11,%xmm8
12908
12909# qhasm: xmm8 &= xmm15
12910# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
12911# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
12912pand %xmm13,%xmm9
12913
12914# qhasm: xmm8 ^= xmm12
12915# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
12916# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
12917pxor %xmm8,%xmm9
12918
12919# qhasm: xmm12 ^= xmm11
12920# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
12921# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
12922pxor %xmm10,%xmm8
12923
12924# qhasm: xmm10 = xmm13
12925# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
12926# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
12927movdqa %xmm15,%xmm10
12928
12929# qhasm: xmm10 ^= xmm9
12930# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
12931# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
12932pxor %xmm12,%xmm10
12933
12934# qhasm: xmm10 &= xmm0
12935# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
12936# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
12937pand %xmm0,%xmm10
12938
12939# qhasm: xmm0 ^= xmm6
12940# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
12941# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
12942pxor %xmm6,%xmm0
12943
12944# qhasm: xmm0 &= xmm9
12945# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
12946# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
12947pand %xmm12,%xmm0
12948
12949# qhasm: xmm6 &= xmm13
12950# asm 1: pand <xmm13=int6464#16,<xmm6=int6464#7
12951# asm 2: pand <xmm13=%xmm15,<xmm6=%xmm6
12952pand %xmm15,%xmm6
12953
12954# qhasm: xmm0 ^= xmm6
12955# asm 1: pxor <xmm6=int6464#7,<xmm0=int6464#1
12956# asm 2: pxor <xmm6=%xmm6,<xmm0=%xmm0
12957pxor %xmm6,%xmm0
12958
12959# qhasm: xmm6 ^= xmm10
12960# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
12961# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
12962pxor %xmm10,%xmm6
12963
12964# qhasm: xmm2 ^= xmm12
12965# asm 1: pxor <xmm12=int6464#9,<xmm2=int6464#3
12966# asm 2: pxor <xmm12=%xmm8,<xmm2=%xmm2
12967pxor %xmm8,%xmm2
12968
12969# qhasm: xmm0 ^= xmm12
12970# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
12971# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
12972pxor %xmm8,%xmm0
12973
12974# qhasm: xmm7 ^= xmm8
12975# asm 1: pxor <xmm8=int6464#10,<xmm7=int6464#8
12976# asm 2: pxor <xmm8=%xmm9,<xmm7=%xmm7
12977pxor %xmm9,%xmm7
12978
12979# qhasm: xmm6 ^= xmm8
12980# asm 1: pxor <xmm8=int6464#10,<xmm6=int6464#7
12981# asm 2: pxor <xmm8=%xmm9,<xmm6=%xmm6
12982pxor %xmm9,%xmm6
12983
12984# qhasm: xmm12 = xmm5
12985# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#9
12986# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm8
12987movdqa %xmm5,%xmm8
12988
12989# qhasm: xmm8 = xmm1
12990# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
12991# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
12992movdqa %xmm1,%xmm9
12993
12994# qhasm: xmm12 ^= xmm3
12995# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#9
12996# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm8
12997pxor %xmm3,%xmm8
12998
12999# qhasm: xmm8 ^= xmm4
13000# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#10
13001# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm9
13002pxor %xmm4,%xmm9
13003
13004# qhasm: xmm11 = xmm15
13005# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
13006# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
13007movdqa %xmm13,%xmm10
13008
13009# qhasm: xmm11 ^= xmm14
13010# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
13011# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
13012pxor %xmm11,%xmm10
13013
13014# qhasm: xmm11 &= xmm12
13015# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
13016# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
13017pand %xmm8,%xmm10
13018
13019# qhasm: xmm12 ^= xmm8
13020# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
13021# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
13022pxor %xmm9,%xmm8
13023
13024# qhasm: xmm12 &= xmm14
13025# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
13026# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
13027pand %xmm11,%xmm8
13028
13029# qhasm: xmm8 &= xmm15
13030# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
13031# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
13032pand %xmm13,%xmm9
13033
13034# qhasm: xmm8 ^= xmm12
13035# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
13036# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
13037pxor %xmm8,%xmm9
13038
13039# qhasm: xmm12 ^= xmm11
13040# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
13041# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
13042pxor %xmm10,%xmm8
13043
13044# qhasm: xmm10 = xmm13
13045# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
13046# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
13047movdqa %xmm15,%xmm10
13048
13049# qhasm: xmm10 ^= xmm9
13050# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
13051# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
13052pxor %xmm12,%xmm10
13053
13054# qhasm: xmm10 &= xmm3
13055# asm 1: pand <xmm3=int6464#4,<xmm10=int6464#11
13056# asm 2: pand <xmm3=%xmm3,<xmm10=%xmm10
13057pand %xmm3,%xmm10
13058
13059# qhasm: xmm3 ^= xmm4
13060# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
13061# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
13062pxor %xmm4,%xmm3
13063
13064# qhasm: xmm3 &= xmm9
13065# asm 1: pand <xmm9=int6464#13,<xmm3=int6464#4
13066# asm 2: pand <xmm9=%xmm12,<xmm3=%xmm3
13067pand %xmm12,%xmm3
13068
13069# qhasm: xmm4 &= xmm13
13070# asm 1: pand <xmm13=int6464#16,<xmm4=int6464#5
13071# asm 2: pand <xmm13=%xmm15,<xmm4=%xmm4
13072pand %xmm15,%xmm4
13073
13074# qhasm: xmm3 ^= xmm4
13075# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
13076# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
13077pxor %xmm4,%xmm3
13078
13079# qhasm: xmm4 ^= xmm10
13080# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
13081# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
13082pxor %xmm10,%xmm4
13083
13084# qhasm: xmm15 ^= xmm13
13085# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
13086# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
13087pxor %xmm15,%xmm13
13088
13089# qhasm: xmm14 ^= xmm9
13090# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
13091# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
13092pxor %xmm12,%xmm11
13093
13094# qhasm: xmm11 = xmm15
13095# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
13096# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
13097movdqa %xmm13,%xmm10
13098
13099# qhasm: xmm11 ^= xmm14
13100# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
13101# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
13102pxor %xmm11,%xmm10
13103
13104# qhasm: xmm11 &= xmm5
13105# asm 1: pand <xmm5=int6464#6,<xmm11=int6464#11
13106# asm 2: pand <xmm5=%xmm5,<xmm11=%xmm10
13107pand %xmm5,%xmm10
13108
13109# qhasm: xmm5 ^= xmm1
13110# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
13111# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
13112pxor %xmm1,%xmm5
13113
13114# qhasm: xmm5 &= xmm14
13115# asm 1: pand <xmm14=int6464#12,<xmm5=int6464#6
13116# asm 2: pand <xmm14=%xmm11,<xmm5=%xmm5
13117pand %xmm11,%xmm5
13118
13119# qhasm: xmm1 &= xmm15
13120# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
13121# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
13122pand %xmm13,%xmm1
13123
13124# qhasm: xmm5 ^= xmm1
13125# asm 1: pxor <xmm1=int6464#2,<xmm5=int6464#6
13126# asm 2: pxor <xmm1=%xmm1,<xmm5=%xmm5
13127pxor %xmm1,%xmm5
13128
13129# qhasm: xmm1 ^= xmm11
13130# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
13131# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
13132pxor %xmm10,%xmm1
13133
13134# qhasm: xmm5 ^= xmm12
13135# asm 1: pxor <xmm12=int6464#9,<xmm5=int6464#6
13136# asm 2: pxor <xmm12=%xmm8,<xmm5=%xmm5
13137pxor %xmm8,%xmm5
13138
13139# qhasm: xmm3 ^= xmm12
13140# asm 1: pxor <xmm12=int6464#9,<xmm3=int6464#4
13141# asm 2: pxor <xmm12=%xmm8,<xmm3=%xmm3
13142pxor %xmm8,%xmm3
13143
13144# qhasm: xmm1 ^= xmm8
13145# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
13146# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
13147pxor %xmm9,%xmm1
13148
13149# qhasm: xmm4 ^= xmm8
13150# asm 1: pxor <xmm8=int6464#10,<xmm4=int6464#5
13151# asm 2: pxor <xmm8=%xmm9,<xmm4=%xmm4
13152pxor %xmm9,%xmm4
13153
13154# qhasm: xmm5 ^= xmm0
13155# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
13156# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
13157pxor %xmm0,%xmm5
13158
13159# qhasm: xmm1 ^= xmm2
13160# asm 1: pxor <xmm2=int6464#3,<xmm1=int6464#2
13161# asm 2: pxor <xmm2=%xmm2,<xmm1=%xmm1
13162pxor %xmm2,%xmm1
13163
13164# qhasm: xmm3 ^= xmm5
13165# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
13166# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
13167pxor %xmm5,%xmm3
13168
13169# qhasm: xmm2 ^= xmm0
13170# asm 1: pxor <xmm0=int6464#1,<xmm2=int6464#3
13171# asm 2: pxor <xmm0=%xmm0,<xmm2=%xmm2
13172pxor %xmm0,%xmm2
13173
13174# qhasm: xmm0 ^= xmm1
13175# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
13176# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
13177pxor %xmm1,%xmm0
13178
13179# qhasm: xmm1 ^= xmm7
13180# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#2
13181# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm1
13182pxor %xmm7,%xmm1
13183
13184# qhasm: xmm7 ^= xmm4
13185# asm 1: pxor <xmm4=int6464#5,<xmm7=int6464#8
13186# asm 2: pxor <xmm4=%xmm4,<xmm7=%xmm7
13187pxor %xmm4,%xmm7
13188
13189# qhasm: xmm3 ^= xmm7
13190# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
13191# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
13192pxor %xmm7,%xmm3
13193
13194# qhasm: xmm4 ^= xmm6
13195# asm 1: pxor <xmm6=int6464#7,<xmm4=int6464#5
13196# asm 2: pxor <xmm6=%xmm6,<xmm4=%xmm4
13197pxor %xmm6,%xmm4
13198
13199# qhasm: xmm6 ^= xmm7
13200# asm 1: pxor <xmm7=int6464#8,<xmm6=int6464#7
13201# asm 2: pxor <xmm7=%xmm7,<xmm6=%xmm6
13202pxor %xmm7,%xmm6
13203
13204# qhasm: xmm2 ^= xmm6
13205# asm 1: pxor <xmm6=int6464#7,<xmm2=int6464#3
13206# asm 2: pxor <xmm6=%xmm6,<xmm2=%xmm2
13207pxor %xmm6,%xmm2
13208
13209# qhasm: xmm1 ^= RCON
13210# asm 1: pxor RCON,<xmm1=int6464#2
13211# asm 2: pxor RCON,<xmm1=%xmm1
13212pxor RCON,%xmm1
13213
13214# qhasm: xmm3 ^= RCON
13215# asm 1: pxor RCON,<xmm3=int6464#4
13216# asm 2: pxor RCON,<xmm3=%xmm3
13217pxor RCON,%xmm3
13218
13219# qhasm: xmm6 ^= RCON
13220# asm 1: pxor RCON,<xmm6=int6464#7
13221# asm 2: pxor RCON,<xmm6=%xmm6
13222pxor RCON,%xmm6
13223
13224# qhasm: xmm5 ^= RCON
13225# asm 1: pxor RCON,<xmm5=int6464#6
13226# asm 2: pxor RCON,<xmm5=%xmm5
13227pxor RCON,%xmm5
13228
13229# qhasm: shuffle bytes of xmm0 by EXPB0
13230# asm 1: pshufb EXPB0,<xmm0=int6464#1
13231# asm 2: pshufb EXPB0,<xmm0=%xmm0
13232pshufb EXPB0,%xmm0
13233
13234# qhasm: shuffle bytes of xmm1 by EXPB0
13235# asm 1: pshufb EXPB0,<xmm1=int6464#2
13236# asm 2: pshufb EXPB0,<xmm1=%xmm1
13237pshufb EXPB0,%xmm1
13238
13239# qhasm: shuffle bytes of xmm3 by EXPB0
13240# asm 1: pshufb EXPB0,<xmm3=int6464#4
13241# asm 2: pshufb EXPB0,<xmm3=%xmm3
13242pshufb EXPB0,%xmm3
13243
13244# qhasm: shuffle bytes of xmm2 by EXPB0
13245# asm 1: pshufb EXPB0,<xmm2=int6464#3
13246# asm 2: pshufb EXPB0,<xmm2=%xmm2
13247pshufb EXPB0,%xmm2
13248
13249# qhasm: shuffle bytes of xmm6 by EXPB0
13250# asm 1: pshufb EXPB0,<xmm6=int6464#7
13251# asm 2: pshufb EXPB0,<xmm6=%xmm6
13252pshufb EXPB0,%xmm6
13253
13254# qhasm: shuffle bytes of xmm5 by EXPB0
13255# asm 1: pshufb EXPB0,<xmm5=int6464#6
13256# asm 2: pshufb EXPB0,<xmm5=%xmm5
13257pshufb EXPB0,%xmm5
13258
13259# qhasm: shuffle bytes of xmm4 by EXPB0
13260# asm 1: pshufb EXPB0,<xmm4=int6464#5
13261# asm 2: pshufb EXPB0,<xmm4=%xmm4
13262pshufb EXPB0,%xmm4
13263
13264# qhasm: shuffle bytes of xmm7 by EXPB0
13265# asm 1: pshufb EXPB0,<xmm7=int6464#8
13266# asm 2: pshufb EXPB0,<xmm7=%xmm7
13267pshufb EXPB0,%xmm7
13268
13269# qhasm: xmm8 = *(int128 *)(c + 1152)
13270# asm 1: movdqa 1152(<c=int64#1),>xmm8=int6464#9
13271# asm 2: movdqa 1152(<c=%rdi),>xmm8=%xmm8
13272movdqa 1152(%rdi),%xmm8
13273
13274# qhasm: xmm9 = *(int128 *)(c + 1168)
13275# asm 1: movdqa 1168(<c=int64#1),>xmm9=int6464#10
13276# asm 2: movdqa 1168(<c=%rdi),>xmm9=%xmm9
13277movdqa 1168(%rdi),%xmm9
13278
13279# qhasm: xmm10 = *(int128 *)(c + 1184)
13280# asm 1: movdqa 1184(<c=int64#1),>xmm10=int6464#11
13281# asm 2: movdqa 1184(<c=%rdi),>xmm10=%xmm10
13282movdqa 1184(%rdi),%xmm10
13283
13284# qhasm: xmm11 = *(int128 *)(c + 1200)
13285# asm 1: movdqa 1200(<c=int64#1),>xmm11=int6464#12
13286# asm 2: movdqa 1200(<c=%rdi),>xmm11=%xmm11
13287movdqa 1200(%rdi),%xmm11
13288
13289# qhasm: xmm12 = *(int128 *)(c + 1216)
13290# asm 1: movdqa 1216(<c=int64#1),>xmm12=int6464#13
13291# asm 2: movdqa 1216(<c=%rdi),>xmm12=%xmm12
13292movdqa 1216(%rdi),%xmm12
13293
13294# qhasm: xmm13 = *(int128 *)(c + 1232)
13295# asm 1: movdqa 1232(<c=int64#1),>xmm13=int6464#14
13296# asm 2: movdqa 1232(<c=%rdi),>xmm13=%xmm13
13297movdqa 1232(%rdi),%xmm13
13298
13299# qhasm: xmm14 = *(int128 *)(c + 1248)
13300# asm 1: movdqa 1248(<c=int64#1),>xmm14=int6464#15
13301# asm 2: movdqa 1248(<c=%rdi),>xmm14=%xmm14
13302movdqa 1248(%rdi),%xmm14
13303
13304# qhasm: xmm15 = *(int128 *)(c + 1264)
13305# asm 1: movdqa 1264(<c=int64#1),>xmm15=int6464#16
13306# asm 2: movdqa 1264(<c=%rdi),>xmm15=%xmm15
13307movdqa 1264(%rdi),%xmm15
13308
13309# qhasm: xmm8 ^= ONE
13310# asm 1: pxor ONE,<xmm8=int6464#9
13311# asm 2: pxor ONE,<xmm8=%xmm8
13312pxor ONE,%xmm8
13313
13314# qhasm: xmm9 ^= ONE
13315# asm 1: pxor ONE,<xmm9=int6464#10
13316# asm 2: pxor ONE,<xmm9=%xmm9
13317pxor ONE,%xmm9
13318
13319# qhasm: xmm13 ^= ONE
13320# asm 1: pxor ONE,<xmm13=int6464#14
13321# asm 2: pxor ONE,<xmm13=%xmm13
13322pxor ONE,%xmm13
13323
13324# qhasm: xmm14 ^= ONE
13325# asm 1: pxor ONE,<xmm14=int6464#15
13326# asm 2: pxor ONE,<xmm14=%xmm14
13327pxor ONE,%xmm14
13328
13329# qhasm: xmm0 ^= xmm8
13330# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
13331# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
13332pxor %xmm8,%xmm0
13333
13334# qhasm: xmm1 ^= xmm9
13335# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
13336# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
13337pxor %xmm9,%xmm1
13338
13339# qhasm: xmm3 ^= xmm10
13340# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
13341# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
13342pxor %xmm10,%xmm3
13343
13344# qhasm: xmm2 ^= xmm11
13345# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
13346# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
13347pxor %xmm11,%xmm2
13348
13349# qhasm: xmm6 ^= xmm12
13350# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
13351# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
13352pxor %xmm12,%xmm6
13353
13354# qhasm: xmm5 ^= xmm13
13355# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
13356# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
13357pxor %xmm13,%xmm5
13358
13359# qhasm: xmm4 ^= xmm14
13360# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
13361# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
13362pxor %xmm14,%xmm4
13363
13364# qhasm: xmm7 ^= xmm15
13365# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
13366# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
13367pxor %xmm15,%xmm7
13368
13369# qhasm: uint32323232 xmm8 >>= 8
13370# asm 1: psrld $8,<xmm8=int6464#9
13371# asm 2: psrld $8,<xmm8=%xmm8
13372psrld $8,%xmm8
13373
13374# qhasm: uint32323232 xmm9 >>= 8
13375# asm 1: psrld $8,<xmm9=int6464#10
13376# asm 2: psrld $8,<xmm9=%xmm9
13377psrld $8,%xmm9
13378
13379# qhasm: uint32323232 xmm10 >>= 8
13380# asm 1: psrld $8,<xmm10=int6464#11
13381# asm 2: psrld $8,<xmm10=%xmm10
13382psrld $8,%xmm10
13383
13384# qhasm: uint32323232 xmm11 >>= 8
13385# asm 1: psrld $8,<xmm11=int6464#12
13386# asm 2: psrld $8,<xmm11=%xmm11
13387psrld $8,%xmm11
13388
13389# qhasm: uint32323232 xmm12 >>= 8
13390# asm 1: psrld $8,<xmm12=int6464#13
13391# asm 2: psrld $8,<xmm12=%xmm12
13392psrld $8,%xmm12
13393
13394# qhasm: uint32323232 xmm13 >>= 8
13395# asm 1: psrld $8,<xmm13=int6464#14
13396# asm 2: psrld $8,<xmm13=%xmm13
13397psrld $8,%xmm13
13398
13399# qhasm: uint32323232 xmm14 >>= 8
13400# asm 1: psrld $8,<xmm14=int6464#15
13401# asm 2: psrld $8,<xmm14=%xmm14
13402psrld $8,%xmm14
13403
13404# qhasm: uint32323232 xmm15 >>= 8
13405# asm 1: psrld $8,<xmm15=int6464#16
13406# asm 2: psrld $8,<xmm15=%xmm15
13407psrld $8,%xmm15
13408
13409# qhasm: xmm0 ^= xmm8
13410# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
13411# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
13412pxor %xmm8,%xmm0
13413
13414# qhasm: xmm1 ^= xmm9
13415# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
13416# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
13417pxor %xmm9,%xmm1
13418
13419# qhasm: xmm3 ^= xmm10
13420# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
13421# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
13422pxor %xmm10,%xmm3
13423
13424# qhasm: xmm2 ^= xmm11
13425# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
13426# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
13427pxor %xmm11,%xmm2
13428
13429# qhasm: xmm6 ^= xmm12
13430# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
13431# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
13432pxor %xmm12,%xmm6
13433
13434# qhasm: xmm5 ^= xmm13
13435# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
13436# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
13437pxor %xmm13,%xmm5
13438
13439# qhasm: xmm4 ^= xmm14
13440# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
13441# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
13442pxor %xmm14,%xmm4
13443
13444# qhasm: xmm7 ^= xmm15
13445# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
13446# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
13447pxor %xmm15,%xmm7
13448
13449# qhasm: uint32323232 xmm8 >>= 8
13450# asm 1: psrld $8,<xmm8=int6464#9
13451# asm 2: psrld $8,<xmm8=%xmm8
13452psrld $8,%xmm8
13453
13454# qhasm: uint32323232 xmm9 >>= 8
13455# asm 1: psrld $8,<xmm9=int6464#10
13456# asm 2: psrld $8,<xmm9=%xmm9
13457psrld $8,%xmm9
13458
13459# qhasm: uint32323232 xmm10 >>= 8
13460# asm 1: psrld $8,<xmm10=int6464#11
13461# asm 2: psrld $8,<xmm10=%xmm10
13462psrld $8,%xmm10
13463
13464# qhasm: uint32323232 xmm11 >>= 8
13465# asm 1: psrld $8,<xmm11=int6464#12
13466# asm 2: psrld $8,<xmm11=%xmm11
13467psrld $8,%xmm11
13468
13469# qhasm: uint32323232 xmm12 >>= 8
13470# asm 1: psrld $8,<xmm12=int6464#13
13471# asm 2: psrld $8,<xmm12=%xmm12
13472psrld $8,%xmm12
13473
13474# qhasm: uint32323232 xmm13 >>= 8
13475# asm 1: psrld $8,<xmm13=int6464#14
13476# asm 2: psrld $8,<xmm13=%xmm13
13477psrld $8,%xmm13
13478
13479# qhasm: uint32323232 xmm14 >>= 8
13480# asm 1: psrld $8,<xmm14=int6464#15
13481# asm 2: psrld $8,<xmm14=%xmm14
13482psrld $8,%xmm14
13483
13484# qhasm: uint32323232 xmm15 >>= 8
13485# asm 1: psrld $8,<xmm15=int6464#16
13486# asm 2: psrld $8,<xmm15=%xmm15
13487psrld $8,%xmm15
13488
13489# qhasm: xmm0 ^= xmm8
13490# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
13491# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
13492pxor %xmm8,%xmm0
13493
13494# qhasm: xmm1 ^= xmm9
13495# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
13496# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
13497pxor %xmm9,%xmm1
13498
13499# qhasm: xmm3 ^= xmm10
13500# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
13501# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
13502pxor %xmm10,%xmm3
13503
13504# qhasm: xmm2 ^= xmm11
13505# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
13506# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
13507pxor %xmm11,%xmm2
13508
13509# qhasm: xmm6 ^= xmm12
13510# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
13511# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
13512pxor %xmm12,%xmm6
13513
13514# qhasm: xmm5 ^= xmm13
13515# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
13516# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
13517pxor %xmm13,%xmm5
13518
13519# qhasm: xmm4 ^= xmm14
13520# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
13521# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
13522pxor %xmm14,%xmm4
13523
13524# qhasm: xmm7 ^= xmm15
13525# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
13526# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
13527pxor %xmm15,%xmm7
13528
13529# qhasm: uint32323232 xmm8 >>= 8
13530# asm 1: psrld $8,<xmm8=int6464#9
13531# asm 2: psrld $8,<xmm8=%xmm8
13532psrld $8,%xmm8
13533
13534# qhasm: uint32323232 xmm9 >>= 8
13535# asm 1: psrld $8,<xmm9=int6464#10
13536# asm 2: psrld $8,<xmm9=%xmm9
13537psrld $8,%xmm9
13538
13539# qhasm: uint32323232 xmm10 >>= 8
13540# asm 1: psrld $8,<xmm10=int6464#11
13541# asm 2: psrld $8,<xmm10=%xmm10
13542psrld $8,%xmm10
13543
13544# qhasm: uint32323232 xmm11 >>= 8
13545# asm 1: psrld $8,<xmm11=int6464#12
13546# asm 2: psrld $8,<xmm11=%xmm11
13547psrld $8,%xmm11
13548
13549# qhasm: uint32323232 xmm12 >>= 8
13550# asm 1: psrld $8,<xmm12=int6464#13
13551# asm 2: psrld $8,<xmm12=%xmm12
13552psrld $8,%xmm12
13553
13554# qhasm: uint32323232 xmm13 >>= 8
13555# asm 1: psrld $8,<xmm13=int6464#14
13556# asm 2: psrld $8,<xmm13=%xmm13
13557psrld $8,%xmm13
13558
13559# qhasm: uint32323232 xmm14 >>= 8
13560# asm 1: psrld $8,<xmm14=int6464#15
13561# asm 2: psrld $8,<xmm14=%xmm14
13562psrld $8,%xmm14
13563
13564# qhasm: uint32323232 xmm15 >>= 8
13565# asm 1: psrld $8,<xmm15=int6464#16
13566# asm 2: psrld $8,<xmm15=%xmm15
13567psrld $8,%xmm15
13568
13569# qhasm: xmm0 ^= xmm8
13570# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
13571# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
13572pxor %xmm8,%xmm0
13573
13574# qhasm: xmm1 ^= xmm9
13575# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
13576# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
13577pxor %xmm9,%xmm1
13578
13579# qhasm: xmm3 ^= xmm10
13580# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
13581# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
13582pxor %xmm10,%xmm3
13583
13584# qhasm: xmm2 ^= xmm11
13585# asm 1: pxor <xmm11=int6464#12,<xmm2=int6464#3
13586# asm 2: pxor <xmm11=%xmm11,<xmm2=%xmm2
13587pxor %xmm11,%xmm2
13588
13589# qhasm: xmm6 ^= xmm12
13590# asm 1: pxor <xmm12=int6464#13,<xmm6=int6464#7
13591# asm 2: pxor <xmm12=%xmm12,<xmm6=%xmm6
13592pxor %xmm12,%xmm6
13593
13594# qhasm: xmm5 ^= xmm13
13595# asm 1: pxor <xmm13=int6464#14,<xmm5=int6464#6
13596# asm 2: pxor <xmm13=%xmm13,<xmm5=%xmm5
13597pxor %xmm13,%xmm5
13598
13599# qhasm: xmm4 ^= xmm14
13600# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
13601# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
13602pxor %xmm14,%xmm4
13603
13604# qhasm: xmm7 ^= xmm15
13605# asm 1: pxor <xmm15=int6464#16,<xmm7=int6464#8
13606# asm 2: pxor <xmm15=%xmm15,<xmm7=%xmm7
13607pxor %xmm15,%xmm7
13608
13609# qhasm: shuffle bytes of xmm0 by M0
13610# asm 1: pshufb M0,<xmm0=int6464#1
13611# asm 2: pshufb M0,<xmm0=%xmm0
13612pshufb M0,%xmm0
13613
13614# qhasm: shuffle bytes of xmm1 by M0
13615# asm 1: pshufb M0,<xmm1=int6464#2
13616# asm 2: pshufb M0,<xmm1=%xmm1
13617pshufb M0,%xmm1
13618
13619# qhasm: shuffle bytes of xmm4 by M0
13620# asm 1: pshufb M0,<xmm4=int6464#5
13621# asm 2: pshufb M0,<xmm4=%xmm4
13622pshufb M0,%xmm4
13623
13624# qhasm: shuffle bytes of xmm6 by M0
13625# asm 1: pshufb M0,<xmm6=int6464#7
13626# asm 2: pshufb M0,<xmm6=%xmm6
13627pshufb M0,%xmm6
13628
13629# qhasm: shuffle bytes of xmm3 by M0
13630# asm 1: pshufb M0,<xmm3=int6464#4
13631# asm 2: pshufb M0,<xmm3=%xmm3
13632pshufb M0,%xmm3
13633
13634# qhasm: shuffle bytes of xmm7 by M0
13635# asm 1: pshufb M0,<xmm7=int6464#8
13636# asm 2: pshufb M0,<xmm7=%xmm7
13637pshufb M0,%xmm7
13638
13639# qhasm: shuffle bytes of xmm2 by M0
13640# asm 1: pshufb M0,<xmm2=int6464#3
13641# asm 2: pshufb M0,<xmm2=%xmm2
13642pshufb M0,%xmm2
13643
13644# qhasm: shuffle bytes of xmm5 by M0
13645# asm 1: pshufb M0,<xmm5=int6464#6
13646# asm 2: pshufb M0,<xmm5=%xmm5
13647pshufb M0,%xmm5
13648
13649# qhasm: *(int128 *)(c + 1280) = xmm0
13650# asm 1: movdqa <xmm0=int6464#1,1280(<c=int64#1)
13651# asm 2: movdqa <xmm0=%xmm0,1280(<c=%rdi)
13652movdqa %xmm0,1280(%rdi)
13653
13654# qhasm: *(int128 *)(c + 1296) = xmm1
13655# asm 1: movdqa <xmm1=int6464#2,1296(<c=int64#1)
13656# asm 2: movdqa <xmm1=%xmm1,1296(<c=%rdi)
13657movdqa %xmm1,1296(%rdi)
13658
13659# qhasm: *(int128 *)(c + 1312) = xmm3
13660# asm 1: movdqa <xmm3=int6464#4,1312(<c=int64#1)
13661# asm 2: movdqa <xmm3=%xmm3,1312(<c=%rdi)
13662movdqa %xmm3,1312(%rdi)
13663
13664# qhasm: *(int128 *)(c + 1328) = xmm2
13665# asm 1: movdqa <xmm2=int6464#3,1328(<c=int64#1)
13666# asm 2: movdqa <xmm2=%xmm2,1328(<c=%rdi)
13667movdqa %xmm2,1328(%rdi)
13668
13669# qhasm: *(int128 *)(c + 1344) = xmm6
13670# asm 1: movdqa <xmm6=int6464#7,1344(<c=int64#1)
13671# asm 2: movdqa <xmm6=%xmm6,1344(<c=%rdi)
13672movdqa %xmm6,1344(%rdi)
13673
13674# qhasm: *(int128 *)(c + 1360) = xmm5
13675# asm 1: movdqa <xmm5=int6464#6,1360(<c=int64#1)
13676# asm 2: movdqa <xmm5=%xmm5,1360(<c=%rdi)
13677movdqa %xmm5,1360(%rdi)
13678
13679# qhasm: *(int128 *)(c + 1376) = xmm4
13680# asm 1: movdqa <xmm4=int6464#5,1376(<c=int64#1)
13681# asm 2: movdqa <xmm4=%xmm4,1376(<c=%rdi)
13682movdqa %xmm4,1376(%rdi)
13683
13684# qhasm: *(int128 *)(c + 1392) = xmm7
13685# asm 1: movdqa <xmm7=int6464#8,1392(<c=int64#1)
13686# asm 2: movdqa <xmm7=%xmm7,1392(<c=%rdi)
13687movdqa %xmm7,1392(%rdi)
13688
13689# qhasm: leave
13690add %r11,%rsp
13691mov %rdi,%rax
13692mov %rsi,%rdx
13693xor %rax,%rax
13694ret
diff --git a/nacl/crypto_stream/aes128ctr/core2/stream.c b/nacl/crypto_stream/aes128ctr/core2/stream.c
new file mode 100644
index 00000000..53524a62
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/stream.c
@@ -0,0 +1,14 @@
1#include "crypto_stream.h"
2
3int crypto_stream(
4 unsigned char *out,
5 unsigned long long outlen,
6 const unsigned char *n,
7 const unsigned char *k
8 )
9{
10 unsigned char d[crypto_stream_BEFORENMBYTES];
11 crypto_stream_beforenm(d, k);
12 crypto_stream_afternm(out, outlen, n, d);
13 return 0;
14}
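
Note: stream.c above is only a thin wrapper. Each call re-runs the full bitsliced key expansion (crypto_stream_beforenm, the long routine whose tail appears above) and then derives the keystream with crypto_stream_afternm. When many messages are handled under a single key, the crypto_stream_BEFORENMBYTES-byte schedule can be computed once and reused; a minimal sketch, assuming the same crypto_stream.h macros used in stream.c (the helper names and the static buffer are illustrative, not part of NaCl):

#include "crypto_stream.h"

/* Expanded key schedule, computed once per key. */
static unsigned char sched[crypto_stream_BEFORENMBYTES];

/* Run the key expansion a single time. */
void stream_init(const unsigned char k[crypto_stream_KEYBYTES])
{
    crypto_stream_beforenm(sched, k);
}

/* Produce outlen keystream bytes for nonce n using the cached schedule. */
void stream_bytes(unsigned char *out, unsigned long long outlen,
                  const unsigned char n[crypto_stream_NONCEBYTES])
{
    crypto_stream_afternm(out, outlen, n, sched);
}
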
diff --git a/nacl/crypto_stream/aes128ctr/core2/xor.c b/nacl/crypto_stream/aes128ctr/core2/xor.c
new file mode 100644
index 00000000..825088cc
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/xor.c
@@ -0,0 +1,15 @@
1#include "crypto_stream.h"
2
3int crypto_stream_xor(
4 unsigned char *out,
5 const unsigned char *in,
6 unsigned long long inlen,
7 const unsigned char *n,
8 const unsigned char *k
9 )
10{
11 unsigned char d[crypto_stream_BEFORENMBYTES];
12 crypto_stream_beforenm(d, k);
13 crypto_stream_xor_afternm(out, in, inlen, n, d);
14 return 0;
15}
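
xor.c follows the same pattern as stream.c, except that crypto_stream_xor_afternm (the assembly routine added below) XORs the generated keystream into the input. Because the CTR keystream depends only on the key and nonce, applying the same operation twice restores the original data, so the one function serves for both encryption and decryption; a small round-trip sketch under that assumption (buffer and function names are illustrative):

#include "crypto_stream.h"

/* Encrypt pt into ct, then decrypt ct into rec; rec ends up equal to pt. */
int xor_roundtrip(unsigned char *ct, unsigned char *rec,
                  const unsigned char *pt, unsigned long long len,
                  const unsigned char n[crypto_stream_NONCEBYTES],
                  const unsigned char k[crypto_stream_KEYBYTES])
{
    crypto_stream_xor(ct, pt, len, n, k);   /* ct = pt XOR keystream(k, n) */
    crypto_stream_xor(rec, ct, len, n, k);  /* XOR again with the same keystream */
    return 0;
}
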
diff --git a/nacl/crypto_stream/aes128ctr/core2/xor_afternm.s b/nacl/crypto_stream/aes128ctr/core2/xor_afternm.s
new file mode 100644
index 00000000..022691a2
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/core2/xor_afternm.s
@@ -0,0 +1,12407 @@
1# Author: Emilia Käsper and Peter Schwabe
2# Date: 2009-03-19
3# +2010.01.31: minor namespace modifications
4# Public domain
5
6.data
7.p2align 6
8
9RCON: .int 0x00000000, 0x00000000, 0x00000000, 0xffffffff
10ROTB: .int 0x0c000000, 0x00000000, 0x04000000, 0x08000000
11EXPB0: .int 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f
12CTRINC1: .int 0x00000001, 0x00000000, 0x00000000, 0x00000000
13CTRINC2: .int 0x00000002, 0x00000000, 0x00000000, 0x00000000
14CTRINC3: .int 0x00000003, 0x00000000, 0x00000000, 0x00000000
15CTRINC4: .int 0x00000004, 0x00000000, 0x00000000, 0x00000000
16CTRINC5: .int 0x00000005, 0x00000000, 0x00000000, 0x00000000
17CTRINC6: .int 0x00000006, 0x00000000, 0x00000000, 0x00000000
18CTRINC7: .int 0x00000007, 0x00000000, 0x00000000, 0x00000000
19RCTRINC1: .int 0x00000000, 0x00000000, 0x00000000, 0x00000001
20RCTRINC2: .int 0x00000000, 0x00000000, 0x00000000, 0x00000002
21RCTRINC3: .int 0x00000000, 0x00000000, 0x00000000, 0x00000003
22RCTRINC4: .int 0x00000000, 0x00000000, 0x00000000, 0x00000004
23RCTRINC5: .int 0x00000000, 0x00000000, 0x00000000, 0x00000005
24RCTRINC6: .int 0x00000000, 0x00000000, 0x00000000, 0x00000006
25RCTRINC7: .int 0x00000000, 0x00000000, 0x00000000, 0x00000007
26
27SWAP32: .int 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
28M0SWAP: .quad 0x0105090d0004080c , 0x03070b0f02060a0e
29
30BS0: .quad 0x5555555555555555, 0x5555555555555555
31BS1: .quad 0x3333333333333333, 0x3333333333333333
32BS2: .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
33ONE: .quad 0xffffffffffffffff, 0xffffffffffffffff
34M0: .quad 0x02060a0e03070b0f, 0x0004080c0105090d
35SRM0: .quad 0x0304090e00050a0f, 0x01060b0c0207080d
36SR: .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
37
38# qhasm: int64 outp
39
40# qhasm: int64 inp
41
42# qhasm: int64 len
43
44# qhasm: int64 np
45
46# qhasm: int64 c
47
48# qhasm: input outp
49
50# qhasm: input inp
51
52# qhasm: input len
53
54# qhasm: input np
55
56# qhasm: input c
57
58# qhasm: int64 lensav
59
60# qhasm: int64 tmp
61
62# qhasm: int6464 xmm0
63
64# qhasm: int6464 xmm1
65
66# qhasm: int6464 xmm2
67
68# qhasm: int6464 xmm3
69
70# qhasm: int6464 xmm4
71
72# qhasm: int6464 xmm5
73
74# qhasm: int6464 xmm6
75
76# qhasm: int6464 xmm7
77
78# qhasm: int6464 xmm8
79
80# qhasm: int6464 xmm9
81
82# qhasm: int6464 xmm10
83
84# qhasm: int6464 xmm11
85
86# qhasm: int6464 xmm12
87
88# qhasm: int6464 xmm13
89
90# qhasm: int6464 xmm14
91
92# qhasm: int6464 xmm15
93
94# qhasm: int6464 t
95
96# qhasm: stack1024 bl
97
98# qhasm: stack128 nonce_stack
99
100# qhasm: int64 blp
101
102# qhasm: int64 b
103
104# qhasm: enter crypto_stream_aes128ctr_core2_xor_afternm
105.text
106.p2align 5
107.globl _crypto_stream_aes128ctr_core2_xor_afternm
108.globl crypto_stream_aes128ctr_core2_xor_afternm
109_crypto_stream_aes128ctr_core2_xor_afternm:
110crypto_stream_aes128ctr_core2_xor_afternm:
111mov %rsp,%r11
112and $31,%r11
113add $160,%r11
114sub %r11,%rsp
115
116# qhasm: xmm0 = *(int128 *) (np + 0)
117# asm 1: movdqa 0(<np=int64#4),>xmm0=int6464#1
118# asm 2: movdqa 0(<np=%rcx),>xmm0=%xmm0
119movdqa 0(%rcx),%xmm0
120
121# qhasm: nonce_stack = xmm0
122# asm 1: movdqa <xmm0=int6464#1,>nonce_stack=stack128#1
123# asm 2: movdqa <xmm0=%xmm0,>nonce_stack=0(%rsp)
124movdqa %xmm0,0(%rsp)
125
126# qhasm: np = &nonce_stack
127# asm 1: leaq <nonce_stack=stack128#1,>np=int64#4
128# asm 2: leaq <nonce_stack=0(%rsp),>np=%rcx
129leaq 0(%rsp),%rcx
130
131# qhasm: enc_block:
132._enc_block:
133
134# qhasm: xmm0 = *(int128 *) (np + 0)
135# asm 1: movdqa 0(<np=int64#4),>xmm0=int6464#1
136# asm 2: movdqa 0(<np=%rcx),>xmm0=%xmm0
137movdqa 0(%rcx),%xmm0
138
139# qhasm: xmm1 = xmm0
140# asm 1: movdqa <xmm0=int6464#1,>xmm1=int6464#2
141# asm 2: movdqa <xmm0=%xmm0,>xmm1=%xmm1
142movdqa %xmm0,%xmm1
143
144# qhasm: shuffle bytes of xmm1 by SWAP32
145# asm 1: pshufb SWAP32,<xmm1=int6464#2
146# asm 2: pshufb SWAP32,<xmm1=%xmm1
147pshufb SWAP32,%xmm1
148
149# qhasm: xmm2 = xmm1
150# asm 1: movdqa <xmm1=int6464#2,>xmm2=int6464#3
151# asm 2: movdqa <xmm1=%xmm1,>xmm2=%xmm2
152movdqa %xmm1,%xmm2
153
154# qhasm: xmm3 = xmm1
155# asm 1: movdqa <xmm1=int6464#2,>xmm3=int6464#4
156# asm 2: movdqa <xmm1=%xmm1,>xmm3=%xmm3
157movdqa %xmm1,%xmm3
158
159# qhasm: xmm4 = xmm1
160# asm 1: movdqa <xmm1=int6464#2,>xmm4=int6464#5
161# asm 2: movdqa <xmm1=%xmm1,>xmm4=%xmm4
162movdqa %xmm1,%xmm4
163
164# qhasm: xmm5 = xmm1
165# asm 1: movdqa <xmm1=int6464#2,>xmm5=int6464#6
166# asm 2: movdqa <xmm1=%xmm1,>xmm5=%xmm5
167movdqa %xmm1,%xmm5
168
169# qhasm: xmm6 = xmm1
170# asm 1: movdqa <xmm1=int6464#2,>xmm6=int6464#7
171# asm 2: movdqa <xmm1=%xmm1,>xmm6=%xmm6
172movdqa %xmm1,%xmm6
173
174# qhasm: xmm7 = xmm1
175# asm 1: movdqa <xmm1=int6464#2,>xmm7=int6464#8
176# asm 2: movdqa <xmm1=%xmm1,>xmm7=%xmm7
177movdqa %xmm1,%xmm7
178
179# qhasm: int32323232 xmm1 += RCTRINC1
180# asm 1: paddd RCTRINC1,<xmm1=int6464#2
181# asm 2: paddd RCTRINC1,<xmm1=%xmm1
182paddd RCTRINC1,%xmm1
183
184# qhasm: int32323232 xmm2 += RCTRINC2
185# asm 1: paddd RCTRINC2,<xmm2=int6464#3
186# asm 2: paddd RCTRINC2,<xmm2=%xmm2
187paddd RCTRINC2,%xmm2
188
189# qhasm: int32323232 xmm3 += RCTRINC3
190# asm 1: paddd RCTRINC3,<xmm3=int6464#4
191# asm 2: paddd RCTRINC3,<xmm3=%xmm3
192paddd RCTRINC3,%xmm3
193
194# qhasm: int32323232 xmm4 += RCTRINC4
195# asm 1: paddd RCTRINC4,<xmm4=int6464#5
196# asm 2: paddd RCTRINC4,<xmm4=%xmm4
197paddd RCTRINC4,%xmm4
198
199# qhasm: int32323232 xmm5 += RCTRINC5
200# asm 1: paddd RCTRINC5,<xmm5=int6464#6
201# asm 2: paddd RCTRINC5,<xmm5=%xmm5
202paddd RCTRINC5,%xmm5
203
204# qhasm: int32323232 xmm6 += RCTRINC6
205# asm 1: paddd RCTRINC6,<xmm6=int6464#7
206# asm 2: paddd RCTRINC6,<xmm6=%xmm6
207paddd RCTRINC6,%xmm6
208
209# qhasm: int32323232 xmm7 += RCTRINC7
210# asm 1: paddd RCTRINC7,<xmm7=int6464#8
211# asm 2: paddd RCTRINC7,<xmm7=%xmm7
212paddd RCTRINC7,%xmm7
213
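# pshufb with M0 (for xmm0) and M0SWAP (for xmm1..xmm7) rearranges the
# bytes of the eight counter blocks into the input ordering expected by
# the bitslicing step that follows.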
214# qhasm: shuffle bytes of xmm0 by M0
215# asm 1: pshufb M0,<xmm0=int6464#1
216# asm 2: pshufb M0,<xmm0=%xmm0
217pshufb M0,%xmm0
218
219# qhasm: shuffle bytes of xmm1 by M0SWAP
220# asm 1: pshufb M0SWAP,<xmm1=int6464#2
221# asm 2: pshufb M0SWAP,<xmm1=%xmm1
222pshufb M0SWAP,%xmm1
223
224# qhasm: shuffle bytes of xmm2 by M0SWAP
225# asm 1: pshufb M0SWAP,<xmm2=int6464#3
226# asm 2: pshufb M0SWAP,<xmm2=%xmm2
227pshufb M0SWAP,%xmm2
228
229# qhasm: shuffle bytes of xmm3 by M0SWAP
230# asm 1: pshufb M0SWAP,<xmm3=int6464#4
231# asm 2: pshufb M0SWAP,<xmm3=%xmm3
232pshufb M0SWAP,%xmm3
233
234# qhasm: shuffle bytes of xmm4 by M0SWAP
235# asm 1: pshufb M0SWAP,<xmm4=int6464#5
236# asm 2: pshufb M0SWAP,<xmm4=%xmm4
237pshufb M0SWAP,%xmm4
238
239# qhasm: shuffle bytes of xmm5 by M0SWAP
240# asm 1: pshufb M0SWAP,<xmm5=int6464#6
241# asm 2: pshufb M0SWAP,<xmm5=%xmm5
242pshufb M0SWAP,%xmm5
243
244# qhasm: shuffle bytes of xmm6 by M0SWAP
245# asm 1: pshufb M0SWAP,<xmm6=int6464#7
246# asm 2: pshufb M0SWAP,<xmm6=%xmm6
247pshufb M0SWAP,%xmm6
248
249# qhasm: shuffle bytes of xmm7 by M0SWAP
250# asm 1: pshufb M0SWAP,<xmm7=int6464#8
251# asm 2: pshufb M0SWAP,<xmm7=%xmm7
252pshufb M0SWAP,%xmm7
253
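# The long sequence below transposes the eight blocks into bitsliced
# form.  Each movdqa/psrlq/pxor/pand/psllq/pxor group is one "swap
# move":
#   t = ((a >> n) ^ b) & mask;  b ^= t;  a ^= t << n;
# which exchanges the masked bit positions of registers a and b.  With
# shift counts 1, 2 and 4 and masks BS0, BS1 and BS2, the net effect is
# that xmm0..xmm7 end up holding one bit plane of the state each.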
254# qhasm: xmm8 = xmm6
255# asm 1: movdqa <xmm6=int6464#7,>xmm8=int6464#9
256# asm 2: movdqa <xmm6=%xmm6,>xmm8=%xmm8
257movdqa %xmm6,%xmm8
258
259# qhasm: uint6464 xmm8 >>= 1
260# asm 1: psrlq $1,<xmm8=int6464#9
261# asm 2: psrlq $1,<xmm8=%xmm8
262psrlq $1,%xmm8
263
264# qhasm: xmm8 ^= xmm7
265# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
266# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
267pxor %xmm7,%xmm8
268
269# qhasm: xmm8 &= BS0
270# asm 1: pand BS0,<xmm8=int6464#9
271# asm 2: pand BS0,<xmm8=%xmm8
272pand BS0,%xmm8
273
274# qhasm: xmm7 ^= xmm8
275# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
276# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
277pxor %xmm8,%xmm7
278
279# qhasm: uint6464 xmm8 <<= 1
280# asm 1: psllq $1,<xmm8=int6464#9
281# asm 2: psllq $1,<xmm8=%xmm8
282psllq $1,%xmm8
283
284# qhasm: xmm6 ^= xmm8
285# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
286# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
287pxor %xmm8,%xmm6
288
289# qhasm: xmm8 = xmm4
290# asm 1: movdqa <xmm4=int6464#5,>xmm8=int6464#9
291# asm 2: movdqa <xmm4=%xmm4,>xmm8=%xmm8
292movdqa %xmm4,%xmm8
293
294# qhasm: uint6464 xmm8 >>= 1
295# asm 1: psrlq $1,<xmm8=int6464#9
296# asm 2: psrlq $1,<xmm8=%xmm8
297psrlq $1,%xmm8
298
299# qhasm: xmm8 ^= xmm5
300# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
301# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
302pxor %xmm5,%xmm8
303
304# qhasm: xmm8 &= BS0
305# asm 1: pand BS0,<xmm8=int6464#9
306# asm 2: pand BS0,<xmm8=%xmm8
307pand BS0,%xmm8
308
309# qhasm: xmm5 ^= xmm8
310# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
311# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
312pxor %xmm8,%xmm5
313
314# qhasm: uint6464 xmm8 <<= 1
315# asm 1: psllq $1,<xmm8=int6464#9
316# asm 2: psllq $1,<xmm8=%xmm8
317psllq $1,%xmm8
318
319# qhasm: xmm4 ^= xmm8
320# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
321# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
322pxor %xmm8,%xmm4
323
324# qhasm: xmm8 = xmm2
325# asm 1: movdqa <xmm2=int6464#3,>xmm8=int6464#9
326# asm 2: movdqa <xmm2=%xmm2,>xmm8=%xmm8
327movdqa %xmm2,%xmm8
328
329# qhasm: uint6464 xmm8 >>= 1
330# asm 1: psrlq $1,<xmm8=int6464#9
331# asm 2: psrlq $1,<xmm8=%xmm8
332psrlq $1,%xmm8
333
334# qhasm: xmm8 ^= xmm3
335# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#9
336# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm8
337pxor %xmm3,%xmm8
338
339# qhasm: xmm8 &= BS0
340# asm 1: pand BS0,<xmm8=int6464#9
341# asm 2: pand BS0,<xmm8=%xmm8
342pand BS0,%xmm8
343
344# qhasm: xmm3 ^= xmm8
345# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
346# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
347pxor %xmm8,%xmm3
348
349# qhasm: uint6464 xmm8 <<= 1
350# asm 1: psllq $1,<xmm8=int6464#9
351# asm 2: psllq $1,<xmm8=%xmm8
352psllq $1,%xmm8
353
354# qhasm: xmm2 ^= xmm8
355# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
356# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
357pxor %xmm8,%xmm2
358
359# qhasm: xmm8 = xmm0
360# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
361# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
362movdqa %xmm0,%xmm8
363
364# qhasm: uint6464 xmm8 >>= 1
365# asm 1: psrlq $1,<xmm8=int6464#9
366# asm 2: psrlq $1,<xmm8=%xmm8
367psrlq $1,%xmm8
368
369# qhasm: xmm8 ^= xmm1
370# asm 1: pxor <xmm1=int6464#2,<xmm8=int6464#9
371# asm 2: pxor <xmm1=%xmm1,<xmm8=%xmm8
372pxor %xmm1,%xmm8
373
374# qhasm: xmm8 &= BS0
375# asm 1: pand BS0,<xmm8=int6464#9
376# asm 2: pand BS0,<xmm8=%xmm8
377pand BS0,%xmm8
378
379# qhasm: xmm1 ^= xmm8
380# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
381# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
382pxor %xmm8,%xmm1
383
384# qhasm: uint6464 xmm8 <<= 1
385# asm 1: psllq $1,<xmm8=int6464#9
386# asm 2: psllq $1,<xmm8=%xmm8
387psllq $1,%xmm8
388
389# qhasm: xmm0 ^= xmm8
390# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
391# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
392pxor %xmm8,%xmm0
393
394# qhasm: xmm8 = xmm5
395# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#9
396# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm8
397movdqa %xmm5,%xmm8
398
399# qhasm: uint6464 xmm8 >>= 2
400# asm 1: psrlq $2,<xmm8=int6464#9
401# asm 2: psrlq $2,<xmm8=%xmm8
402psrlq $2,%xmm8
403
404# qhasm: xmm8 ^= xmm7
405# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
406# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
407pxor %xmm7,%xmm8
408
409# qhasm: xmm8 &= BS1
410# asm 1: pand BS1,<xmm8=int6464#9
411# asm 2: pand BS1,<xmm8=%xmm8
412pand BS1,%xmm8
413
414# qhasm: xmm7 ^= xmm8
415# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
416# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
417pxor %xmm8,%xmm7
418
419# qhasm: uint6464 xmm8 <<= 2
420# asm 1: psllq $2,<xmm8=int6464#9
421# asm 2: psllq $2,<xmm8=%xmm8
422psllq $2,%xmm8
423
424# qhasm: xmm5 ^= xmm8
425# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
426# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
427pxor %xmm8,%xmm5
428
429# qhasm: xmm8 = xmm4
430# asm 1: movdqa <xmm4=int6464#5,>xmm8=int6464#9
431# asm 2: movdqa <xmm4=%xmm4,>xmm8=%xmm8
432movdqa %xmm4,%xmm8
433
434# qhasm: uint6464 xmm8 >>= 2
435# asm 1: psrlq $2,<xmm8=int6464#9
436# asm 2: psrlq $2,<xmm8=%xmm8
437psrlq $2,%xmm8
438
439# qhasm: xmm8 ^= xmm6
440# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#9
441# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm8
442pxor %xmm6,%xmm8
443
444# qhasm: xmm8 &= BS1
445# asm 1: pand BS1,<xmm8=int6464#9
446# asm 2: pand BS1,<xmm8=%xmm8
447pand BS1,%xmm8
448
449# qhasm: xmm6 ^= xmm8
450# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
451# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
452pxor %xmm8,%xmm6
453
454# qhasm: uint6464 xmm8 <<= 2
455# asm 1: psllq $2,<xmm8=int6464#9
456# asm 2: psllq $2,<xmm8=%xmm8
457psllq $2,%xmm8
458
459# qhasm: xmm4 ^= xmm8
460# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
461# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
462pxor %xmm8,%xmm4
463
464# qhasm: xmm8 = xmm1
465# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#9
466# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm8
467movdqa %xmm1,%xmm8
468
469# qhasm: uint6464 xmm8 >>= 2
470# asm 1: psrlq $2,<xmm8=int6464#9
471# asm 2: psrlq $2,<xmm8=%xmm8
472psrlq $2,%xmm8
473
474# qhasm: xmm8 ^= xmm3
475# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#9
476# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm8
477pxor %xmm3,%xmm8
478
479# qhasm: xmm8 &= BS1
480# asm 1: pand BS1,<xmm8=int6464#9
481# asm 2: pand BS1,<xmm8=%xmm8
482pand BS1,%xmm8
483
484# qhasm: xmm3 ^= xmm8
485# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
486# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
487pxor %xmm8,%xmm3
488
489# qhasm: uint6464 xmm8 <<= 2
490# asm 1: psllq $2,<xmm8=int6464#9
491# asm 2: psllq $2,<xmm8=%xmm8
492psllq $2,%xmm8
493
494# qhasm: xmm1 ^= xmm8
495# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
496# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
497pxor %xmm8,%xmm1
498
499# qhasm: xmm8 = xmm0
500# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
501# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
502movdqa %xmm0,%xmm8
503
504# qhasm: uint6464 xmm8 >>= 2
505# asm 1: psrlq $2,<xmm8=int6464#9
506# asm 2: psrlq $2,<xmm8=%xmm8
507psrlq $2,%xmm8
508
509# qhasm: xmm8 ^= xmm2
510# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#9
511# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm8
512pxor %xmm2,%xmm8
513
514# qhasm: xmm8 &= BS1
515# asm 1: pand BS1,<xmm8=int6464#9
516# asm 2: pand BS1,<xmm8=%xmm8
517pand BS1,%xmm8
518
519# qhasm: xmm2 ^= xmm8
520# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
521# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
522pxor %xmm8,%xmm2
523
524# qhasm: uint6464 xmm8 <<= 2
525# asm 1: psllq $2,<xmm8=int6464#9
526# asm 2: psllq $2,<xmm8=%xmm8
527psllq $2,%xmm8
528
529# qhasm: xmm0 ^= xmm8
530# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
531# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
532pxor %xmm8,%xmm0
533
534# qhasm: xmm8 = xmm3
535# asm 1: movdqa <xmm3=int6464#4,>xmm8=int6464#9
536# asm 2: movdqa <xmm3=%xmm3,>xmm8=%xmm8
537movdqa %xmm3,%xmm8
538
539# qhasm: uint6464 xmm8 >>= 4
540# asm 1: psrlq $4,<xmm8=int6464#9
541# asm 2: psrlq $4,<xmm8=%xmm8
542psrlq $4,%xmm8
543
544# qhasm: xmm8 ^= xmm7
545# asm 1: pxor <xmm7=int6464#8,<xmm8=int6464#9
546# asm 2: pxor <xmm7=%xmm7,<xmm8=%xmm8
547pxor %xmm7,%xmm8
548
549# qhasm: xmm8 &= BS2
550# asm 1: pand BS2,<xmm8=int6464#9
551# asm 2: pand BS2,<xmm8=%xmm8
552pand BS2,%xmm8
553
554# qhasm: xmm7 ^= xmm8
555# asm 1: pxor <xmm8=int6464#9,<xmm7=int6464#8
556# asm 2: pxor <xmm8=%xmm8,<xmm7=%xmm7
557pxor %xmm8,%xmm7
558
559# qhasm: uint6464 xmm8 <<= 4
560# asm 1: psllq $4,<xmm8=int6464#9
561# asm 2: psllq $4,<xmm8=%xmm8
562psllq $4,%xmm8
563
564# qhasm: xmm3 ^= xmm8
565# asm 1: pxor <xmm8=int6464#9,<xmm3=int6464#4
566# asm 2: pxor <xmm8=%xmm8,<xmm3=%xmm3
567pxor %xmm8,%xmm3
568
569# qhasm: xmm8 = xmm2
570# asm 1: movdqa <xmm2=int6464#3,>xmm8=int6464#9
571# asm 2: movdqa <xmm2=%xmm2,>xmm8=%xmm8
572movdqa %xmm2,%xmm8
573
574# qhasm: uint6464 xmm8 >>= 4
575# asm 1: psrlq $4,<xmm8=int6464#9
576# asm 2: psrlq $4,<xmm8=%xmm8
577psrlq $4,%xmm8
578
579# qhasm: xmm8 ^= xmm6
580# asm 1: pxor <xmm6=int6464#7,<xmm8=int6464#9
581# asm 2: pxor <xmm6=%xmm6,<xmm8=%xmm8
582pxor %xmm6,%xmm8
583
584# qhasm: xmm8 &= BS2
585# asm 1: pand BS2,<xmm8=int6464#9
586# asm 2: pand BS2,<xmm8=%xmm8
587pand BS2,%xmm8
588
589# qhasm: xmm6 ^= xmm8
590# asm 1: pxor <xmm8=int6464#9,<xmm6=int6464#7
591# asm 2: pxor <xmm8=%xmm8,<xmm6=%xmm6
592pxor %xmm8,%xmm6
593
594# qhasm: uint6464 xmm8 <<= 4
595# asm 1: psllq $4,<xmm8=int6464#9
596# asm 2: psllq $4,<xmm8=%xmm8
597psllq $4,%xmm8
598
599# qhasm: xmm2 ^= xmm8
600# asm 1: pxor <xmm8=int6464#9,<xmm2=int6464#3
601# asm 2: pxor <xmm8=%xmm8,<xmm2=%xmm2
602pxor %xmm8,%xmm2
603
604# qhasm: xmm8 = xmm1
605# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#9
606# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm8
607movdqa %xmm1,%xmm8
608
609# qhasm: uint6464 xmm8 >>= 4
610# asm 1: psrlq $4,<xmm8=int6464#9
611# asm 2: psrlq $4,<xmm8=%xmm8
612psrlq $4,%xmm8
613
614# qhasm: xmm8 ^= xmm5
615# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
616# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
617pxor %xmm5,%xmm8
618
619# qhasm: xmm8 &= BS2
620# asm 1: pand BS2,<xmm8=int6464#9
621# asm 2: pand BS2,<xmm8=%xmm8
622pand BS2,%xmm8
623
624# qhasm: xmm5 ^= xmm8
625# asm 1: pxor <xmm8=int6464#9,<xmm5=int6464#6
626# asm 2: pxor <xmm8=%xmm8,<xmm5=%xmm5
627pxor %xmm8,%xmm5
628
629# qhasm: uint6464 xmm8 <<= 4
630# asm 1: psllq $4,<xmm8=int6464#9
631# asm 2: psllq $4,<xmm8=%xmm8
632psllq $4,%xmm8
633
634# qhasm: xmm1 ^= xmm8
635# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
636# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
637pxor %xmm8,%xmm1
638
639# qhasm: xmm8 = xmm0
640# asm 1: movdqa <xmm0=int6464#1,>xmm8=int6464#9
641# asm 2: movdqa <xmm0=%xmm0,>xmm8=%xmm8
642movdqa %xmm0,%xmm8
643
644# qhasm: uint6464 xmm8 >>= 4
645# asm 1: psrlq $4,<xmm8=int6464#9
646# asm 2: psrlq $4,<xmm8=%xmm8
647psrlq $4,%xmm8
648
649# qhasm: xmm8 ^= xmm4
650# asm 1: pxor <xmm4=int6464#5,<xmm8=int6464#9
651# asm 2: pxor <xmm4=%xmm4,<xmm8=%xmm8
652pxor %xmm4,%xmm8
653
654# qhasm: xmm8 &= BS2
655# asm 1: pand BS2,<xmm8=int6464#9
656# asm 2: pand BS2,<xmm8=%xmm8
657pand BS2,%xmm8
658
659# qhasm: xmm4 ^= xmm8
660# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
661# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
662pxor %xmm8,%xmm4
663
664# qhasm: uint6464 xmm8 <<= 4
665# asm 1: psllq $4,<xmm8=int6464#9
666# asm 2: psllq $4,<xmm8=%xmm8
667psllq $4,%xmm8
668
669# qhasm: xmm0 ^= xmm8
670# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
671# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
672pxor %xmm8,%xmm0
673
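# Each bit plane in xmm0..xmm7 is now XORed with 16 bytes of the key
# material precomputed by beforenm (pointed to by c), effectively
# AddRoundKey on the bitsliced representation, and pshufb SR applies
# the ShiftRows byte permutation ahead of the S-box circuit that
# follows.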
674# qhasm: xmm0 ^= *(int128 *)(c + 0)
675# asm 1: pxor 0(<c=int64#5),<xmm0=int6464#1
676# asm 2: pxor 0(<c=%r8),<xmm0=%xmm0
677pxor 0(%r8),%xmm0
678
679# qhasm: shuffle bytes of xmm0 by SR
680# asm 1: pshufb SR,<xmm0=int6464#1
681# asm 2: pshufb SR,<xmm0=%xmm0
682pshufb SR,%xmm0
683
684# qhasm: xmm1 ^= *(int128 *)(c + 16)
685# asm 1: pxor 16(<c=int64#5),<xmm1=int6464#2
686# asm 2: pxor 16(<c=%r8),<xmm1=%xmm1
687pxor 16(%r8),%xmm1
688
689# qhasm: shuffle bytes of xmm1 by SR
690# asm 1: pshufb SR,<xmm1=int6464#2
691# asm 2: pshufb SR,<xmm1=%xmm1
692pshufb SR,%xmm1
693
694# qhasm: xmm2 ^= *(int128 *)(c + 32)
695# asm 1: pxor 32(<c=int64#5),<xmm2=int6464#3
696# asm 2: pxor 32(<c=%r8),<xmm2=%xmm2
697pxor 32(%r8),%xmm2
698
699# qhasm: shuffle bytes of xmm2 by SR
700# asm 1: pshufb SR,<xmm2=int6464#3
701# asm 2: pshufb SR,<xmm2=%xmm2
702pshufb SR,%xmm2
703
704# qhasm: xmm3 ^= *(int128 *)(c + 48)
705# asm 1: pxor 48(<c=int64#5),<xmm3=int6464#4
706# asm 2: pxor 48(<c=%r8),<xmm3=%xmm3
707pxor 48(%r8),%xmm3
708
709# qhasm: shuffle bytes of xmm3 by SR
710# asm 1: pshufb SR,<xmm3=int6464#4
711# asm 2: pshufb SR,<xmm3=%xmm3
712pshufb SR,%xmm3
713
714# qhasm: xmm4 ^= *(int128 *)(c + 64)
715# asm 1: pxor 64(<c=int64#5),<xmm4=int6464#5
716# asm 2: pxor 64(<c=%r8),<xmm4=%xmm4
717pxor 64(%r8),%xmm4
718
719# qhasm: shuffle bytes of xmm4 by SR
720# asm 1: pshufb SR,<xmm4=int6464#5
721# asm 2: pshufb SR,<xmm4=%xmm4
722pshufb SR,%xmm4
723
724# qhasm: xmm5 ^= *(int128 *)(c + 80)
725# asm 1: pxor 80(<c=int64#5),<xmm5=int6464#6
726# asm 2: pxor 80(<c=%r8),<xmm5=%xmm5
727pxor 80(%r8),%xmm5
728
729# qhasm: shuffle bytes of xmm5 by SR
730# asm 1: pshufb SR,<xmm5=int6464#6
731# asm 2: pshufb SR,<xmm5=%xmm5
732pshufb SR,%xmm5
733
734# qhasm: xmm6 ^= *(int128 *)(c + 96)
735# asm 1: pxor 96(<c=int64#5),<xmm6=int6464#7
736# asm 2: pxor 96(<c=%r8),<xmm6=%xmm6
737pxor 96(%r8),%xmm6
738
739# qhasm: shuffle bytes of xmm6 by SR
740# asm 1: pshufb SR,<xmm6=int6464#7
741# asm 2: pshufb SR,<xmm6=%xmm6
742pshufb SR,%xmm6
743
744# qhasm: xmm7 ^= *(int128 *)(c + 112)
745# asm 1: pxor 112(<c=int64#5),<xmm7=int6464#8
746# asm 2: pxor 112(<c=%r8),<xmm7=%xmm7
747pxor 112(%r8),%xmm7
748
749# qhasm: shuffle bytes of xmm7 by SR
750# asm 1: pshufb SR,<xmm7=int6464#8
751# asm 2: pshufb SR,<xmm7=%xmm7
752pshufb SR,%xmm7
753
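# The long pxor/pand/por sequence that follows evaluates the AES S-box
# as a boolean circuit over the eight bit planes, using xmm8..xmm15 as
# temporaries and outputs.  Note that later in this sequence the qhasm
# variable names no longer coincide with the physical %xmm registers;
# the "# asm 2" lines give the actual register allocation.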
754# qhasm: xmm5 ^= xmm6
755# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
756# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
757pxor %xmm6,%xmm5
758
759# qhasm: xmm2 ^= xmm1
760# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
761# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
762pxor %xmm1,%xmm2
763
764# qhasm: xmm5 ^= xmm0
765# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
766# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
767pxor %xmm0,%xmm5
768
769# qhasm: xmm6 ^= xmm2
770# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
771# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
772pxor %xmm2,%xmm6
773
774# qhasm: xmm3 ^= xmm0
775# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
776# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
777pxor %xmm0,%xmm3
778
779# qhasm: xmm6 ^= xmm3
780# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
781# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
782pxor %xmm3,%xmm6
783
784# qhasm: xmm3 ^= xmm7
785# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
786# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
787pxor %xmm7,%xmm3
788
789# qhasm: xmm3 ^= xmm4
790# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
791# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
792pxor %xmm4,%xmm3
793
794# qhasm: xmm7 ^= xmm5
795# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
796# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
797pxor %xmm5,%xmm7
798
799# qhasm: xmm3 ^= xmm1
800# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
801# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
802pxor %xmm1,%xmm3
803
804# qhasm: xmm4 ^= xmm5
805# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
806# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
807pxor %xmm5,%xmm4
808
809# qhasm: xmm2 ^= xmm7
810# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
811# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
812pxor %xmm7,%xmm2
813
814# qhasm: xmm1 ^= xmm5
815# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
816# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
817pxor %xmm5,%xmm1
818
819# qhasm: xmm11 = xmm7
820# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
821# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
822movdqa %xmm7,%xmm8
823
824# qhasm: xmm10 = xmm1
825# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
826# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
827movdqa %xmm1,%xmm9
828
829# qhasm: xmm9 = xmm5
830# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
831# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
832movdqa %xmm5,%xmm10
833
834# qhasm: xmm13 = xmm2
835# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
836# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
837movdqa %xmm2,%xmm11
838
839# qhasm: xmm12 = xmm6
840# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
841# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
842movdqa %xmm6,%xmm12
843
844# qhasm: xmm11 ^= xmm4
845# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
846# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
847pxor %xmm4,%xmm8
848
849# qhasm: xmm10 ^= xmm2
850# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
851# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
852pxor %xmm2,%xmm9
853
854# qhasm: xmm9 ^= xmm3
855# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
856# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
857pxor %xmm3,%xmm10
858
859# qhasm: xmm13 ^= xmm4
860# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
861# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
862pxor %xmm4,%xmm11
863
864# qhasm: xmm12 ^= xmm0
865# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
866# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
867pxor %xmm0,%xmm12
868
869# qhasm: xmm14 = xmm11
870# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
871# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
872movdqa %xmm8,%xmm13
873
874# qhasm: xmm8 = xmm10
875# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
876# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
877movdqa %xmm9,%xmm14
878
879# qhasm: xmm15 = xmm11
880# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
881# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
882movdqa %xmm8,%xmm15
883
884# qhasm: xmm10 |= xmm9
885# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
886# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
887por %xmm10,%xmm9
888
889# qhasm: xmm11 |= xmm12
890# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
891# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
892por %xmm12,%xmm8
893
894# qhasm: xmm15 ^= xmm8
895# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
896# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
897pxor %xmm14,%xmm15
898
899# qhasm: xmm14 &= xmm12
900# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
901# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
902pand %xmm12,%xmm13
903
904# qhasm: xmm8 &= xmm9
905# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
906# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
907pand %xmm10,%xmm14
908
909# qhasm: xmm12 ^= xmm9
910# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
911# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
912pxor %xmm10,%xmm12
913
914# qhasm: xmm15 &= xmm12
915# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
916# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
917pand %xmm12,%xmm15
918
919# qhasm: xmm12 = xmm3
920# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
921# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
922movdqa %xmm3,%xmm10
923
924# qhasm: xmm12 ^= xmm0
925# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
926# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
927pxor %xmm0,%xmm10
928
929# qhasm: xmm13 &= xmm12
930# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
931# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
932pand %xmm10,%xmm11
933
934# qhasm: xmm11 ^= xmm13
935# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
936# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
937pxor %xmm11,%xmm8
938
939# qhasm: xmm10 ^= xmm13
940# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
941# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
942pxor %xmm11,%xmm9
943
944# qhasm: xmm13 = xmm7
945# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
946# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
947movdqa %xmm7,%xmm10
948
949# qhasm: xmm13 ^= xmm1
950# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
951# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
952pxor %xmm1,%xmm10
953
954# qhasm: xmm12 = xmm5
955# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
956# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
957movdqa %xmm5,%xmm11
958
959# qhasm: xmm9 = xmm13
960# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
961# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
962movdqa %xmm10,%xmm12
963
964# qhasm: xmm12 ^= xmm6
965# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
966# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
967pxor %xmm6,%xmm11
968
969# qhasm: xmm9 |= xmm12
970# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
971# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
972por %xmm11,%xmm12
973
974# qhasm: xmm13 &= xmm12
975# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
976# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
977pand %xmm11,%xmm10
978
979# qhasm: xmm8 ^= xmm13
980# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
981# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
982pxor %xmm10,%xmm14
983
984# qhasm: xmm11 ^= xmm15
985# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
986# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
987pxor %xmm15,%xmm8
988
989# qhasm: xmm10 ^= xmm14
990# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
991# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
992pxor %xmm13,%xmm9
993
994# qhasm: xmm9 ^= xmm15
995# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
996# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
997pxor %xmm15,%xmm12
998
999# qhasm: xmm8 ^= xmm14
1000# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
1001# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
1002pxor %xmm13,%xmm14
1003
1004# qhasm: xmm9 ^= xmm14
1005# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
1006# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
1007pxor %xmm13,%xmm12
1008
1009# qhasm: xmm12 = xmm2
1010# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
1011# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
1012movdqa %xmm2,%xmm10
1013
1014# qhasm: xmm13 = xmm4
1015# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
1016# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
1017movdqa %xmm4,%xmm11
1018
1019# qhasm: xmm14 = xmm1
1020# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
1021# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
1022movdqa %xmm1,%xmm13
1023
1024# qhasm: xmm15 = xmm7
1025# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
1026# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
1027movdqa %xmm7,%xmm15
1028
1029# qhasm: xmm12 &= xmm3
1030# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
1031# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
1032pand %xmm3,%xmm10
1033
1034# qhasm: xmm13 &= xmm0
1035# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
1036# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
1037pand %xmm0,%xmm11
1038
1039# qhasm: xmm14 &= xmm5
1040# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
1041# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
1042pand %xmm5,%xmm13
1043
1044# qhasm: xmm15 |= xmm6
1045# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
1046# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
1047por %xmm6,%xmm15
1048
1049# qhasm: xmm11 ^= xmm12
1050# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
1051# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
1052pxor %xmm10,%xmm8
1053
1054# qhasm: xmm10 ^= xmm13
1055# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
1056# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
1057pxor %xmm11,%xmm9
1058
1059# qhasm: xmm9 ^= xmm14
1060# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
1061# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
1062pxor %xmm13,%xmm12
1063
1064# qhasm: xmm8 ^= xmm15
1065# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
1066# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
1067pxor %xmm15,%xmm14
1068
1069# qhasm: xmm12 = xmm11
1070# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
1071# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
1072movdqa %xmm8,%xmm10
1073
1074# qhasm: xmm12 ^= xmm10
1075# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
1076# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
1077pxor %xmm9,%xmm10
1078
1079# qhasm: xmm11 &= xmm9
1080# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
1081# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
1082pand %xmm12,%xmm8
1083
1084# qhasm: xmm14 = xmm8
1085# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
1086# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
1087movdqa %xmm14,%xmm11
1088
1089# qhasm: xmm14 ^= xmm11
1090# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
1091# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
1092pxor %xmm8,%xmm11
1093
1094# qhasm: xmm15 = xmm12
1095# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
1096# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
1097movdqa %xmm10,%xmm13
1098
1099# qhasm: xmm15 &= xmm14
1100# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
1101# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
1102pand %xmm11,%xmm13
1103
1104# qhasm: xmm15 ^= xmm10
1105# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
1106# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
1107pxor %xmm9,%xmm13
1108
1109# qhasm: xmm13 = xmm9
1110# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
1111# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
1112movdqa %xmm12,%xmm15
1113
1114# qhasm: xmm13 ^= xmm8
1115# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1116# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1117pxor %xmm14,%xmm15
1118
1119# qhasm: xmm11 ^= xmm10
1120# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
1121# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
1122pxor %xmm9,%xmm8
1123
1124# qhasm: xmm13 &= xmm11
1125# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
1126# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
1127pand %xmm8,%xmm15
1128
1129# qhasm: xmm13 ^= xmm8
1130# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
1131# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
1132pxor %xmm14,%xmm15
1133
1134# qhasm: xmm9 ^= xmm13
1135# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
1136# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
1137pxor %xmm15,%xmm12
1138
1139# qhasm: xmm10 = xmm14
1140# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
1141# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
1142movdqa %xmm11,%xmm8
1143
1144# qhasm: xmm10 ^= xmm13
1145# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
1146# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
1147pxor %xmm15,%xmm8
1148
1149# qhasm: xmm10 &= xmm8
1150# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
1151# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
1152pand %xmm14,%xmm8
1153
1154# qhasm: xmm9 ^= xmm10
1155# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
1156# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
1157pxor %xmm8,%xmm12
1158
1159# qhasm: xmm14 ^= xmm10
1160# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
1161# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
1162pxor %xmm8,%xmm11
1163
1164# qhasm: xmm14 &= xmm15
1165# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
1166# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
1167pand %xmm13,%xmm11
1168
1169# qhasm: xmm14 ^= xmm12
1170# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
1171# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
1172pxor %xmm10,%xmm11
1173
1174# qhasm: xmm12 = xmm6
1175# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
1176# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
1177movdqa %xmm6,%xmm8
1178
1179# qhasm: xmm8 = xmm5
1180# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
1181# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
1182movdqa %xmm5,%xmm9
1183
1184# qhasm: xmm10 = xmm15
1185# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
1186# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
1187movdqa %xmm13,%xmm10
1188
1189# qhasm: xmm10 ^= xmm14
1190# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
1191# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
1192pxor %xmm11,%xmm10
1193
1194# qhasm: xmm10 &= xmm6
1195# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
1196# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
1197pand %xmm6,%xmm10
1198
1199# qhasm: xmm6 ^= xmm5
1200# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1201# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1202pxor %xmm5,%xmm6
1203
1204# qhasm: xmm6 &= xmm14
1205# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
1206# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
1207pand %xmm11,%xmm6
1208
1209# qhasm: xmm5 &= xmm15
1210# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
1211# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
1212pand %xmm13,%xmm5
1213
1214# qhasm: xmm6 ^= xmm5
1215# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
1216# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
1217pxor %xmm5,%xmm6
1218
1219# qhasm: xmm5 ^= xmm10
1220# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
1221# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
1222pxor %xmm10,%xmm5
1223
1224# qhasm: xmm12 ^= xmm0
1225# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
1226# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
1227pxor %xmm0,%xmm8
1228
1229# qhasm: xmm8 ^= xmm3
1230# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
1231# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
1232pxor %xmm3,%xmm9
1233
1234# qhasm: xmm15 ^= xmm13
1235# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1236# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1237pxor %xmm15,%xmm13
1238
1239# qhasm: xmm14 ^= xmm9
1240# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1241# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1242pxor %xmm12,%xmm11
1243
1244# qhasm: xmm11 = xmm15
1245# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1246# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1247movdqa %xmm13,%xmm10
1248
1249# qhasm: xmm11 ^= xmm14
1250# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1251# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1252pxor %xmm11,%xmm10
1253
1254# qhasm: xmm11 &= xmm12
1255# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1256# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1257pand %xmm8,%xmm10
1258
1259# qhasm: xmm12 ^= xmm8
1260# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1261# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1262pxor %xmm9,%xmm8
1263
1264# qhasm: xmm12 &= xmm14
1265# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1266# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1267pand %xmm11,%xmm8
1268
1269# qhasm: xmm8 &= xmm15
1270# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1271# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1272pand %xmm13,%xmm9
1273
1274# qhasm: xmm8 ^= xmm12
1275# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1276# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1277pxor %xmm8,%xmm9
1278
1279# qhasm: xmm12 ^= xmm11
1280# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1281# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1282pxor %xmm10,%xmm8
1283
1284# qhasm: xmm10 = xmm13
1285# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1286# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1287movdqa %xmm15,%xmm10
1288
1289# qhasm: xmm10 ^= xmm9
1290# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1291# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1292pxor %xmm12,%xmm10
1293
1294# qhasm: xmm10 &= xmm0
1295# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
1296# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
1297pand %xmm0,%xmm10
1298
1299# qhasm: xmm0 ^= xmm3
1300# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1301# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1302pxor %xmm3,%xmm0
1303
1304# qhasm: xmm0 &= xmm9
1305# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
1306# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
1307pand %xmm12,%xmm0
1308
1309# qhasm: xmm3 &= xmm13
1310# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
1311# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
1312pand %xmm15,%xmm3
1313
1314# qhasm: xmm0 ^= xmm3
1315# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
1316# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
1317pxor %xmm3,%xmm0
1318
1319# qhasm: xmm3 ^= xmm10
1320# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
1321# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
1322pxor %xmm10,%xmm3
1323
1324# qhasm: xmm6 ^= xmm12
1325# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
1326# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
1327pxor %xmm8,%xmm6
1328
1329# qhasm: xmm0 ^= xmm12
1330# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
1331# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
1332pxor %xmm8,%xmm0
1333
1334# qhasm: xmm5 ^= xmm8
1335# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
1336# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
1337pxor %xmm9,%xmm5
1338
1339# qhasm: xmm3 ^= xmm8
1340# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
1341# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
1342pxor %xmm9,%xmm3
1343
1344# qhasm: xmm12 = xmm7
1345# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
1346# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
1347movdqa %xmm7,%xmm8
1348
1349# qhasm: xmm8 = xmm1
1350# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
1351# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
1352movdqa %xmm1,%xmm9
1353
1354# qhasm: xmm12 ^= xmm4
1355# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
1356# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
1357pxor %xmm4,%xmm8
1358
1359# qhasm: xmm8 ^= xmm2
1360# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
1361# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
1362pxor %xmm2,%xmm9
1363
1364# qhasm: xmm11 = xmm15
1365# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1366# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1367movdqa %xmm13,%xmm10
1368
1369# qhasm: xmm11 ^= xmm14
1370# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1371# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1372pxor %xmm11,%xmm10
1373
1374# qhasm: xmm11 &= xmm12
1375# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
1376# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
1377pand %xmm8,%xmm10
1378
1379# qhasm: xmm12 ^= xmm8
1380# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
1381# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
1382pxor %xmm9,%xmm8
1383
1384# qhasm: xmm12 &= xmm14
1385# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
1386# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
1387pand %xmm11,%xmm8
1388
1389# qhasm: xmm8 &= xmm15
1390# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
1391# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
1392pand %xmm13,%xmm9
1393
1394# qhasm: xmm8 ^= xmm12
1395# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
1396# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
1397pxor %xmm8,%xmm9
1398
1399# qhasm: xmm12 ^= xmm11
1400# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
1401# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
1402pxor %xmm10,%xmm8
1403
1404# qhasm: xmm10 = xmm13
1405# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
1406# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
1407movdqa %xmm15,%xmm10
1408
1409# qhasm: xmm10 ^= xmm9
1410# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
1411# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
1412pxor %xmm12,%xmm10
1413
1414# qhasm: xmm10 &= xmm4
1415# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
1416# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
1417pand %xmm4,%xmm10
1418
1419# qhasm: xmm4 ^= xmm2
1420# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1421# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1422pxor %xmm2,%xmm4
1423
1424# qhasm: xmm4 &= xmm9
1425# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
1426# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
1427pand %xmm12,%xmm4
1428
1429# qhasm: xmm2 &= xmm13
1430# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
1431# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
1432pand %xmm15,%xmm2
1433
1434# qhasm: xmm4 ^= xmm2
1435# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
1436# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
1437pxor %xmm2,%xmm4
1438
1439# qhasm: xmm2 ^= xmm10
1440# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
1441# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
1442pxor %xmm10,%xmm2
1443
1444# qhasm: xmm15 ^= xmm13
1445# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
1446# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
1447pxor %xmm15,%xmm13
1448
1449# qhasm: xmm14 ^= xmm9
1450# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
1451# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
1452pxor %xmm12,%xmm11
1453
1454# qhasm: xmm11 = xmm15
1455# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
1456# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
1457movdqa %xmm13,%xmm10
1458
1459# qhasm: xmm11 ^= xmm14
1460# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
1461# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
1462pxor %xmm11,%xmm10
1463
1464# qhasm: xmm11 &= xmm7
1465# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
1466# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
1467pand %xmm7,%xmm10
1468
1469# qhasm: xmm7 ^= xmm1
1470# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1471# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1472pxor %xmm1,%xmm7
1473
1474# qhasm: xmm7 &= xmm14
1475# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
1476# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
1477pand %xmm11,%xmm7
1478
1479# qhasm: xmm1 &= xmm15
1480# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
1481# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
1482pand %xmm13,%xmm1
1483
1484# qhasm: xmm7 ^= xmm1
1485# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
1486# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
1487pxor %xmm1,%xmm7
1488
1489# qhasm: xmm1 ^= xmm11
1490# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
1491# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
1492pxor %xmm10,%xmm1
1493
1494# qhasm: xmm7 ^= xmm12
1495# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
1496# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
1497pxor %xmm8,%xmm7
1498
1499# qhasm: xmm4 ^= xmm12
1500# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
1501# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
1502pxor %xmm8,%xmm4
1503
1504# qhasm: xmm1 ^= xmm8
1505# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
1506# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
1507pxor %xmm9,%xmm1
1508
1509# qhasm: xmm2 ^= xmm8
1510# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
1511# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
1512pxor %xmm9,%xmm2
1513
1514# qhasm: xmm7 ^= xmm0
1515# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
1516# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
1517pxor %xmm0,%xmm7
1518
1519# qhasm: xmm1 ^= xmm6
1520# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
1521# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
1522pxor %xmm6,%xmm1
1523
1524# qhasm: xmm4 ^= xmm7
1525# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
1526# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
1527pxor %xmm7,%xmm4
1528
1529# qhasm: xmm6 ^= xmm0
1530# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
1531# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
1532pxor %xmm0,%xmm6
1533
1534# qhasm: xmm0 ^= xmm1
1535# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
1536# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
1537pxor %xmm1,%xmm0
1538
1539# qhasm: xmm1 ^= xmm5
1540# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
1541# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
1542pxor %xmm5,%xmm1
1543
1544# qhasm: xmm5 ^= xmm2
1545# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
1546# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
1547pxor %xmm2,%xmm5
1548
1549# qhasm: xmm4 ^= xmm5
1550# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
1551# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
1552pxor %xmm5,%xmm4
1553
1554# qhasm: xmm2 ^= xmm3
1555# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
1556# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
1557pxor %xmm3,%xmm2
1558
1559# qhasm: xmm3 ^= xmm5
1560# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
1561# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
1562pxor %xmm5,%xmm3
1563
1564# qhasm: xmm6 ^= xmm3
1565# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
1566# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
1567pxor %xmm3,%xmm6
1568
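# The pshufd/pxor sequence below implements MixColumns on the bitsliced
# state: pshufd $0x93 rotates each bit plane by one 32-bit word and
# pshufd $0x4E by two, and the XOR ladder combines the rotated copies
# into the new column values.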
1569# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
1570# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
1571# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
1572pshufd $0x93,%xmm0,%xmm8
1573
1574# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
1575# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
1576# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
1577pshufd $0x93,%xmm1,%xmm9
1578
1579# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
1580# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
1581# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
1582pshufd $0x93,%xmm4,%xmm10
1583
1584# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
1585# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
1586# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
1587pshufd $0x93,%xmm6,%xmm11
1588
1589# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
1590# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
1591# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
1592pshufd $0x93,%xmm3,%xmm12
1593
1594# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
1595# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
1596# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
1597pshufd $0x93,%xmm7,%xmm13
1598
1599# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
1600# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
1601# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
1602pshufd $0x93,%xmm2,%xmm14
1603
1604# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
1605# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
1606# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
1607pshufd $0x93,%xmm5,%xmm15
1608
1609# qhasm: xmm0 ^= xmm8
1610# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
1611# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
1612pxor %xmm8,%xmm0
1613
1614# qhasm: xmm1 ^= xmm9
1615# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
1616# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
1617pxor %xmm9,%xmm1
1618
1619# qhasm: xmm4 ^= xmm10
1620# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
1621# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
1622pxor %xmm10,%xmm4
1623
1624# qhasm: xmm6 ^= xmm11
1625# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
1626# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
1627pxor %xmm11,%xmm6
1628
1629# qhasm: xmm3 ^= xmm12
1630# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
1631# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
1632pxor %xmm12,%xmm3
1633
1634# qhasm: xmm7 ^= xmm13
1635# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
1636# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
1637pxor %xmm13,%xmm7
1638
1639# qhasm: xmm2 ^= xmm14
1640# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
1641# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
1642pxor %xmm14,%xmm2
1643
1644# qhasm: xmm5 ^= xmm15
1645# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
1646# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
1647pxor %xmm15,%xmm5
1648
1649# qhasm: xmm8 ^= xmm5
1650# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
1651# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
1652pxor %xmm5,%xmm8
1653
1654# qhasm: xmm9 ^= xmm0
1655# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
1656# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
1657pxor %xmm0,%xmm9
1658
1659# qhasm: xmm10 ^= xmm1
1660# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
1661# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
1662pxor %xmm1,%xmm10
1663
1664# qhasm: xmm9 ^= xmm5
1665# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
1666# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
1667pxor %xmm5,%xmm9
1668
1669# qhasm: xmm11 ^= xmm4
1670# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
1671# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
1672pxor %xmm4,%xmm11
1673
1674# qhasm: xmm12 ^= xmm6
1675# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
1676# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
1677pxor %xmm6,%xmm12
1678
1679# qhasm: xmm13 ^= xmm3
1680# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
1681# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
1682pxor %xmm3,%xmm13
1683
1684# qhasm: xmm11 ^= xmm5
1685# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
1686# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
1687pxor %xmm5,%xmm11
1688
1689# qhasm: xmm14 ^= xmm7
1690# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
1691# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
1692pxor %xmm7,%xmm14
1693
1694# qhasm: xmm15 ^= xmm2
1695# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
1696# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
1697pxor %xmm2,%xmm15
1698
1699# qhasm: xmm12 ^= xmm5
1700# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
1701# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
1702pxor %xmm5,%xmm12
1703
1704# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
1705# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
1706# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
1707pshufd $0x4E,%xmm0,%xmm0
1708
1709# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
1710# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
1711# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
1712pshufd $0x4E,%xmm1,%xmm1
1713
1714# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
1715# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
1716# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
1717pshufd $0x4E,%xmm4,%xmm4
1718
1719# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
1720# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
1721# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
1722pshufd $0x4E,%xmm6,%xmm6
1723
1724# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
1725# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
1726# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
1727pshufd $0x4E,%xmm3,%xmm3
1728
1729# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
1730# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
1731# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
1732pshufd $0x4E,%xmm7,%xmm7
1733
1734# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
1735# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
1736# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
1737pshufd $0x4E,%xmm2,%xmm2
1738
1739# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
1740# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
1741# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
1742pshufd $0x4E,%xmm5,%xmm5
1743
1744# qhasm: xmm8 ^= xmm0
1745# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
1746# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
1747pxor %xmm0,%xmm8
1748
1749# qhasm: xmm9 ^= xmm1
1750# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
1751# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
1752pxor %xmm1,%xmm9
1753
1754# qhasm: xmm10 ^= xmm4
1755# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
1756# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
1757pxor %xmm4,%xmm10
1758
1759# qhasm: xmm11 ^= xmm6
1760# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
1761# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
1762pxor %xmm6,%xmm11
1763
1764# qhasm: xmm12 ^= xmm3
1765# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
1766# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
1767pxor %xmm3,%xmm12
1768
1769# qhasm: xmm13 ^= xmm7
1770# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
1771# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
1772pxor %xmm7,%xmm13
1773
1774# qhasm: xmm14 ^= xmm2
1775# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
1776# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
1777pxor %xmm2,%xmm14
1778
1779# qhasm: xmm15 ^= xmm5
1780# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
1781# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
1782pxor %xmm5,%xmm15
1783
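# Key addition for the next round: the eight planes, now held in
# xmm8..xmm15, are XORed with the next 128 bytes of key material at
# c + 128 .. c + 240 and run through ShiftRows (pshufb SR) again.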
1784# qhasm: xmm8 ^= *(int128 *)(c + 128)
1785# asm 1: pxor 128(<c=int64#5),<xmm8=int6464#9
1786# asm 2: pxor 128(<c=%r8),<xmm8=%xmm8
1787pxor 128(%r8),%xmm8
1788
1789# qhasm: shuffle bytes of xmm8 by SR
1790# asm 1: pshufb SR,<xmm8=int6464#9
1791# asm 2: pshufb SR,<xmm8=%xmm8
1792pshufb SR,%xmm8
1793
1794# qhasm: xmm9 ^= *(int128 *)(c + 144)
1795# asm 1: pxor 144(<c=int64#5),<xmm9=int6464#10
1796# asm 2: pxor 144(<c=%r8),<xmm9=%xmm9
1797pxor 144(%r8),%xmm9
1798
1799# qhasm: shuffle bytes of xmm9 by SR
1800# asm 1: pshufb SR,<xmm9=int6464#10
1801# asm 2: pshufb SR,<xmm9=%xmm9
1802pshufb SR,%xmm9
1803
1804# qhasm: xmm10 ^= *(int128 *)(c + 160)
1805# asm 1: pxor 160(<c=int64#5),<xmm10=int6464#11
1806# asm 2: pxor 160(<c=%r8),<xmm10=%xmm10
1807pxor 160(%r8),%xmm10
1808
1809# qhasm: shuffle bytes of xmm10 by SR
1810# asm 1: pshufb SR,<xmm10=int6464#11
1811# asm 2: pshufb SR,<xmm10=%xmm10
1812pshufb SR,%xmm10
1813
1814# qhasm: xmm11 ^= *(int128 *)(c + 176)
1815# asm 1: pxor 176(<c=int64#5),<xmm11=int6464#12
1816# asm 2: pxor 176(<c=%r8),<xmm11=%xmm11
1817pxor 176(%r8),%xmm11
1818
1819# qhasm: shuffle bytes of xmm11 by SR
1820# asm 1: pshufb SR,<xmm11=int6464#12
1821# asm 2: pshufb SR,<xmm11=%xmm11
1822pshufb SR,%xmm11
1823
1824# qhasm: xmm12 ^= *(int128 *)(c + 192)
1825# asm 1: pxor 192(<c=int64#5),<xmm12=int6464#13
1826# asm 2: pxor 192(<c=%r8),<xmm12=%xmm12
1827pxor 192(%r8),%xmm12
1828
1829# qhasm: shuffle bytes of xmm12 by SR
1830# asm 1: pshufb SR,<xmm12=int6464#13
1831# asm 2: pshufb SR,<xmm12=%xmm12
1832pshufb SR,%xmm12
1833
1834# qhasm: xmm13 ^= *(int128 *)(c + 208)
1835# asm 1: pxor 208(<c=int64#5),<xmm13=int6464#14
1836# asm 2: pxor 208(<c=%r8),<xmm13=%xmm13
1837pxor 208(%r8),%xmm13
1838
1839# qhasm: shuffle bytes of xmm13 by SR
1840# asm 1: pshufb SR,<xmm13=int6464#14
1841# asm 2: pshufb SR,<xmm13=%xmm13
1842pshufb SR,%xmm13
1843
1844# qhasm: xmm14 ^= *(int128 *)(c + 224)
1845# asm 1: pxor 224(<c=int64#5),<xmm14=int6464#15
1846# asm 2: pxor 224(<c=%r8),<xmm14=%xmm14
1847pxor 224(%r8),%xmm14
1848
1849# qhasm: shuffle bytes of xmm14 by SR
1850# asm 1: pshufb SR,<xmm14=int6464#15
1851# asm 2: pshufb SR,<xmm14=%xmm14
1852pshufb SR,%xmm14
1853
1854# qhasm: xmm15 ^= *(int128 *)(c + 240)
1855# asm 1: pxor 240(<c=int64#5),<xmm15=int6464#16
1856# asm 2: pxor 240(<c=%r8),<xmm15=%xmm15
1857pxor 240(%r8),%xmm15
1858
1859# qhasm: shuffle bytes of xmm15 by SR
1860# asm 1: pshufb SR,<xmm15=int6464#16
1861# asm 2: pshufb SR,<xmm15=%xmm15
1862pshufb SR,%xmm15
1863
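# A second S-box evaluation follows, structurally identical to the one
# above but operating on xmm8..xmm15, with xmm0..xmm7 reused as
# temporaries (again, the "# asm 2" lines show the physical registers).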
1864# qhasm: xmm13 ^= xmm14
1865# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
1866# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
1867pxor %xmm14,%xmm13
1868
1869# qhasm: xmm10 ^= xmm9
1870# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
1871# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
1872pxor %xmm9,%xmm10
1873
1874# qhasm: xmm13 ^= xmm8
1875# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
1876# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
1877pxor %xmm8,%xmm13
1878
1879# qhasm: xmm14 ^= xmm10
1880# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
1881# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
1882pxor %xmm10,%xmm14
1883
1884# qhasm: xmm11 ^= xmm8
1885# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
1886# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
1887pxor %xmm8,%xmm11
1888
1889# qhasm: xmm14 ^= xmm11
1890# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
1891# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
1892pxor %xmm11,%xmm14
1893
1894# qhasm: xmm11 ^= xmm15
1895# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
1896# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
1897pxor %xmm15,%xmm11
1898
1899# qhasm: xmm11 ^= xmm12
1900# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
1901# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
1902pxor %xmm12,%xmm11
1903
1904# qhasm: xmm15 ^= xmm13
1905# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
1906# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
1907pxor %xmm13,%xmm15
1908
1909# qhasm: xmm11 ^= xmm9
1910# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
1911# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
1912pxor %xmm9,%xmm11
1913
1914# qhasm: xmm12 ^= xmm13
1915# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
1916# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
1917pxor %xmm13,%xmm12
1918
1919# qhasm: xmm10 ^= xmm15
1920# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
1921# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
1922pxor %xmm15,%xmm10
1923
1924# qhasm: xmm9 ^= xmm13
1925# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
1926# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
1927pxor %xmm13,%xmm9
1928
1929# qhasm: xmm3 = xmm15
1930# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
1931# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
1932movdqa %xmm15,%xmm0
1933
1934# qhasm: xmm2 = xmm9
1935# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
1936# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
1937movdqa %xmm9,%xmm1
1938
1939# qhasm: xmm1 = xmm13
1940# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
1941# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
1942movdqa %xmm13,%xmm2
1943
1944# qhasm: xmm5 = xmm10
1945# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
1946# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
1947movdqa %xmm10,%xmm3
1948
1949# qhasm: xmm4 = xmm14
1950# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
1951# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
1952movdqa %xmm14,%xmm4
1953
1954# qhasm: xmm3 ^= xmm12
1955# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
1956# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
1957pxor %xmm12,%xmm0
1958
1959# qhasm: xmm2 ^= xmm10
1960# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
1961# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
1962pxor %xmm10,%xmm1
1963
1964# qhasm: xmm1 ^= xmm11
1965# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
1966# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
1967pxor %xmm11,%xmm2
1968
1969# qhasm: xmm5 ^= xmm12
1970# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
1971# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
1972pxor %xmm12,%xmm3
1973
1974# qhasm: xmm4 ^= xmm8
1975# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
1976# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
1977pxor %xmm8,%xmm4
1978
1979# qhasm: xmm6 = xmm3
1980# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
1981# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
1982movdqa %xmm0,%xmm5
1983
1984# qhasm: xmm0 = xmm2
1985# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
1986# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
1987movdqa %xmm1,%xmm6
1988
1989# qhasm: xmm7 = xmm3
1990# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
1991# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
1992movdqa %xmm0,%xmm7
1993
1994# qhasm: xmm2 |= xmm1
1995# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
1996# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
1997por %xmm2,%xmm1
1998
1999# qhasm: xmm3 |= xmm4
2000# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
2001# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
2002por %xmm4,%xmm0
2003
2004# qhasm: xmm7 ^= xmm0
2005# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
2006# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
2007pxor %xmm6,%xmm7
2008
2009# qhasm: xmm6 &= xmm4
2010# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
2011# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
2012pand %xmm4,%xmm5
2013
2014# qhasm: xmm0 &= xmm1
2015# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
2016# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
2017pand %xmm2,%xmm6
2018
2019# qhasm: xmm4 ^= xmm1
2020# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
2021# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
2022pxor %xmm2,%xmm4
2023
2024# qhasm: xmm7 &= xmm4
2025# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
2026# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
2027pand %xmm4,%xmm7
2028
2029# qhasm: xmm4 = xmm11
2030# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
2031# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
2032movdqa %xmm11,%xmm2
2033
2034# qhasm: xmm4 ^= xmm8
2035# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
2036# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
2037pxor %xmm8,%xmm2
2038
2039# qhasm: xmm5 &= xmm4
2040# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
2041# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
2042pand %xmm2,%xmm3
2043
2044# qhasm: xmm3 ^= xmm5
2045# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
2046# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
2047pxor %xmm3,%xmm0
2048
2049# qhasm: xmm2 ^= xmm5
2050# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
2051# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
2052pxor %xmm3,%xmm1
2053
2054# qhasm: xmm5 = xmm15
2055# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
2056# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
2057movdqa %xmm15,%xmm2
2058
2059# qhasm: xmm5 ^= xmm9
2060# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
2061# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
2062pxor %xmm9,%xmm2
2063
2064# qhasm: xmm4 = xmm13
2065# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
2066# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
2067movdqa %xmm13,%xmm3
2068
2069# qhasm: xmm1 = xmm5
2070# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
2071# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
2072movdqa %xmm2,%xmm4
2073
2074# qhasm: xmm4 ^= xmm14
2075# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
2076# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
2077pxor %xmm14,%xmm3
2078
2079# qhasm: xmm1 |= xmm4
2080# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
2081# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
2082por %xmm3,%xmm4
2083
2084# qhasm: xmm5 &= xmm4
2085# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
2086# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
2087pand %xmm3,%xmm2
2088
2089# qhasm: xmm0 ^= xmm5
2090# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
2091# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
2092pxor %xmm2,%xmm6
2093
2094# qhasm: xmm3 ^= xmm7
2095# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
2096# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
2097pxor %xmm7,%xmm0
2098
2099# qhasm: xmm2 ^= xmm6
2100# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
2101# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
2102pxor %xmm5,%xmm1
2103
2104# qhasm: xmm1 ^= xmm7
2105# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
2106# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
2107pxor %xmm7,%xmm4
2108
2109# qhasm: xmm0 ^= xmm6
2110# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
2111# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
2112pxor %xmm5,%xmm6
2113
2114# qhasm: xmm1 ^= xmm6
2115# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
2116# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
2117pxor %xmm5,%xmm4
2118
2119# qhasm: xmm4 = xmm10
2120# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
2121# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
2122movdqa %xmm10,%xmm2
2123
2124# qhasm: xmm5 = xmm12
2125# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
2126# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
2127movdqa %xmm12,%xmm3
2128
2129# qhasm: xmm6 = xmm9
2130# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
2131# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
2132movdqa %xmm9,%xmm5
2133
2134# qhasm: xmm7 = xmm15
2135# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
2136# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
2137movdqa %xmm15,%xmm7
2138
2139# qhasm: xmm4 &= xmm11
2140# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
2141# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
2142pand %xmm11,%xmm2
2143
2144# qhasm: xmm5 &= xmm8
2145# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
2146# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
2147pand %xmm8,%xmm3
2148
2149# qhasm: xmm6 &= xmm13
2150# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
2151# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
2152pand %xmm13,%xmm5
2153
2154# qhasm: xmm7 |= xmm14
2155# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
2156# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
2157por %xmm14,%xmm7
2158
2159# qhasm: xmm3 ^= xmm4
2160# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
2161# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
2162pxor %xmm2,%xmm0
2163
2164# qhasm: xmm2 ^= xmm5
2165# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
2166# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
2167pxor %xmm3,%xmm1
2168
2169# qhasm: xmm1 ^= xmm6
2170# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
2171# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
2172pxor %xmm5,%xmm4
2173
2174# qhasm: xmm0 ^= xmm7
2175# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
2176# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
2177pxor %xmm7,%xmm6
2178
2179# qhasm: xmm4 = xmm3
2180# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
2181# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
2182movdqa %xmm0,%xmm2
2183
2184# qhasm: xmm4 ^= xmm2
2185# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
2186# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
2187pxor %xmm1,%xmm2
2188
2189# qhasm: xmm3 &= xmm1
2190# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
2191# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
2192pand %xmm4,%xmm0
2193
2194# qhasm: xmm6 = xmm0
2195# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
2196# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
2197movdqa %xmm6,%xmm3
2198
2199# qhasm: xmm6 ^= xmm3
2200# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
2201# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
2202pxor %xmm0,%xmm3
2203
2204# qhasm: xmm7 = xmm4
2205# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
2206# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
2207movdqa %xmm2,%xmm5
2208
2209# qhasm: xmm7 &= xmm6
2210# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
2211# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
2212pand %xmm3,%xmm5
2213
2214# qhasm: xmm7 ^= xmm2
2215# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
2216# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
2217pxor %xmm1,%xmm5
2218
2219# qhasm: xmm5 = xmm1
2220# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
2221# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
2222movdqa %xmm4,%xmm7
2223
2224# qhasm: xmm5 ^= xmm0
2225# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
2226# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
2227pxor %xmm6,%xmm7
2228
2229# qhasm: xmm3 ^= xmm2
2230# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
2231# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
2232pxor %xmm1,%xmm0
2233
2234# qhasm: xmm5 &= xmm3
2235# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
2236# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
2237pand %xmm0,%xmm7
2238
2239# qhasm: xmm5 ^= xmm0
2240# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
2241# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
2242pxor %xmm6,%xmm7
2243
2244# qhasm: xmm1 ^= xmm5
2245# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
2246# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
2247pxor %xmm7,%xmm4
2248
2249# qhasm: xmm2 = xmm6
2250# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
2251# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
2252movdqa %xmm3,%xmm0
2253
2254# qhasm: xmm2 ^= xmm5
2255# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
2256# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
2257pxor %xmm7,%xmm0
2258
2259# qhasm: xmm2 &= xmm0
2260# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
2261# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
2262pand %xmm6,%xmm0
2263
2264# qhasm: xmm1 ^= xmm2
2265# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
2266# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
2267pxor %xmm0,%xmm4
2268
2269# qhasm: xmm6 ^= xmm2
2270# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
2271# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
2272pxor %xmm0,%xmm3
2273
2274# qhasm: xmm6 &= xmm7
2275# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
2276# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
2277pand %xmm5,%xmm3
2278
2279# qhasm: xmm6 ^= xmm4
2280# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
2281# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
2282pxor %xmm2,%xmm3
2283
2284# qhasm: xmm4 = xmm14
2285# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
2286# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
2287movdqa %xmm14,%xmm0
2288
2289# qhasm: xmm0 = xmm13
2290# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
2291# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
2292movdqa %xmm13,%xmm1
2293
2294# qhasm: xmm2 = xmm7
2295# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
2296# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
2297movdqa %xmm5,%xmm2
2298
2299# qhasm: xmm2 ^= xmm6
2300# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
2301# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
2302pxor %xmm3,%xmm2
2303
2304# qhasm: xmm2 &= xmm14
2305# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
2306# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
2307pand %xmm14,%xmm2
2308
2309# qhasm: xmm14 ^= xmm13
2310# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
2311# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
2312pxor %xmm13,%xmm14
2313
2314# qhasm: xmm14 &= xmm6
2315# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
2316# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
2317pand %xmm3,%xmm14
2318
2319# qhasm: xmm13 &= xmm7
2320# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
2321# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
2322pand %xmm5,%xmm13
2323
2324# qhasm: xmm14 ^= xmm13
2325# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
2326# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
2327pxor %xmm13,%xmm14
2328
2329# qhasm: xmm13 ^= xmm2
2330# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
2331# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
2332pxor %xmm2,%xmm13
2333
2334# qhasm: xmm4 ^= xmm8
2335# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
2336# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
2337pxor %xmm8,%xmm0
2338
2339# qhasm: xmm0 ^= xmm11
2340# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
2341# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
2342pxor %xmm11,%xmm1
2343
2344# qhasm: xmm7 ^= xmm5
2345# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
2346# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
2347pxor %xmm7,%xmm5
2348
2349# qhasm: xmm6 ^= xmm1
2350# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
2351# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
2352pxor %xmm4,%xmm3
2353
2354# qhasm: xmm3 = xmm7
2355# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2356# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2357movdqa %xmm5,%xmm2
2358
2359# qhasm: xmm3 ^= xmm6
2360# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2361# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2362pxor %xmm3,%xmm2
2363
2364# qhasm: xmm3 &= xmm4
2365# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
2366# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
2367pand %xmm0,%xmm2
2368
2369# qhasm: xmm4 ^= xmm0
2370# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
2371# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
2372pxor %xmm1,%xmm0
2373
2374# qhasm: xmm4 &= xmm6
2375# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
2376# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
2377pand %xmm3,%xmm0
2378
2379# qhasm: xmm0 &= xmm7
2380# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
2381# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
2382pand %xmm5,%xmm1
2383
2384# qhasm: xmm0 ^= xmm4
2385# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
2386# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
2387pxor %xmm0,%xmm1
2388
2389# qhasm: xmm4 ^= xmm3
2390# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
2391# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
2392pxor %xmm2,%xmm0
2393
2394# qhasm: xmm2 = xmm5
2395# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
2396# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
2397movdqa %xmm7,%xmm2
2398
2399# qhasm: xmm2 ^= xmm1
2400# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
2401# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
2402pxor %xmm4,%xmm2
2403
2404# qhasm: xmm2 &= xmm8
2405# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
2406# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
2407pand %xmm8,%xmm2
2408
2409# qhasm: xmm8 ^= xmm11
2410# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
2411# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
2412pxor %xmm11,%xmm8
2413
2414# qhasm: xmm8 &= xmm1
2415# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
2416# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
2417pand %xmm4,%xmm8
2418
2419# qhasm: xmm11 &= xmm5
2420# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
2421# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
2422pand %xmm7,%xmm11
2423
2424# qhasm: xmm8 ^= xmm11
2425# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
2426# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
2427pxor %xmm11,%xmm8
2428
2429# qhasm: xmm11 ^= xmm2
2430# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
2431# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
2432pxor %xmm2,%xmm11
2433
2434# qhasm: xmm14 ^= xmm4
2435# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
2436# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
2437pxor %xmm0,%xmm14
2438
2439# qhasm: xmm8 ^= xmm4
2440# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
2441# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
2442pxor %xmm0,%xmm8
2443
2444# qhasm: xmm13 ^= xmm0
2445# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
2446# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
2447pxor %xmm1,%xmm13
2448
2449# qhasm: xmm11 ^= xmm0
2450# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
2451# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
2452pxor %xmm1,%xmm11
2453
2454# qhasm: xmm4 = xmm15
2455# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
2456# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
2457movdqa %xmm15,%xmm0
2458
2459# qhasm: xmm0 = xmm9
2460# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
2461# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
2462movdqa %xmm9,%xmm1
2463
2464# qhasm: xmm4 ^= xmm12
2465# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
2466# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
2467pxor %xmm12,%xmm0
2468
2469# qhasm: xmm0 ^= xmm10
2470# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
2471# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
2472pxor %xmm10,%xmm1
2473
2474# qhasm: xmm3 = xmm7
2475# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2476# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2477movdqa %xmm5,%xmm2
2478
2479# qhasm: xmm3 ^= xmm6
2480# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2481# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2482pxor %xmm3,%xmm2
2483
2484# qhasm: xmm3 &= xmm4
2485# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
2486# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
2487pand %xmm0,%xmm2
2488
2489# qhasm: xmm4 ^= xmm0
2490# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
2491# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
2492pxor %xmm1,%xmm0
2493
2494# qhasm: xmm4 &= xmm6
2495# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
2496# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
2497pand %xmm3,%xmm0
2498
2499# qhasm: xmm0 &= xmm7
2500# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
2501# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
2502pand %xmm5,%xmm1
2503
2504# qhasm: xmm0 ^= xmm4
2505# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
2506# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
2507pxor %xmm0,%xmm1
2508
2509# qhasm: xmm4 ^= xmm3
2510# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
2511# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
2512pxor %xmm2,%xmm0
2513
2514# qhasm: xmm2 = xmm5
2515# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
2516# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
2517movdqa %xmm7,%xmm2
2518
2519# qhasm: xmm2 ^= xmm1
2520# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
2521# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
2522pxor %xmm4,%xmm2
2523
2524# qhasm: xmm2 &= xmm12
2525# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
2526# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
2527pand %xmm12,%xmm2
2528
2529# qhasm: xmm12 ^= xmm10
2530# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
2531# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
2532pxor %xmm10,%xmm12
2533
2534# qhasm: xmm12 &= xmm1
2535# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
2536# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
2537pand %xmm4,%xmm12
2538
2539# qhasm: xmm10 &= xmm5
2540# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
2541# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
2542pand %xmm7,%xmm10
2543
2544# qhasm: xmm12 ^= xmm10
2545# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
2546# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
2547pxor %xmm10,%xmm12
2548
2549# qhasm: xmm10 ^= xmm2
2550# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
2551# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
2552pxor %xmm2,%xmm10
2553
2554# qhasm: xmm7 ^= xmm5
2555# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
2556# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
2557pxor %xmm7,%xmm5
2558
2559# qhasm: xmm6 ^= xmm1
2560# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
2561# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
2562pxor %xmm4,%xmm3
2563
2564# qhasm: xmm3 = xmm7
2565# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
2566# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
2567movdqa %xmm5,%xmm2
2568
2569# qhasm: xmm3 ^= xmm6
2570# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
2571# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
2572pxor %xmm3,%xmm2
2573
2574# qhasm: xmm3 &= xmm15
2575# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
2576# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
2577pand %xmm15,%xmm2
2578
2579# qhasm: xmm15 ^= xmm9
2580# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
2581# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
2582pxor %xmm9,%xmm15
2583
2584# qhasm: xmm15 &= xmm6
2585# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
2586# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
2587pand %xmm3,%xmm15
2588
2589# qhasm: xmm9 &= xmm7
2590# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
2591# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
2592pand %xmm5,%xmm9
2593
2594# qhasm: xmm15 ^= xmm9
2595# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
2596# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
2597pxor %xmm9,%xmm15
2598
2599# qhasm: xmm9 ^= xmm3
2600# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
2601# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
2602pxor %xmm2,%xmm9
2603
2604# qhasm: xmm15 ^= xmm4
2605# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
2606# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
2607pxor %xmm0,%xmm15
2608
2609# qhasm: xmm12 ^= xmm4
2610# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
2611# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
2612pxor %xmm0,%xmm12
2613
2614# qhasm: xmm9 ^= xmm0
2615# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
2616# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
2617pxor %xmm1,%xmm9
2618
2619# qhasm: xmm10 ^= xmm0
2620# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
2621# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
2622pxor %xmm1,%xmm10
2623
2624# qhasm: xmm15 ^= xmm8
2625# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
2626# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
2627pxor %xmm8,%xmm15
2628
2629# qhasm: xmm9 ^= xmm14
2630# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
2631# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
2632pxor %xmm14,%xmm9
2633
2634# qhasm: xmm12 ^= xmm15
2635# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
2636# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
2637pxor %xmm15,%xmm12
2638
2639# qhasm: xmm14 ^= xmm8
2640# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
2641# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
2642pxor %xmm8,%xmm14
2643
2644# qhasm: xmm8 ^= xmm9
2645# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
2646# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
2647pxor %xmm9,%xmm8
2648
2649# qhasm: xmm9 ^= xmm13
2650# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
2651# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
2652pxor %xmm13,%xmm9
2653
2654# qhasm: xmm13 ^= xmm10
2655# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
2656# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
2657pxor %xmm10,%xmm13
2658
2659# qhasm: xmm12 ^= xmm13
2660# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
2661# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
2662pxor %xmm13,%xmm12
2663
2664# qhasm: xmm10 ^= xmm11
2665# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
2666# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
2667pxor %xmm11,%xmm10
2668
2669# qhasm: xmm11 ^= xmm13
2670# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
2671# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
2672pxor %xmm13,%xmm11
2673
2674# qhasm: xmm14 ^= xmm11
2675# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
2676# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
2677pxor %xmm11,%xmm14
2678
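# Annotation (added comment, interpretation only): the pshufd $0x93 and
# pshufd $0x4E dword rotations with the xor accumulation that follow are
# consistent with the MixColumns step applied to the bitsliced AES state.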
2679# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
2680# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
2681# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
2682pshufd $0x93,%xmm8,%xmm0
2683
2684# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
2685# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
2686# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
2687pshufd $0x93,%xmm9,%xmm1
2688
2689# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
2690# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
2691# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
2692pshufd $0x93,%xmm12,%xmm2
2693
2694# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
2695# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
2696# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
2697pshufd $0x93,%xmm14,%xmm3
2698
2699# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
2700# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
2701# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
2702pshufd $0x93,%xmm11,%xmm4
2703
2704# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
2705# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
2706# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
2707pshufd $0x93,%xmm15,%xmm5
2708
2709# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
2710# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
2711# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
2712pshufd $0x93,%xmm10,%xmm6
2713
2714# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
2715# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
2716# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
2717pshufd $0x93,%xmm13,%xmm7
2718
2719# qhasm: xmm8 ^= xmm0
2720# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
2721# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
2722pxor %xmm0,%xmm8
2723
2724# qhasm: xmm9 ^= xmm1
2725# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
2726# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
2727pxor %xmm1,%xmm9
2728
2729# qhasm: xmm12 ^= xmm2
2730# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
2731# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
2732pxor %xmm2,%xmm12
2733
2734# qhasm: xmm14 ^= xmm3
2735# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
2736# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
2737pxor %xmm3,%xmm14
2738
2739# qhasm: xmm11 ^= xmm4
2740# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
2741# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
2742pxor %xmm4,%xmm11
2743
2744# qhasm: xmm15 ^= xmm5
2745# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
2746# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
2747pxor %xmm5,%xmm15
2748
2749# qhasm: xmm10 ^= xmm6
2750# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
2751# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
2752pxor %xmm6,%xmm10
2753
2754# qhasm: xmm13 ^= xmm7
2755# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
2756# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
2757pxor %xmm7,%xmm13
2758
2759# qhasm: xmm0 ^= xmm13
2760# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
2761# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
2762pxor %xmm13,%xmm0
2763
2764# qhasm: xmm1 ^= xmm8
2765# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
2766# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
2767pxor %xmm8,%xmm1
2768
2769# qhasm: xmm2 ^= xmm9
2770# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
2771# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
2772pxor %xmm9,%xmm2
2773
2774# qhasm: xmm1 ^= xmm13
2775# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
2776# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
2777pxor %xmm13,%xmm1
2778
2779# qhasm: xmm3 ^= xmm12
2780# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
2781# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
2782pxor %xmm12,%xmm3
2783
2784# qhasm: xmm4 ^= xmm14
2785# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
2786# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
2787pxor %xmm14,%xmm4
2788
2789# qhasm: xmm5 ^= xmm11
2790# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
2791# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
2792pxor %xmm11,%xmm5
2793
2794# qhasm: xmm3 ^= xmm13
2795# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
2796# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
2797pxor %xmm13,%xmm3
2798
2799# qhasm: xmm6 ^= xmm15
2800# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
2801# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
2802pxor %xmm15,%xmm6
2803
2804# qhasm: xmm7 ^= xmm10
2805# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
2806# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
2807pxor %xmm10,%xmm7
2808
2809# qhasm: xmm4 ^= xmm13
2810# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
2811# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
2812pxor %xmm13,%xmm4
2813
2814# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
2815# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
2816# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
2817pshufd $0x4E,%xmm8,%xmm8
2818
2819# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
2820# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
2821# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
2822pshufd $0x4E,%xmm9,%xmm9
2823
2824# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
2825# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
2826# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
2827pshufd $0x4E,%xmm12,%xmm12
2828
2829# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
2830# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
2831# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
2832pshufd $0x4E,%xmm14,%xmm14
2833
2834# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
2835# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
2836# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
2837pshufd $0x4E,%xmm11,%xmm11
2838
2839# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
2840# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
2841# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
2842pshufd $0x4E,%xmm15,%xmm15
2843
2844# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
2845# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
2846# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
2847pshufd $0x4E,%xmm10,%xmm10
2848
2849# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
2850# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
2851# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
2852pshufd $0x4E,%xmm13,%xmm13
2853
2854# qhasm: xmm0 ^= xmm8
2855# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
2856# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
2857pxor %xmm8,%xmm0
2858
2859# qhasm: xmm1 ^= xmm9
2860# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
2861# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
2862pxor %xmm9,%xmm1
2863
2864# qhasm: xmm2 ^= xmm12
2865# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
2866# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
2867pxor %xmm12,%xmm2
2868
2869# qhasm: xmm3 ^= xmm14
2870# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
2871# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
2872pxor %xmm14,%xmm3
2873
2874# qhasm: xmm4 ^= xmm11
2875# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
2876# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
2877pxor %xmm11,%xmm4
2878
2879# qhasm: xmm5 ^= xmm15
2880# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
2881# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
2882pxor %xmm15,%xmm5
2883
2884# qhasm: xmm6 ^= xmm10
2885# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
2886# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
2887pxor %xmm10,%xmm6
2888
2889# qhasm: xmm7 ^= xmm13
2890# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
2891# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
2892pxor %xmm13,%xmm7
2893
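# Annotation (added comment, interpretation only): the eight pxor loads from
# c + 256 .. c + 368 followed by pshufb with the SR constant appear to add
# the next round key and apply ShiftRows to each bitsliced register.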
2894# qhasm: xmm0 ^= *(int128 *)(c + 256)
2895# asm 1: pxor 256(<c=int64#5),<xmm0=int6464#1
2896# asm 2: pxor 256(<c=%r8),<xmm0=%xmm0
2897pxor 256(%r8),%xmm0
2898
2899# qhasm: shuffle bytes of xmm0 by SR
2900# asm 1: pshufb SR,<xmm0=int6464#1
2901# asm 2: pshufb SR,<xmm0=%xmm0
2902pshufb SR,%xmm0
2903
2904# qhasm: xmm1 ^= *(int128 *)(c + 272)
2905# asm 1: pxor 272(<c=int64#5),<xmm1=int6464#2
2906# asm 2: pxor 272(<c=%r8),<xmm1=%xmm1
2907pxor 272(%r8),%xmm1
2908
2909# qhasm: shuffle bytes of xmm1 by SR
2910# asm 1: pshufb SR,<xmm1=int6464#2
2911# asm 2: pshufb SR,<xmm1=%xmm1
2912pshufb SR,%xmm1
2913
2914# qhasm: xmm2 ^= *(int128 *)(c + 288)
2915# asm 1: pxor 288(<c=int64#5),<xmm2=int6464#3
2916# asm 2: pxor 288(<c=%r8),<xmm2=%xmm2
2917pxor 288(%r8),%xmm2
2918
2919# qhasm: shuffle bytes of xmm2 by SR
2920# asm 1: pshufb SR,<xmm2=int6464#3
2921# asm 2: pshufb SR,<xmm2=%xmm2
2922pshufb SR,%xmm2
2923
2924# qhasm: xmm3 ^= *(int128 *)(c + 304)
2925# asm 1: pxor 304(<c=int64#5),<xmm3=int6464#4
2926# asm 2: pxor 304(<c=%r8),<xmm3=%xmm3
2927pxor 304(%r8),%xmm3
2928
2929# qhasm: shuffle bytes of xmm3 by SR
2930# asm 1: pshufb SR,<xmm3=int6464#4
2931# asm 2: pshufb SR,<xmm3=%xmm3
2932pshufb SR,%xmm3
2933
2934# qhasm: xmm4 ^= *(int128 *)(c + 320)
2935# asm 1: pxor 320(<c=int64#5),<xmm4=int6464#5
2936# asm 2: pxor 320(<c=%r8),<xmm4=%xmm4
2937pxor 320(%r8),%xmm4
2938
2939# qhasm: shuffle bytes of xmm4 by SR
2940# asm 1: pshufb SR,<xmm4=int6464#5
2941# asm 2: pshufb SR,<xmm4=%xmm4
2942pshufb SR,%xmm4
2943
2944# qhasm: xmm5 ^= *(int128 *)(c + 336)
2945# asm 1: pxor 336(<c=int64#5),<xmm5=int6464#6
2946# asm 2: pxor 336(<c=%r8),<xmm5=%xmm5
2947pxor 336(%r8),%xmm5
2948
2949# qhasm: shuffle bytes of xmm5 by SR
2950# asm 1: pshufb SR,<xmm5=int6464#6
2951# asm 2: pshufb SR,<xmm5=%xmm5
2952pshufb SR,%xmm5
2953
2954# qhasm: xmm6 ^= *(int128 *)(c + 352)
2955# asm 1: pxor 352(<c=int64#5),<xmm6=int6464#7
2956# asm 2: pxor 352(<c=%r8),<xmm6=%xmm6
2957pxor 352(%r8),%xmm6
2958
2959# qhasm: shuffle bytes of xmm6 by SR
2960# asm 1: pshufb SR,<xmm6=int6464#7
2961# asm 2: pshufb SR,<xmm6=%xmm6
2962pshufb SR,%xmm6
2963
2964# qhasm: xmm7 ^= *(int128 *)(c + 368)
2965# asm 1: pxor 368(<c=int64#5),<xmm7=int6464#8
2966# asm 2: pxor 368(<c=%r8),<xmm7=%xmm7
2967pxor 368(%r8),%xmm7
2968
2969# qhasm: shuffle bytes of xmm7 by SR
2970# asm 1: pshufb SR,<xmm7=int6464#8
2971# asm 2: pshufb SR,<xmm7=%xmm7
2972pshufb SR,%xmm7
2973
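# Annotation (added comment, interpretation only): the long pxor/pand/por
# network that follows appears to evaluate the AES S-box (SubBytes) as a
# Boolean circuit on all bytes of the packed state in parallel.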
2974# qhasm: xmm5 ^= xmm6
2975# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
2976# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
2977pxor %xmm6,%xmm5
2978
2979# qhasm: xmm2 ^= xmm1
2980# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
2981# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
2982pxor %xmm1,%xmm2
2983
2984# qhasm: xmm5 ^= xmm0
2985# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
2986# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
2987pxor %xmm0,%xmm5
2988
2989# qhasm: xmm6 ^= xmm2
2990# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
2991# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
2992pxor %xmm2,%xmm6
2993
2994# qhasm: xmm3 ^= xmm0
2995# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
2996# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
2997pxor %xmm0,%xmm3
2998
2999# qhasm: xmm6 ^= xmm3
3000# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
3001# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
3002pxor %xmm3,%xmm6
3003
3004# qhasm: xmm3 ^= xmm7
3005# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
3006# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
3007pxor %xmm7,%xmm3
3008
3009# qhasm: xmm3 ^= xmm4
3010# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
3011# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
3012pxor %xmm4,%xmm3
3013
3014# qhasm: xmm7 ^= xmm5
3015# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
3016# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
3017pxor %xmm5,%xmm7
3018
3019# qhasm: xmm3 ^= xmm1
3020# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
3021# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
3022pxor %xmm1,%xmm3
3023
3024# qhasm: xmm4 ^= xmm5
3025# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3026# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3027pxor %xmm5,%xmm4
3028
3029# qhasm: xmm2 ^= xmm7
3030# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
3031# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
3032pxor %xmm7,%xmm2
3033
3034# qhasm: xmm1 ^= xmm5
3035# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
3036# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
3037pxor %xmm5,%xmm1
3038
3039# qhasm: xmm11 = xmm7
3040# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
3041# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
3042movdqa %xmm7,%xmm8
3043
3044# qhasm: xmm10 = xmm1
3045# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
3046# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
3047movdqa %xmm1,%xmm9
3048
3049# qhasm: xmm9 = xmm5
3050# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
3051# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
3052movdqa %xmm5,%xmm10
3053
3054# qhasm: xmm13 = xmm2
3055# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
3056# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
3057movdqa %xmm2,%xmm11
3058
3059# qhasm: xmm12 = xmm6
3060# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
3061# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
3062movdqa %xmm6,%xmm12
3063
3064# qhasm: xmm11 ^= xmm4
3065# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
3066# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
3067pxor %xmm4,%xmm8
3068
3069# qhasm: xmm10 ^= xmm2
3070# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
3071# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
3072pxor %xmm2,%xmm9
3073
3074# qhasm: xmm9 ^= xmm3
3075# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
3076# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
3077pxor %xmm3,%xmm10
3078
3079# qhasm: xmm13 ^= xmm4
3080# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
3081# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
3082pxor %xmm4,%xmm11
3083
3084# qhasm: xmm12 ^= xmm0
3085# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
3086# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
3087pxor %xmm0,%xmm12
3088
3089# qhasm: xmm14 = xmm11
3090# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
3091# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
3092movdqa %xmm8,%xmm13
3093
3094# qhasm: xmm8 = xmm10
3095# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
3096# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
3097movdqa %xmm9,%xmm14
3098
3099# qhasm: xmm15 = xmm11
3100# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
3101# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
3102movdqa %xmm8,%xmm15
3103
3104# qhasm: xmm10 |= xmm9
3105# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
3106# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
3107por %xmm10,%xmm9
3108
3109# qhasm: xmm11 |= xmm12
3110# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
3111# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
3112por %xmm12,%xmm8
3113
3114# qhasm: xmm15 ^= xmm8
3115# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
3116# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
3117pxor %xmm14,%xmm15
3118
3119# qhasm: xmm14 &= xmm12
3120# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
3121# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
3122pand %xmm12,%xmm13
3123
3124# qhasm: xmm8 &= xmm9
3125# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
3126# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
3127pand %xmm10,%xmm14
3128
3129# qhasm: xmm12 ^= xmm9
3130# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
3131# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
3132pxor %xmm10,%xmm12
3133
3134# qhasm: xmm15 &= xmm12
3135# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
3136# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
3137pand %xmm12,%xmm15
3138
3139# qhasm: xmm12 = xmm3
3140# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
3141# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
3142movdqa %xmm3,%xmm10
3143
3144# qhasm: xmm12 ^= xmm0
3145# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
3146# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
3147pxor %xmm0,%xmm10
3148
3149# qhasm: xmm13 &= xmm12
3150# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
3151# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
3152pand %xmm10,%xmm11
3153
3154# qhasm: xmm11 ^= xmm13
3155# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
3156# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
3157pxor %xmm11,%xmm8
3158
3159# qhasm: xmm10 ^= xmm13
3160# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3161# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3162pxor %xmm11,%xmm9
3163
3164# qhasm: xmm13 = xmm7
3165# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
3166# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
3167movdqa %xmm7,%xmm10
3168
3169# qhasm: xmm13 ^= xmm1
3170# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
3171# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
3172pxor %xmm1,%xmm10
3173
3174# qhasm: xmm12 = xmm5
3175# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
3176# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
3177movdqa %xmm5,%xmm11
3178
3179# qhasm: xmm9 = xmm13
3180# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
3181# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
3182movdqa %xmm10,%xmm12
3183
3184# qhasm: xmm12 ^= xmm6
3185# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
3186# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
3187pxor %xmm6,%xmm11
3188
3189# qhasm: xmm9 |= xmm12
3190# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
3191# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
3192por %xmm11,%xmm12
3193
3194# qhasm: xmm13 &= xmm12
3195# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
3196# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
3197pand %xmm11,%xmm10
3198
3199# qhasm: xmm8 ^= xmm13
3200# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
3201# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
3202pxor %xmm10,%xmm14
3203
3204# qhasm: xmm11 ^= xmm15
3205# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
3206# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
3207pxor %xmm15,%xmm8
3208
3209# qhasm: xmm10 ^= xmm14
3210# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
3211# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
3212pxor %xmm13,%xmm9
3213
3214# qhasm: xmm9 ^= xmm15
3215# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
3216# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
3217pxor %xmm15,%xmm12
3218
3219# qhasm: xmm8 ^= xmm14
3220# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
3221# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
3222pxor %xmm13,%xmm14
3223
3224# qhasm: xmm9 ^= xmm14
3225# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3226# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3227pxor %xmm13,%xmm12
3228
3229# qhasm: xmm12 = xmm2
3230# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
3231# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
3232movdqa %xmm2,%xmm10
3233
3234# qhasm: xmm13 = xmm4
3235# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
3236# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
3237movdqa %xmm4,%xmm11
3238
3239# qhasm: xmm14 = xmm1
3240# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
3241# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
3242movdqa %xmm1,%xmm13
3243
3244# qhasm: xmm15 = xmm7
3245# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
3246# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
3247movdqa %xmm7,%xmm15
3248
3249# qhasm: xmm12 &= xmm3
3250# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
3251# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
3252pand %xmm3,%xmm10
3253
3254# qhasm: xmm13 &= xmm0
3255# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
3256# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
3257pand %xmm0,%xmm11
3258
3259# qhasm: xmm14 &= xmm5
3260# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
3261# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
3262pand %xmm5,%xmm13
3263
3264# qhasm: xmm15 |= xmm6
3265# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
3266# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
3267por %xmm6,%xmm15
3268
3269# qhasm: xmm11 ^= xmm12
3270# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
3271# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
3272pxor %xmm10,%xmm8
3273
3274# qhasm: xmm10 ^= xmm13
3275# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
3276# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
3277pxor %xmm11,%xmm9
3278
3279# qhasm: xmm9 ^= xmm14
3280# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
3281# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
3282pxor %xmm13,%xmm12
3283
3284# qhasm: xmm8 ^= xmm15
3285# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
3286# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
3287pxor %xmm15,%xmm14
3288
3289# qhasm: xmm12 = xmm11
3290# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
3291# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
3292movdqa %xmm8,%xmm10
3293
3294# qhasm: xmm12 ^= xmm10
3295# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
3296# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
3297pxor %xmm9,%xmm10
3298
3299# qhasm: xmm11 &= xmm9
3300# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
3301# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
3302pand %xmm12,%xmm8
3303
3304# qhasm: xmm14 = xmm8
3305# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
3306# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
3307movdqa %xmm14,%xmm11
3308
3309# qhasm: xmm14 ^= xmm11
3310# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
3311# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
3312pxor %xmm8,%xmm11
3313
3314# qhasm: xmm15 = xmm12
3315# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
3316# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
3317movdqa %xmm10,%xmm13
3318
3319# qhasm: xmm15 &= xmm14
3320# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
3321# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
3322pand %xmm11,%xmm13
3323
3324# qhasm: xmm15 ^= xmm10
3325# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
3326# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
3327pxor %xmm9,%xmm13
3328
3329# qhasm: xmm13 = xmm9
3330# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
3331# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
3332movdqa %xmm12,%xmm15
3333
3334# qhasm: xmm13 ^= xmm8
3335# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3336# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3337pxor %xmm14,%xmm15
3338
3339# qhasm: xmm11 ^= xmm10
3340# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
3341# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
3342pxor %xmm9,%xmm8
3343
3344# qhasm: xmm13 &= xmm11
3345# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
3346# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
3347pand %xmm8,%xmm15
3348
3349# qhasm: xmm13 ^= xmm8
3350# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
3351# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
3352pxor %xmm14,%xmm15
3353
3354# qhasm: xmm9 ^= xmm13
3355# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
3356# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
3357pxor %xmm15,%xmm12
3358
3359# qhasm: xmm10 = xmm14
3360# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
3361# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
3362movdqa %xmm11,%xmm8
3363
3364# qhasm: xmm10 ^= xmm13
3365# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
3366# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
3367pxor %xmm15,%xmm8
3368
3369# qhasm: xmm10 &= xmm8
3370# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
3371# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
3372pand %xmm14,%xmm8
3373
3374# qhasm: xmm9 ^= xmm10
3375# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
3376# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
3377pxor %xmm8,%xmm12
3378
3379# qhasm: xmm14 ^= xmm10
3380# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
3381# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
3382pxor %xmm8,%xmm11
3383
3384# qhasm: xmm14 &= xmm15
3385# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
3386# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
3387pand %xmm13,%xmm11
3388
3389# qhasm: xmm14 ^= xmm12
3390# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
3391# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
3392pxor %xmm10,%xmm11
3393
3394# qhasm: xmm12 = xmm6
3395# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
3396# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
3397movdqa %xmm6,%xmm8
3398
3399# qhasm: xmm8 = xmm5
3400# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
3401# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
3402movdqa %xmm5,%xmm9
3403
3404# qhasm: xmm10 = xmm15
3405# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
3406# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
3407movdqa %xmm13,%xmm10
3408
3409# qhasm: xmm10 ^= xmm14
3410# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
3411# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
3412pxor %xmm11,%xmm10
3413
3414# qhasm: xmm10 &= xmm6
3415# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
3416# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
3417pand %xmm6,%xmm10
3418
3419# qhasm: xmm6 ^= xmm5
3420# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
3421# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
3422pxor %xmm5,%xmm6
3423
3424# qhasm: xmm6 &= xmm14
3425# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
3426# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
3427pand %xmm11,%xmm6
3428
3429# qhasm: xmm5 &= xmm15
3430# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
3431# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
3432pand %xmm13,%xmm5
3433
3434# qhasm: xmm6 ^= xmm5
3435# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
3436# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
3437pxor %xmm5,%xmm6
3438
3439# qhasm: xmm5 ^= xmm10
3440# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
3441# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
3442pxor %xmm10,%xmm5
3443
3444# qhasm: xmm12 ^= xmm0
3445# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
3446# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
3447pxor %xmm0,%xmm8
3448
3449# qhasm: xmm8 ^= xmm3
3450# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
3451# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
3452pxor %xmm3,%xmm9
3453
3454# qhasm: xmm15 ^= xmm13
3455# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3456# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3457pxor %xmm15,%xmm13
3458
3459# qhasm: xmm14 ^= xmm9
3460# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3461# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3462pxor %xmm12,%xmm11
3463
3464# qhasm: xmm11 = xmm15
3465# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3466# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3467movdqa %xmm13,%xmm10
3468
3469# qhasm: xmm11 ^= xmm14
3470# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3471# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3472pxor %xmm11,%xmm10
3473
3474# qhasm: xmm11 &= xmm12
3475# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3476# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3477pand %xmm8,%xmm10
3478
3479# qhasm: xmm12 ^= xmm8
3480# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3481# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3482pxor %xmm9,%xmm8
3483
3484# qhasm: xmm12 &= xmm14
3485# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3486# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3487pand %xmm11,%xmm8
3488
3489# qhasm: xmm8 &= xmm15
3490# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3491# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3492pand %xmm13,%xmm9
3493
3494# qhasm: xmm8 ^= xmm12
3495# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3496# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3497pxor %xmm8,%xmm9
3498
3499# qhasm: xmm12 ^= xmm11
3500# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3501# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3502pxor %xmm10,%xmm8
3503
3504# qhasm: xmm10 = xmm13
3505# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3506# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3507movdqa %xmm15,%xmm10
3508
3509# qhasm: xmm10 ^= xmm9
3510# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3511# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3512pxor %xmm12,%xmm10
3513
3514# qhasm: xmm10 &= xmm0
3515# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
3516# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
3517pand %xmm0,%xmm10
3518
3519# qhasm: xmm0 ^= xmm3
3520# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
3521# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
3522pxor %xmm3,%xmm0
3523
3524# qhasm: xmm0 &= xmm9
3525# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
3526# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
3527pand %xmm12,%xmm0
3528
3529# qhasm: xmm3 &= xmm13
3530# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
3531# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
3532pand %xmm15,%xmm3
3533
3534# qhasm: xmm0 ^= xmm3
3535# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
3536# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
3537pxor %xmm3,%xmm0
3538
3539# qhasm: xmm3 ^= xmm10
3540# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
3541# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
3542pxor %xmm10,%xmm3
3543
3544# qhasm: xmm6 ^= xmm12
3545# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
3546# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
3547pxor %xmm8,%xmm6
3548
3549# qhasm: xmm0 ^= xmm12
3550# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
3551# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
3552pxor %xmm8,%xmm0
3553
3554# qhasm: xmm5 ^= xmm8
3555# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
3556# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
3557pxor %xmm9,%xmm5
3558
3559# qhasm: xmm3 ^= xmm8
3560# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
3561# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
3562pxor %xmm9,%xmm3
3563
3564# qhasm: xmm12 = xmm7
3565# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
3566# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
3567movdqa %xmm7,%xmm8
3568
3569# qhasm: xmm8 = xmm1
3570# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
3571# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
3572movdqa %xmm1,%xmm9
3573
3574# qhasm: xmm12 ^= xmm4
3575# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
3576# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
3577pxor %xmm4,%xmm8
3578
3579# qhasm: xmm8 ^= xmm2
3580# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
3581# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
3582pxor %xmm2,%xmm9
3583
3584# qhasm: xmm11 = xmm15
3585# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3586# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3587movdqa %xmm13,%xmm10
3588
3589# qhasm: xmm11 ^= xmm14
3590# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3591# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3592pxor %xmm11,%xmm10
3593
3594# qhasm: xmm11 &= xmm12
3595# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
3596# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
3597pand %xmm8,%xmm10
3598
3599# qhasm: xmm12 ^= xmm8
3600# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
3601# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
3602pxor %xmm9,%xmm8
3603
3604# qhasm: xmm12 &= xmm14
3605# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
3606# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
3607pand %xmm11,%xmm8
3608
3609# qhasm: xmm8 &= xmm15
3610# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
3611# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
3612pand %xmm13,%xmm9
3613
3614# qhasm: xmm8 ^= xmm12
3615# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
3616# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
3617pxor %xmm8,%xmm9
3618
3619# qhasm: xmm12 ^= xmm11
3620# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
3621# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
3622pxor %xmm10,%xmm8
3623
3624# qhasm: xmm10 = xmm13
3625# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
3626# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
3627movdqa %xmm15,%xmm10
3628
3629# qhasm: xmm10 ^= xmm9
3630# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
3631# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
3632pxor %xmm12,%xmm10
3633
3634# qhasm: xmm10 &= xmm4
3635# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
3636# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
3637pand %xmm4,%xmm10
3638
3639# qhasm: xmm4 ^= xmm2
3640# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
3641# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
3642pxor %xmm2,%xmm4
3643
3644# qhasm: xmm4 &= xmm9
3645# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
3646# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
3647pand %xmm12,%xmm4
3648
3649# qhasm: xmm2 &= xmm13
3650# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
3651# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
3652pand %xmm15,%xmm2
3653
3654# qhasm: xmm4 ^= xmm2
3655# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
3656# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
3657pxor %xmm2,%xmm4
3658
3659# qhasm: xmm2 ^= xmm10
3660# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
3661# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
3662pxor %xmm10,%xmm2
3663
3664# qhasm: xmm15 ^= xmm13
3665# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
3666# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
3667pxor %xmm15,%xmm13
3668
3669# qhasm: xmm14 ^= xmm9
3670# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
3671# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
3672pxor %xmm12,%xmm11
3673
3674# qhasm: xmm11 = xmm15
3675# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
3676# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
3677movdqa %xmm13,%xmm10
3678
3679# qhasm: xmm11 ^= xmm14
3680# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
3681# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
3682pxor %xmm11,%xmm10
3683
3684# qhasm: xmm11 &= xmm7
3685# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
3686# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
3687pand %xmm7,%xmm10
3688
3689# qhasm: xmm7 ^= xmm1
3690# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
3691# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
3692pxor %xmm1,%xmm7
3693
3694# qhasm: xmm7 &= xmm14
3695# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
3696# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
3697pand %xmm11,%xmm7
3698
3699# qhasm: xmm1 &= xmm15
3700# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
3701# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
3702pand %xmm13,%xmm1
3703
3704# qhasm: xmm7 ^= xmm1
3705# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
3706# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
3707pxor %xmm1,%xmm7
3708
3709# qhasm: xmm1 ^= xmm11
3710# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
3711# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
3712pxor %xmm10,%xmm1
3713
3714# qhasm: xmm7 ^= xmm12
3715# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
3716# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
3717pxor %xmm8,%xmm7
3718
3719# qhasm: xmm4 ^= xmm12
3720# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
3721# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
3722pxor %xmm8,%xmm4
3723
3724# qhasm: xmm1 ^= xmm8
3725# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
3726# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
3727pxor %xmm9,%xmm1
3728
3729# qhasm: xmm2 ^= xmm8
3730# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
3731# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
3732pxor %xmm9,%xmm2
3733
3734# qhasm: xmm7 ^= xmm0
3735# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
3736# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
3737pxor %xmm0,%xmm7
3738
3739# qhasm: xmm1 ^= xmm6
3740# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
3741# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
3742pxor %xmm6,%xmm1
3743
3744# qhasm: xmm4 ^= xmm7
3745# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
3746# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
3747pxor %xmm7,%xmm4
3748
3749# qhasm: xmm6 ^= xmm0
3750# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
3751# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
3752pxor %xmm0,%xmm6
3753
3754# qhasm: xmm0 ^= xmm1
3755# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
3756# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
3757pxor %xmm1,%xmm0
3758
3759# qhasm: xmm1 ^= xmm5
3760# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
3761# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
3762pxor %xmm5,%xmm1
3763
3764# qhasm: xmm5 ^= xmm2
3765# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
3766# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
3767pxor %xmm2,%xmm5
3768
3769# qhasm: xmm4 ^= xmm5
3770# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
3771# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
3772pxor %xmm5,%xmm4
3773
3774# qhasm: xmm2 ^= xmm3
3775# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
3776# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
3777pxor %xmm3,%xmm2
3778
3779# qhasm: xmm3 ^= xmm5
3780# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
3781# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
3782pxor %xmm5,%xmm3
3783
3784# qhasm: xmm6 ^= xmm3
3785# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
3786# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
3787pxor %xmm3,%xmm6
3788
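# Annotation (added comment, interpretation only): the pshufd $0x93 /
# $0x4E mixing pattern noted earlier repeats here for this round's
# output registers.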
3789# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
3790# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
3791# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
3792pshufd $0x93,%xmm0,%xmm8
3793
3794# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
3795# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
3796# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
3797pshufd $0x93,%xmm1,%xmm9
3798
3799# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
3800# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
3801# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
3802pshufd $0x93,%xmm4,%xmm10
3803
3804# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
3805# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
3806# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
3807pshufd $0x93,%xmm6,%xmm11
3808
3809# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
3810# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
3811# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
3812pshufd $0x93,%xmm3,%xmm12
3813
3814# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
3815# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
3816# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
3817pshufd $0x93,%xmm7,%xmm13
3818
3819# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
3820# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
3821# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
3822pshufd $0x93,%xmm2,%xmm14
3823
3824# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
3825# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
3826# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
3827pshufd $0x93,%xmm5,%xmm15
3828
3829# qhasm: xmm0 ^= xmm8
3830# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
3831# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
3832pxor %xmm8,%xmm0
3833
3834# qhasm: xmm1 ^= xmm9
3835# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
3836# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
3837pxor %xmm9,%xmm1
3838
3839# qhasm: xmm4 ^= xmm10
3840# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
3841# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
3842pxor %xmm10,%xmm4
3843
3844# qhasm: xmm6 ^= xmm11
3845# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
3846# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
3847pxor %xmm11,%xmm6
3848
3849# qhasm: xmm3 ^= xmm12
3850# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
3851# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
3852pxor %xmm12,%xmm3
3853
3854# qhasm: xmm7 ^= xmm13
3855# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
3856# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
3857pxor %xmm13,%xmm7
3858
3859# qhasm: xmm2 ^= xmm14
3860# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
3861# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
3862pxor %xmm14,%xmm2
3863
3864# qhasm: xmm5 ^= xmm15
3865# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
3866# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
3867pxor %xmm15,%xmm5
3868
3869# qhasm: xmm8 ^= xmm5
3870# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
3871# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
3872pxor %xmm5,%xmm8
3873
3874# qhasm: xmm9 ^= xmm0
3875# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
3876# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
3877pxor %xmm0,%xmm9
3878
3879# qhasm: xmm10 ^= xmm1
3880# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
3881# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
3882pxor %xmm1,%xmm10
3883
3884# qhasm: xmm9 ^= xmm5
3885# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
3886# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
3887pxor %xmm5,%xmm9
3888
3889# qhasm: xmm11 ^= xmm4
3890# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
3891# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
3892pxor %xmm4,%xmm11
3893
3894# qhasm: xmm12 ^= xmm6
3895# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
3896# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
3897pxor %xmm6,%xmm12
3898
3899# qhasm: xmm13 ^= xmm3
3900# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
3901# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
3902pxor %xmm3,%xmm13
3903
3904# qhasm: xmm11 ^= xmm5
3905# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
3906# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
3907pxor %xmm5,%xmm11
3908
3909# qhasm: xmm14 ^= xmm7
3910# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
3911# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
3912pxor %xmm7,%xmm14
3913
3914# qhasm: xmm15 ^= xmm2
3915# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
3916# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
3917pxor %xmm2,%xmm15
3918
3919# qhasm: xmm12 ^= xmm5
3920# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
3921# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
3922pxor %xmm5,%xmm12
3923
3924# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
3925# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
3926# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
3927pshufd $0x4E,%xmm0,%xmm0
3928
3929# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
3930# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
3931# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
3932pshufd $0x4E,%xmm1,%xmm1
3933
3934# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
3935# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
3936# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
3937pshufd $0x4E,%xmm4,%xmm4
3938
3939# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
3940# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
3941# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
3942pshufd $0x4E,%xmm6,%xmm6
3943
3944# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
3945# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
3946# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
3947pshufd $0x4E,%xmm3,%xmm3
3948
3949# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
3950# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
3951# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
3952pshufd $0x4E,%xmm7,%xmm7
3953
3954# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
3955# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
3956# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
3957pshufd $0x4E,%xmm2,%xmm2
3958
3959# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
3960# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
3961# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
3962pshufd $0x4E,%xmm5,%xmm5
3963
3964# qhasm: xmm8 ^= xmm0
3965# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
3966# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
3967pxor %xmm0,%xmm8
3968
3969# qhasm: xmm9 ^= xmm1
3970# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
3971# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
3972pxor %xmm1,%xmm9
3973
3974# qhasm: xmm10 ^= xmm4
3975# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
3976# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
3977pxor %xmm4,%xmm10
3978
3979# qhasm: xmm11 ^= xmm6
3980# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
3981# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
3982pxor %xmm6,%xmm11
3983
3984# qhasm: xmm12 ^= xmm3
3985# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
3986# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
3987pxor %xmm3,%xmm12
3988
3989# qhasm: xmm13 ^= xmm7
3990# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
3991# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
3992pxor %xmm7,%xmm13
3993
3994# qhasm: xmm14 ^= xmm2
3995# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
3996# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
3997pxor %xmm2,%xmm14
3998
3999# qhasm: xmm15 ^= xmm5
4000# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
4001# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
4002pxor %xmm5,%xmm15
4003
4004# qhasm: xmm8 ^= *(int128 *)(c + 384)
4005# asm 1: pxor 384(<c=int64#5),<xmm8=int6464#9
4006# asm 2: pxor 384(<c=%r8),<xmm8=%xmm8
4007pxor 384(%r8),%xmm8
4008
4009# qhasm: shuffle bytes of xmm8 by SR
4010# asm 1: pshufb SR,<xmm8=int6464#9
4011# asm 2: pshufb SR,<xmm8=%xmm8
4012pshufb SR,%xmm8
4013
4014# qhasm: xmm9 ^= *(int128 *)(c + 400)
4015# asm 1: pxor 400(<c=int64#5),<xmm9=int6464#10
4016# asm 2: pxor 400(<c=%r8),<xmm9=%xmm9
4017pxor 400(%r8),%xmm9
4018
4019# qhasm: shuffle bytes of xmm9 by SR
4020# asm 1: pshufb SR,<xmm9=int6464#10
4021# asm 2: pshufb SR,<xmm9=%xmm9
4022pshufb SR,%xmm9
4023
4024# qhasm: xmm10 ^= *(int128 *)(c + 416)
4025# asm 1: pxor 416(<c=int64#5),<xmm10=int6464#11
4026# asm 2: pxor 416(<c=%r8),<xmm10=%xmm10
4027pxor 416(%r8),%xmm10
4028
4029# qhasm: shuffle bytes of xmm10 by SR
4030# asm 1: pshufb SR,<xmm10=int6464#11
4031# asm 2: pshufb SR,<xmm10=%xmm10
4032pshufb SR,%xmm10
4033
4034# qhasm: xmm11 ^= *(int128 *)(c + 432)
4035# asm 1: pxor 432(<c=int64#5),<xmm11=int6464#12
4036# asm 2: pxor 432(<c=%r8),<xmm11=%xmm11
4037pxor 432(%r8),%xmm11
4038
4039# qhasm: shuffle bytes of xmm11 by SR
4040# asm 1: pshufb SR,<xmm11=int6464#12
4041# asm 2: pshufb SR,<xmm11=%xmm11
4042pshufb SR,%xmm11
4043
4044# qhasm: xmm12 ^= *(int128 *)(c + 448)
4045# asm 1: pxor 448(<c=int64#5),<xmm12=int6464#13
4046# asm 2: pxor 448(<c=%r8),<xmm12=%xmm12
4047pxor 448(%r8),%xmm12
4048
4049# qhasm: shuffle bytes of xmm12 by SR
4050# asm 1: pshufb SR,<xmm12=int6464#13
4051# asm 2: pshufb SR,<xmm12=%xmm12
4052pshufb SR,%xmm12
4053
4054# qhasm: xmm13 ^= *(int128 *)(c + 464)
4055# asm 1: pxor 464(<c=int64#5),<xmm13=int6464#14
4056# asm 2: pxor 464(<c=%r8),<xmm13=%xmm13
4057pxor 464(%r8),%xmm13
4058
4059# qhasm: shuffle bytes of xmm13 by SR
4060# asm 1: pshufb SR,<xmm13=int6464#14
4061# asm 2: pshufb SR,<xmm13=%xmm13
4062pshufb SR,%xmm13
4063
4064# qhasm: xmm14 ^= *(int128 *)(c + 480)
4065# asm 1: pxor 480(<c=int64#5),<xmm14=int6464#15
4066# asm 2: pxor 480(<c=%r8),<xmm14=%xmm14
4067pxor 480(%r8),%xmm14
4068
4069# qhasm: shuffle bytes of xmm14 by SR
4070# asm 1: pshufb SR,<xmm14=int6464#15
4071# asm 2: pshufb SR,<xmm14=%xmm14
4072pshufb SR,%xmm14
4073
4074# qhasm: xmm15 ^= *(int128 *)(c + 496)
4075# asm 1: pxor 496(<c=int64#5),<xmm15=int6464#16
4076# asm 2: pxor 496(<c=%r8),<xmm15=%xmm15
4077pxor 496(%r8),%xmm15
4078
4079# qhasm: shuffle bytes of xmm15 by SR
4080# asm 1: pshufb SR,<xmm15=int6464#16
4081# asm 2: pshufb SR,<xmm15=%xmm15
4082pshufb SR,%xmm15
4083
4084# qhasm: xmm13 ^= xmm14
4085# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
4086# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
4087pxor %xmm14,%xmm13
4088
4089# qhasm: xmm10 ^= xmm9
4090# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
4091# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
4092pxor %xmm9,%xmm10
4093
4094# qhasm: xmm13 ^= xmm8
4095# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
4096# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
4097pxor %xmm8,%xmm13
4098
4099# qhasm: xmm14 ^= xmm10
4100# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
4101# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
4102pxor %xmm10,%xmm14
4103
4104# qhasm: xmm11 ^= xmm8
4105# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
4106# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
4107pxor %xmm8,%xmm11
4108
4109# qhasm: xmm14 ^= xmm11
4110# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
4111# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
4112pxor %xmm11,%xmm14
4113
4114# qhasm: xmm11 ^= xmm15
4115# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
4116# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
4117pxor %xmm15,%xmm11
4118
4119# qhasm: xmm11 ^= xmm12
4120# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
4121# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
4122pxor %xmm12,%xmm11
4123
4124# qhasm: xmm15 ^= xmm13
4125# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
4126# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
4127pxor %xmm13,%xmm15
4128
4129# qhasm: xmm11 ^= xmm9
4130# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
4131# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
4132pxor %xmm9,%xmm11
4133
4134# qhasm: xmm12 ^= xmm13
4135# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
4136# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
4137pxor %xmm13,%xmm12
4138
4139# qhasm: xmm10 ^= xmm15
4140# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
4141# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
4142pxor %xmm15,%xmm10
4143
4144# qhasm: xmm9 ^= xmm13
4145# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
4146# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
4147pxor %xmm13,%xmm9
4148
4149# qhasm: xmm3 = xmm15
4150# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
4151# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
4152movdqa %xmm15,%xmm0
4153
4154# qhasm: xmm2 = xmm9
4155# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
4156# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
4157movdqa %xmm9,%xmm1
4158
4159# qhasm: xmm1 = xmm13
4160# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
4161# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
4162movdqa %xmm13,%xmm2
4163
4164# qhasm: xmm5 = xmm10
4165# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
4166# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
4167movdqa %xmm10,%xmm3
4168
4169# qhasm: xmm4 = xmm14
4170# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
4171# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
4172movdqa %xmm14,%xmm4
4173
4174# qhasm: xmm3 ^= xmm12
4175# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
4176# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
4177pxor %xmm12,%xmm0
4178
4179# qhasm: xmm2 ^= xmm10
4180# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
4181# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
4182pxor %xmm10,%xmm1
4183
4184# qhasm: xmm1 ^= xmm11
4185# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
4186# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
4187pxor %xmm11,%xmm2
4188
4189# qhasm: xmm5 ^= xmm12
4190# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
4191# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
4192pxor %xmm12,%xmm3
4193
4194# qhasm: xmm4 ^= xmm8
4195# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
4196# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
4197pxor %xmm8,%xmm4
4198
4199# qhasm: xmm6 = xmm3
4200# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
4201# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
4202movdqa %xmm0,%xmm5
4203
4204# qhasm: xmm0 = xmm2
4205# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
4206# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
4207movdqa %xmm1,%xmm6
4208
4209# qhasm: xmm7 = xmm3
4210# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
4211# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
4212movdqa %xmm0,%xmm7
4213
4214# qhasm: xmm2 |= xmm1
4215# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
4216# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
4217por %xmm2,%xmm1
4218
4219# qhasm: xmm3 |= xmm4
4220# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
4221# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
4222por %xmm4,%xmm0
4223
4224# qhasm: xmm7 ^= xmm0
4225# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
4226# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
4227pxor %xmm6,%xmm7
4228
4229# qhasm: xmm6 &= xmm4
4230# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
4231# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
4232pand %xmm4,%xmm5
4233
4234# qhasm: xmm0 &= xmm1
4235# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
4236# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
4237pand %xmm2,%xmm6
4238
4239# qhasm: xmm4 ^= xmm1
4240# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
4241# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
4242pxor %xmm2,%xmm4
4243
4244# qhasm: xmm7 &= xmm4
4245# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
4246# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
4247pand %xmm4,%xmm7
4248
4249# qhasm: xmm4 = xmm11
4250# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
4251# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
4252movdqa %xmm11,%xmm2
4253
4254# qhasm: xmm4 ^= xmm8
4255# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
4256# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
4257pxor %xmm8,%xmm2
4258
4259# qhasm: xmm5 &= xmm4
4260# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
4261# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
4262pand %xmm2,%xmm3
4263
4264# qhasm: xmm3 ^= xmm5
4265# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
4266# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
4267pxor %xmm3,%xmm0
4268
4269# qhasm: xmm2 ^= xmm5
4270# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
4271# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
4272pxor %xmm3,%xmm1
4273
4274# qhasm: xmm5 = xmm15
4275# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
4276# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
4277movdqa %xmm15,%xmm2
4278
4279# qhasm: xmm5 ^= xmm9
4280# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
4281# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
4282pxor %xmm9,%xmm2
4283
4284# qhasm: xmm4 = xmm13
4285# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
4286# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
4287movdqa %xmm13,%xmm3
4288
4289# qhasm: xmm1 = xmm5
4290# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
4291# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
4292movdqa %xmm2,%xmm4
4293
4294# qhasm: xmm4 ^= xmm14
4295# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
4296# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
4297pxor %xmm14,%xmm3
4298
4299# qhasm: xmm1 |= xmm4
4300# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
4301# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
4302por %xmm3,%xmm4
4303
4304# qhasm: xmm5 &= xmm4
4305# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
4306# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
4307pand %xmm3,%xmm2
4308
4309# qhasm: xmm0 ^= xmm5
4310# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
4311# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
4312pxor %xmm2,%xmm6
4313
4314# qhasm: xmm3 ^= xmm7
4315# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
4316# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
4317pxor %xmm7,%xmm0
4318
4319# qhasm: xmm2 ^= xmm6
4320# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
4321# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
4322pxor %xmm5,%xmm1
4323
4324# qhasm: xmm1 ^= xmm7
4325# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
4326# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
4327pxor %xmm7,%xmm4
4328
4329# qhasm: xmm0 ^= xmm6
4330# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
4331# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
4332pxor %xmm5,%xmm6
4333
4334# qhasm: xmm1 ^= xmm6
4335# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
4336# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
4337pxor %xmm5,%xmm4
4338
4339# qhasm: xmm4 = xmm10
4340# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
4341# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
4342movdqa %xmm10,%xmm2
4343
4344# qhasm: xmm5 = xmm12
4345# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
4346# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
4347movdqa %xmm12,%xmm3
4348
4349# qhasm: xmm6 = xmm9
4350# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
4351# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
4352movdqa %xmm9,%xmm5
4353
4354# qhasm: xmm7 = xmm15
4355# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
4356# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
4357movdqa %xmm15,%xmm7
4358
4359# qhasm: xmm4 &= xmm11
4360# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
4361# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
4362pand %xmm11,%xmm2
4363
4364# qhasm: xmm5 &= xmm8
4365# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
4366# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
4367pand %xmm8,%xmm3
4368
4369# qhasm: xmm6 &= xmm13
4370# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
4371# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
4372pand %xmm13,%xmm5
4373
4374# qhasm: xmm7 |= xmm14
4375# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
4376# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
4377por %xmm14,%xmm7
4378
4379# qhasm: xmm3 ^= xmm4
4380# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
4381# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
4382pxor %xmm2,%xmm0
4383
4384# qhasm: xmm2 ^= xmm5
4385# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
4386# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
4387pxor %xmm3,%xmm1
4388
4389# qhasm: xmm1 ^= xmm6
4390# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
4391# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
4392pxor %xmm5,%xmm4
4393
4394# qhasm: xmm0 ^= xmm7
4395# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
4396# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
4397pxor %xmm7,%xmm6
4398
4399# qhasm: xmm4 = xmm3
4400# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
4401# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
4402movdqa %xmm0,%xmm2
4403
4404# qhasm: xmm4 ^= xmm2
4405# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
4406# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
4407pxor %xmm1,%xmm2
4408
4409# qhasm: xmm3 &= xmm1
4410# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
4411# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
4412pand %xmm4,%xmm0
4413
4414# qhasm: xmm6 = xmm0
4415# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
4416# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
4417movdqa %xmm6,%xmm3
4418
4419# qhasm: xmm6 ^= xmm3
4420# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
4421# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
4422pxor %xmm0,%xmm3
4423
4424# qhasm: xmm7 = xmm4
4425# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
4426# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
4427movdqa %xmm2,%xmm5
4428
4429# qhasm: xmm7 &= xmm6
4430# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
4431# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
4432pand %xmm3,%xmm5
4433
4434# qhasm: xmm7 ^= xmm2
4435# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
4436# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
4437pxor %xmm1,%xmm5
4438
4439# qhasm: xmm5 = xmm1
4440# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
4441# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
4442movdqa %xmm4,%xmm7
4443
4444# qhasm: xmm5 ^= xmm0
4445# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
4446# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
4447pxor %xmm6,%xmm7
4448
4449# qhasm: xmm3 ^= xmm2
4450# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
4451# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
4452pxor %xmm1,%xmm0
4453
4454# qhasm: xmm5 &= xmm3
4455# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
4456# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
4457pand %xmm0,%xmm7
4458
4459# qhasm: xmm5 ^= xmm0
4460# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
4461# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
4462pxor %xmm6,%xmm7
4463
4464# qhasm: xmm1 ^= xmm5
4465# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
4466# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
4467pxor %xmm7,%xmm4
4468
4469# qhasm: xmm2 = xmm6
4470# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
4471# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
4472movdqa %xmm3,%xmm0
4473
4474# qhasm: xmm2 ^= xmm5
4475# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
4476# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
4477pxor %xmm7,%xmm0
4478
4479# qhasm: xmm2 &= xmm0
4480# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
4481# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
4482pand %xmm6,%xmm0
4483
4484# qhasm: xmm1 ^= xmm2
4485# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
4486# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
4487pxor %xmm0,%xmm4
4488
4489# qhasm: xmm6 ^= xmm2
4490# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
4491# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
4492pxor %xmm0,%xmm3
4493
4494# qhasm: xmm6 &= xmm7
4495# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
4496# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
4497pand %xmm5,%xmm3
4498
4499# qhasm: xmm6 ^= xmm4
4500# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
4501# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
4502pxor %xmm2,%xmm3
4503
4504# qhasm: xmm4 = xmm14
4505# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
4506# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
4507movdqa %xmm14,%xmm0
4508
4509# qhasm: xmm0 = xmm13
4510# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
4511# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
4512movdqa %xmm13,%xmm1
4513
4514# qhasm: xmm2 = xmm7
4515# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
4516# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
4517movdqa %xmm5,%xmm2
4518
4519# qhasm: xmm2 ^= xmm6
4520# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
4521# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
4522pxor %xmm3,%xmm2
4523
4524# qhasm: xmm2 &= xmm14
4525# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
4526# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
4527pand %xmm14,%xmm2
4528
4529# qhasm: xmm14 ^= xmm13
4530# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
4531# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
4532pxor %xmm13,%xmm14
4533
4534# qhasm: xmm14 &= xmm6
4535# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
4536# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
4537pand %xmm3,%xmm14
4538
4539# qhasm: xmm13 &= xmm7
4540# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
4541# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
4542pand %xmm5,%xmm13
4543
4544# qhasm: xmm14 ^= xmm13
4545# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
4546# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
4547pxor %xmm13,%xmm14
4548
4549# qhasm: xmm13 ^= xmm2
4550# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
4551# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
4552pxor %xmm2,%xmm13
4553
4554# qhasm: xmm4 ^= xmm8
4555# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
4556# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
4557pxor %xmm8,%xmm0
4558
4559# qhasm: xmm0 ^= xmm11
4560# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
4561# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
4562pxor %xmm11,%xmm1
4563
4564# qhasm: xmm7 ^= xmm5
4565# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
4566# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
4567pxor %xmm7,%xmm5
4568
4569# qhasm: xmm6 ^= xmm1
4570# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
4571# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
4572pxor %xmm4,%xmm3
4573
4574# qhasm: xmm3 = xmm7
4575# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4576# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4577movdqa %xmm5,%xmm2
4578
4579# qhasm: xmm3 ^= xmm6
4580# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4581# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4582pxor %xmm3,%xmm2
4583
4584# qhasm: xmm3 &= xmm4
4585# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
4586# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
4587pand %xmm0,%xmm2
4588
4589# qhasm: xmm4 ^= xmm0
4590# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
4591# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
4592pxor %xmm1,%xmm0
4593
4594# qhasm: xmm4 &= xmm6
4595# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
4596# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
4597pand %xmm3,%xmm0
4598
4599# qhasm: xmm0 &= xmm7
4600# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
4601# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
4602pand %xmm5,%xmm1
4603
4604# qhasm: xmm0 ^= xmm4
4605# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
4606# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
4607pxor %xmm0,%xmm1
4608
4609# qhasm: xmm4 ^= xmm3
4610# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
4611# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
4612pxor %xmm2,%xmm0
4613
4614# qhasm: xmm2 = xmm5
4615# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
4616# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
4617movdqa %xmm7,%xmm2
4618
4619# qhasm: xmm2 ^= xmm1
4620# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
4621# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
4622pxor %xmm4,%xmm2
4623
4624# qhasm: xmm2 &= xmm8
4625# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
4626# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
4627pand %xmm8,%xmm2
4628
4629# qhasm: xmm8 ^= xmm11
4630# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
4631# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
4632pxor %xmm11,%xmm8
4633
4634# qhasm: xmm8 &= xmm1
4635# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
4636# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
4637pand %xmm4,%xmm8
4638
4639# qhasm: xmm11 &= xmm5
4640# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
4641# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
4642pand %xmm7,%xmm11
4643
4644# qhasm: xmm8 ^= xmm11
4645# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
4646# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
4647pxor %xmm11,%xmm8
4648
4649# qhasm: xmm11 ^= xmm2
4650# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
4651# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
4652pxor %xmm2,%xmm11
4653
4654# qhasm: xmm14 ^= xmm4
4655# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
4656# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
4657pxor %xmm0,%xmm14
4658
4659# qhasm: xmm8 ^= xmm4
4660# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
4661# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
4662pxor %xmm0,%xmm8
4663
4664# qhasm: xmm13 ^= xmm0
4665# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
4666# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
4667pxor %xmm1,%xmm13
4668
4669# qhasm: xmm11 ^= xmm0
4670# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
4671# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
4672pxor %xmm1,%xmm11
4673
4674# qhasm: xmm4 = xmm15
4675# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
4676# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
4677movdqa %xmm15,%xmm0
4678
4679# qhasm: xmm0 = xmm9
4680# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
4681# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
4682movdqa %xmm9,%xmm1
4683
4684# qhasm: xmm4 ^= xmm12
4685# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
4686# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
4687pxor %xmm12,%xmm0
4688
4689# qhasm: xmm0 ^= xmm10
4690# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
4691# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
4692pxor %xmm10,%xmm1
4693
4694# qhasm: xmm3 = xmm7
4695# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4696# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4697movdqa %xmm5,%xmm2
4698
4699# qhasm: xmm3 ^= xmm6
4700# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4701# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4702pxor %xmm3,%xmm2
4703
4704# qhasm: xmm3 &= xmm4
4705# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
4706# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
4707pand %xmm0,%xmm2
4708
4709# qhasm: xmm4 ^= xmm0
4710# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
4711# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
4712pxor %xmm1,%xmm0
4713
4714# qhasm: xmm4 &= xmm6
4715# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
4716# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
4717pand %xmm3,%xmm0
4718
4719# qhasm: xmm0 &= xmm7
4720# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
4721# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
4722pand %xmm5,%xmm1
4723
4724# qhasm: xmm0 ^= xmm4
4725# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
4726# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
4727pxor %xmm0,%xmm1
4728
4729# qhasm: xmm4 ^= xmm3
4730# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
4731# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
4732pxor %xmm2,%xmm0
4733
4734# qhasm: xmm2 = xmm5
4735# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
4736# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
4737movdqa %xmm7,%xmm2
4738
4739# qhasm: xmm2 ^= xmm1
4740# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
4741# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
4742pxor %xmm4,%xmm2
4743
4744# qhasm: xmm2 &= xmm12
4745# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
4746# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
4747pand %xmm12,%xmm2
4748
4749# qhasm: xmm12 ^= xmm10
4750# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
4751# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
4752pxor %xmm10,%xmm12
4753
4754# qhasm: xmm12 &= xmm1
4755# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
4756# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
4757pand %xmm4,%xmm12
4758
4759# qhasm: xmm10 &= xmm5
4760# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
4761# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
4762pand %xmm7,%xmm10
4763
4764# qhasm: xmm12 ^= xmm10
4765# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
4766# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
4767pxor %xmm10,%xmm12
4768
4769# qhasm: xmm10 ^= xmm2
4770# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
4771# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
4772pxor %xmm2,%xmm10
4773
4774# qhasm: xmm7 ^= xmm5
4775# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
4776# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
4777pxor %xmm7,%xmm5
4778
4779# qhasm: xmm6 ^= xmm1
4780# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
4781# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
4782pxor %xmm4,%xmm3
4783
4784# qhasm: xmm3 = xmm7
4785# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
4786# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
4787movdqa %xmm5,%xmm2
4788
4789# qhasm: xmm3 ^= xmm6
4790# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
4791# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
4792pxor %xmm3,%xmm2
4793
4794# qhasm: xmm3 &= xmm15
4795# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
4796# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
4797pand %xmm15,%xmm2
4798
4799# qhasm: xmm15 ^= xmm9
4800# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
4801# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
4802pxor %xmm9,%xmm15
4803
4804# qhasm: xmm15 &= xmm6
4805# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
4806# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
4807pand %xmm3,%xmm15
4808
4809# qhasm: xmm9 &= xmm7
4810# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
4811# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
4812pand %xmm5,%xmm9
4813
4814# qhasm: xmm15 ^= xmm9
4815# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
4816# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
4817pxor %xmm9,%xmm15
4818
4819# qhasm: xmm9 ^= xmm3
4820# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
4821# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
4822pxor %xmm2,%xmm9
4823
4824# qhasm: xmm15 ^= xmm4
4825# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
4826# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
4827pxor %xmm0,%xmm15
4828
4829# qhasm: xmm12 ^= xmm4
4830# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
4831# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
4832pxor %xmm0,%xmm12
4833
4834# qhasm: xmm9 ^= xmm0
4835# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
4836# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
4837pxor %xmm1,%xmm9
4838
4839# qhasm: xmm10 ^= xmm0
4840# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
4841# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
4842pxor %xmm1,%xmm10
4843
4844# qhasm: xmm15 ^= xmm8
4845# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
4846# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
4847pxor %xmm8,%xmm15
4848
4849# qhasm: xmm9 ^= xmm14
4850# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
4851# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
4852pxor %xmm14,%xmm9
4853
4854# qhasm: xmm12 ^= xmm15
4855# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
4856# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
4857pxor %xmm15,%xmm12
4858
4859# qhasm: xmm14 ^= xmm8
4860# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
4861# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
4862pxor %xmm8,%xmm14
4863
4864# qhasm: xmm8 ^= xmm9
4865# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
4866# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
4867pxor %xmm9,%xmm8
4868
4869# qhasm: xmm9 ^= xmm13
4870# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
4871# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
4872pxor %xmm13,%xmm9
4873
4874# qhasm: xmm13 ^= xmm10
4875# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
4876# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
4877pxor %xmm10,%xmm13
4878
4879# qhasm: xmm12 ^= xmm13
4880# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
4881# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
4882pxor %xmm13,%xmm12
4883
4884# qhasm: xmm10 ^= xmm11
4885# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
4886# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
4887pxor %xmm11,%xmm10
4888
4889# qhasm: xmm11 ^= xmm13
4890# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
4891# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
4892pxor %xmm13,%xmm11
4893
4894# qhasm: xmm14 ^= xmm11
4895# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
4896# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
4897pxor %xmm11,%xmm14
4898
4899# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
4900# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
4901# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
4902pshufd $0x93,%xmm8,%xmm0
4903
4904# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
4905# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
4906# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
4907pshufd $0x93,%xmm9,%xmm1
4908
4909# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
4910# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
4911# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
4912pshufd $0x93,%xmm12,%xmm2
4913
4914# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
4915# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
4916# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
4917pshufd $0x93,%xmm14,%xmm3
4918
4919# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
4920# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
4921# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
4922pshufd $0x93,%xmm11,%xmm4
4923
4924# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
4925# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
4926# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
4927pshufd $0x93,%xmm15,%xmm5
4928
4929# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
4930# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
4931# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
4932pshufd $0x93,%xmm10,%xmm6
4933
4934# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
4935# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
4936# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
4937pshufd $0x93,%xmm13,%xmm7
4938
4939# qhasm: xmm8 ^= xmm0
4940# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
4941# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
4942pxor %xmm0,%xmm8
4943
4944# qhasm: xmm9 ^= xmm1
4945# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
4946# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
4947pxor %xmm1,%xmm9
4948
4949# qhasm: xmm12 ^= xmm2
4950# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
4951# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
4952pxor %xmm2,%xmm12
4953
4954# qhasm: xmm14 ^= xmm3
4955# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
4956# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
4957pxor %xmm3,%xmm14
4958
4959# qhasm: xmm11 ^= xmm4
4960# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
4961# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
4962pxor %xmm4,%xmm11
4963
4964# qhasm: xmm15 ^= xmm5
4965# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
4966# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
4967pxor %xmm5,%xmm15
4968
4969# qhasm: xmm10 ^= xmm6
4970# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
4971# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
4972pxor %xmm6,%xmm10
4973
4974# qhasm: xmm13 ^= xmm7
4975# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
4976# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
4977pxor %xmm7,%xmm13
4978
4979# qhasm: xmm0 ^= xmm13
4980# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
4981# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
4982pxor %xmm13,%xmm0
4983
4984# qhasm: xmm1 ^= xmm8
4985# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
4986# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
4987pxor %xmm8,%xmm1
4988
4989# qhasm: xmm2 ^= xmm9
4990# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
4991# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
4992pxor %xmm9,%xmm2
4993
4994# qhasm: xmm1 ^= xmm13
4995# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
4996# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
4997pxor %xmm13,%xmm1
4998
4999# qhasm: xmm3 ^= xmm12
5000# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
5001# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
5002pxor %xmm12,%xmm3
5003
5004# qhasm: xmm4 ^= xmm14
5005# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
5006# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
5007pxor %xmm14,%xmm4
5008
5009# qhasm: xmm5 ^= xmm11
5010# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
5011# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
5012pxor %xmm11,%xmm5
5013
5014# qhasm: xmm3 ^= xmm13
5015# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
5016# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
5017pxor %xmm13,%xmm3
5018
5019# qhasm: xmm6 ^= xmm15
5020# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
5021# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
5022pxor %xmm15,%xmm6
5023
5024# qhasm: xmm7 ^= xmm10
5025# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
5026# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
5027pxor %xmm10,%xmm7
5028
5029# qhasm: xmm4 ^= xmm13
5030# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
5031# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
5032pxor %xmm13,%xmm4
5033
5034# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
5035# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
5036# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
5037pshufd $0x4E,%xmm8,%xmm8
5038
5039# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
5040# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
5041# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
5042pshufd $0x4E,%xmm9,%xmm9
5043
5044# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
5045# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
5046# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
5047pshufd $0x4E,%xmm12,%xmm12
5048
5049# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
5050# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
5051# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
5052pshufd $0x4E,%xmm14,%xmm14
5053
5054# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
5055# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
5056# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
5057pshufd $0x4E,%xmm11,%xmm11
5058
5059# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
5060# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
5061# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
5062pshufd $0x4E,%xmm15,%xmm15
5063
5064# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
5065# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
5066# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
5067pshufd $0x4E,%xmm10,%xmm10
5068
5069# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
5070# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
5071# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
5072pshufd $0x4E,%xmm13,%xmm13
5073
5074# qhasm: xmm0 ^= xmm8
5075# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
5076# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
5077pxor %xmm8,%xmm0
5078
5079# qhasm: xmm1 ^= xmm9
5080# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
5081# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
5082pxor %xmm9,%xmm1
5083
5084# qhasm: xmm2 ^= xmm12
5085# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
5086# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
5087pxor %xmm12,%xmm2
5088
5089# qhasm: xmm3 ^= xmm14
5090# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
5091# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
5092pxor %xmm14,%xmm3
5093
5094# qhasm: xmm4 ^= xmm11
5095# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
5096# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
5097pxor %xmm11,%xmm4
5098
5099# qhasm: xmm5 ^= xmm15
5100# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
5101# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
5102pxor %xmm15,%xmm5
5103
5104# qhasm: xmm6 ^= xmm10
5105# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
5106# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
5107pxor %xmm10,%xmm6
5108
5109# qhasm: xmm7 ^= xmm13
5110# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
5111# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
5112pxor %xmm13,%xmm7
5113
5114# qhasm: xmm0 ^= *(int128 *)(c + 512)
5115# asm 1: pxor 512(<c=int64#5),<xmm0=int6464#1
5116# asm 2: pxor 512(<c=%r8),<xmm0=%xmm0
5117pxor 512(%r8),%xmm0
5118
5119# qhasm: shuffle bytes of xmm0 by SR
5120# asm 1: pshufb SR,<xmm0=int6464#1
5121# asm 2: pshufb SR,<xmm0=%xmm0
5122pshufb SR,%xmm0
5123
5124# qhasm: xmm1 ^= *(int128 *)(c + 528)
5125# asm 1: pxor 528(<c=int64#5),<xmm1=int6464#2
5126# asm 2: pxor 528(<c=%r8),<xmm1=%xmm1
5127pxor 528(%r8),%xmm1
5128
5129# qhasm: shuffle bytes of xmm1 by SR
5130# asm 1: pshufb SR,<xmm1=int6464#2
5131# asm 2: pshufb SR,<xmm1=%xmm1
5132pshufb SR,%xmm1
5133
5134# qhasm: xmm2 ^= *(int128 *)(c + 544)
5135# asm 1: pxor 544(<c=int64#5),<xmm2=int6464#3
5136# asm 2: pxor 544(<c=%r8),<xmm2=%xmm2
5137pxor 544(%r8),%xmm2
5138
5139# qhasm: shuffle bytes of xmm2 by SR
5140# asm 1: pshufb SR,<xmm2=int6464#3
5141# asm 2: pshufb SR,<xmm2=%xmm2
5142pshufb SR,%xmm2
5143
5144# qhasm: xmm3 ^= *(int128 *)(c + 560)
5145# asm 1: pxor 560(<c=int64#5),<xmm3=int6464#4
5146# asm 2: pxor 560(<c=%r8),<xmm3=%xmm3
5147pxor 560(%r8),%xmm3
5148
5149# qhasm: shuffle bytes of xmm3 by SR
5150# asm 1: pshufb SR,<xmm3=int6464#4
5151# asm 2: pshufb SR,<xmm3=%xmm3
5152pshufb SR,%xmm3
5153
5154# qhasm: xmm4 ^= *(int128 *)(c + 576)
5155# asm 1: pxor 576(<c=int64#5),<xmm4=int6464#5
5156# asm 2: pxor 576(<c=%r8),<xmm4=%xmm4
5157pxor 576(%r8),%xmm4
5158
5159# qhasm: shuffle bytes of xmm4 by SR
5160# asm 1: pshufb SR,<xmm4=int6464#5
5161# asm 2: pshufb SR,<xmm4=%xmm4
5162pshufb SR,%xmm4
5163
5164# qhasm: xmm5 ^= *(int128 *)(c + 592)
5165# asm 1: pxor 592(<c=int64#5),<xmm5=int6464#6
5166# asm 2: pxor 592(<c=%r8),<xmm5=%xmm5
5167pxor 592(%r8),%xmm5
5168
5169# qhasm: shuffle bytes of xmm5 by SR
5170# asm 1: pshufb SR,<xmm5=int6464#6
5171# asm 2: pshufb SR,<xmm5=%xmm5
5172pshufb SR,%xmm5
5173
5174# qhasm: xmm6 ^= *(int128 *)(c + 608)
5175# asm 1: pxor 608(<c=int64#5),<xmm6=int6464#7
5176# asm 2: pxor 608(<c=%r8),<xmm6=%xmm6
5177pxor 608(%r8),%xmm6
5178
5179# qhasm: shuffle bytes of xmm6 by SR
5180# asm 1: pshufb SR,<xmm6=int6464#7
5181# asm 2: pshufb SR,<xmm6=%xmm6
5182pshufb SR,%xmm6
5183
5184# qhasm: xmm7 ^= *(int128 *)(c + 624)
5185# asm 1: pxor 624(<c=int64#5),<xmm7=int6464#8
5186# asm 2: pxor 624(<c=%r8),<xmm7=%xmm7
5187pxor 624(%r8),%xmm7
5188
5189# qhasm: shuffle bytes of xmm7 by SR
5190# asm 1: pshufb SR,<xmm7=int6464#8
5191# asm 2: pshufb SR,<xmm7=%xmm7
5192pshufb SR,%xmm7
5193
5194# qhasm: xmm5 ^= xmm6
5195# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
5196# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
5197pxor %xmm6,%xmm5
5198
5199# qhasm: xmm2 ^= xmm1
5200# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
5201# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
5202pxor %xmm1,%xmm2
5203
5204# qhasm: xmm5 ^= xmm0
5205# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
5206# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
5207pxor %xmm0,%xmm5
5208
5209# qhasm: xmm6 ^= xmm2
5210# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
5211# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
5212pxor %xmm2,%xmm6
5213
5214# qhasm: xmm3 ^= xmm0
5215# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
5216# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
5217pxor %xmm0,%xmm3
5218
5219# qhasm: xmm6 ^= xmm3
5220# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
5221# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
5222pxor %xmm3,%xmm6
5223
5224# qhasm: xmm3 ^= xmm7
5225# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
5226# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
5227pxor %xmm7,%xmm3
5228
5229# qhasm: xmm3 ^= xmm4
5230# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
5231# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
5232pxor %xmm4,%xmm3
5233
5234# qhasm: xmm7 ^= xmm5
5235# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
5236# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
5237pxor %xmm5,%xmm7
5238
5239# qhasm: xmm3 ^= xmm1
5240# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
5241# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
5242pxor %xmm1,%xmm3
5243
5244# qhasm: xmm4 ^= xmm5
5245# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
5246# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
5247pxor %xmm5,%xmm4
5248
5249# qhasm: xmm2 ^= xmm7
5250# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
5251# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
5252pxor %xmm7,%xmm2
5253
5254# qhasm: xmm1 ^= xmm5
5255# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
5256# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
5257pxor %xmm5,%xmm1
5258
5259# qhasm: xmm11 = xmm7
5260# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
5261# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
5262movdqa %xmm7,%xmm8
5263
5264# qhasm: xmm10 = xmm1
5265# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
5266# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
5267movdqa %xmm1,%xmm9
5268
5269# qhasm: xmm9 = xmm5
5270# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
5271# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
5272movdqa %xmm5,%xmm10
5273
5274# qhasm: xmm13 = xmm2
5275# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
5276# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
5277movdqa %xmm2,%xmm11
5278
5279# qhasm: xmm12 = xmm6
5280# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
5281# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
5282movdqa %xmm6,%xmm12
5283
5284# qhasm: xmm11 ^= xmm4
5285# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
5286# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
5287pxor %xmm4,%xmm8
5288
5289# qhasm: xmm10 ^= xmm2
5290# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
5291# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
5292pxor %xmm2,%xmm9
5293
5294# qhasm: xmm9 ^= xmm3
5295# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
5296# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
5297pxor %xmm3,%xmm10
5298
5299# qhasm: xmm13 ^= xmm4
5300# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
5301# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
5302pxor %xmm4,%xmm11
5303
5304# qhasm: xmm12 ^= xmm0
5305# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
5306# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
5307pxor %xmm0,%xmm12
5308
5309# qhasm: xmm14 = xmm11
5310# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
5311# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
5312movdqa %xmm8,%xmm13
5313
5314# qhasm: xmm8 = xmm10
5315# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
5316# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
5317movdqa %xmm9,%xmm14
5318
5319# qhasm: xmm15 = xmm11
5320# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
5321# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
5322movdqa %xmm8,%xmm15
5323
5324# qhasm: xmm10 |= xmm9
5325# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
5326# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
5327por %xmm10,%xmm9
5328
5329# qhasm: xmm11 |= xmm12
5330# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
5331# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
5332por %xmm12,%xmm8
5333
5334# qhasm: xmm15 ^= xmm8
5335# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
5336# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
5337pxor %xmm14,%xmm15
5338
5339# qhasm: xmm14 &= xmm12
5340# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
5341# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
5342pand %xmm12,%xmm13
5343
5344# qhasm: xmm8 &= xmm9
5345# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
5346# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
5347pand %xmm10,%xmm14
5348
5349# qhasm: xmm12 ^= xmm9
5350# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
5351# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
5352pxor %xmm10,%xmm12
5353
5354# qhasm: xmm15 &= xmm12
5355# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
5356# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
5357pand %xmm12,%xmm15
5358
5359# qhasm: xmm12 = xmm3
5360# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
5361# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
5362movdqa %xmm3,%xmm10
5363
5364# qhasm: xmm12 ^= xmm0
5365# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
5366# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
5367pxor %xmm0,%xmm10
5368
5369# qhasm: xmm13 &= xmm12
5370# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
5371# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
5372pand %xmm10,%xmm11
5373
5374# qhasm: xmm11 ^= xmm13
5375# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
5376# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
5377pxor %xmm11,%xmm8
5378
5379# qhasm: xmm10 ^= xmm13
5380# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
5381# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
5382pxor %xmm11,%xmm9
5383
5384# qhasm: xmm13 = xmm7
5385# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
5386# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
5387movdqa %xmm7,%xmm10
5388
5389# qhasm: xmm13 ^= xmm1
5390# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
5391# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
5392pxor %xmm1,%xmm10
5393
5394# qhasm: xmm12 = xmm5
5395# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
5396# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
5397movdqa %xmm5,%xmm11
5398
5399# qhasm: xmm9 = xmm13
5400# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
5401# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
5402movdqa %xmm10,%xmm12
5403
5404# qhasm: xmm12 ^= xmm6
5405# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
5406# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
5407pxor %xmm6,%xmm11
5408
5409# qhasm: xmm9 |= xmm12
5410# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
5411# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
5412por %xmm11,%xmm12
5413
5414# qhasm: xmm13 &= xmm12
5415# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
5416# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
5417pand %xmm11,%xmm10
5418
5419# qhasm: xmm8 ^= xmm13
5420# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
5421# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
5422pxor %xmm10,%xmm14
5423
5424# qhasm: xmm11 ^= xmm15
5425# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
5426# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
5427pxor %xmm15,%xmm8
5428
5429# qhasm: xmm10 ^= xmm14
5430# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
5431# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
5432pxor %xmm13,%xmm9
5433
5434# qhasm: xmm9 ^= xmm15
5435# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
5436# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
5437pxor %xmm15,%xmm12
5438
5439# qhasm: xmm8 ^= xmm14
5440# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
5441# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
5442pxor %xmm13,%xmm14
5443
5444# qhasm: xmm9 ^= xmm14
5445# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
5446# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
5447pxor %xmm13,%xmm12
5448
5449# qhasm: xmm12 = xmm2
5450# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
5451# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
5452movdqa %xmm2,%xmm10
5453
5454# qhasm: xmm13 = xmm4
5455# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
5456# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
5457movdqa %xmm4,%xmm11
5458
5459# qhasm: xmm14 = xmm1
5460# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
5461# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
5462movdqa %xmm1,%xmm13
5463
5464# qhasm: xmm15 = xmm7
5465# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
5466# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
5467movdqa %xmm7,%xmm15
5468
5469# qhasm: xmm12 &= xmm3
5470# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
5471# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
5472pand %xmm3,%xmm10
5473
5474# qhasm: xmm13 &= xmm0
5475# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
5476# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
5477pand %xmm0,%xmm11
5478
5479# qhasm: xmm14 &= xmm5
5480# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
5481# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
5482pand %xmm5,%xmm13
5483
5484# qhasm: xmm15 |= xmm6
5485# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
5486# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
5487por %xmm6,%xmm15
5488
5489# qhasm: xmm11 ^= xmm12
5490# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
5491# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
5492pxor %xmm10,%xmm8
5493
5494# qhasm: xmm10 ^= xmm13
5495# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
5496# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
5497pxor %xmm11,%xmm9
5498
5499# qhasm: xmm9 ^= xmm14
5500# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
5501# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
5502pxor %xmm13,%xmm12
5503
5504# qhasm: xmm8 ^= xmm15
5505# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
5506# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
5507pxor %xmm15,%xmm14
5508
5509# qhasm: xmm12 = xmm11
5510# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
5511# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
5512movdqa %xmm8,%xmm10
5513
5514# qhasm: xmm12 ^= xmm10
5515# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
5516# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
5517pxor %xmm9,%xmm10
5518
5519# qhasm: xmm11 &= xmm9
5520# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
5521# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
5522pand %xmm12,%xmm8
5523
5524# qhasm: xmm14 = xmm8
5525# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
5526# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
5527movdqa %xmm14,%xmm11
5528
5529# qhasm: xmm14 ^= xmm11
5530# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
5531# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
5532pxor %xmm8,%xmm11
5533
5534# qhasm: xmm15 = xmm12
5535# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
5536# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
5537movdqa %xmm10,%xmm13
5538
5539# qhasm: xmm15 &= xmm14
5540# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
5541# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
5542pand %xmm11,%xmm13
5543
5544# qhasm: xmm15 ^= xmm10
5545# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
5546# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
5547pxor %xmm9,%xmm13
5548
5549# qhasm: xmm13 = xmm9
5550# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
5551# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
5552movdqa %xmm12,%xmm15
5553
5554# qhasm: xmm13 ^= xmm8
5555# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
5556# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
5557pxor %xmm14,%xmm15
5558
5559# qhasm: xmm11 ^= xmm10
5560# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
5561# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
5562pxor %xmm9,%xmm8
5563
5564# qhasm: xmm13 &= xmm11
5565# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
5566# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
5567pand %xmm8,%xmm15
5568
5569# qhasm: xmm13 ^= xmm8
5570# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
5571# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
5572pxor %xmm14,%xmm15
5573
5574# qhasm: xmm9 ^= xmm13
5575# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
5576# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
5577pxor %xmm15,%xmm12
5578
5579# qhasm: xmm10 = xmm14
5580# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
5581# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
5582movdqa %xmm11,%xmm8
5583
5584# qhasm: xmm10 ^= xmm13
5585# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
5586# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
5587pxor %xmm15,%xmm8
5588
5589# qhasm: xmm10 &= xmm8
5590# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
5591# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
5592pand %xmm14,%xmm8
5593
5594# qhasm: xmm9 ^= xmm10
5595# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
5596# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
5597pxor %xmm8,%xmm12
5598
5599# qhasm: xmm14 ^= xmm10
5600# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
5601# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
5602pxor %xmm8,%xmm11
5603
5604# qhasm: xmm14 &= xmm15
5605# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
5606# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
5607pand %xmm13,%xmm11
5608
5609# qhasm: xmm14 ^= xmm12
5610# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
5611# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
5612pxor %xmm10,%xmm11
5613
5614# qhasm: xmm12 = xmm6
5615# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
5616# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
5617movdqa %xmm6,%xmm8
5618
5619# qhasm: xmm8 = xmm5
5620# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
5621# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
5622movdqa %xmm5,%xmm9
5623
5624# qhasm: xmm10 = xmm15
5625# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
5626# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
5627movdqa %xmm13,%xmm10
5628
5629# qhasm: xmm10 ^= xmm14
5630# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
5631# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
5632pxor %xmm11,%xmm10
5633
5634# qhasm: xmm10 &= xmm6
5635# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
5636# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
5637pand %xmm6,%xmm10
5638
5639# qhasm: xmm6 ^= xmm5
5640# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
5641# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
5642pxor %xmm5,%xmm6
5643
5644# qhasm: xmm6 &= xmm14
5645# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
5646# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
5647pand %xmm11,%xmm6
5648
5649# qhasm: xmm5 &= xmm15
5650# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
5651# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
5652pand %xmm13,%xmm5
5653
5654# qhasm: xmm6 ^= xmm5
5655# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
5656# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
5657pxor %xmm5,%xmm6
5658
5659# qhasm: xmm5 ^= xmm10
5660# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
5661# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
5662pxor %xmm10,%xmm5
5663
5664# qhasm: xmm12 ^= xmm0
5665# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
5666# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
5667pxor %xmm0,%xmm8
5668
5669# qhasm: xmm8 ^= xmm3
5670# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
5671# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
5672pxor %xmm3,%xmm9
5673
5674# qhasm: xmm15 ^= xmm13
5675# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5676# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5677pxor %xmm15,%xmm13
5678
5679# qhasm: xmm14 ^= xmm9
5680# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5681# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5682pxor %xmm12,%xmm11
5683
5684# qhasm: xmm11 = xmm15
5685# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5686# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5687movdqa %xmm13,%xmm10
5688
5689# qhasm: xmm11 ^= xmm14
5690# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5691# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5692pxor %xmm11,%xmm10
5693
5694# qhasm: xmm11 &= xmm12
5695# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5696# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5697pand %xmm8,%xmm10
5698
5699# qhasm: xmm12 ^= xmm8
5700# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5701# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5702pxor %xmm9,%xmm8
5703
5704# qhasm: xmm12 &= xmm14
5705# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5706# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5707pand %xmm11,%xmm8
5708
5709# qhasm: xmm8 &= xmm15
5710# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5711# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5712pand %xmm13,%xmm9
5713
5714# qhasm: xmm8 ^= xmm12
5715# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5716# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5717pxor %xmm8,%xmm9
5718
5719# qhasm: xmm12 ^= xmm11
5720# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5721# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5722pxor %xmm10,%xmm8
5723
5724# qhasm: xmm10 = xmm13
5725# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5726# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5727movdqa %xmm15,%xmm10
5728
5729# qhasm: xmm10 ^= xmm9
5730# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5731# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5732pxor %xmm12,%xmm10
5733
5734# qhasm: xmm10 &= xmm0
5735# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
5736# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
5737pand %xmm0,%xmm10
5738
5739# qhasm: xmm0 ^= xmm3
5740# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
5741# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
5742pxor %xmm3,%xmm0
5743
5744# qhasm: xmm0 &= xmm9
5745# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
5746# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
5747pand %xmm12,%xmm0
5748
5749# qhasm: xmm3 &= xmm13
5750# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
5751# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
5752pand %xmm15,%xmm3
5753
5754# qhasm: xmm0 ^= xmm3
5755# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
5756# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
5757pxor %xmm3,%xmm0
5758
5759# qhasm: xmm3 ^= xmm10
5760# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
5761# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
5762pxor %xmm10,%xmm3
5763
5764# qhasm: xmm6 ^= xmm12
5765# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
5766# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
5767pxor %xmm8,%xmm6
5768
5769# qhasm: xmm0 ^= xmm12
5770# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
5771# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
5772pxor %xmm8,%xmm0
5773
5774# qhasm: xmm5 ^= xmm8
5775# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
5776# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
5777pxor %xmm9,%xmm5
5778
5779# qhasm: xmm3 ^= xmm8
5780# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
5781# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
5782pxor %xmm9,%xmm3
5783
5784# qhasm: xmm12 = xmm7
5785# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
5786# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
5787movdqa %xmm7,%xmm8
5788
5789# qhasm: xmm8 = xmm1
5790# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
5791# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
5792movdqa %xmm1,%xmm9
5793
5794# qhasm: xmm12 ^= xmm4
5795# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
5796# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
5797pxor %xmm4,%xmm8
5798
5799# qhasm: xmm8 ^= xmm2
5800# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
5801# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
5802pxor %xmm2,%xmm9
5803
5804# qhasm: xmm11 = xmm15
5805# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5806# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5807movdqa %xmm13,%xmm10
5808
5809# qhasm: xmm11 ^= xmm14
5810# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5811# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5812pxor %xmm11,%xmm10
5813
5814# qhasm: xmm11 &= xmm12
5815# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
5816# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
5817pand %xmm8,%xmm10
5818
5819# qhasm: xmm12 ^= xmm8
5820# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
5821# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
5822pxor %xmm9,%xmm8
5823
5824# qhasm: xmm12 &= xmm14
5825# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
5826# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
5827pand %xmm11,%xmm8
5828
5829# qhasm: xmm8 &= xmm15
5830# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
5831# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
5832pand %xmm13,%xmm9
5833
5834# qhasm: xmm8 ^= xmm12
5835# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
5836# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
5837pxor %xmm8,%xmm9
5838
5839# qhasm: xmm12 ^= xmm11
5840# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
5841# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
5842pxor %xmm10,%xmm8
5843
5844# qhasm: xmm10 = xmm13
5845# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
5846# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
5847movdqa %xmm15,%xmm10
5848
5849# qhasm: xmm10 ^= xmm9
5850# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
5851# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
5852pxor %xmm12,%xmm10
5853
5854# qhasm: xmm10 &= xmm4
5855# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
5856# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
5857pand %xmm4,%xmm10
5858
5859# qhasm: xmm4 ^= xmm2
5860# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
5861# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
5862pxor %xmm2,%xmm4
5863
5864# qhasm: xmm4 &= xmm9
5865# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
5866# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
5867pand %xmm12,%xmm4
5868
5869# qhasm: xmm2 &= xmm13
5870# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
5871# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
5872pand %xmm15,%xmm2
5873
5874# qhasm: xmm4 ^= xmm2
5875# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
5876# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
5877pxor %xmm2,%xmm4
5878
5879# qhasm: xmm2 ^= xmm10
5880# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
5881# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
5882pxor %xmm10,%xmm2
5883
5884# qhasm: xmm15 ^= xmm13
5885# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
5886# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
5887pxor %xmm15,%xmm13
5888
5889# qhasm: xmm14 ^= xmm9
5890# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
5891# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
5892pxor %xmm12,%xmm11
5893
5894# qhasm: xmm11 = xmm15
5895# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
5896# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
5897movdqa %xmm13,%xmm10
5898
5899# qhasm: xmm11 ^= xmm14
5900# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
5901# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
5902pxor %xmm11,%xmm10
5903
5904# qhasm: xmm11 &= xmm7
5905# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
5906# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
5907pand %xmm7,%xmm10
5908
5909# qhasm: xmm7 ^= xmm1
5910# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
5911# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
5912pxor %xmm1,%xmm7
5913
5914# qhasm: xmm7 &= xmm14
5915# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
5916# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
5917pand %xmm11,%xmm7
5918
5919# qhasm: xmm1 &= xmm15
5920# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
5921# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
5922pand %xmm13,%xmm1
5923
5924# qhasm: xmm7 ^= xmm1
5925# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
5926# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
5927pxor %xmm1,%xmm7
5928
5929# qhasm: xmm1 ^= xmm11
5930# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
5931# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
5932pxor %xmm10,%xmm1
5933
5934# qhasm: xmm7 ^= xmm12
5935# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
5936# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
5937pxor %xmm8,%xmm7
5938
5939# qhasm: xmm4 ^= xmm12
5940# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
5941# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
5942pxor %xmm8,%xmm4
5943
5944# qhasm: xmm1 ^= xmm8
5945# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
5946# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
5947pxor %xmm9,%xmm1
5948
5949# qhasm: xmm2 ^= xmm8
5950# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
5951# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
5952pxor %xmm9,%xmm2
5953
5954# qhasm: xmm7 ^= xmm0
5955# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
5956# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
5957pxor %xmm0,%xmm7
5958
5959# qhasm: xmm1 ^= xmm6
5960# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
5961# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
5962pxor %xmm6,%xmm1
5963
5964# qhasm: xmm4 ^= xmm7
5965# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
5966# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
5967pxor %xmm7,%xmm4
5968
5969# qhasm: xmm6 ^= xmm0
5970# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
5971# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
5972pxor %xmm0,%xmm6
5973
5974# qhasm: xmm0 ^= xmm1
5975# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
5976# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
5977pxor %xmm1,%xmm0
5978
5979# qhasm: xmm1 ^= xmm5
5980# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
5981# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
5982pxor %xmm5,%xmm1
5983
5984# qhasm: xmm5 ^= xmm2
5985# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
5986# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
5987pxor %xmm2,%xmm5
5988
5989# qhasm: xmm4 ^= xmm5
5990# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
5991# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
5992pxor %xmm5,%xmm4
5993
5994# qhasm: xmm2 ^= xmm3
5995# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
5996# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
5997pxor %xmm3,%xmm2
5998
5999# qhasm: xmm3 ^= xmm5
6000# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
6001# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
6002pxor %xmm5,%xmm3
6003
6004# qhasm: xmm6 ^= xmm3
6005# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
6006# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
6007pxor %xmm3,%xmm6
6008
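# note (editorial, hedged): the pshufd $0x93 / $0x4E dword rotations and the xor
# chain that follow appear to implement the MixColumns-style linear mixing of the
# bit-sliced AES state, rotating rows within each 32-bit word group and
# accumulating the column mix by xor.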
6009# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
6010# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
6011# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
6012pshufd $0x93,%xmm0,%xmm8
6013
6014# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
6015# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
6016# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
6017pshufd $0x93,%xmm1,%xmm9
6018
6019# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
6020# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
6021# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
6022pshufd $0x93,%xmm4,%xmm10
6023
6024# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
6025# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
6026# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
6027pshufd $0x93,%xmm6,%xmm11
6028
6029# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
6030# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
6031# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
6032pshufd $0x93,%xmm3,%xmm12
6033
6034# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
6035# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
6036# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
6037pshufd $0x93,%xmm7,%xmm13
6038
6039# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
6040# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
6041# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
6042pshufd $0x93,%xmm2,%xmm14
6043
6044# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
6045# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
6046# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
6047pshufd $0x93,%xmm5,%xmm15
6048
6049# qhasm: xmm0 ^= xmm8
6050# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
6051# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
6052pxor %xmm8,%xmm0
6053
6054# qhasm: xmm1 ^= xmm9
6055# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
6056# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
6057pxor %xmm9,%xmm1
6058
6059# qhasm: xmm4 ^= xmm10
6060# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
6061# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
6062pxor %xmm10,%xmm4
6063
6064# qhasm: xmm6 ^= xmm11
6065# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
6066# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
6067pxor %xmm11,%xmm6
6068
6069# qhasm: xmm3 ^= xmm12
6070# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
6071# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
6072pxor %xmm12,%xmm3
6073
6074# qhasm: xmm7 ^= xmm13
6075# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
6076# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
6077pxor %xmm13,%xmm7
6078
6079# qhasm: xmm2 ^= xmm14
6080# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
6081# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
6082pxor %xmm14,%xmm2
6083
6084# qhasm: xmm5 ^= xmm15
6085# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
6086# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
6087pxor %xmm15,%xmm5
6088
6089# qhasm: xmm8 ^= xmm5
6090# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
6091# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
6092pxor %xmm5,%xmm8
6093
6094# qhasm: xmm9 ^= xmm0
6095# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
6096# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
6097pxor %xmm0,%xmm9
6098
6099# qhasm: xmm10 ^= xmm1
6100# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
6101# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
6102pxor %xmm1,%xmm10
6103
6104# qhasm: xmm9 ^= xmm5
6105# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
6106# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
6107pxor %xmm5,%xmm9
6108
6109# qhasm: xmm11 ^= xmm4
6110# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
6111# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
6112pxor %xmm4,%xmm11
6113
6114# qhasm: xmm12 ^= xmm6
6115# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
6116# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
6117pxor %xmm6,%xmm12
6118
6119# qhasm: xmm13 ^= xmm3
6120# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
6121# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
6122pxor %xmm3,%xmm13
6123
6124# qhasm: xmm11 ^= xmm5
6125# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
6126# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
6127pxor %xmm5,%xmm11
6128
6129# qhasm: xmm14 ^= xmm7
6130# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
6131# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
6132pxor %xmm7,%xmm14
6133
6134# qhasm: xmm15 ^= xmm2
6135# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
6136# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
6137pxor %xmm2,%xmm15
6138
6139# qhasm: xmm12 ^= xmm5
6140# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
6141# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
6142pxor %xmm5,%xmm12
6143
6144# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
6145# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
6146# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
6147pshufd $0x4E,%xmm0,%xmm0
6148
6149# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
6150# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
6151# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
6152pshufd $0x4E,%xmm1,%xmm1
6153
6154# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
6155# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
6156# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
6157pshufd $0x4E,%xmm4,%xmm4
6158
6159# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
6160# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
6161# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
6162pshufd $0x4E,%xmm6,%xmm6
6163
6164# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
6165# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
6166# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
6167pshufd $0x4E,%xmm3,%xmm3
6168
6169# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
6170# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
6171# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
6172pshufd $0x4E,%xmm7,%xmm7
6173
6174# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
6175# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
6176# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
6177pshufd $0x4E,%xmm2,%xmm2
6178
6179# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
6180# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
6181# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
6182pshufd $0x4E,%xmm5,%xmm5
6183
6184# qhasm: xmm8 ^= xmm0
6185# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
6186# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
6187pxor %xmm0,%xmm8
6188
6189# qhasm: xmm9 ^= xmm1
6190# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
6191# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
6192pxor %xmm1,%xmm9
6193
6194# qhasm: xmm10 ^= xmm4
6195# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
6196# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
6197pxor %xmm4,%xmm10
6198
6199# qhasm: xmm11 ^= xmm6
6200# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
6201# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
6202pxor %xmm6,%xmm11
6203
6204# qhasm: xmm12 ^= xmm3
6205# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
6206# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
6207pxor %xmm3,%xmm12
6208
6209# qhasm: xmm13 ^= xmm7
6210# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
6211# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
6212pxor %xmm7,%xmm13
6213
6214# qhasm: xmm14 ^= xmm2
6215# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
6216# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
6217pxor %xmm2,%xmm14
6218
6219# qhasm: xmm15 ^= xmm5
6220# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
6221# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
6222pxor %xmm5,%xmm15
6223
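# note (editorial, hedged): the eight pxor loads from 640(%r8) through 752(%r8)
# below xor in a 128-byte slice of the table that c points to, which is likely
# the next bit-sliced round key; each following pshufb by the SR constant is
# likely the ShiftRows byte permutation applied to that bit plane.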
6224# qhasm: xmm8 ^= *(int128 *)(c + 640)
6225# asm 1: pxor 640(<c=int64#5),<xmm8=int6464#9
6226# asm 2: pxor 640(<c=%r8),<xmm8=%xmm8
6227pxor 640(%r8),%xmm8
6228
6229# qhasm: shuffle bytes of xmm8 by SR
6230# asm 1: pshufb SR,<xmm8=int6464#9
6231# asm 2: pshufb SR,<xmm8=%xmm8
6232pshufb SR,%xmm8
6233
6234# qhasm: xmm9 ^= *(int128 *)(c + 656)
6235# asm 1: pxor 656(<c=int64#5),<xmm9=int6464#10
6236# asm 2: pxor 656(<c=%r8),<xmm9=%xmm9
6237pxor 656(%r8),%xmm9
6238
6239# qhasm: shuffle bytes of xmm9 by SR
6240# asm 1: pshufb SR,<xmm9=int6464#10
6241# asm 2: pshufb SR,<xmm9=%xmm9
6242pshufb SR,%xmm9
6243
6244# qhasm: xmm10 ^= *(int128 *)(c + 672)
6245# asm 1: pxor 672(<c=int64#5),<xmm10=int6464#11
6246# asm 2: pxor 672(<c=%r8),<xmm10=%xmm10
6247pxor 672(%r8),%xmm10
6248
6249# qhasm: shuffle bytes of xmm10 by SR
6250# asm 1: pshufb SR,<xmm10=int6464#11
6251# asm 2: pshufb SR,<xmm10=%xmm10
6252pshufb SR,%xmm10
6253
6254# qhasm: xmm11 ^= *(int128 *)(c + 688)
6255# asm 1: pxor 688(<c=int64#5),<xmm11=int6464#12
6256# asm 2: pxor 688(<c=%r8),<xmm11=%xmm11
6257pxor 688(%r8),%xmm11
6258
6259# qhasm: shuffle bytes of xmm11 by SR
6260# asm 1: pshufb SR,<xmm11=int6464#12
6261# asm 2: pshufb SR,<xmm11=%xmm11
6262pshufb SR,%xmm11
6263
6264# qhasm: xmm12 ^= *(int128 *)(c + 704)
6265# asm 1: pxor 704(<c=int64#5),<xmm12=int6464#13
6266# asm 2: pxor 704(<c=%r8),<xmm12=%xmm12
6267pxor 704(%r8),%xmm12
6268
6269# qhasm: shuffle bytes of xmm12 by SR
6270# asm 1: pshufb SR,<xmm12=int6464#13
6271# asm 2: pshufb SR,<xmm12=%xmm12
6272pshufb SR,%xmm12
6273
6274# qhasm: xmm13 ^= *(int128 *)(c + 720)
6275# asm 1: pxor 720(<c=int64#5),<xmm13=int6464#14
6276# asm 2: pxor 720(<c=%r8),<xmm13=%xmm13
6277pxor 720(%r8),%xmm13
6278
6279# qhasm: shuffle bytes of xmm13 by SR
6280# asm 1: pshufb SR,<xmm13=int6464#14
6281# asm 2: pshufb SR,<xmm13=%xmm13
6282pshufb SR,%xmm13
6283
6284# qhasm: xmm14 ^= *(int128 *)(c + 736)
6285# asm 1: pxor 736(<c=int64#5),<xmm14=int6464#15
6286# asm 2: pxor 736(<c=%r8),<xmm14=%xmm14
6287pxor 736(%r8),%xmm14
6288
6289# qhasm: shuffle bytes of xmm14 by SR
6290# asm 1: pshufb SR,<xmm14=int6464#15
6291# asm 2: pshufb SR,<xmm14=%xmm14
6292pshufb SR,%xmm14
6293
6294# qhasm: xmm15 ^= *(int128 *)(c + 752)
6295# asm 1: pxor 752(<c=int64#5),<xmm15=int6464#16
6296# asm 2: pxor 752(<c=%r8),<xmm15=%xmm15
6297pxor 752(%r8),%xmm15
6298
6299# qhasm: shuffle bytes of xmm15 by SR
6300# asm 1: pshufb SR,<xmm15=int6464#16
6301# asm 2: pshufb SR,<xmm15=%xmm15
6302pshufb SR,%xmm15
6303
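# note (editorial, hedged): the long movdqa/pxor/pand/por sequence that follows
# appears to be the bit-sliced SubBytes circuit, i.e. a Boolean-gate evaluation
# of the AES S-box applied to all state bytes in parallel, one bit plane per
# xmm register.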
6304# qhasm: xmm13 ^= xmm14
6305# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
6306# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
6307pxor %xmm14,%xmm13
6308
6309# qhasm: xmm10 ^= xmm9
6310# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
6311# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
6312pxor %xmm9,%xmm10
6313
6314# qhasm: xmm13 ^= xmm8
6315# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
6316# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
6317pxor %xmm8,%xmm13
6318
6319# qhasm: xmm14 ^= xmm10
6320# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
6321# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
6322pxor %xmm10,%xmm14
6323
6324# qhasm: xmm11 ^= xmm8
6325# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
6326# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
6327pxor %xmm8,%xmm11
6328
6329# qhasm: xmm14 ^= xmm11
6330# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
6331# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
6332pxor %xmm11,%xmm14
6333
6334# qhasm: xmm11 ^= xmm15
6335# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
6336# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
6337pxor %xmm15,%xmm11
6338
6339# qhasm: xmm11 ^= xmm12
6340# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
6341# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
6342pxor %xmm12,%xmm11
6343
6344# qhasm: xmm15 ^= xmm13
6345# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
6346# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
6347pxor %xmm13,%xmm15
6348
6349# qhasm: xmm11 ^= xmm9
6350# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
6351# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
6352pxor %xmm9,%xmm11
6353
6354# qhasm: xmm12 ^= xmm13
6355# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
6356# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
6357pxor %xmm13,%xmm12
6358
6359# qhasm: xmm10 ^= xmm15
6360# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
6361# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
6362pxor %xmm15,%xmm10
6363
6364# qhasm: xmm9 ^= xmm13
6365# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
6366# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
6367pxor %xmm13,%xmm9
6368
6369# qhasm: xmm3 = xmm15
6370# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
6371# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
6372movdqa %xmm15,%xmm0
6373
6374# qhasm: xmm2 = xmm9
6375# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
6376# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
6377movdqa %xmm9,%xmm1
6378
6379# qhasm: xmm1 = xmm13
6380# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
6381# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
6382movdqa %xmm13,%xmm2
6383
6384# qhasm: xmm5 = xmm10
6385# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
6386# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
6387movdqa %xmm10,%xmm3
6388
6389# qhasm: xmm4 = xmm14
6390# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
6391# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
6392movdqa %xmm14,%xmm4
6393
6394# qhasm: xmm3 ^= xmm12
6395# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
6396# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
6397pxor %xmm12,%xmm0
6398
6399# qhasm: xmm2 ^= xmm10
6400# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
6401# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
6402pxor %xmm10,%xmm1
6403
6404# qhasm: xmm1 ^= xmm11
6405# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
6406# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
6407pxor %xmm11,%xmm2
6408
6409# qhasm: xmm5 ^= xmm12
6410# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
6411# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
6412pxor %xmm12,%xmm3
6413
6414# qhasm: xmm4 ^= xmm8
6415# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
6416# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
6417pxor %xmm8,%xmm4
6418
6419# qhasm: xmm6 = xmm3
6420# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
6421# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
6422movdqa %xmm0,%xmm5
6423
6424# qhasm: xmm0 = xmm2
6425# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
6426# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
6427movdqa %xmm1,%xmm6
6428
6429# qhasm: xmm7 = xmm3
6430# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
6431# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
6432movdqa %xmm0,%xmm7
6433
6434# qhasm: xmm2 |= xmm1
6435# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
6436# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
6437por %xmm2,%xmm1
6438
6439# qhasm: xmm3 |= xmm4
6440# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
6441# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
6442por %xmm4,%xmm0
6443
6444# qhasm: xmm7 ^= xmm0
6445# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
6446# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
6447pxor %xmm6,%xmm7
6448
6449# qhasm: xmm6 &= xmm4
6450# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
6451# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
6452pand %xmm4,%xmm5
6453
6454# qhasm: xmm0 &= xmm1
6455# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
6456# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
6457pand %xmm2,%xmm6
6458
6459# qhasm: xmm4 ^= xmm1
6460# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
6461# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
6462pxor %xmm2,%xmm4
6463
6464# qhasm: xmm7 &= xmm4
6465# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
6466# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
6467pand %xmm4,%xmm7
6468
6469# qhasm: xmm4 = xmm11
6470# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
6471# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
6472movdqa %xmm11,%xmm2
6473
6474# qhasm: xmm4 ^= xmm8
6475# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
6476# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
6477pxor %xmm8,%xmm2
6478
6479# qhasm: xmm5 &= xmm4
6480# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
6481# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
6482pand %xmm2,%xmm3
6483
6484# qhasm: xmm3 ^= xmm5
6485# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
6486# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
6487pxor %xmm3,%xmm0
6488
6489# qhasm: xmm2 ^= xmm5
6490# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
6491# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
6492pxor %xmm3,%xmm1
6493
6494# qhasm: xmm5 = xmm15
6495# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
6496# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
6497movdqa %xmm15,%xmm2
6498
6499# qhasm: xmm5 ^= xmm9
6500# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
6501# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
6502pxor %xmm9,%xmm2
6503
6504# qhasm: xmm4 = xmm13
6505# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
6506# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
6507movdqa %xmm13,%xmm3
6508
6509# qhasm: xmm1 = xmm5
6510# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
6511# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
6512movdqa %xmm2,%xmm4
6513
6514# qhasm: xmm4 ^= xmm14
6515# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
6516# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
6517pxor %xmm14,%xmm3
6518
6519# qhasm: xmm1 |= xmm4
6520# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
6521# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
6522por %xmm3,%xmm4
6523
6524# qhasm: xmm5 &= xmm4
6525# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
6526# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
6527pand %xmm3,%xmm2
6528
6529# qhasm: xmm0 ^= xmm5
6530# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
6531# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
6532pxor %xmm2,%xmm6
6533
6534# qhasm: xmm3 ^= xmm7
6535# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
6536# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
6537pxor %xmm7,%xmm0
6538
6539# qhasm: xmm2 ^= xmm6
6540# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
6541# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
6542pxor %xmm5,%xmm1
6543
6544# qhasm: xmm1 ^= xmm7
6545# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
6546# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
6547pxor %xmm7,%xmm4
6548
6549# qhasm: xmm0 ^= xmm6
6550# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
6551# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
6552pxor %xmm5,%xmm6
6553
6554# qhasm: xmm1 ^= xmm6
6555# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
6556# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
6557pxor %xmm5,%xmm4
6558
6559# qhasm: xmm4 = xmm10
6560# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
6561# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
6562movdqa %xmm10,%xmm2
6563
6564# qhasm: xmm5 = xmm12
6565# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
6566# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
6567movdqa %xmm12,%xmm3
6568
6569# qhasm: xmm6 = xmm9
6570# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
6571# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
6572movdqa %xmm9,%xmm5
6573
6574# qhasm: xmm7 = xmm15
6575# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
6576# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
6577movdqa %xmm15,%xmm7
6578
6579# qhasm: xmm4 &= xmm11
6580# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
6581# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
6582pand %xmm11,%xmm2
6583
6584# qhasm: xmm5 &= xmm8
6585# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
6586# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
6587pand %xmm8,%xmm3
6588
6589# qhasm: xmm6 &= xmm13
6590# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
6591# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
6592pand %xmm13,%xmm5
6593
6594# qhasm: xmm7 |= xmm14
6595# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
6596# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
6597por %xmm14,%xmm7
6598
6599# qhasm: xmm3 ^= xmm4
6600# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
6601# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
6602pxor %xmm2,%xmm0
6603
6604# qhasm: xmm2 ^= xmm5
6605# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
6606# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
6607pxor %xmm3,%xmm1
6608
6609# qhasm: xmm1 ^= xmm6
6610# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
6611# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
6612pxor %xmm5,%xmm4
6613
6614# qhasm: xmm0 ^= xmm7
6615# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
6616# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
6617pxor %xmm7,%xmm6
6618
6619# qhasm: xmm4 = xmm3
6620# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
6621# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
6622movdqa %xmm0,%xmm2
6623
6624# qhasm: xmm4 ^= xmm2
6625# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
6626# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
6627pxor %xmm1,%xmm2
6628
6629# qhasm: xmm3 &= xmm1
6630# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
6631# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
6632pand %xmm4,%xmm0
6633
6634# qhasm: xmm6 = xmm0
6635# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
6636# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
6637movdqa %xmm6,%xmm3
6638
6639# qhasm: xmm6 ^= xmm3
6640# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
6641# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
6642pxor %xmm0,%xmm3
6643
6644# qhasm: xmm7 = xmm4
6645# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
6646# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
6647movdqa %xmm2,%xmm5
6648
6649# qhasm: xmm7 &= xmm6
6650# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
6651# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
6652pand %xmm3,%xmm5
6653
6654# qhasm: xmm7 ^= xmm2
6655# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
6656# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
6657pxor %xmm1,%xmm5
6658
6659# qhasm: xmm5 = xmm1
6660# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
6661# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
6662movdqa %xmm4,%xmm7
6663
6664# qhasm: xmm5 ^= xmm0
6665# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
6666# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
6667pxor %xmm6,%xmm7
6668
6669# qhasm: xmm3 ^= xmm2
6670# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
6671# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
6672pxor %xmm1,%xmm0
6673
6674# qhasm: xmm5 &= xmm3
6675# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
6676# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
6677pand %xmm0,%xmm7
6678
6679# qhasm: xmm5 ^= xmm0
6680# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
6681# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
6682pxor %xmm6,%xmm7
6683
6684# qhasm: xmm1 ^= xmm5
6685# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
6686# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
6687pxor %xmm7,%xmm4
6688
6689# qhasm: xmm2 = xmm6
6690# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
6691# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
6692movdqa %xmm3,%xmm0
6693
6694# qhasm: xmm2 ^= xmm5
6695# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
6696# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
6697pxor %xmm7,%xmm0
6698
6699# qhasm: xmm2 &= xmm0
6700# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
6701# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
6702pand %xmm6,%xmm0
6703
6704# qhasm: xmm1 ^= xmm2
6705# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
6706# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
6707pxor %xmm0,%xmm4
6708
6709# qhasm: xmm6 ^= xmm2
6710# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
6711# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
6712pxor %xmm0,%xmm3
6713
6714# qhasm: xmm6 &= xmm7
6715# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
6716# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
6717pand %xmm5,%xmm3
6718
6719# qhasm: xmm6 ^= xmm4
6720# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
6721# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
6722pxor %xmm2,%xmm3
6723
6724# qhasm: xmm4 = xmm14
6725# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
6726# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
6727movdqa %xmm14,%xmm0
6728
6729# qhasm: xmm0 = xmm13
6730# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
6731# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
6732movdqa %xmm13,%xmm1
6733
6734# qhasm: xmm2 = xmm7
6735# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
6736# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
6737movdqa %xmm5,%xmm2
6738
6739# qhasm: xmm2 ^= xmm6
6740# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
6741# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
6742pxor %xmm3,%xmm2
6743
6744# qhasm: xmm2 &= xmm14
6745# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
6746# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
6747pand %xmm14,%xmm2
6748
6749# qhasm: xmm14 ^= xmm13
6750# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
6751# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
6752pxor %xmm13,%xmm14
6753
6754# qhasm: xmm14 &= xmm6
6755# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
6756# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
6757pand %xmm3,%xmm14
6758
6759# qhasm: xmm13 &= xmm7
6760# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
6761# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
6762pand %xmm5,%xmm13
6763
6764# qhasm: xmm14 ^= xmm13
6765# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
6766# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
6767pxor %xmm13,%xmm14
6768
6769# qhasm: xmm13 ^= xmm2
6770# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
6771# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
6772pxor %xmm2,%xmm13
6773
6774# qhasm: xmm4 ^= xmm8
6775# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
6776# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
6777pxor %xmm8,%xmm0
6778
6779# qhasm: xmm0 ^= xmm11
6780# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
6781# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
6782pxor %xmm11,%xmm1
6783
6784# qhasm: xmm7 ^= xmm5
6785# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
6786# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
6787pxor %xmm7,%xmm5
6788
6789# qhasm: xmm6 ^= xmm1
6790# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
6791# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
6792pxor %xmm4,%xmm3
6793
6794# qhasm: xmm3 = xmm7
6795# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
6796# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
6797movdqa %xmm5,%xmm2
6798
6799# qhasm: xmm3 ^= xmm6
6800# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
6801# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
6802pxor %xmm3,%xmm2
6803
6804# qhasm: xmm3 &= xmm4
6805# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
6806# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
6807pand %xmm0,%xmm2
6808
6809# qhasm: xmm4 ^= xmm0
6810# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
6811# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
6812pxor %xmm1,%xmm0
6813
6814# qhasm: xmm4 &= xmm6
6815# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
6816# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
6817pand %xmm3,%xmm0
6818
6819# qhasm: xmm0 &= xmm7
6820# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
6821# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
6822pand %xmm5,%xmm1
6823
6824# qhasm: xmm0 ^= xmm4
6825# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
6826# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
6827pxor %xmm0,%xmm1
6828
6829# qhasm: xmm4 ^= xmm3
6830# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
6831# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
6832pxor %xmm2,%xmm0
6833
6834# qhasm: xmm2 = xmm5
6835# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
6836# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
6837movdqa %xmm7,%xmm2
6838
6839# qhasm: xmm2 ^= xmm1
6840# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
6841# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
6842pxor %xmm4,%xmm2
6843
6844# qhasm: xmm2 &= xmm8
6845# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
6846# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
6847pand %xmm8,%xmm2
6848
6849# qhasm: xmm8 ^= xmm11
6850# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
6851# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
6852pxor %xmm11,%xmm8
6853
6854# qhasm: xmm8 &= xmm1
6855# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
6856# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
6857pand %xmm4,%xmm8
6858
6859# qhasm: xmm11 &= xmm5
6860# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
6861# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
6862pand %xmm7,%xmm11
6863
6864# qhasm: xmm8 ^= xmm11
6865# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
6866# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
6867pxor %xmm11,%xmm8
6868
6869# qhasm: xmm11 ^= xmm2
6870# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
6871# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
6872pxor %xmm2,%xmm11
6873
6874# qhasm: xmm14 ^= xmm4
6875# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
6876# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
6877pxor %xmm0,%xmm14
6878
6879# qhasm: xmm8 ^= xmm4
6880# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
6881# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
6882pxor %xmm0,%xmm8
6883
6884# qhasm: xmm13 ^= xmm0
6885# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
6886# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
6887pxor %xmm1,%xmm13
6888
6889# qhasm: xmm11 ^= xmm0
6890# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
6891# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
6892pxor %xmm1,%xmm11
6893
6894# qhasm: xmm4 = xmm15
6895# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
6896# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
6897movdqa %xmm15,%xmm0
6898
6899# qhasm: xmm0 = xmm9
6900# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
6901# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
6902movdqa %xmm9,%xmm1
6903
6904# qhasm: xmm4 ^= xmm12
6905# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
6906# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
6907pxor %xmm12,%xmm0
6908
6909# qhasm: xmm0 ^= xmm10
6910# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
6911# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
6912pxor %xmm10,%xmm1
6913
6914# qhasm: xmm3 = xmm7
6915# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
6916# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
6917movdqa %xmm5,%xmm2
6918
6919# qhasm: xmm3 ^= xmm6
6920# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
6921# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
6922pxor %xmm3,%xmm2
6923
6924# qhasm: xmm3 &= xmm4
6925# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
6926# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
6927pand %xmm0,%xmm2
6928
6929# qhasm: xmm4 ^= xmm0
6930# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
6931# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
6932pxor %xmm1,%xmm0
6933
6934# qhasm: xmm4 &= xmm6
6935# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
6936# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
6937pand %xmm3,%xmm0
6938
6939# qhasm: xmm0 &= xmm7
6940# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
6941# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
6942pand %xmm5,%xmm1
6943
6944# qhasm: xmm0 ^= xmm4
6945# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
6946# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
6947pxor %xmm0,%xmm1
6948
6949# qhasm: xmm4 ^= xmm3
6950# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
6951# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
6952pxor %xmm2,%xmm0
6953
6954# qhasm: xmm2 = xmm5
6955# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
6956# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
6957movdqa %xmm7,%xmm2
6958
6959# qhasm: xmm2 ^= xmm1
6960# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
6961# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
6962pxor %xmm4,%xmm2
6963
6964# qhasm: xmm2 &= xmm12
6965# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
6966# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
6967pand %xmm12,%xmm2
6968
6969# qhasm: xmm12 ^= xmm10
6970# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
6971# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
6972pxor %xmm10,%xmm12
6973
6974# qhasm: xmm12 &= xmm1
6975# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
6976# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
6977pand %xmm4,%xmm12
6978
6979# qhasm: xmm10 &= xmm5
6980# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
6981# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
6982pand %xmm7,%xmm10
6983
6984# qhasm: xmm12 ^= xmm10
6985# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
6986# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
6987pxor %xmm10,%xmm12
6988
6989# qhasm: xmm10 ^= xmm2
6990# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
6991# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
6992pxor %xmm2,%xmm10
6993
6994# qhasm: xmm7 ^= xmm5
6995# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
6996# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
6997pxor %xmm7,%xmm5
6998
6999# qhasm: xmm6 ^= xmm1
7000# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
7001# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
7002pxor %xmm4,%xmm3
7003
7004# qhasm: xmm3 = xmm7
7005# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
7006# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
7007movdqa %xmm5,%xmm2
7008
7009# qhasm: xmm3 ^= xmm6
7010# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
7011# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
7012pxor %xmm3,%xmm2
7013
7014# qhasm: xmm3 &= xmm15
7015# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
7016# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
7017pand %xmm15,%xmm2
7018
7019# qhasm: xmm15 ^= xmm9
7020# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
7021# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
7022pxor %xmm9,%xmm15
7023
7024# qhasm: xmm15 &= xmm6
7025# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
7026# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
7027pand %xmm3,%xmm15
7028
7029# qhasm: xmm9 &= xmm7
7030# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
7031# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
7032pand %xmm5,%xmm9
7033
7034# qhasm: xmm15 ^= xmm9
7035# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
7036# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
7037pxor %xmm9,%xmm15
7038
7039# qhasm: xmm9 ^= xmm3
7040# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
7041# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
7042pxor %xmm2,%xmm9
7043
7044# qhasm: xmm15 ^= xmm4
7045# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
7046# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
7047pxor %xmm0,%xmm15
7048
7049# qhasm: xmm12 ^= xmm4
7050# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
7051# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
7052pxor %xmm0,%xmm12
7053
7054# qhasm: xmm9 ^= xmm0
7055# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
7056# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
7057pxor %xmm1,%xmm9
7058
7059# qhasm: xmm10 ^= xmm0
7060# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
7061# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
7062pxor %xmm1,%xmm10
7063
7064# qhasm: xmm15 ^= xmm8
7065# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
7066# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
7067pxor %xmm8,%xmm15
7068
7069# qhasm: xmm9 ^= xmm14
7070# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
7071# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
7072pxor %xmm14,%xmm9
7073
7074# qhasm: xmm12 ^= xmm15
7075# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
7076# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
7077pxor %xmm15,%xmm12
7078
7079# qhasm: xmm14 ^= xmm8
7080# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
7081# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
7082pxor %xmm8,%xmm14
7083
7084# qhasm: xmm8 ^= xmm9
7085# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
7086# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
7087pxor %xmm9,%xmm8
7088
7089# qhasm: xmm9 ^= xmm13
7090# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
7091# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
7092pxor %xmm13,%xmm9
7093
7094# qhasm: xmm13 ^= xmm10
7095# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
7096# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
7097pxor %xmm10,%xmm13
7098
7099# qhasm: xmm12 ^= xmm13
7100# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
7101# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
7102pxor %xmm13,%xmm12
7103
7104# qhasm: xmm10 ^= xmm11
7105# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
7106# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
7107pxor %xmm11,%xmm10
7108
7109# qhasm: xmm11 ^= xmm13
7110# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
7111# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
7112pxor %xmm13,%xmm11
7113
7114# qhasm: xmm14 ^= xmm11
7115# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
7116# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
7117pxor %xmm11,%xmm14
7118
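# note (editorial, hedged): another mixing pass appears to start here, using the
# same pshufd $0x93 / $0x4E rotate-and-xor pattern as the block above, applied to
# the bit planes produced by the preceding S-box circuit.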
7119# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
7120# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
7121# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
7122pshufd $0x93,%xmm8,%xmm0
7123
7124# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
7125# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
7126# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
7127pshufd $0x93,%xmm9,%xmm1
7128
7129# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
7130# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
7131# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
7132pshufd $0x93,%xmm12,%xmm2
7133
7134# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
7135# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
7136# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
7137pshufd $0x93,%xmm14,%xmm3
7138
7139# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
7140# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
7141# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
7142pshufd $0x93,%xmm11,%xmm4
7143
7144# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
7145# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
7146# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
7147pshufd $0x93,%xmm15,%xmm5
7148
7149# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
7150# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
7151# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
7152pshufd $0x93,%xmm10,%xmm6
7153
7154# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
7155# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
7156# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
7157pshufd $0x93,%xmm13,%xmm7
7158
7159# qhasm: xmm8 ^= xmm0
7160# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
7161# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
7162pxor %xmm0,%xmm8
7163
7164# qhasm: xmm9 ^= xmm1
7165# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
7166# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
7167pxor %xmm1,%xmm9
7168
7169# qhasm: xmm12 ^= xmm2
7170# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
7171# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
7172pxor %xmm2,%xmm12
7173
7174# qhasm: xmm14 ^= xmm3
7175# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
7176# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
7177pxor %xmm3,%xmm14
7178
7179# qhasm: xmm11 ^= xmm4
7180# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
7181# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
7182pxor %xmm4,%xmm11
7183
7184# qhasm: xmm15 ^= xmm5
7185# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
7186# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
7187pxor %xmm5,%xmm15
7188
7189# qhasm: xmm10 ^= xmm6
7190# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
7191# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
7192pxor %xmm6,%xmm10
7193
7194# qhasm: xmm13 ^= xmm7
7195# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
7196# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
7197pxor %xmm7,%xmm13
7198
7199# qhasm: xmm0 ^= xmm13
7200# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
7201# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
7202pxor %xmm13,%xmm0
7203
7204# qhasm: xmm1 ^= xmm8
7205# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
7206# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
7207pxor %xmm8,%xmm1
7208
7209# qhasm: xmm2 ^= xmm9
7210# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
7211# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
7212pxor %xmm9,%xmm2
7213
7214# qhasm: xmm1 ^= xmm13
7215# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
7216# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
7217pxor %xmm13,%xmm1
7218
7219# qhasm: xmm3 ^= xmm12
7220# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
7221# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
7222pxor %xmm12,%xmm3
7223
7224# qhasm: xmm4 ^= xmm14
7225# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
7226# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
7227pxor %xmm14,%xmm4
7228
7229# qhasm: xmm5 ^= xmm11
7230# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
7231# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
7232pxor %xmm11,%xmm5
7233
7234# qhasm: xmm3 ^= xmm13
7235# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
7236# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
7237pxor %xmm13,%xmm3
7238
7239# qhasm: xmm6 ^= xmm15
7240# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
7241# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
7242pxor %xmm15,%xmm6
7243
7244# qhasm: xmm7 ^= xmm10
7245# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
7246# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
7247pxor %xmm10,%xmm7
7248
7249# qhasm: xmm4 ^= xmm13
7250# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
7251# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
7252pxor %xmm13,%xmm4
7253
7254# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
7255# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
7256# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
7257pshufd $0x4E,%xmm8,%xmm8
7258
7259# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
7260# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
7261# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
7262pshufd $0x4E,%xmm9,%xmm9
7263
7264# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
7265# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
7266# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
7267pshufd $0x4E,%xmm12,%xmm12
7268
7269# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
7270# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
7271# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
7272pshufd $0x4E,%xmm14,%xmm14
7273
7274# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
7275# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
7276# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
7277pshufd $0x4E,%xmm11,%xmm11
7278
7279# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
7280# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
7281# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
7282pshufd $0x4E,%xmm15,%xmm15
7283
7284# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
7285# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
7286# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
7287pshufd $0x4E,%xmm10,%xmm10
7288
7289# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
7290# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
7291# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
7292pshufd $0x4E,%xmm13,%xmm13
7293
7294# qhasm: xmm0 ^= xmm8
7295# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
7296# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
7297pxor %xmm8,%xmm0
7298
7299# qhasm: xmm1 ^= xmm9
7300# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
7301# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
7302pxor %xmm9,%xmm1
7303
7304# qhasm: xmm2 ^= xmm12
7305# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
7306# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
7307pxor %xmm12,%xmm2
7308
7309# qhasm: xmm3 ^= xmm14
7310# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
7311# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
7312pxor %xmm14,%xmm3
7313
7314# qhasm: xmm4 ^= xmm11
7315# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
7316# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
7317pxor %xmm11,%xmm4
7318
7319# qhasm: xmm5 ^= xmm15
7320# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
7321# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
7322pxor %xmm15,%xmm5
7323
7324# qhasm: xmm6 ^= xmm10
7325# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
7326# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
7327pxor %xmm10,%xmm6
7328
7329# qhasm: xmm7 ^= xmm13
7330# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
7331# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
7332pxor %xmm13,%xmm7
7333
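# note (editorial, hedged): round-key addition again, this time xoring the
# 128-byte slice at offsets 768..880 from c (likely the following bit-sliced
# round key), with each plane then permuted by pshufb SR as before.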
7334# qhasm: xmm0 ^= *(int128 *)(c + 768)
7335# asm 1: pxor 768(<c=int64#5),<xmm0=int6464#1
7336# asm 2: pxor 768(<c=%r8),<xmm0=%xmm0
7337pxor 768(%r8),%xmm0
7338
7339# qhasm: shuffle bytes of xmm0 by SR
7340# asm 1: pshufb SR,<xmm0=int6464#1
7341# asm 2: pshufb SR,<xmm0=%xmm0
7342pshufb SR,%xmm0
7343
7344# qhasm: xmm1 ^= *(int128 *)(c + 784)
7345# asm 1: pxor 784(<c=int64#5),<xmm1=int6464#2
7346# asm 2: pxor 784(<c=%r8),<xmm1=%xmm1
7347pxor 784(%r8),%xmm1
7348
7349# qhasm: shuffle bytes of xmm1 by SR
7350# asm 1: pshufb SR,<xmm1=int6464#2
7351# asm 2: pshufb SR,<xmm1=%xmm1
7352pshufb SR,%xmm1
7353
7354# qhasm: xmm2 ^= *(int128 *)(c + 800)
7355# asm 1: pxor 800(<c=int64#5),<xmm2=int6464#3
7356# asm 2: pxor 800(<c=%r8),<xmm2=%xmm2
7357pxor 800(%r8),%xmm2
7358
7359# qhasm: shuffle bytes of xmm2 by SR
7360# asm 1: pshufb SR,<xmm2=int6464#3
7361# asm 2: pshufb SR,<xmm2=%xmm2
7362pshufb SR,%xmm2
7363
7364# qhasm: xmm3 ^= *(int128 *)(c + 816)
7365# asm 1: pxor 816(<c=int64#5),<xmm3=int6464#4
7366# asm 2: pxor 816(<c=%r8),<xmm3=%xmm3
7367pxor 816(%r8),%xmm3
7368
7369# qhasm: shuffle bytes of xmm3 by SR
7370# asm 1: pshufb SR,<xmm3=int6464#4
7371# asm 2: pshufb SR,<xmm3=%xmm3
7372pshufb SR,%xmm3
7373
7374# qhasm: xmm4 ^= *(int128 *)(c + 832)
7375# asm 1: pxor 832(<c=int64#5),<xmm4=int6464#5
7376# asm 2: pxor 832(<c=%r8),<xmm4=%xmm4
7377pxor 832(%r8),%xmm4
7378
7379# qhasm: shuffle bytes of xmm4 by SR
7380# asm 1: pshufb SR,<xmm4=int6464#5
7381# asm 2: pshufb SR,<xmm4=%xmm4
7382pshufb SR,%xmm4
7383
7384# qhasm: xmm5 ^= *(int128 *)(c + 848)
7385# asm 1: pxor 848(<c=int64#5),<xmm5=int6464#6
7386# asm 2: pxor 848(<c=%r8),<xmm5=%xmm5
7387pxor 848(%r8),%xmm5
7388
7389# qhasm: shuffle bytes of xmm5 by SR
7390# asm 1: pshufb SR,<xmm5=int6464#6
7391# asm 2: pshufb SR,<xmm5=%xmm5
7392pshufb SR,%xmm5
7393
7394# qhasm: xmm6 ^= *(int128 *)(c + 864)
7395# asm 1: pxor 864(<c=int64#5),<xmm6=int6464#7
7396# asm 2: pxor 864(<c=%r8),<xmm6=%xmm6
7397pxor 864(%r8),%xmm6
7398
7399# qhasm: shuffle bytes of xmm6 by SR
7400# asm 1: pshufb SR,<xmm6=int6464#7
7401# asm 2: pshufb SR,<xmm6=%xmm6
7402pshufb SR,%xmm6
7403
7404# qhasm: xmm7 ^= *(int128 *)(c + 880)
7405# asm 1: pxor 880(<c=int64#5),<xmm7=int6464#8
7406# asm 2: pxor 880(<c=%r8),<xmm7=%xmm7
7407pxor 880(%r8),%xmm7
7408
7409# qhasm: shuffle bytes of xmm7 by SR
7410# asm 1: pshufb SR,<xmm7=int6464#8
7411# asm 2: pshufb SR,<xmm7=%xmm7
7412pshufb SR,%xmm7
7413
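# note (editorial, hedged): the xors below appear to begin the next bit-sliced
# SubBytes evaluation (the input linear transform of the S-box circuit),
# mirroring the structure of the earlier round.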
7414# qhasm: xmm5 ^= xmm6
7415# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
7416# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
7417pxor %xmm6,%xmm5
7418
7419# qhasm: xmm2 ^= xmm1
7420# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
7421# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
7422pxor %xmm1,%xmm2
7423
7424# qhasm: xmm5 ^= xmm0
7425# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
7426# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
7427pxor %xmm0,%xmm5
7428
7429# qhasm: xmm6 ^= xmm2
7430# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
7431# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
7432pxor %xmm2,%xmm6
7433
7434# qhasm: xmm3 ^= xmm0
7435# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
7436# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
7437pxor %xmm0,%xmm3
7438
7439# qhasm: xmm6 ^= xmm3
7440# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
7441# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
7442pxor %xmm3,%xmm6
7443
7444# qhasm: xmm3 ^= xmm7
7445# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
7446# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
7447pxor %xmm7,%xmm3
7448
7449# qhasm: xmm3 ^= xmm4
7450# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
7451# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
7452pxor %xmm4,%xmm3
7453
7454# qhasm: xmm7 ^= xmm5
7455# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
7456# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
7457pxor %xmm5,%xmm7
7458
7459# qhasm: xmm3 ^= xmm1
7460# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
7461# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
7462pxor %xmm1,%xmm3
7463
7464# qhasm: xmm4 ^= xmm5
7465# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
7466# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
7467pxor %xmm5,%xmm4
7468
7469# qhasm: xmm2 ^= xmm7
7470# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
7471# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
7472pxor %xmm7,%xmm2
7473
7474# qhasm: xmm1 ^= xmm5
7475# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
7476# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
7477pxor %xmm5,%xmm1
7478
7479# qhasm: xmm11 = xmm7
7480# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
7481# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
7482movdqa %xmm7,%xmm8
7483
7484# qhasm: xmm10 = xmm1
7485# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
7486# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
7487movdqa %xmm1,%xmm9
7488
7489# qhasm: xmm9 = xmm5
7490# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
7491# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
7492movdqa %xmm5,%xmm10
7493
7494# qhasm: xmm13 = xmm2
7495# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
7496# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
7497movdqa %xmm2,%xmm11
7498
7499# qhasm: xmm12 = xmm6
7500# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
7501# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
7502movdqa %xmm6,%xmm12
7503
7504# qhasm: xmm11 ^= xmm4
7505# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
7506# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
7507pxor %xmm4,%xmm8
7508
7509# qhasm: xmm10 ^= xmm2
7510# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
7511# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
7512pxor %xmm2,%xmm9
7513
7514# qhasm: xmm9 ^= xmm3
7515# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
7516# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
7517pxor %xmm3,%xmm10
7518
7519# qhasm: xmm13 ^= xmm4
7520# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
7521# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
7522pxor %xmm4,%xmm11
7523
7524# qhasm: xmm12 ^= xmm0
7525# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
7526# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
7527pxor %xmm0,%xmm12
7528
7529# qhasm: xmm14 = xmm11
7530# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
7531# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
7532movdqa %xmm8,%xmm13
7533
7534# qhasm: xmm8 = xmm10
7535# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
7536# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
7537movdqa %xmm9,%xmm14
7538
7539# qhasm: xmm15 = xmm11
7540# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
7541# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
7542movdqa %xmm8,%xmm15
7543
7544# qhasm: xmm10 |= xmm9
7545# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
7546# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
7547por %xmm10,%xmm9
7548
7549# qhasm: xmm11 |= xmm12
7550# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
7551# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
7552por %xmm12,%xmm8
7553
7554# qhasm: xmm15 ^= xmm8
7555# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
7556# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
7557pxor %xmm14,%xmm15
7558
7559# qhasm: xmm14 &= xmm12
7560# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
7561# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
7562pand %xmm12,%xmm13
7563
7564# qhasm: xmm8 &= xmm9
7565# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
7566# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
7567pand %xmm10,%xmm14
7568
7569# qhasm: xmm12 ^= xmm9
7570# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
7571# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
7572pxor %xmm10,%xmm12
7573
7574# qhasm: xmm15 &= xmm12
7575# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
7576# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
7577pand %xmm12,%xmm15
7578
7579# qhasm: xmm12 = xmm3
7580# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
7581# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
7582movdqa %xmm3,%xmm10
7583
7584# qhasm: xmm12 ^= xmm0
7585# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
7586# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
7587pxor %xmm0,%xmm10
7588
7589# qhasm: xmm13 &= xmm12
7590# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
7591# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
7592pand %xmm10,%xmm11
7593
7594# qhasm: xmm11 ^= xmm13
7595# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
7596# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
7597pxor %xmm11,%xmm8
7598
7599# qhasm: xmm10 ^= xmm13
7600# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7601# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7602pxor %xmm11,%xmm9
7603
7604# qhasm: xmm13 = xmm7
7605# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
7606# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
7607movdqa %xmm7,%xmm10
7608
7609# qhasm: xmm13 ^= xmm1
7610# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
7611# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
7612pxor %xmm1,%xmm10
7613
7614# qhasm: xmm12 = xmm5
7615# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
7616# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
7617movdqa %xmm5,%xmm11
7618
7619# qhasm: xmm9 = xmm13
7620# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
7621# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
7622movdqa %xmm10,%xmm12
7623
7624# qhasm: xmm12 ^= xmm6
7625# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
7626# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
7627pxor %xmm6,%xmm11
7628
7629# qhasm: xmm9 |= xmm12
7630# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
7631# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
7632por %xmm11,%xmm12
7633
7634# qhasm: xmm13 &= xmm12
7635# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
7636# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
7637pand %xmm11,%xmm10
7638
7639# qhasm: xmm8 ^= xmm13
7640# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
7641# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
7642pxor %xmm10,%xmm14
7643
7644# qhasm: xmm11 ^= xmm15
7645# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
7646# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
7647pxor %xmm15,%xmm8
7648
7649# qhasm: xmm10 ^= xmm14
7650# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
7651# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
7652pxor %xmm13,%xmm9
7653
7654# qhasm: xmm9 ^= xmm15
7655# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
7656# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
7657pxor %xmm15,%xmm12
7658
7659# qhasm: xmm8 ^= xmm14
7660# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
7661# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
7662pxor %xmm13,%xmm14
7663
7664# qhasm: xmm9 ^= xmm14
7665# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7666# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7667pxor %xmm13,%xmm12
7668
7669# qhasm: xmm12 = xmm2
7670# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
7671# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
7672movdqa %xmm2,%xmm10
7673
7674# qhasm: xmm13 = xmm4
7675# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
7676# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
7677movdqa %xmm4,%xmm11
7678
7679# qhasm: xmm14 = xmm1
7680# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
7681# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
7682movdqa %xmm1,%xmm13
7683
7684# qhasm: xmm15 = xmm7
7685# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
7686# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
7687movdqa %xmm7,%xmm15
7688
7689# qhasm: xmm12 &= xmm3
7690# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
7691# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
7692pand %xmm3,%xmm10
7693
7694# qhasm: xmm13 &= xmm0
7695# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
7696# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
7697pand %xmm0,%xmm11
7698
7699# qhasm: xmm14 &= xmm5
7700# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
7701# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
7702pand %xmm5,%xmm13
7703
7704# qhasm: xmm15 |= xmm6
7705# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
7706# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
7707por %xmm6,%xmm15
7708
7709# qhasm: xmm11 ^= xmm12
7710# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
7711# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
7712pxor %xmm10,%xmm8
7713
7714# qhasm: xmm10 ^= xmm13
7715# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
7716# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
7717pxor %xmm11,%xmm9
7718
7719# qhasm: xmm9 ^= xmm14
7720# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
7721# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
7722pxor %xmm13,%xmm12
7723
7724# qhasm: xmm8 ^= xmm15
7725# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
7726# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
7727pxor %xmm15,%xmm14
7728
7729# qhasm: xmm12 = xmm11
7730# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
7731# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
7732movdqa %xmm8,%xmm10
7733
7734# qhasm: xmm12 ^= xmm10
7735# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
7736# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
7737pxor %xmm9,%xmm10
7738
7739# qhasm: xmm11 &= xmm9
7740# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
7741# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
7742pand %xmm12,%xmm8
7743
7744# qhasm: xmm14 = xmm8
7745# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
7746# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
7747movdqa %xmm14,%xmm11
7748
7749# qhasm: xmm14 ^= xmm11
7750# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
7751# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
7752pxor %xmm8,%xmm11
7753
7754# qhasm: xmm15 = xmm12
7755# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
7756# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
7757movdqa %xmm10,%xmm13
7758
7759# qhasm: xmm15 &= xmm14
7760# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
7761# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
7762pand %xmm11,%xmm13
7763
7764# qhasm: xmm15 ^= xmm10
7765# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
7766# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
7767pxor %xmm9,%xmm13
7768
7769# qhasm: xmm13 = xmm9
7770# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
7771# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
7772movdqa %xmm12,%xmm15
7773
7774# qhasm: xmm13 ^= xmm8
7775# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7776# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7777pxor %xmm14,%xmm15
7778
7779# qhasm: xmm11 ^= xmm10
7780# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
7781# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
7782pxor %xmm9,%xmm8
7783
7784# qhasm: xmm13 &= xmm11
7785# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
7786# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
7787pand %xmm8,%xmm15
7788
7789# qhasm: xmm13 ^= xmm8
7790# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
7791# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
7792pxor %xmm14,%xmm15
7793
7794# qhasm: xmm9 ^= xmm13
7795# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
7796# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
7797pxor %xmm15,%xmm12
7798
7799# qhasm: xmm10 = xmm14
7800# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
7801# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
7802movdqa %xmm11,%xmm8
7803
7804# qhasm: xmm10 ^= xmm13
7805# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
7806# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
7807pxor %xmm15,%xmm8
7808
7809# qhasm: xmm10 &= xmm8
7810# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
7811# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
7812pand %xmm14,%xmm8
7813
7814# qhasm: xmm9 ^= xmm10
7815# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
7816# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
7817pxor %xmm8,%xmm12
7818
7819# qhasm: xmm14 ^= xmm10
7820# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
7821# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
7822pxor %xmm8,%xmm11
7823
7824# qhasm: xmm14 &= xmm15
7825# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
7826# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
7827pand %xmm13,%xmm11
7828
7829# qhasm: xmm14 ^= xmm12
7830# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
7831# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
7832pxor %xmm10,%xmm11
7833
7834# qhasm: xmm12 = xmm6
7835# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
7836# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
7837movdqa %xmm6,%xmm8
7838
7839# qhasm: xmm8 = xmm5
7840# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
7841# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
7842movdqa %xmm5,%xmm9
7843
7844# qhasm: xmm10 = xmm15
7845# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
7846# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
7847movdqa %xmm13,%xmm10
7848
7849# qhasm: xmm10 ^= xmm14
7850# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
7851# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
7852pxor %xmm11,%xmm10
7853
7854# qhasm: xmm10 &= xmm6
7855# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
7856# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
7857pand %xmm6,%xmm10
7858
7859# qhasm: xmm6 ^= xmm5
7860# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
7861# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
7862pxor %xmm5,%xmm6
7863
7864# qhasm: xmm6 &= xmm14
7865# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
7866# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
7867pand %xmm11,%xmm6
7868
7869# qhasm: xmm5 &= xmm15
7870# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
7871# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
7872pand %xmm13,%xmm5
7873
7874# qhasm: xmm6 ^= xmm5
7875# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
7876# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
7877pxor %xmm5,%xmm6
7878
7879# qhasm: xmm5 ^= xmm10
7880# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
7881# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
7882pxor %xmm10,%xmm5
7883
7884# qhasm: xmm12 ^= xmm0
7885# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
7886# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
7887pxor %xmm0,%xmm8
7888
7889# qhasm: xmm8 ^= xmm3
7890# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
7891# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
7892pxor %xmm3,%xmm9
7893
7894# qhasm: xmm15 ^= xmm13
7895# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
7896# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
7897pxor %xmm15,%xmm13
7898
7899# qhasm: xmm14 ^= xmm9
7900# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
7901# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
7902pxor %xmm12,%xmm11
7903
7904# qhasm: xmm11 = xmm15
7905# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
7906# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
7907movdqa %xmm13,%xmm10
7908
7909# qhasm: xmm11 ^= xmm14
7910# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
7911# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
7912pxor %xmm11,%xmm10
7913
7914# qhasm: xmm11 &= xmm12
7915# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
7916# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
7917pand %xmm8,%xmm10
7918
7919# qhasm: xmm12 ^= xmm8
7920# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
7921# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
7922pxor %xmm9,%xmm8
7923
7924# qhasm: xmm12 &= xmm14
7925# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
7926# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
7927pand %xmm11,%xmm8
7928
7929# qhasm: xmm8 &= xmm15
7930# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
7931# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
7932pand %xmm13,%xmm9
7933
7934# qhasm: xmm8 ^= xmm12
7935# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
7936# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
7937pxor %xmm8,%xmm9
7938
7939# qhasm: xmm12 ^= xmm11
7940# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
7941# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
7942pxor %xmm10,%xmm8
7943
7944# qhasm: xmm10 = xmm13
7945# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
7946# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
7947movdqa %xmm15,%xmm10
7948
7949# qhasm: xmm10 ^= xmm9
7950# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
7951# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
7952pxor %xmm12,%xmm10
7953
7954# qhasm: xmm10 &= xmm0
7955# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
7956# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
7957pand %xmm0,%xmm10
7958
7959# qhasm: xmm0 ^= xmm3
7960# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
7961# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
7962pxor %xmm3,%xmm0
7963
7964# qhasm: xmm0 &= xmm9
7965# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
7966# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
7967pand %xmm12,%xmm0
7968
7969# qhasm: xmm3 &= xmm13
7970# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
7971# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
7972pand %xmm15,%xmm3
7973
7974# qhasm: xmm0 ^= xmm3
7975# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
7976# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
7977pxor %xmm3,%xmm0
7978
7979# qhasm: xmm3 ^= xmm10
7980# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
7981# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
7982pxor %xmm10,%xmm3
7983
7984# qhasm: xmm6 ^= xmm12
7985# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
7986# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
7987pxor %xmm8,%xmm6
7988
7989# qhasm: xmm0 ^= xmm12
7990# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
7991# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
7992pxor %xmm8,%xmm0
7993
7994# qhasm: xmm5 ^= xmm8
7995# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
7996# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
7997pxor %xmm9,%xmm5
7998
7999# qhasm: xmm3 ^= xmm8
8000# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
8001# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
8002pxor %xmm9,%xmm3
8003
8004# qhasm: xmm12 = xmm7
8005# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
8006# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
8007movdqa %xmm7,%xmm8
8008
8009# qhasm: xmm8 = xmm1
8010# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
8011# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
8012movdqa %xmm1,%xmm9
8013
8014# qhasm: xmm12 ^= xmm4
8015# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
8016# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
8017pxor %xmm4,%xmm8
8018
8019# qhasm: xmm8 ^= xmm2
8020# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
8021# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
8022pxor %xmm2,%xmm9
8023
8024# qhasm: xmm11 = xmm15
8025# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
8026# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
8027movdqa %xmm13,%xmm10
8028
8029# qhasm: xmm11 ^= xmm14
8030# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
8031# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
8032pxor %xmm11,%xmm10
8033
8034# qhasm: xmm11 &= xmm12
8035# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
8036# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
8037pand %xmm8,%xmm10
8038
8039# qhasm: xmm12 ^= xmm8
8040# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
8041# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
8042pxor %xmm9,%xmm8
8043
8044# qhasm: xmm12 &= xmm14
8045# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
8046# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
8047pand %xmm11,%xmm8
8048
8049# qhasm: xmm8 &= xmm15
8050# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
8051# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
8052pand %xmm13,%xmm9
8053
8054# qhasm: xmm8 ^= xmm12
8055# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
8056# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
8057pxor %xmm8,%xmm9
8058
8059# qhasm: xmm12 ^= xmm11
8060# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
8061# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
8062pxor %xmm10,%xmm8
8063
8064# qhasm: xmm10 = xmm13
8065# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
8066# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
8067movdqa %xmm15,%xmm10
8068
8069# qhasm: xmm10 ^= xmm9
8070# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
8071# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
8072pxor %xmm12,%xmm10
8073
8074# qhasm: xmm10 &= xmm4
8075# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
8076# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
8077pand %xmm4,%xmm10
8078
8079# qhasm: xmm4 ^= xmm2
8080# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
8081# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
8082pxor %xmm2,%xmm4
8083
8084# qhasm: xmm4 &= xmm9
8085# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
8086# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
8087pand %xmm12,%xmm4
8088
8089# qhasm: xmm2 &= xmm13
8090# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
8091# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
8092pand %xmm15,%xmm2
8093
8094# qhasm: xmm4 ^= xmm2
8095# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
8096# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
8097pxor %xmm2,%xmm4
8098
8099# qhasm: xmm2 ^= xmm10
8100# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
8101# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
8102pxor %xmm10,%xmm2
8103
8104# qhasm: xmm15 ^= xmm13
8105# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
8106# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
8107pxor %xmm15,%xmm13
8108
8109# qhasm: xmm14 ^= xmm9
8110# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
8111# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
8112pxor %xmm12,%xmm11
8113
8114# qhasm: xmm11 = xmm15
8115# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
8116# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
8117movdqa %xmm13,%xmm10
8118
8119# qhasm: xmm11 ^= xmm14
8120# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
8121# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
8122pxor %xmm11,%xmm10
8123
8124# qhasm: xmm11 &= xmm7
8125# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
8126# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
8127pand %xmm7,%xmm10
8128
8129# qhasm: xmm7 ^= xmm1
8130# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
8131# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
8132pxor %xmm1,%xmm7
8133
8134# qhasm: xmm7 &= xmm14
8135# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
8136# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
8137pand %xmm11,%xmm7
8138
8139# qhasm: xmm1 &= xmm15
8140# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
8141# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
8142pand %xmm13,%xmm1
8143
8144# qhasm: xmm7 ^= xmm1
8145# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
8146# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
8147pxor %xmm1,%xmm7
8148
8149# qhasm: xmm1 ^= xmm11
8150# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
8151# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
8152pxor %xmm10,%xmm1
8153
8154# qhasm: xmm7 ^= xmm12
8155# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
8156# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
8157pxor %xmm8,%xmm7
8158
8159# qhasm: xmm4 ^= xmm12
8160# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
8161# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
8162pxor %xmm8,%xmm4
8163
8164# qhasm: xmm1 ^= xmm8
8165# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
8166# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
8167pxor %xmm9,%xmm1
8168
8169# qhasm: xmm2 ^= xmm8
8170# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
8171# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
8172pxor %xmm9,%xmm2
8173
8174# qhasm: xmm7 ^= xmm0
8175# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
8176# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
8177pxor %xmm0,%xmm7
8178
8179# qhasm: xmm1 ^= xmm6
8180# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
8181# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
8182pxor %xmm6,%xmm1
8183
8184# qhasm: xmm4 ^= xmm7
8185# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
8186# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
8187pxor %xmm7,%xmm4
8188
8189# qhasm: xmm6 ^= xmm0
8190# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
8191# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
8192pxor %xmm0,%xmm6
8193
8194# qhasm: xmm0 ^= xmm1
8195# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
8196# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
8197pxor %xmm1,%xmm0
8198
8199# qhasm: xmm1 ^= xmm5
8200# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
8201# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
8202pxor %xmm5,%xmm1
8203
8204# qhasm: xmm5 ^= xmm2
8205# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
8206# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
8207pxor %xmm2,%xmm5
8208
8209# qhasm: xmm4 ^= xmm5
8210# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
8211# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
8212pxor %xmm5,%xmm4
8213
8214# qhasm: xmm2 ^= xmm3
8215# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
8216# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
8217pxor %xmm3,%xmm2
8218
8219# qhasm: xmm3 ^= xmm5
8220# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
8221# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
8222pxor %xmm5,%xmm3
8223
8224# qhasm: xmm6 ^= xmm3
8225# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
8226# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
8227pxor %xmm3,%xmm6
8228
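# editorial note (not part of the generated qhasm output; interpretation is an
# assumption based on the code's structure): the pshufd/pxor sequence below
# appears to implement the MixColumns step of the bitsliced AES round --
# each of the eight state registers is rotated by one 32-bit word ($0x93) and
# by two words ($0x4E), and the rotated copies are XORed back into the state.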
8229# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
8230# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
8231# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
8232pshufd $0x93,%xmm0,%xmm8
8233
8234# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
8235# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
8236# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
8237pshufd $0x93,%xmm1,%xmm9
8238
8239# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
8240# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
8241# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
8242pshufd $0x93,%xmm4,%xmm10
8243
8244# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
8245# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
8246# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
8247pshufd $0x93,%xmm6,%xmm11
8248
8249# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
8250# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
8251# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
8252pshufd $0x93,%xmm3,%xmm12
8253
8254# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
8255# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
8256# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
8257pshufd $0x93,%xmm7,%xmm13
8258
8259# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
8260# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
8261# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
8262pshufd $0x93,%xmm2,%xmm14
8263
8264# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
8265# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
8266# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
8267pshufd $0x93,%xmm5,%xmm15
8268
8269# qhasm: xmm0 ^= xmm8
8270# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
8271# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
8272pxor %xmm8,%xmm0
8273
8274# qhasm: xmm1 ^= xmm9
8275# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
8276# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
8277pxor %xmm9,%xmm1
8278
8279# qhasm: xmm4 ^= xmm10
8280# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
8281# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
8282pxor %xmm10,%xmm4
8283
8284# qhasm: xmm6 ^= xmm11
8285# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
8286# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
8287pxor %xmm11,%xmm6
8288
8289# qhasm: xmm3 ^= xmm12
8290# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
8291# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
8292pxor %xmm12,%xmm3
8293
8294# qhasm: xmm7 ^= xmm13
8295# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
8296# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
8297pxor %xmm13,%xmm7
8298
8299# qhasm: xmm2 ^= xmm14
8300# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
8301# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
8302pxor %xmm14,%xmm2
8303
8304# qhasm: xmm5 ^= xmm15
8305# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
8306# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
8307pxor %xmm15,%xmm5
8308
8309# qhasm: xmm8 ^= xmm5
8310# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
8311# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
8312pxor %xmm5,%xmm8
8313
8314# qhasm: xmm9 ^= xmm0
8315# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
8316# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
8317pxor %xmm0,%xmm9
8318
8319# qhasm: xmm10 ^= xmm1
8320# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
8321# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
8322pxor %xmm1,%xmm10
8323
8324# qhasm: xmm9 ^= xmm5
8325# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
8326# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
8327pxor %xmm5,%xmm9
8328
8329# qhasm: xmm11 ^= xmm4
8330# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
8331# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
8332pxor %xmm4,%xmm11
8333
8334# qhasm: xmm12 ^= xmm6
8335# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
8336# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
8337pxor %xmm6,%xmm12
8338
8339# qhasm: xmm13 ^= xmm3
8340# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
8341# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
8342pxor %xmm3,%xmm13
8343
8344# qhasm: xmm11 ^= xmm5
8345# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
8346# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
8347pxor %xmm5,%xmm11
8348
8349# qhasm: xmm14 ^= xmm7
8350# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
8351# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
8352pxor %xmm7,%xmm14
8353
8354# qhasm: xmm15 ^= xmm2
8355# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
8356# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
8357pxor %xmm2,%xmm15
8358
8359# qhasm: xmm12 ^= xmm5
8360# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
8361# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
8362pxor %xmm5,%xmm12
8363
8364# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
8365# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
8366# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
8367pshufd $0x4E,%xmm0,%xmm0
8368
8369# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
8370# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
8371# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
8372pshufd $0x4E,%xmm1,%xmm1
8373
8374# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
8375# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
8376# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
8377pshufd $0x4E,%xmm4,%xmm4
8378
8379# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
8380# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
8381# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
8382pshufd $0x4E,%xmm6,%xmm6
8383
8384# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
8385# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
8386# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
8387pshufd $0x4E,%xmm3,%xmm3
8388
8389# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
8390# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
8391# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
8392pshufd $0x4E,%xmm7,%xmm7
8393
8394# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
8395# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
8396# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
8397pshufd $0x4E,%xmm2,%xmm2
8398
8399# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
8400# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
8401# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
8402pshufd $0x4E,%xmm5,%xmm5
8403
8404# qhasm: xmm8 ^= xmm0
8405# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
8406# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
8407pxor %xmm0,%xmm8
8408
8409# qhasm: xmm9 ^= xmm1
8410# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
8411# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
8412pxor %xmm1,%xmm9
8413
8414# qhasm: xmm10 ^= xmm4
8415# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
8416# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
8417pxor %xmm4,%xmm10
8418
8419# qhasm: xmm11 ^= xmm6
8420# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
8421# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
8422pxor %xmm6,%xmm11
8423
8424# qhasm: xmm12 ^= xmm3
8425# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
8426# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
8427pxor %xmm3,%xmm12
8428
8429# qhasm: xmm13 ^= xmm7
8430# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
8431# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
8432pxor %xmm7,%xmm13
8433
8434# qhasm: xmm14 ^= xmm2
8435# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
8436# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
8437pxor %xmm2,%xmm14
8438
8439# qhasm: xmm15 ^= xmm5
8440# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
8441# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
8442pxor %xmm5,%xmm15
8443
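# editorial note (assumption, not from the original source): the block below
# looks like the AddRoundKey and ShiftRows steps of the next AES round --
# each of the eight bitslice registers is XORed with 16 bytes of expanded key
# material starting at offset 896 from c, then byte-shuffled by the SR constant.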
8444# qhasm: xmm8 ^= *(int128 *)(c + 896)
8445# asm 1: pxor 896(<c=int64#5),<xmm8=int6464#9
8446# asm 2: pxor 896(<c=%r8),<xmm8=%xmm8
8447pxor 896(%r8),%xmm8
8448
8449# qhasm: shuffle bytes of xmm8 by SR
8450# asm 1: pshufb SR,<xmm8=int6464#9
8451# asm 2: pshufb SR,<xmm8=%xmm8
8452pshufb SR,%xmm8
8453
8454# qhasm: xmm9 ^= *(int128 *)(c + 912)
8455# asm 1: pxor 912(<c=int64#5),<xmm9=int6464#10
8456# asm 2: pxor 912(<c=%r8),<xmm9=%xmm9
8457pxor 912(%r8),%xmm9
8458
8459# qhasm: shuffle bytes of xmm9 by SR
8460# asm 1: pshufb SR,<xmm9=int6464#10
8461# asm 2: pshufb SR,<xmm9=%xmm9
8462pshufb SR,%xmm9
8463
8464# qhasm: xmm10 ^= *(int128 *)(c + 928)
8465# asm 1: pxor 928(<c=int64#5),<xmm10=int6464#11
8466# asm 2: pxor 928(<c=%r8),<xmm10=%xmm10
8467pxor 928(%r8),%xmm10
8468
8469# qhasm: shuffle bytes of xmm10 by SR
8470# asm 1: pshufb SR,<xmm10=int6464#11
8471# asm 2: pshufb SR,<xmm10=%xmm10
8472pshufb SR,%xmm10
8473
8474# qhasm: xmm11 ^= *(int128 *)(c + 944)
8475# asm 1: pxor 944(<c=int64#5),<xmm11=int6464#12
8476# asm 2: pxor 944(<c=%r8),<xmm11=%xmm11
8477pxor 944(%r8),%xmm11
8478
8479# qhasm: shuffle bytes of xmm11 by SR
8480# asm 1: pshufb SR,<xmm11=int6464#12
8481# asm 2: pshufb SR,<xmm11=%xmm11
8482pshufb SR,%xmm11
8483
8484# qhasm: xmm12 ^= *(int128 *)(c + 960)
8485# asm 1: pxor 960(<c=int64#5),<xmm12=int6464#13
8486# asm 2: pxor 960(<c=%r8),<xmm12=%xmm12
8487pxor 960(%r8),%xmm12
8488
8489# qhasm: shuffle bytes of xmm12 by SR
8490# asm 1: pshufb SR,<xmm12=int6464#13
8491# asm 2: pshufb SR,<xmm12=%xmm12
8492pshufb SR,%xmm12
8493
8494# qhasm: xmm13 ^= *(int128 *)(c + 976)
8495# asm 1: pxor 976(<c=int64#5),<xmm13=int6464#14
8496# asm 2: pxor 976(<c=%r8),<xmm13=%xmm13
8497pxor 976(%r8),%xmm13
8498
8499# qhasm: shuffle bytes of xmm13 by SR
8500# asm 1: pshufb SR,<xmm13=int6464#14
8501# asm 2: pshufb SR,<xmm13=%xmm13
8502pshufb SR,%xmm13
8503
8504# qhasm: xmm14 ^= *(int128 *)(c + 992)
8505# asm 1: pxor 992(<c=int64#5),<xmm14=int6464#15
8506# asm 2: pxor 992(<c=%r8),<xmm14=%xmm14
8507pxor 992(%r8),%xmm14
8508
8509# qhasm: shuffle bytes of xmm14 by SR
8510# asm 1: pshufb SR,<xmm14=int6464#15
8511# asm 2: pshufb SR,<xmm14=%xmm14
8512pshufb SR,%xmm14
8513
8514# qhasm: xmm15 ^= *(int128 *)(c + 1008)
8515# asm 1: pxor 1008(<c=int64#5),<xmm15=int6464#16
8516# asm 2: pxor 1008(<c=%r8),<xmm15=%xmm15
8517pxor 1008(%r8),%xmm15
8518
8519# qhasm: shuffle bytes of xmm15 by SR
8520# asm 1: pshufb SR,<xmm15=int6464#16
8521# asm 2: pshufb SR,<xmm15=%xmm15
8522pshufb SR,%xmm15
8523
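# editorial note (assumption): the long pxor/pand/por/movdqa sequence that
# follows is consistent with the bitsliced AES S-box (SubBytes) evaluated as a
# boolean circuit across all eight 128-bit registers at once; the qhasm and asm
# comments preceding each instruction only record the register-level operations.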
8524# qhasm: xmm13 ^= xmm14
8525# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
8526# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
8527pxor %xmm14,%xmm13
8528
8529# qhasm: xmm10 ^= xmm9
8530# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
8531# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
8532pxor %xmm9,%xmm10
8533
8534# qhasm: xmm13 ^= xmm8
8535# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
8536# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
8537pxor %xmm8,%xmm13
8538
8539# qhasm: xmm14 ^= xmm10
8540# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
8541# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
8542pxor %xmm10,%xmm14
8543
8544# qhasm: xmm11 ^= xmm8
8545# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
8546# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
8547pxor %xmm8,%xmm11
8548
8549# qhasm: xmm14 ^= xmm11
8550# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
8551# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
8552pxor %xmm11,%xmm14
8553
8554# qhasm: xmm11 ^= xmm15
8555# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
8556# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
8557pxor %xmm15,%xmm11
8558
8559# qhasm: xmm11 ^= xmm12
8560# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
8561# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
8562pxor %xmm12,%xmm11
8563
8564# qhasm: xmm15 ^= xmm13
8565# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
8566# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
8567pxor %xmm13,%xmm15
8568
8569# qhasm: xmm11 ^= xmm9
8570# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
8571# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
8572pxor %xmm9,%xmm11
8573
8574# qhasm: xmm12 ^= xmm13
8575# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
8576# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
8577pxor %xmm13,%xmm12
8578
8579# qhasm: xmm10 ^= xmm15
8580# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
8581# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
8582pxor %xmm15,%xmm10
8583
8584# qhasm: xmm9 ^= xmm13
8585# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
8586# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
8587pxor %xmm13,%xmm9
8588
8589# qhasm: xmm3 = xmm15
8590# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
8591# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
8592movdqa %xmm15,%xmm0
8593
8594# qhasm: xmm2 = xmm9
8595# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
8596# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
8597movdqa %xmm9,%xmm1
8598
8599# qhasm: xmm1 = xmm13
8600# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
8601# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
8602movdqa %xmm13,%xmm2
8603
8604# qhasm: xmm5 = xmm10
8605# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
8606# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
8607movdqa %xmm10,%xmm3
8608
8609# qhasm: xmm4 = xmm14
8610# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
8611# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
8612movdqa %xmm14,%xmm4
8613
8614# qhasm: xmm3 ^= xmm12
8615# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
8616# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
8617pxor %xmm12,%xmm0
8618
8619# qhasm: xmm2 ^= xmm10
8620# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
8621# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
8622pxor %xmm10,%xmm1
8623
8624# qhasm: xmm1 ^= xmm11
8625# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
8626# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
8627pxor %xmm11,%xmm2
8628
8629# qhasm: xmm5 ^= xmm12
8630# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
8631# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
8632pxor %xmm12,%xmm3
8633
8634# qhasm: xmm4 ^= xmm8
8635# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
8636# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
8637pxor %xmm8,%xmm4
8638
8639# qhasm: xmm6 = xmm3
8640# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
8641# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
8642movdqa %xmm0,%xmm5
8643
8644# qhasm: xmm0 = xmm2
8645# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
8646# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
8647movdqa %xmm1,%xmm6
8648
8649# qhasm: xmm7 = xmm3
8650# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
8651# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
8652movdqa %xmm0,%xmm7
8653
8654# qhasm: xmm2 |= xmm1
8655# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
8656# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
8657por %xmm2,%xmm1
8658
8659# qhasm: xmm3 |= xmm4
8660# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
8661# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
8662por %xmm4,%xmm0
8663
8664# qhasm: xmm7 ^= xmm0
8665# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
8666# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
8667pxor %xmm6,%xmm7
8668
8669# qhasm: xmm6 &= xmm4
8670# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
8671# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
8672pand %xmm4,%xmm5
8673
8674# qhasm: xmm0 &= xmm1
8675# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
8676# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
8677pand %xmm2,%xmm6
8678
8679# qhasm: xmm4 ^= xmm1
8680# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
8681# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
8682pxor %xmm2,%xmm4
8683
8684# qhasm: xmm7 &= xmm4
8685# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
8686# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
8687pand %xmm4,%xmm7
8688
8689# qhasm: xmm4 = xmm11
8690# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
8691# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
8692movdqa %xmm11,%xmm2
8693
8694# qhasm: xmm4 ^= xmm8
8695# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
8696# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
8697pxor %xmm8,%xmm2
8698
8699# qhasm: xmm5 &= xmm4
8700# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
8701# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
8702pand %xmm2,%xmm3
8703
8704# qhasm: xmm3 ^= xmm5
8705# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
8706# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
8707pxor %xmm3,%xmm0
8708
8709# qhasm: xmm2 ^= xmm5
8710# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
8711# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
8712pxor %xmm3,%xmm1
8713
8714# qhasm: xmm5 = xmm15
8715# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
8716# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
8717movdqa %xmm15,%xmm2
8718
8719# qhasm: xmm5 ^= xmm9
8720# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
8721# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
8722pxor %xmm9,%xmm2
8723
8724# qhasm: xmm4 = xmm13
8725# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
8726# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
8727movdqa %xmm13,%xmm3
8728
8729# qhasm: xmm1 = xmm5
8730# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
8731# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
8732movdqa %xmm2,%xmm4
8733
8734# qhasm: xmm4 ^= xmm14
8735# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
8736# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
8737pxor %xmm14,%xmm3
8738
8739# qhasm: xmm1 |= xmm4
8740# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
8741# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
8742por %xmm3,%xmm4
8743
8744# qhasm: xmm5 &= xmm4
8745# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
8746# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
8747pand %xmm3,%xmm2
8748
8749# qhasm: xmm0 ^= xmm5
8750# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
8751# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
8752pxor %xmm2,%xmm6
8753
8754# qhasm: xmm3 ^= xmm7
8755# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
8756# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
8757pxor %xmm7,%xmm0
8758
8759# qhasm: xmm2 ^= xmm6
8760# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
8761# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
8762pxor %xmm5,%xmm1
8763
8764# qhasm: xmm1 ^= xmm7
8765# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
8766# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
8767pxor %xmm7,%xmm4
8768
8769# qhasm: xmm0 ^= xmm6
8770# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
8771# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
8772pxor %xmm5,%xmm6
8773
8774# qhasm: xmm1 ^= xmm6
8775# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
8776# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
8777pxor %xmm5,%xmm4
8778
8779# qhasm: xmm4 = xmm10
8780# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
8781# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
8782movdqa %xmm10,%xmm2
8783
8784# qhasm: xmm5 = xmm12
8785# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
8786# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
8787movdqa %xmm12,%xmm3
8788
8789# qhasm: xmm6 = xmm9
8790# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
8791# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
8792movdqa %xmm9,%xmm5
8793
8794# qhasm: xmm7 = xmm15
8795# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
8796# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
8797movdqa %xmm15,%xmm7
8798
8799# qhasm: xmm4 &= xmm11
8800# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
8801# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
8802pand %xmm11,%xmm2
8803
8804# qhasm: xmm5 &= xmm8
8805# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
8806# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
8807pand %xmm8,%xmm3
8808
8809# qhasm: xmm6 &= xmm13
8810# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
8811# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
8812pand %xmm13,%xmm5
8813
8814# qhasm: xmm7 |= xmm14
8815# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
8816# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
8817por %xmm14,%xmm7
8818
8819# qhasm: xmm3 ^= xmm4
8820# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
8821# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
8822pxor %xmm2,%xmm0
8823
8824# qhasm: xmm2 ^= xmm5
8825# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
8826# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
8827pxor %xmm3,%xmm1
8828
8829# qhasm: xmm1 ^= xmm6
8830# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
8831# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
8832pxor %xmm5,%xmm4
8833
8834# qhasm: xmm0 ^= xmm7
8835# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
8836# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
8837pxor %xmm7,%xmm6
8838
8839# qhasm: xmm4 = xmm3
8840# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
8841# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
8842movdqa %xmm0,%xmm2
8843
8844# qhasm: xmm4 ^= xmm2
8845# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
8846# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
8847pxor %xmm1,%xmm2
8848
8849# qhasm: xmm3 &= xmm1
8850# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
8851# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
8852pand %xmm4,%xmm0
8853
8854# qhasm: xmm6 = xmm0
8855# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
8856# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
8857movdqa %xmm6,%xmm3
8858
8859# qhasm: xmm6 ^= xmm3
8860# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
8861# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
8862pxor %xmm0,%xmm3
8863
8864# qhasm: xmm7 = xmm4
8865# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
8866# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
8867movdqa %xmm2,%xmm5
8868
8869# qhasm: xmm7 &= xmm6
8870# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
8871# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
8872pand %xmm3,%xmm5
8873
8874# qhasm: xmm7 ^= xmm2
8875# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
8876# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
8877pxor %xmm1,%xmm5
8878
8879# qhasm: xmm5 = xmm1
8880# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
8881# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
8882movdqa %xmm4,%xmm7
8883
8884# qhasm: xmm5 ^= xmm0
8885# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
8886# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
8887pxor %xmm6,%xmm7
8888
8889# qhasm: xmm3 ^= xmm2
8890# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
8891# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
8892pxor %xmm1,%xmm0
8893
8894# qhasm: xmm5 &= xmm3
8895# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
8896# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
8897pand %xmm0,%xmm7
8898
8899# qhasm: xmm5 ^= xmm0
8900# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
8901# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
8902pxor %xmm6,%xmm7
8903
8904# qhasm: xmm1 ^= xmm5
8905# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
8906# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
8907pxor %xmm7,%xmm4
8908
8909# qhasm: xmm2 = xmm6
8910# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
8911# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
8912movdqa %xmm3,%xmm0
8913
8914# qhasm: xmm2 ^= xmm5
8915# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
8916# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
8917pxor %xmm7,%xmm0
8918
8919# qhasm: xmm2 &= xmm0
8920# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
8921# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
8922pand %xmm6,%xmm0
8923
8924# qhasm: xmm1 ^= xmm2
8925# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
8926# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
8927pxor %xmm0,%xmm4
8928
8929# qhasm: xmm6 ^= xmm2
8930# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
8931# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
8932pxor %xmm0,%xmm3
8933
8934# qhasm: xmm6 &= xmm7
8935# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
8936# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
8937pand %xmm5,%xmm3
8938
8939# qhasm: xmm6 ^= xmm4
8940# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
8941# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
8942pxor %xmm2,%xmm3
8943
8944# qhasm: xmm4 = xmm14
8945# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
8946# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
8947movdqa %xmm14,%xmm0
8948
8949# qhasm: xmm0 = xmm13
8950# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
8951# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
8952movdqa %xmm13,%xmm1
8953
8954# qhasm: xmm2 = xmm7
8955# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
8956# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
8957movdqa %xmm5,%xmm2
8958
8959# qhasm: xmm2 ^= xmm6
8960# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
8961# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
8962pxor %xmm3,%xmm2
8963
8964# qhasm: xmm2 &= xmm14
8965# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
8966# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
8967pand %xmm14,%xmm2
8968
8969# qhasm: xmm14 ^= xmm13
8970# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
8971# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
8972pxor %xmm13,%xmm14
8973
8974# qhasm: xmm14 &= xmm6
8975# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
8976# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
8977pand %xmm3,%xmm14
8978
8979# qhasm: xmm13 &= xmm7
8980# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
8981# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
8982pand %xmm5,%xmm13
8983
8984# qhasm: xmm14 ^= xmm13
8985# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
8986# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
8987pxor %xmm13,%xmm14
8988
8989# qhasm: xmm13 ^= xmm2
8990# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
8991# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
8992pxor %xmm2,%xmm13
8993
8994# qhasm: xmm4 ^= xmm8
8995# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
8996# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
8997pxor %xmm8,%xmm0
8998
8999# qhasm: xmm0 ^= xmm11
9000# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
9001# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
9002pxor %xmm11,%xmm1
9003
9004# qhasm: xmm7 ^= xmm5
9005# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
9006# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
9007pxor %xmm7,%xmm5
9008
9009# qhasm: xmm6 ^= xmm1
9010# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
9011# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
9012pxor %xmm4,%xmm3
9013
9014# qhasm: xmm3 = xmm7
9015# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9016# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9017movdqa %xmm5,%xmm2
9018
9019# qhasm: xmm3 ^= xmm6
9020# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9021# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9022pxor %xmm3,%xmm2
9023
9024# qhasm: xmm3 &= xmm4
9025# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
9026# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
9027pand %xmm0,%xmm2
9028
9029# qhasm: xmm4 ^= xmm0
9030# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
9031# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
9032pxor %xmm1,%xmm0
9033
9034# qhasm: xmm4 &= xmm6
9035# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
9036# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
9037pand %xmm3,%xmm0
9038
9039# qhasm: xmm0 &= xmm7
9040# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
9041# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
9042pand %xmm5,%xmm1
9043
9044# qhasm: xmm0 ^= xmm4
9045# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
9046# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
9047pxor %xmm0,%xmm1
9048
9049# qhasm: xmm4 ^= xmm3
9050# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
9051# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
9052pxor %xmm2,%xmm0
9053
9054# qhasm: xmm2 = xmm5
9055# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
9056# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
9057movdqa %xmm7,%xmm2
9058
9059# qhasm: xmm2 ^= xmm1
9060# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
9061# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
9062pxor %xmm4,%xmm2
9063
9064# qhasm: xmm2 &= xmm8
9065# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
9066# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
9067pand %xmm8,%xmm2
9068
9069# qhasm: xmm8 ^= xmm11
9070# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
9071# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
9072pxor %xmm11,%xmm8
9073
9074# qhasm: xmm8 &= xmm1
9075# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
9076# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
9077pand %xmm4,%xmm8
9078
9079# qhasm: xmm11 &= xmm5
9080# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
9081# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
9082pand %xmm7,%xmm11
9083
9084# qhasm: xmm8 ^= xmm11
9085# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
9086# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
9087pxor %xmm11,%xmm8
9088
9089# qhasm: xmm11 ^= xmm2
9090# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
9091# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
9092pxor %xmm2,%xmm11
9093
9094# qhasm: xmm14 ^= xmm4
9095# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
9096# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
9097pxor %xmm0,%xmm14
9098
9099# qhasm: xmm8 ^= xmm4
9100# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
9101# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
9102pxor %xmm0,%xmm8
9103
9104# qhasm: xmm13 ^= xmm0
9105# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
9106# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
9107pxor %xmm1,%xmm13
9108
9109# qhasm: xmm11 ^= xmm0
9110# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
9111# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
9112pxor %xmm1,%xmm11
9113
9114# qhasm: xmm4 = xmm15
9115# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
9116# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
9117movdqa %xmm15,%xmm0
9118
9119# qhasm: xmm0 = xmm9
9120# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
9121# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
9122movdqa %xmm9,%xmm1
9123
9124# qhasm: xmm4 ^= xmm12
9125# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
9126# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
9127pxor %xmm12,%xmm0
9128
9129# qhasm: xmm0 ^= xmm10
9130# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
9131# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
9132pxor %xmm10,%xmm1
9133
9134# qhasm: xmm3 = xmm7
9135# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9136# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9137movdqa %xmm5,%xmm2
9138
9139# qhasm: xmm3 ^= xmm6
9140# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9141# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9142pxor %xmm3,%xmm2
9143
9144# qhasm: xmm3 &= xmm4
9145# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
9146# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
9147pand %xmm0,%xmm2
9148
9149# qhasm: xmm4 ^= xmm0
9150# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
9151# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
9152pxor %xmm1,%xmm0
9153
9154# qhasm: xmm4 &= xmm6
9155# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
9156# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
9157pand %xmm3,%xmm0
9158
9159# qhasm: xmm0 &= xmm7
9160# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
9161# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
9162pand %xmm5,%xmm1
9163
9164# qhasm: xmm0 ^= xmm4
9165# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
9166# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
9167pxor %xmm0,%xmm1
9168
9169# qhasm: xmm4 ^= xmm3
9170# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
9171# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
9172pxor %xmm2,%xmm0
9173
9174# qhasm: xmm2 = xmm5
9175# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
9176# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
9177movdqa %xmm7,%xmm2
9178
9179# qhasm: xmm2 ^= xmm1
9180# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
9181# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
9182pxor %xmm4,%xmm2
9183
9184# qhasm: xmm2 &= xmm12
9185# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
9186# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
9187pand %xmm12,%xmm2
9188
9189# qhasm: xmm12 ^= xmm10
9190# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
9191# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
9192pxor %xmm10,%xmm12
9193
9194# qhasm: xmm12 &= xmm1
9195# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
9196# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
9197pand %xmm4,%xmm12
9198
9199# qhasm: xmm10 &= xmm5
9200# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
9201# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
9202pand %xmm7,%xmm10
9203
9204# qhasm: xmm12 ^= xmm10
9205# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
9206# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
9207pxor %xmm10,%xmm12
9208
9209# qhasm: xmm10 ^= xmm2
9210# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
9211# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
9212pxor %xmm2,%xmm10
9213
9214# qhasm: xmm7 ^= xmm5
9215# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
9216# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
9217pxor %xmm7,%xmm5
9218
9219# qhasm: xmm6 ^= xmm1
9220# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
9221# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
9222pxor %xmm4,%xmm3
9223
9224# qhasm: xmm3 = xmm7
9225# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
9226# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
9227movdqa %xmm5,%xmm2
9228
9229# qhasm: xmm3 ^= xmm6
9230# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
9231# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
9232pxor %xmm3,%xmm2
9233
9234# qhasm: xmm3 &= xmm15
9235# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
9236# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
9237pand %xmm15,%xmm2
9238
9239# qhasm: xmm15 ^= xmm9
9240# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
9241# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
9242pxor %xmm9,%xmm15
9243
9244# qhasm: xmm15 &= xmm6
9245# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
9246# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
9247pand %xmm3,%xmm15
9248
9249# qhasm: xmm9 &= xmm7
9250# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
9251# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
9252pand %xmm5,%xmm9
9253
9254# qhasm: xmm15 ^= xmm9
9255# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
9256# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
9257pxor %xmm9,%xmm15
9258
9259# qhasm: xmm9 ^= xmm3
9260# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
9261# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
9262pxor %xmm2,%xmm9
9263
9264# qhasm: xmm15 ^= xmm4
9265# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
9266# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
9267pxor %xmm0,%xmm15
9268
9269# qhasm: xmm12 ^= xmm4
9270# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
9271# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
9272pxor %xmm0,%xmm12
9273
9274# qhasm: xmm9 ^= xmm0
9275# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
9276# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
9277pxor %xmm1,%xmm9
9278
9279# qhasm: xmm10 ^= xmm0
9280# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
9281# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
9282pxor %xmm1,%xmm10
9283
9284# qhasm: xmm15 ^= xmm8
9285# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
9286# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
9287pxor %xmm8,%xmm15
9288
9289# qhasm: xmm9 ^= xmm14
9290# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
9291# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
9292pxor %xmm14,%xmm9
9293
9294# qhasm: xmm12 ^= xmm15
9295# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
9296# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
9297pxor %xmm15,%xmm12
9298
9299# qhasm: xmm14 ^= xmm8
9300# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
9301# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
9302pxor %xmm8,%xmm14
9303
9304# qhasm: xmm8 ^= xmm9
9305# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
9306# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
9307pxor %xmm9,%xmm8
9308
9309# qhasm: xmm9 ^= xmm13
9310# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
9311# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
9312pxor %xmm13,%xmm9
9313
9314# qhasm: xmm13 ^= xmm10
9315# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
9316# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
9317pxor %xmm10,%xmm13
9318
9319# qhasm: xmm12 ^= xmm13
9320# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
9321# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
9322pxor %xmm13,%xmm12
9323
9324# qhasm: xmm10 ^= xmm11
9325# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
9326# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
9327pxor %xmm11,%xmm10
9328
9329# qhasm: xmm11 ^= xmm13
9330# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
9331# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
9332pxor %xmm13,%xmm11
9333
9334# qhasm: xmm14 ^= xmm11
9335# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
9336# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
9337pxor %xmm11,%xmm14
9338
9339# qhasm: xmm0 = shuffle dwords of xmm8 by 0x93
9340# asm 1: pshufd $0x93,<xmm8=int6464#9,>xmm0=int6464#1
9341# asm 2: pshufd $0x93,<xmm8=%xmm8,>xmm0=%xmm0
9342pshufd $0x93,%xmm8,%xmm0
9343
9344# qhasm: xmm1 = shuffle dwords of xmm9 by 0x93
9345# asm 1: pshufd $0x93,<xmm9=int6464#10,>xmm1=int6464#2
9346# asm 2: pshufd $0x93,<xmm9=%xmm9,>xmm1=%xmm1
9347pshufd $0x93,%xmm9,%xmm1
9348
9349# qhasm: xmm2 = shuffle dwords of xmm12 by 0x93
9350# asm 1: pshufd $0x93,<xmm12=int6464#13,>xmm2=int6464#3
9351# asm 2: pshufd $0x93,<xmm12=%xmm12,>xmm2=%xmm2
9352pshufd $0x93,%xmm12,%xmm2
9353
9354# qhasm: xmm3 = shuffle dwords of xmm14 by 0x93
9355# asm 1: pshufd $0x93,<xmm14=int6464#15,>xmm3=int6464#4
9356# asm 2: pshufd $0x93,<xmm14=%xmm14,>xmm3=%xmm3
9357pshufd $0x93,%xmm14,%xmm3
9358
9359# qhasm: xmm4 = shuffle dwords of xmm11 by 0x93
9360# asm 1: pshufd $0x93,<xmm11=int6464#12,>xmm4=int6464#5
9361# asm 2: pshufd $0x93,<xmm11=%xmm11,>xmm4=%xmm4
9362pshufd $0x93,%xmm11,%xmm4
9363
9364# qhasm: xmm5 = shuffle dwords of xmm15 by 0x93
9365# asm 1: pshufd $0x93,<xmm15=int6464#16,>xmm5=int6464#6
9366# asm 2: pshufd $0x93,<xmm15=%xmm15,>xmm5=%xmm5
9367pshufd $0x93,%xmm15,%xmm5
9368
9369# qhasm: xmm6 = shuffle dwords of xmm10 by 0x93
9370# asm 1: pshufd $0x93,<xmm10=int6464#11,>xmm6=int6464#7
9371# asm 2: pshufd $0x93,<xmm10=%xmm10,>xmm6=%xmm6
9372pshufd $0x93,%xmm10,%xmm6
9373
9374# qhasm: xmm7 = shuffle dwords of xmm13 by 0x93
9375# asm 1: pshufd $0x93,<xmm13=int6464#14,>xmm7=int6464#8
9376# asm 2: pshufd $0x93,<xmm13=%xmm13,>xmm7=%xmm7
9377pshufd $0x93,%xmm13,%xmm7
9378
9379# qhasm: xmm8 ^= xmm0
9380# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
9381# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
9382pxor %xmm0,%xmm8
9383
9384# qhasm: xmm9 ^= xmm1
9385# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
9386# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
9387pxor %xmm1,%xmm9
9388
9389# qhasm: xmm12 ^= xmm2
9390# asm 1: pxor <xmm2=int6464#3,<xmm12=int6464#13
9391# asm 2: pxor <xmm2=%xmm2,<xmm12=%xmm12
9392pxor %xmm2,%xmm12
9393
9394# qhasm: xmm14 ^= xmm3
9395# asm 1: pxor <xmm3=int6464#4,<xmm14=int6464#15
9396# asm 2: pxor <xmm3=%xmm3,<xmm14=%xmm14
9397pxor %xmm3,%xmm14
9398
9399# qhasm: xmm11 ^= xmm4
9400# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
9401# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
9402pxor %xmm4,%xmm11
9403
9404# qhasm: xmm15 ^= xmm5
9405# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
9406# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
9407pxor %xmm5,%xmm15
9408
9409# qhasm: xmm10 ^= xmm6
9410# asm 1: pxor <xmm6=int6464#7,<xmm10=int6464#11
9411# asm 2: pxor <xmm6=%xmm6,<xmm10=%xmm10
9412pxor %xmm6,%xmm10
9413
9414# qhasm: xmm13 ^= xmm7
9415# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
9416# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
9417pxor %xmm7,%xmm13
9418
9419# qhasm: xmm0 ^= xmm13
9420# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
9421# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
9422pxor %xmm13,%xmm0
9423
9424# qhasm: xmm1 ^= xmm8
9425# asm 1: pxor <xmm8=int6464#9,<xmm1=int6464#2
9426# asm 2: pxor <xmm8=%xmm8,<xmm1=%xmm1
9427pxor %xmm8,%xmm1
9428
9429# qhasm: xmm2 ^= xmm9
9430# asm 1: pxor <xmm9=int6464#10,<xmm2=int6464#3
9431# asm 2: pxor <xmm9=%xmm9,<xmm2=%xmm2
9432pxor %xmm9,%xmm2
9433
9434# qhasm: xmm1 ^= xmm13
9435# asm 1: pxor <xmm13=int6464#14,<xmm1=int6464#2
9436# asm 2: pxor <xmm13=%xmm13,<xmm1=%xmm1
9437pxor %xmm13,%xmm1
9438
9439# qhasm: xmm3 ^= xmm12
9440# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
9441# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
9442pxor %xmm12,%xmm3
9443
9444# qhasm: xmm4 ^= xmm14
9445# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#5
9446# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm4
9447pxor %xmm14,%xmm4
9448
9449# qhasm: xmm5 ^= xmm11
9450# asm 1: pxor <xmm11=int6464#12,<xmm5=int6464#6
9451# asm 2: pxor <xmm11=%xmm11,<xmm5=%xmm5
9452pxor %xmm11,%xmm5
9453
9454# qhasm: xmm3 ^= xmm13
9455# asm 1: pxor <xmm13=int6464#14,<xmm3=int6464#4
9456# asm 2: pxor <xmm13=%xmm13,<xmm3=%xmm3
9457pxor %xmm13,%xmm3
9458
9459# qhasm: xmm6 ^= xmm15
9460# asm 1: pxor <xmm15=int6464#16,<xmm6=int6464#7
9461# asm 2: pxor <xmm15=%xmm15,<xmm6=%xmm6
9462pxor %xmm15,%xmm6
9463
9464# qhasm: xmm7 ^= xmm10
9465# asm 1: pxor <xmm10=int6464#11,<xmm7=int6464#8
9466# asm 2: pxor <xmm10=%xmm10,<xmm7=%xmm7
9467pxor %xmm10,%xmm7
9468
9469# qhasm: xmm4 ^= xmm13
9470# asm 1: pxor <xmm13=int6464#14,<xmm4=int6464#5
9471# asm 2: pxor <xmm13=%xmm13,<xmm4=%xmm4
9472pxor %xmm13,%xmm4
9473
9474# qhasm: xmm8 = shuffle dwords of xmm8 by 0x4E
9475# asm 1: pshufd $0x4E,<xmm8=int6464#9,>xmm8=int6464#9
9476# asm 2: pshufd $0x4E,<xmm8=%xmm8,>xmm8=%xmm8
9477pshufd $0x4E,%xmm8,%xmm8
9478
9479# qhasm: xmm9 = shuffle dwords of xmm9 by 0x4E
9480# asm 1: pshufd $0x4E,<xmm9=int6464#10,>xmm9=int6464#10
9481# asm 2: pshufd $0x4E,<xmm9=%xmm9,>xmm9=%xmm9
9482pshufd $0x4E,%xmm9,%xmm9
9483
9484# qhasm: xmm12 = shuffle dwords of xmm12 by 0x4E
9485# asm 1: pshufd $0x4E,<xmm12=int6464#13,>xmm12=int6464#13
9486# asm 2: pshufd $0x4E,<xmm12=%xmm12,>xmm12=%xmm12
9487pshufd $0x4E,%xmm12,%xmm12
9488
9489# qhasm: xmm14 = shuffle dwords of xmm14 by 0x4E
9490# asm 1: pshufd $0x4E,<xmm14=int6464#15,>xmm14=int6464#15
9491# asm 2: pshufd $0x4E,<xmm14=%xmm14,>xmm14=%xmm14
9492pshufd $0x4E,%xmm14,%xmm14
9493
9494# qhasm: xmm11 = shuffle dwords of xmm11 by 0x4E
9495# asm 1: pshufd $0x4E,<xmm11=int6464#12,>xmm11=int6464#12
9496# asm 2: pshufd $0x4E,<xmm11=%xmm11,>xmm11=%xmm11
9497pshufd $0x4E,%xmm11,%xmm11
9498
9499# qhasm: xmm15 = shuffle dwords of xmm15 by 0x4E
9500# asm 1: pshufd $0x4E,<xmm15=int6464#16,>xmm15=int6464#16
9501# asm 2: pshufd $0x4E,<xmm15=%xmm15,>xmm15=%xmm15
9502pshufd $0x4E,%xmm15,%xmm15
9503
9504# qhasm: xmm10 = shuffle dwords of xmm10 by 0x4E
9505# asm 1: pshufd $0x4E,<xmm10=int6464#11,>xmm10=int6464#11
9506# asm 2: pshufd $0x4E,<xmm10=%xmm10,>xmm10=%xmm10
9507pshufd $0x4E,%xmm10,%xmm10
9508
9509# qhasm: xmm13 = shuffle dwords of xmm13 by 0x4E
9510# asm 1: pshufd $0x4E,<xmm13=int6464#14,>xmm13=int6464#14
9511# asm 2: pshufd $0x4E,<xmm13=%xmm13,>xmm13=%xmm13
9512pshufd $0x4E,%xmm13,%xmm13
9513
9514# qhasm: xmm0 ^= xmm8
9515# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
9516# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
9517pxor %xmm8,%xmm0
9518
9519# qhasm: xmm1 ^= xmm9
9520# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
9521# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
9522pxor %xmm9,%xmm1
9523
9524# qhasm: xmm2 ^= xmm12
9525# asm 1: pxor <xmm12=int6464#13,<xmm2=int6464#3
9526# asm 2: pxor <xmm12=%xmm12,<xmm2=%xmm2
9527pxor %xmm12,%xmm2
9528
9529# qhasm: xmm3 ^= xmm14
9530# asm 1: pxor <xmm14=int6464#15,<xmm3=int6464#4
9531# asm 2: pxor <xmm14=%xmm14,<xmm3=%xmm3
9532pxor %xmm14,%xmm3
9533
9534# qhasm: xmm4 ^= xmm11
9535# asm 1: pxor <xmm11=int6464#12,<xmm4=int6464#5
9536# asm 2: pxor <xmm11=%xmm11,<xmm4=%xmm4
9537pxor %xmm11,%xmm4
9538
9539# qhasm: xmm5 ^= xmm15
9540# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
9541# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
9542pxor %xmm15,%xmm5
9543
9544# qhasm: xmm6 ^= xmm10
9545# asm 1: pxor <xmm10=int6464#11,<xmm6=int6464#7
9546# asm 2: pxor <xmm10=%xmm10,<xmm6=%xmm6
9547pxor %xmm10,%xmm6
9548
9549# qhasm: xmm7 ^= xmm13
9550# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
9551# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
9552pxor %xmm13,%xmm7
9553
9554# qhasm: xmm0 ^= *(int128 *)(c + 1024)
9555# asm 1: pxor 1024(<c=int64#5),<xmm0=int6464#1
9556# asm 2: pxor 1024(<c=%r8),<xmm0=%xmm0
9557pxor 1024(%r8),%xmm0
9558
9559# qhasm: shuffle bytes of xmm0 by SR
9560# asm 1: pshufb SR,<xmm0=int6464#1
9561# asm 2: pshufb SR,<xmm0=%xmm0
9562pshufb SR,%xmm0
9563
9564# qhasm: xmm1 ^= *(int128 *)(c + 1040)
9565# asm 1: pxor 1040(<c=int64#5),<xmm1=int6464#2
9566# asm 2: pxor 1040(<c=%r8),<xmm1=%xmm1
9567pxor 1040(%r8),%xmm1
9568
9569# qhasm: shuffle bytes of xmm1 by SR
9570# asm 1: pshufb SR,<xmm1=int6464#2
9571# asm 2: pshufb SR,<xmm1=%xmm1
9572pshufb SR,%xmm1
9573
9574# qhasm: xmm2 ^= *(int128 *)(c + 1056)
9575# asm 1: pxor 1056(<c=int64#5),<xmm2=int6464#3
9576# asm 2: pxor 1056(<c=%r8),<xmm2=%xmm2
9577pxor 1056(%r8),%xmm2
9578
9579# qhasm: shuffle bytes of xmm2 by SR
9580# asm 1: pshufb SR,<xmm2=int6464#3
9581# asm 2: pshufb SR,<xmm2=%xmm2
9582pshufb SR,%xmm2
9583
9584# qhasm: xmm3 ^= *(int128 *)(c + 1072)
9585# asm 1: pxor 1072(<c=int64#5),<xmm3=int6464#4
9586# asm 2: pxor 1072(<c=%r8),<xmm3=%xmm3
9587pxor 1072(%r8),%xmm3
9588
9589# qhasm: shuffle bytes of xmm3 by SR
9590# asm 1: pshufb SR,<xmm3=int6464#4
9591# asm 2: pshufb SR,<xmm3=%xmm3
9592pshufb SR,%xmm3
9593
9594# qhasm: xmm4 ^= *(int128 *)(c + 1088)
9595# asm 1: pxor 1088(<c=int64#5),<xmm4=int6464#5
9596# asm 2: pxor 1088(<c=%r8),<xmm4=%xmm4
9597pxor 1088(%r8),%xmm4
9598
9599# qhasm: shuffle bytes of xmm4 by SR
9600# asm 1: pshufb SR,<xmm4=int6464#5
9601# asm 2: pshufb SR,<xmm4=%xmm4
9602pshufb SR,%xmm4
9603
9604# qhasm: xmm5 ^= *(int128 *)(c + 1104)
9605# asm 1: pxor 1104(<c=int64#5),<xmm5=int6464#6
9606# asm 2: pxor 1104(<c=%r8),<xmm5=%xmm5
9607pxor 1104(%r8),%xmm5
9608
9609# qhasm: shuffle bytes of xmm5 by SR
9610# asm 1: pshufb SR,<xmm5=int6464#6
9611# asm 2: pshufb SR,<xmm5=%xmm5
9612pshufb SR,%xmm5
9613
9614# qhasm: xmm6 ^= *(int128 *)(c + 1120)
9615# asm 1: pxor 1120(<c=int64#5),<xmm6=int6464#7
9616# asm 2: pxor 1120(<c=%r8),<xmm6=%xmm6
9617pxor 1120(%r8),%xmm6
9618
9619# qhasm: shuffle bytes of xmm6 by SR
9620# asm 1: pshufb SR,<xmm6=int6464#7
9621# asm 2: pshufb SR,<xmm6=%xmm6
9622pshufb SR,%xmm6
9623
9624# qhasm: xmm7 ^= *(int128 *)(c + 1136)
9625# asm 1: pxor 1136(<c=int64#5),<xmm7=int6464#8
9626# asm 2: pxor 1136(<c=%r8),<xmm7=%xmm7
9627pxor 1136(%r8),%xmm7
9628
9629# qhasm: shuffle bytes of xmm7 by SR
9630# asm 1: pshufb SR,<xmm7=int6464#8
9631# asm 2: pshufb SR,<xmm7=%xmm7
9632pshufb SR,%xmm7
9633
9634# qhasm: xmm5 ^= xmm6
9635# asm 1: pxor <xmm6=int6464#7,<xmm5=int6464#6
9636# asm 2: pxor <xmm6=%xmm6,<xmm5=%xmm5
9637pxor %xmm6,%xmm5
9638
9639# qhasm: xmm2 ^= xmm1
9640# asm 1: pxor <xmm1=int6464#2,<xmm2=int6464#3
9641# asm 2: pxor <xmm1=%xmm1,<xmm2=%xmm2
9642pxor %xmm1,%xmm2
9643
9644# qhasm: xmm5 ^= xmm0
9645# asm 1: pxor <xmm0=int6464#1,<xmm5=int6464#6
9646# asm 2: pxor <xmm0=%xmm0,<xmm5=%xmm5
9647pxor %xmm0,%xmm5
9648
9649# qhasm: xmm6 ^= xmm2
9650# asm 1: pxor <xmm2=int6464#3,<xmm6=int6464#7
9651# asm 2: pxor <xmm2=%xmm2,<xmm6=%xmm6
9652pxor %xmm2,%xmm6
9653
9654# qhasm: xmm3 ^= xmm0
9655# asm 1: pxor <xmm0=int6464#1,<xmm3=int6464#4
9656# asm 2: pxor <xmm0=%xmm0,<xmm3=%xmm3
9657pxor %xmm0,%xmm3
9658
9659# qhasm: xmm6 ^= xmm3
9660# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
9661# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
9662pxor %xmm3,%xmm6
9663
9664# qhasm: xmm3 ^= xmm7
9665# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#4
9666# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm3
9667pxor %xmm7,%xmm3
9668
9669# qhasm: xmm3 ^= xmm4
9670# asm 1: pxor <xmm4=int6464#5,<xmm3=int6464#4
9671# asm 2: pxor <xmm4=%xmm4,<xmm3=%xmm3
9672pxor %xmm4,%xmm3
9673
9674# qhasm: xmm7 ^= xmm5
9675# asm 1: pxor <xmm5=int6464#6,<xmm7=int6464#8
9676# asm 2: pxor <xmm5=%xmm5,<xmm7=%xmm7
9677pxor %xmm5,%xmm7
9678
9679# qhasm: xmm3 ^= xmm1
9680# asm 1: pxor <xmm1=int6464#2,<xmm3=int6464#4
9681# asm 2: pxor <xmm1=%xmm1,<xmm3=%xmm3
9682pxor %xmm1,%xmm3
9683
9684# qhasm: xmm4 ^= xmm5
9685# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
9686# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
9687pxor %xmm5,%xmm4
9688
9689# qhasm: xmm2 ^= xmm7
9690# asm 1: pxor <xmm7=int6464#8,<xmm2=int6464#3
9691# asm 2: pxor <xmm7=%xmm7,<xmm2=%xmm2
9692pxor %xmm7,%xmm2
9693
9694# qhasm: xmm1 ^= xmm5
9695# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
9696# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
9697pxor %xmm5,%xmm1
9698
9699# qhasm: xmm11 = xmm7
9700# asm 1: movdqa <xmm7=int6464#8,>xmm11=int6464#9
9701# asm 2: movdqa <xmm7=%xmm7,>xmm11=%xmm8
9702movdqa %xmm7,%xmm8
9703
9704# qhasm: xmm10 = xmm1
9705# asm 1: movdqa <xmm1=int6464#2,>xmm10=int6464#10
9706# asm 2: movdqa <xmm1=%xmm1,>xmm10=%xmm9
9707movdqa %xmm1,%xmm9
9708
9709# qhasm: xmm9 = xmm5
9710# asm 1: movdqa <xmm5=int6464#6,>xmm9=int6464#11
9711# asm 2: movdqa <xmm5=%xmm5,>xmm9=%xmm10
9712movdqa %xmm5,%xmm10
9713
9714# qhasm: xmm13 = xmm2
9715# asm 1: movdqa <xmm2=int6464#3,>xmm13=int6464#12
9716# asm 2: movdqa <xmm2=%xmm2,>xmm13=%xmm11
9717movdqa %xmm2,%xmm11
9718
9719# qhasm: xmm12 = xmm6
9720# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#13
9721# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm12
9722movdqa %xmm6,%xmm12
9723
9724# qhasm: xmm11 ^= xmm4
9725# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#9
9726# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm8
9727pxor %xmm4,%xmm8
9728
9729# qhasm: xmm10 ^= xmm2
9730# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#10
9731# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm9
9732pxor %xmm2,%xmm9
9733
9734# qhasm: xmm9 ^= xmm3
9735# asm 1: pxor <xmm3=int6464#4,<xmm9=int6464#11
9736# asm 2: pxor <xmm3=%xmm3,<xmm9=%xmm10
9737pxor %xmm3,%xmm10
9738
9739# qhasm: xmm13 ^= xmm4
9740# asm 1: pxor <xmm4=int6464#5,<xmm13=int6464#12
9741# asm 2: pxor <xmm4=%xmm4,<xmm13=%xmm11
9742pxor %xmm4,%xmm11
9743
9744# qhasm: xmm12 ^= xmm0
9745# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
9746# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
9747pxor %xmm0,%xmm12
9748
9749# qhasm: xmm14 = xmm11
9750# asm 1: movdqa <xmm11=int6464#9,>xmm14=int6464#14
9751# asm 2: movdqa <xmm11=%xmm8,>xmm14=%xmm13
9752movdqa %xmm8,%xmm13
9753
9754# qhasm: xmm8 = xmm10
9755# asm 1: movdqa <xmm10=int6464#10,>xmm8=int6464#15
9756# asm 2: movdqa <xmm10=%xmm9,>xmm8=%xmm14
9757movdqa %xmm9,%xmm14
9758
9759# qhasm: xmm15 = xmm11
9760# asm 1: movdqa <xmm11=int6464#9,>xmm15=int6464#16
9761# asm 2: movdqa <xmm11=%xmm8,>xmm15=%xmm15
9762movdqa %xmm8,%xmm15
9763
9764# qhasm: xmm10 |= xmm9
9765# asm 1: por <xmm9=int6464#11,<xmm10=int6464#10
9766# asm 2: por <xmm9=%xmm10,<xmm10=%xmm9
9767por %xmm10,%xmm9
9768
9769# qhasm: xmm11 |= xmm12
9770# asm 1: por <xmm12=int6464#13,<xmm11=int6464#9
9771# asm 2: por <xmm12=%xmm12,<xmm11=%xmm8
9772por %xmm12,%xmm8
9773
9774# qhasm: xmm15 ^= xmm8
9775# asm 1: pxor <xmm8=int6464#15,<xmm15=int6464#16
9776# asm 2: pxor <xmm8=%xmm14,<xmm15=%xmm15
9777pxor %xmm14,%xmm15
9778
9779# qhasm: xmm14 &= xmm12
9780# asm 1: pand <xmm12=int6464#13,<xmm14=int6464#14
9781# asm 2: pand <xmm12=%xmm12,<xmm14=%xmm13
9782pand %xmm12,%xmm13
9783
9784# qhasm: xmm8 &= xmm9
9785# asm 1: pand <xmm9=int6464#11,<xmm8=int6464#15
9786# asm 2: pand <xmm9=%xmm10,<xmm8=%xmm14
9787pand %xmm10,%xmm14
9788
9789# qhasm: xmm12 ^= xmm9
9790# asm 1: pxor <xmm9=int6464#11,<xmm12=int6464#13
9791# asm 2: pxor <xmm9=%xmm10,<xmm12=%xmm12
9792pxor %xmm10,%xmm12
9793
9794# qhasm: xmm15 &= xmm12
9795# asm 1: pand <xmm12=int6464#13,<xmm15=int6464#16
9796# asm 2: pand <xmm12=%xmm12,<xmm15=%xmm15
9797pand %xmm12,%xmm15
9798
9799# qhasm: xmm12 = xmm3
9800# asm 1: movdqa <xmm3=int6464#4,>xmm12=int6464#11
9801# asm 2: movdqa <xmm3=%xmm3,>xmm12=%xmm10
9802movdqa %xmm3,%xmm10
9803
9804# qhasm: xmm12 ^= xmm0
9805# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#11
9806# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm10
9807pxor %xmm0,%xmm10
9808
9809# qhasm: xmm13 &= xmm12
9810# asm 1: pand <xmm12=int6464#11,<xmm13=int6464#12
9811# asm 2: pand <xmm12=%xmm10,<xmm13=%xmm11
9812pand %xmm10,%xmm11
9813
9814# qhasm: xmm11 ^= xmm13
9815# asm 1: pxor <xmm13=int6464#12,<xmm11=int6464#9
9816# asm 2: pxor <xmm13=%xmm11,<xmm11=%xmm8
9817pxor %xmm11,%xmm8
9818
9819# qhasm: xmm10 ^= xmm13
9820# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
9821# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
9822pxor %xmm11,%xmm9
9823
9824# qhasm: xmm13 = xmm7
9825# asm 1: movdqa <xmm7=int6464#8,>xmm13=int6464#11
9826# asm 2: movdqa <xmm7=%xmm7,>xmm13=%xmm10
9827movdqa %xmm7,%xmm10
9828
9829# qhasm: xmm13 ^= xmm1
9830# asm 1: pxor <xmm1=int6464#2,<xmm13=int6464#11
9831# asm 2: pxor <xmm1=%xmm1,<xmm13=%xmm10
9832pxor %xmm1,%xmm10
9833
9834# qhasm: xmm12 = xmm5
9835# asm 1: movdqa <xmm5=int6464#6,>xmm12=int6464#12
9836# asm 2: movdqa <xmm5=%xmm5,>xmm12=%xmm11
9837movdqa %xmm5,%xmm11
9838
9839# qhasm: xmm9 = xmm13
9840# asm 1: movdqa <xmm13=int6464#11,>xmm9=int6464#13
9841# asm 2: movdqa <xmm13=%xmm10,>xmm9=%xmm12
9842movdqa %xmm10,%xmm12
9843
9844# qhasm: xmm12 ^= xmm6
9845# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#12
9846# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm11
9847pxor %xmm6,%xmm11
9848
9849# qhasm: xmm9 |= xmm12
9850# asm 1: por <xmm12=int6464#12,<xmm9=int6464#13
9851# asm 2: por <xmm12=%xmm11,<xmm9=%xmm12
9852por %xmm11,%xmm12
9853
9854# qhasm: xmm13 &= xmm12
9855# asm 1: pand <xmm12=int6464#12,<xmm13=int6464#11
9856# asm 2: pand <xmm12=%xmm11,<xmm13=%xmm10
9857pand %xmm11,%xmm10
9858
9859# qhasm: xmm8 ^= xmm13
9860# asm 1: pxor <xmm13=int6464#11,<xmm8=int6464#15
9861# asm 2: pxor <xmm13=%xmm10,<xmm8=%xmm14
9862pxor %xmm10,%xmm14
9863
9864# qhasm: xmm11 ^= xmm15
9865# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#9
9866# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm8
9867pxor %xmm15,%xmm8
9868
9869# qhasm: xmm10 ^= xmm14
9870# asm 1: pxor <xmm14=int6464#14,<xmm10=int6464#10
9871# asm 2: pxor <xmm14=%xmm13,<xmm10=%xmm9
9872pxor %xmm13,%xmm9
9873
9874# qhasm: xmm9 ^= xmm15
9875# asm 1: pxor <xmm15=int6464#16,<xmm9=int6464#13
9876# asm 2: pxor <xmm15=%xmm15,<xmm9=%xmm12
9877pxor %xmm15,%xmm12
9878
9879# qhasm: xmm8 ^= xmm14
9880# asm 1: pxor <xmm14=int6464#14,<xmm8=int6464#15
9881# asm 2: pxor <xmm14=%xmm13,<xmm8=%xmm14
9882pxor %xmm13,%xmm14
9883
9884# qhasm: xmm9 ^= xmm14
9885# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
9886# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
9887pxor %xmm13,%xmm12
9888
9889# qhasm: xmm12 = xmm2
9890# asm 1: movdqa <xmm2=int6464#3,>xmm12=int6464#11
9891# asm 2: movdqa <xmm2=%xmm2,>xmm12=%xmm10
9892movdqa %xmm2,%xmm10
9893
9894# qhasm: xmm13 = xmm4
9895# asm 1: movdqa <xmm4=int6464#5,>xmm13=int6464#12
9896# asm 2: movdqa <xmm4=%xmm4,>xmm13=%xmm11
9897movdqa %xmm4,%xmm11
9898
9899# qhasm: xmm14 = xmm1
9900# asm 1: movdqa <xmm1=int6464#2,>xmm14=int6464#14
9901# asm 2: movdqa <xmm1=%xmm1,>xmm14=%xmm13
9902movdqa %xmm1,%xmm13
9903
9904# qhasm: xmm15 = xmm7
9905# asm 1: movdqa <xmm7=int6464#8,>xmm15=int6464#16
9906# asm 2: movdqa <xmm7=%xmm7,>xmm15=%xmm15
9907movdqa %xmm7,%xmm15
9908
9909# qhasm: xmm12 &= xmm3
9910# asm 1: pand <xmm3=int6464#4,<xmm12=int6464#11
9911# asm 2: pand <xmm3=%xmm3,<xmm12=%xmm10
9912pand %xmm3,%xmm10
9913
9914# qhasm: xmm13 &= xmm0
9915# asm 1: pand <xmm0=int6464#1,<xmm13=int6464#12
9916# asm 2: pand <xmm0=%xmm0,<xmm13=%xmm11
9917pand %xmm0,%xmm11
9918
9919# qhasm: xmm14 &= xmm5
9920# asm 1: pand <xmm5=int6464#6,<xmm14=int6464#14
9921# asm 2: pand <xmm5=%xmm5,<xmm14=%xmm13
9922pand %xmm5,%xmm13
9923
9924# qhasm: xmm15 |= xmm6
9925# asm 1: por <xmm6=int6464#7,<xmm15=int6464#16
9926# asm 2: por <xmm6=%xmm6,<xmm15=%xmm15
9927por %xmm6,%xmm15
9928
9929# qhasm: xmm11 ^= xmm12
9930# asm 1: pxor <xmm12=int6464#11,<xmm11=int6464#9
9931# asm 2: pxor <xmm12=%xmm10,<xmm11=%xmm8
9932pxor %xmm10,%xmm8
9933
9934# qhasm: xmm10 ^= xmm13
9935# asm 1: pxor <xmm13=int6464#12,<xmm10=int6464#10
9936# asm 2: pxor <xmm13=%xmm11,<xmm10=%xmm9
9937pxor %xmm11,%xmm9
9938
9939# qhasm: xmm9 ^= xmm14
9940# asm 1: pxor <xmm14=int6464#14,<xmm9=int6464#13
9941# asm 2: pxor <xmm14=%xmm13,<xmm9=%xmm12
9942pxor %xmm13,%xmm12
9943
9944# qhasm: xmm8 ^= xmm15
9945# asm 1: pxor <xmm15=int6464#16,<xmm8=int6464#15
9946# asm 2: pxor <xmm15=%xmm15,<xmm8=%xmm14
9947pxor %xmm15,%xmm14
9948
9949# qhasm: xmm12 = xmm11
9950# asm 1: movdqa <xmm11=int6464#9,>xmm12=int6464#11
9951# asm 2: movdqa <xmm11=%xmm8,>xmm12=%xmm10
9952movdqa %xmm8,%xmm10
9953
9954# qhasm: xmm12 ^= xmm10
9955# asm 1: pxor <xmm10=int6464#10,<xmm12=int6464#11
9956# asm 2: pxor <xmm10=%xmm9,<xmm12=%xmm10
9957pxor %xmm9,%xmm10
9958
9959# qhasm: xmm11 &= xmm9
9960# asm 1: pand <xmm9=int6464#13,<xmm11=int6464#9
9961# asm 2: pand <xmm9=%xmm12,<xmm11=%xmm8
9962pand %xmm12,%xmm8
9963
9964# qhasm: xmm14 = xmm8
9965# asm 1: movdqa <xmm8=int6464#15,>xmm14=int6464#12
9966# asm 2: movdqa <xmm8=%xmm14,>xmm14=%xmm11
9967movdqa %xmm14,%xmm11
9968
9969# qhasm: xmm14 ^= xmm11
9970# asm 1: pxor <xmm11=int6464#9,<xmm14=int6464#12
9971# asm 2: pxor <xmm11=%xmm8,<xmm14=%xmm11
9972pxor %xmm8,%xmm11
9973
9974# qhasm: xmm15 = xmm12
9975# asm 1: movdqa <xmm12=int6464#11,>xmm15=int6464#14
9976# asm 2: movdqa <xmm12=%xmm10,>xmm15=%xmm13
9977movdqa %xmm10,%xmm13
9978
9979# qhasm: xmm15 &= xmm14
9980# asm 1: pand <xmm14=int6464#12,<xmm15=int6464#14
9981# asm 2: pand <xmm14=%xmm11,<xmm15=%xmm13
9982pand %xmm11,%xmm13
9983
9984# qhasm: xmm15 ^= xmm10
9985# asm 1: pxor <xmm10=int6464#10,<xmm15=int6464#14
9986# asm 2: pxor <xmm10=%xmm9,<xmm15=%xmm13
9987pxor %xmm9,%xmm13
9988
9989# qhasm: xmm13 = xmm9
9990# asm 1: movdqa <xmm9=int6464#13,>xmm13=int6464#16
9991# asm 2: movdqa <xmm9=%xmm12,>xmm13=%xmm15
9992movdqa %xmm12,%xmm15
9993
9994# qhasm: xmm13 ^= xmm8
9995# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
9996# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
9997pxor %xmm14,%xmm15
9998
9999# qhasm: xmm11 ^= xmm10
10000# asm 1: pxor <xmm10=int6464#10,<xmm11=int6464#9
10001# asm 2: pxor <xmm10=%xmm9,<xmm11=%xmm8
10002pxor %xmm9,%xmm8
10003
10004# qhasm: xmm13 &= xmm11
10005# asm 1: pand <xmm11=int6464#9,<xmm13=int6464#16
10006# asm 2: pand <xmm11=%xmm8,<xmm13=%xmm15
10007pand %xmm8,%xmm15
10008
10009# qhasm: xmm13 ^= xmm8
10010# asm 1: pxor <xmm8=int6464#15,<xmm13=int6464#16
10011# asm 2: pxor <xmm8=%xmm14,<xmm13=%xmm15
10012pxor %xmm14,%xmm15
10013
10014# qhasm: xmm9 ^= xmm13
10015# asm 1: pxor <xmm13=int6464#16,<xmm9=int6464#13
10016# asm 2: pxor <xmm13=%xmm15,<xmm9=%xmm12
10017pxor %xmm15,%xmm12
10018
10019# qhasm: xmm10 = xmm14
10020# asm 1: movdqa <xmm14=int6464#12,>xmm10=int6464#9
10021# asm 2: movdqa <xmm14=%xmm11,>xmm10=%xmm8
10022movdqa %xmm11,%xmm8
10023
10024# qhasm: xmm10 ^= xmm13
10025# asm 1: pxor <xmm13=int6464#16,<xmm10=int6464#9
10026# asm 2: pxor <xmm13=%xmm15,<xmm10=%xmm8
10027pxor %xmm15,%xmm8
10028
10029# qhasm: xmm10 &= xmm8
10030# asm 1: pand <xmm8=int6464#15,<xmm10=int6464#9
10031# asm 2: pand <xmm8=%xmm14,<xmm10=%xmm8
10032pand %xmm14,%xmm8
10033
10034# qhasm: xmm9 ^= xmm10
10035# asm 1: pxor <xmm10=int6464#9,<xmm9=int6464#13
10036# asm 2: pxor <xmm10=%xmm8,<xmm9=%xmm12
10037pxor %xmm8,%xmm12
10038
10039# qhasm: xmm14 ^= xmm10
10040# asm 1: pxor <xmm10=int6464#9,<xmm14=int6464#12
10041# asm 2: pxor <xmm10=%xmm8,<xmm14=%xmm11
10042pxor %xmm8,%xmm11
10043
10044# qhasm: xmm14 &= xmm15
10045# asm 1: pand <xmm15=int6464#14,<xmm14=int6464#12
10046# asm 2: pand <xmm15=%xmm13,<xmm14=%xmm11
10047pand %xmm13,%xmm11
10048
10049# qhasm: xmm14 ^= xmm12
10050# asm 1: pxor <xmm12=int6464#11,<xmm14=int6464#12
10051# asm 2: pxor <xmm12=%xmm10,<xmm14=%xmm11
10052pxor %xmm10,%xmm11
10053
10054# qhasm: xmm12 = xmm6
10055# asm 1: movdqa <xmm6=int6464#7,>xmm12=int6464#9
10056# asm 2: movdqa <xmm6=%xmm6,>xmm12=%xmm8
10057movdqa %xmm6,%xmm8
10058
10059# qhasm: xmm8 = xmm5
10060# asm 1: movdqa <xmm5=int6464#6,>xmm8=int6464#10
10061# asm 2: movdqa <xmm5=%xmm5,>xmm8=%xmm9
10062movdqa %xmm5,%xmm9
10063
10064# qhasm: xmm10 = xmm15
10065# asm 1: movdqa <xmm15=int6464#14,>xmm10=int6464#11
10066# asm 2: movdqa <xmm15=%xmm13,>xmm10=%xmm10
10067movdqa %xmm13,%xmm10
10068
10069# qhasm: xmm10 ^= xmm14
10070# asm 1: pxor <xmm14=int6464#12,<xmm10=int6464#11
10071# asm 2: pxor <xmm14=%xmm11,<xmm10=%xmm10
10072pxor %xmm11,%xmm10
10073
10074# qhasm: xmm10 &= xmm6
10075# asm 1: pand <xmm6=int6464#7,<xmm10=int6464#11
10076# asm 2: pand <xmm6=%xmm6,<xmm10=%xmm10
10077pand %xmm6,%xmm10
10078
10079# qhasm: xmm6 ^= xmm5
10080# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
10081# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
10082pxor %xmm5,%xmm6
10083
10084# qhasm: xmm6 &= xmm14
10085# asm 1: pand <xmm14=int6464#12,<xmm6=int6464#7
10086# asm 2: pand <xmm14=%xmm11,<xmm6=%xmm6
10087pand %xmm11,%xmm6
10088
10089# qhasm: xmm5 &= xmm15
10090# asm 1: pand <xmm15=int6464#14,<xmm5=int6464#6
10091# asm 2: pand <xmm15=%xmm13,<xmm5=%xmm5
10092pand %xmm13,%xmm5
10093
10094# qhasm: xmm6 ^= xmm5
10095# asm 1: pxor <xmm5=int6464#6,<xmm6=int6464#7
10096# asm 2: pxor <xmm5=%xmm5,<xmm6=%xmm6
10097pxor %xmm5,%xmm6
10098
10099# qhasm: xmm5 ^= xmm10
10100# asm 1: pxor <xmm10=int6464#11,<xmm5=int6464#6
10101# asm 2: pxor <xmm10=%xmm10,<xmm5=%xmm5
10102pxor %xmm10,%xmm5
10103
10104# qhasm: xmm12 ^= xmm0
10105# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#9
10106# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm8
10107pxor %xmm0,%xmm8
10108
10109# qhasm: xmm8 ^= xmm3
10110# asm 1: pxor <xmm3=int6464#4,<xmm8=int6464#10
10111# asm 2: pxor <xmm3=%xmm3,<xmm8=%xmm9
10112pxor %xmm3,%xmm9
10113
10114# qhasm: xmm15 ^= xmm13
10115# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10116# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10117pxor %xmm15,%xmm13
10118
10119# qhasm: xmm14 ^= xmm9
10120# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10121# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10122pxor %xmm12,%xmm11
10123
10124# qhasm: xmm11 = xmm15
10125# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10126# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10127movdqa %xmm13,%xmm10
10128
10129# qhasm: xmm11 ^= xmm14
10130# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10131# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10132pxor %xmm11,%xmm10
10133
10134# qhasm: xmm11 &= xmm12
10135# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10136# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10137pand %xmm8,%xmm10
10138
10139# qhasm: xmm12 ^= xmm8
10140# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10141# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10142pxor %xmm9,%xmm8
10143
10144# qhasm: xmm12 &= xmm14
10145# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10146# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10147pand %xmm11,%xmm8
10148
10149# qhasm: xmm8 &= xmm15
10150# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10151# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10152pand %xmm13,%xmm9
10153
10154# qhasm: xmm8 ^= xmm12
10155# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10156# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10157pxor %xmm8,%xmm9
10158
10159# qhasm: xmm12 ^= xmm11
10160# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10161# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10162pxor %xmm10,%xmm8
10163
10164# qhasm: xmm10 = xmm13
10165# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10166# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10167movdqa %xmm15,%xmm10
10168
10169# qhasm: xmm10 ^= xmm9
10170# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10171# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10172pxor %xmm12,%xmm10
10173
10174# qhasm: xmm10 &= xmm0
10175# asm 1: pand <xmm0=int6464#1,<xmm10=int6464#11
10176# asm 2: pand <xmm0=%xmm0,<xmm10=%xmm10
10177pand %xmm0,%xmm10
10178
10179# qhasm: xmm0 ^= xmm3
10180# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
10181# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
10182pxor %xmm3,%xmm0
10183
10184# qhasm: xmm0 &= xmm9
10185# asm 1: pand <xmm9=int6464#13,<xmm0=int6464#1
10186# asm 2: pand <xmm9=%xmm12,<xmm0=%xmm0
10187pand %xmm12,%xmm0
10188
10189# qhasm: xmm3 &= xmm13
10190# asm 1: pand <xmm13=int6464#16,<xmm3=int6464#4
10191# asm 2: pand <xmm13=%xmm15,<xmm3=%xmm3
10192pand %xmm15,%xmm3
10193
10194# qhasm: xmm0 ^= xmm3
10195# asm 1: pxor <xmm3=int6464#4,<xmm0=int6464#1
10196# asm 2: pxor <xmm3=%xmm3,<xmm0=%xmm0
10197pxor %xmm3,%xmm0
10198
10199# qhasm: xmm3 ^= xmm10
10200# asm 1: pxor <xmm10=int6464#11,<xmm3=int6464#4
10201# asm 2: pxor <xmm10=%xmm10,<xmm3=%xmm3
10202pxor %xmm10,%xmm3
10203
10204# qhasm: xmm6 ^= xmm12
10205# asm 1: pxor <xmm12=int6464#9,<xmm6=int6464#7
10206# asm 2: pxor <xmm12=%xmm8,<xmm6=%xmm6
10207pxor %xmm8,%xmm6
10208
10209# qhasm: xmm0 ^= xmm12
10210# asm 1: pxor <xmm12=int6464#9,<xmm0=int6464#1
10211# asm 2: pxor <xmm12=%xmm8,<xmm0=%xmm0
10212pxor %xmm8,%xmm0
10213
10214# qhasm: xmm5 ^= xmm8
10215# asm 1: pxor <xmm8=int6464#10,<xmm5=int6464#6
10216# asm 2: pxor <xmm8=%xmm9,<xmm5=%xmm5
10217pxor %xmm9,%xmm5
10218
10219# qhasm: xmm3 ^= xmm8
10220# asm 1: pxor <xmm8=int6464#10,<xmm3=int6464#4
10221# asm 2: pxor <xmm8=%xmm9,<xmm3=%xmm3
10222pxor %xmm9,%xmm3
10223
10224# qhasm: xmm12 = xmm7
10225# asm 1: movdqa <xmm7=int6464#8,>xmm12=int6464#9
10226# asm 2: movdqa <xmm7=%xmm7,>xmm12=%xmm8
10227movdqa %xmm7,%xmm8
10228
10229# qhasm: xmm8 = xmm1
10230# asm 1: movdqa <xmm1=int6464#2,>xmm8=int6464#10
10231# asm 2: movdqa <xmm1=%xmm1,>xmm8=%xmm9
10232movdqa %xmm1,%xmm9
10233
10234# qhasm: xmm12 ^= xmm4
10235# asm 1: pxor <xmm4=int6464#5,<xmm12=int6464#9
10236# asm 2: pxor <xmm4=%xmm4,<xmm12=%xmm8
10237pxor %xmm4,%xmm8
10238
10239# qhasm: xmm8 ^= xmm2
10240# asm 1: pxor <xmm2=int6464#3,<xmm8=int6464#10
10241# asm 2: pxor <xmm2=%xmm2,<xmm8=%xmm9
10242pxor %xmm2,%xmm9
10243
10244# qhasm: xmm11 = xmm15
10245# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10246# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10247movdqa %xmm13,%xmm10
10248
10249# qhasm: xmm11 ^= xmm14
10250# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10251# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10252pxor %xmm11,%xmm10
10253
10254# qhasm: xmm11 &= xmm12
10255# asm 1: pand <xmm12=int6464#9,<xmm11=int6464#11
10256# asm 2: pand <xmm12=%xmm8,<xmm11=%xmm10
10257pand %xmm8,%xmm10
10258
10259# qhasm: xmm12 ^= xmm8
10260# asm 1: pxor <xmm8=int6464#10,<xmm12=int6464#9
10261# asm 2: pxor <xmm8=%xmm9,<xmm12=%xmm8
10262pxor %xmm9,%xmm8
10263
10264# qhasm: xmm12 &= xmm14
10265# asm 1: pand <xmm14=int6464#12,<xmm12=int6464#9
10266# asm 2: pand <xmm14=%xmm11,<xmm12=%xmm8
10267pand %xmm11,%xmm8
10268
10269# qhasm: xmm8 &= xmm15
10270# asm 1: pand <xmm15=int6464#14,<xmm8=int6464#10
10271# asm 2: pand <xmm15=%xmm13,<xmm8=%xmm9
10272pand %xmm13,%xmm9
10273
10274# qhasm: xmm8 ^= xmm12
10275# asm 1: pxor <xmm12=int6464#9,<xmm8=int6464#10
10276# asm 2: pxor <xmm12=%xmm8,<xmm8=%xmm9
10277pxor %xmm8,%xmm9
10278
10279# qhasm: xmm12 ^= xmm11
10280# asm 1: pxor <xmm11=int6464#11,<xmm12=int6464#9
10281# asm 2: pxor <xmm11=%xmm10,<xmm12=%xmm8
10282pxor %xmm10,%xmm8
10283
10284# qhasm: xmm10 = xmm13
10285# asm 1: movdqa <xmm13=int6464#16,>xmm10=int6464#11
10286# asm 2: movdqa <xmm13=%xmm15,>xmm10=%xmm10
10287movdqa %xmm15,%xmm10
10288
10289# qhasm: xmm10 ^= xmm9
10290# asm 1: pxor <xmm9=int6464#13,<xmm10=int6464#11
10291# asm 2: pxor <xmm9=%xmm12,<xmm10=%xmm10
10292pxor %xmm12,%xmm10
10293
10294# qhasm: xmm10 &= xmm4
10295# asm 1: pand <xmm4=int6464#5,<xmm10=int6464#11
10296# asm 2: pand <xmm4=%xmm4,<xmm10=%xmm10
10297pand %xmm4,%xmm10
10298
10299# qhasm: xmm4 ^= xmm2
10300# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
10301# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
10302pxor %xmm2,%xmm4
10303
10304# qhasm: xmm4 &= xmm9
10305# asm 1: pand <xmm9=int6464#13,<xmm4=int6464#5
10306# asm 2: pand <xmm9=%xmm12,<xmm4=%xmm4
10307pand %xmm12,%xmm4
10308
10309# qhasm: xmm2 &= xmm13
10310# asm 1: pand <xmm13=int6464#16,<xmm2=int6464#3
10311# asm 2: pand <xmm13=%xmm15,<xmm2=%xmm2
10312pand %xmm15,%xmm2
10313
10314# qhasm: xmm4 ^= xmm2
10315# asm 1: pxor <xmm2=int6464#3,<xmm4=int6464#5
10316# asm 2: pxor <xmm2=%xmm2,<xmm4=%xmm4
10317pxor %xmm2,%xmm4
10318
10319# qhasm: xmm2 ^= xmm10
10320# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#3
10321# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm2
10322pxor %xmm10,%xmm2
10323
10324# qhasm: xmm15 ^= xmm13
10325# asm 1: pxor <xmm13=int6464#16,<xmm15=int6464#14
10326# asm 2: pxor <xmm13=%xmm15,<xmm15=%xmm13
10327pxor %xmm15,%xmm13
10328
10329# qhasm: xmm14 ^= xmm9
10330# asm 1: pxor <xmm9=int6464#13,<xmm14=int6464#12
10331# asm 2: pxor <xmm9=%xmm12,<xmm14=%xmm11
10332pxor %xmm12,%xmm11
10333
10334# qhasm: xmm11 = xmm15
10335# asm 1: movdqa <xmm15=int6464#14,>xmm11=int6464#11
10336# asm 2: movdqa <xmm15=%xmm13,>xmm11=%xmm10
10337movdqa %xmm13,%xmm10
10338
10339# qhasm: xmm11 ^= xmm14
10340# asm 1: pxor <xmm14=int6464#12,<xmm11=int6464#11
10341# asm 2: pxor <xmm14=%xmm11,<xmm11=%xmm10
10342pxor %xmm11,%xmm10
10343
10344# qhasm: xmm11 &= xmm7
10345# asm 1: pand <xmm7=int6464#8,<xmm11=int6464#11
10346# asm 2: pand <xmm7=%xmm7,<xmm11=%xmm10
10347pand %xmm7,%xmm10
10348
10349# qhasm: xmm7 ^= xmm1
10350# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
10351# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
10352pxor %xmm1,%xmm7
10353
10354# qhasm: xmm7 &= xmm14
10355# asm 1: pand <xmm14=int6464#12,<xmm7=int6464#8
10356# asm 2: pand <xmm14=%xmm11,<xmm7=%xmm7
10357pand %xmm11,%xmm7
10358
10359# qhasm: xmm1 &= xmm15
10360# asm 1: pand <xmm15=int6464#14,<xmm1=int6464#2
10361# asm 2: pand <xmm15=%xmm13,<xmm1=%xmm1
10362pand %xmm13,%xmm1
10363
10364# qhasm: xmm7 ^= xmm1
10365# asm 1: pxor <xmm1=int6464#2,<xmm7=int6464#8
10366# asm 2: pxor <xmm1=%xmm1,<xmm7=%xmm7
10367pxor %xmm1,%xmm7
10368
10369# qhasm: xmm1 ^= xmm11
10370# asm 1: pxor <xmm11=int6464#11,<xmm1=int6464#2
10371# asm 2: pxor <xmm11=%xmm10,<xmm1=%xmm1
10372pxor %xmm10,%xmm1
10373
10374# qhasm: xmm7 ^= xmm12
10375# asm 1: pxor <xmm12=int6464#9,<xmm7=int6464#8
10376# asm 2: pxor <xmm12=%xmm8,<xmm7=%xmm7
10377pxor %xmm8,%xmm7
10378
10379# qhasm: xmm4 ^= xmm12
10380# asm 1: pxor <xmm12=int6464#9,<xmm4=int6464#5
10381# asm 2: pxor <xmm12=%xmm8,<xmm4=%xmm4
10382pxor %xmm8,%xmm4
10383
10384# qhasm: xmm1 ^= xmm8
10385# asm 1: pxor <xmm8=int6464#10,<xmm1=int6464#2
10386# asm 2: pxor <xmm8=%xmm9,<xmm1=%xmm1
10387pxor %xmm9,%xmm1
10388
10389# qhasm: xmm2 ^= xmm8
10390# asm 1: pxor <xmm8=int6464#10,<xmm2=int6464#3
10391# asm 2: pxor <xmm8=%xmm9,<xmm2=%xmm2
10392pxor %xmm9,%xmm2
10393
10394# qhasm: xmm7 ^= xmm0
10395# asm 1: pxor <xmm0=int6464#1,<xmm7=int6464#8
10396# asm 2: pxor <xmm0=%xmm0,<xmm7=%xmm7
10397pxor %xmm0,%xmm7
10398
10399# qhasm: xmm1 ^= xmm6
10400# asm 1: pxor <xmm6=int6464#7,<xmm1=int6464#2
10401# asm 2: pxor <xmm6=%xmm6,<xmm1=%xmm1
10402pxor %xmm6,%xmm1
10403
10404# qhasm: xmm4 ^= xmm7
10405# asm 1: pxor <xmm7=int6464#8,<xmm4=int6464#5
10406# asm 2: pxor <xmm7=%xmm7,<xmm4=%xmm4
10407pxor %xmm7,%xmm4
10408
10409# qhasm: xmm6 ^= xmm0
10410# asm 1: pxor <xmm0=int6464#1,<xmm6=int6464#7
10411# asm 2: pxor <xmm0=%xmm0,<xmm6=%xmm6
10412pxor %xmm0,%xmm6
10413
10414# qhasm: xmm0 ^= xmm1
10415# asm 1: pxor <xmm1=int6464#2,<xmm0=int6464#1
10416# asm 2: pxor <xmm1=%xmm1,<xmm0=%xmm0
10417pxor %xmm1,%xmm0
10418
10419# qhasm: xmm1 ^= xmm5
10420# asm 1: pxor <xmm5=int6464#6,<xmm1=int6464#2
10421# asm 2: pxor <xmm5=%xmm5,<xmm1=%xmm1
10422pxor %xmm5,%xmm1
10423
10424# qhasm: xmm5 ^= xmm2
10425# asm 1: pxor <xmm2=int6464#3,<xmm5=int6464#6
10426# asm 2: pxor <xmm2=%xmm2,<xmm5=%xmm5
10427pxor %xmm2,%xmm5
10428
10429# qhasm: xmm4 ^= xmm5
10430# asm 1: pxor <xmm5=int6464#6,<xmm4=int6464#5
10431# asm 2: pxor <xmm5=%xmm5,<xmm4=%xmm4
10432pxor %xmm5,%xmm4
10433
10434# qhasm: xmm2 ^= xmm3
10435# asm 1: pxor <xmm3=int6464#4,<xmm2=int6464#3
10436# asm 2: pxor <xmm3=%xmm3,<xmm2=%xmm2
10437pxor %xmm3,%xmm2
10438
10439# qhasm: xmm3 ^= xmm5
10440# asm 1: pxor <xmm5=int6464#6,<xmm3=int6464#4
10441# asm 2: pxor <xmm5=%xmm5,<xmm3=%xmm3
10442pxor %xmm5,%xmm3
10443
10444# qhasm: xmm6 ^= xmm3
10445# asm 1: pxor <xmm3=int6464#4,<xmm6=int6464#7
10446# asm 2: pxor <xmm3=%xmm3,<xmm6=%xmm6
10447pxor %xmm3,%xmm6
10448
10449# qhasm: xmm8 = shuffle dwords of xmm0 by 0x93
10450# asm 1: pshufd $0x93,<xmm0=int6464#1,>xmm8=int6464#9
10451# asm 2: pshufd $0x93,<xmm0=%xmm0,>xmm8=%xmm8
10452pshufd $0x93,%xmm0,%xmm8
10453
10454# qhasm: xmm9 = shuffle dwords of xmm1 by 0x93
10455# asm 1: pshufd $0x93,<xmm1=int6464#2,>xmm9=int6464#10
10456# asm 2: pshufd $0x93,<xmm1=%xmm1,>xmm9=%xmm9
10457pshufd $0x93,%xmm1,%xmm9
10458
10459# qhasm: xmm10 = shuffle dwords of xmm4 by 0x93
10460# asm 1: pshufd $0x93,<xmm4=int6464#5,>xmm10=int6464#11
10461# asm 2: pshufd $0x93,<xmm4=%xmm4,>xmm10=%xmm10
10462pshufd $0x93,%xmm4,%xmm10
10463
10464# qhasm: xmm11 = shuffle dwords of xmm6 by 0x93
10465# asm 1: pshufd $0x93,<xmm6=int6464#7,>xmm11=int6464#12
10466# asm 2: pshufd $0x93,<xmm6=%xmm6,>xmm11=%xmm11
10467pshufd $0x93,%xmm6,%xmm11
10468
10469# qhasm: xmm12 = shuffle dwords of xmm3 by 0x93
10470# asm 1: pshufd $0x93,<xmm3=int6464#4,>xmm12=int6464#13
10471# asm 2: pshufd $0x93,<xmm3=%xmm3,>xmm12=%xmm12
10472pshufd $0x93,%xmm3,%xmm12
10473
10474# qhasm: xmm13 = shuffle dwords of xmm7 by 0x93
10475# asm 1: pshufd $0x93,<xmm7=int6464#8,>xmm13=int6464#14
10476# asm 2: pshufd $0x93,<xmm7=%xmm7,>xmm13=%xmm13
10477pshufd $0x93,%xmm7,%xmm13
10478
10479# qhasm: xmm14 = shuffle dwords of xmm2 by 0x93
10480# asm 1: pshufd $0x93,<xmm2=int6464#3,>xmm14=int6464#15
10481# asm 2: pshufd $0x93,<xmm2=%xmm2,>xmm14=%xmm14
10482pshufd $0x93,%xmm2,%xmm14
10483
10484# qhasm: xmm15 = shuffle dwords of xmm5 by 0x93
10485# asm 1: pshufd $0x93,<xmm5=int6464#6,>xmm15=int6464#16
10486# asm 2: pshufd $0x93,<xmm5=%xmm5,>xmm15=%xmm15
10487pshufd $0x93,%xmm5,%xmm15
10488
10489# qhasm: xmm0 ^= xmm8
10490# asm 1: pxor <xmm8=int6464#9,<xmm0=int6464#1
10491# asm 2: pxor <xmm8=%xmm8,<xmm0=%xmm0
10492pxor %xmm8,%xmm0
10493
10494# qhasm: xmm1 ^= xmm9
10495# asm 1: pxor <xmm9=int6464#10,<xmm1=int6464#2
10496# asm 2: pxor <xmm9=%xmm9,<xmm1=%xmm1
10497pxor %xmm9,%xmm1
10498
10499# qhasm: xmm4 ^= xmm10
10500# asm 1: pxor <xmm10=int6464#11,<xmm4=int6464#5
10501# asm 2: pxor <xmm10=%xmm10,<xmm4=%xmm4
10502pxor %xmm10,%xmm4
10503
10504# qhasm: xmm6 ^= xmm11
10505# asm 1: pxor <xmm11=int6464#12,<xmm6=int6464#7
10506# asm 2: pxor <xmm11=%xmm11,<xmm6=%xmm6
10507pxor %xmm11,%xmm6
10508
10509# qhasm: xmm3 ^= xmm12
10510# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#4
10511# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm3
10512pxor %xmm12,%xmm3
10513
10514# qhasm: xmm7 ^= xmm13
10515# asm 1: pxor <xmm13=int6464#14,<xmm7=int6464#8
10516# asm 2: pxor <xmm13=%xmm13,<xmm7=%xmm7
10517pxor %xmm13,%xmm7
10518
10519# qhasm: xmm2 ^= xmm14
10520# asm 1: pxor <xmm14=int6464#15,<xmm2=int6464#3
10521# asm 2: pxor <xmm14=%xmm14,<xmm2=%xmm2
10522pxor %xmm14,%xmm2
10523
10524# qhasm: xmm5 ^= xmm15
10525# asm 1: pxor <xmm15=int6464#16,<xmm5=int6464#6
10526# asm 2: pxor <xmm15=%xmm15,<xmm5=%xmm5
10527pxor %xmm15,%xmm5
10528
10529# qhasm: xmm8 ^= xmm5
10530# asm 1: pxor <xmm5=int6464#6,<xmm8=int6464#9
10531# asm 2: pxor <xmm5=%xmm5,<xmm8=%xmm8
10532pxor %xmm5,%xmm8
10533
10534# qhasm: xmm9 ^= xmm0
10535# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
10536# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
10537pxor %xmm0,%xmm9
10538
10539# qhasm: xmm10 ^= xmm1
10540# asm 1: pxor <xmm1=int6464#2,<xmm10=int6464#11
10541# asm 2: pxor <xmm1=%xmm1,<xmm10=%xmm10
10542pxor %xmm1,%xmm10
10543
10544# qhasm: xmm9 ^= xmm5
10545# asm 1: pxor <xmm5=int6464#6,<xmm9=int6464#10
10546# asm 2: pxor <xmm5=%xmm5,<xmm9=%xmm9
10547pxor %xmm5,%xmm9
10548
10549# qhasm: xmm11 ^= xmm4
10550# asm 1: pxor <xmm4=int6464#5,<xmm11=int6464#12
10551# asm 2: pxor <xmm4=%xmm4,<xmm11=%xmm11
10552pxor %xmm4,%xmm11
10553
10554# qhasm: xmm12 ^= xmm6
10555# asm 1: pxor <xmm6=int6464#7,<xmm12=int6464#13
10556# asm 2: pxor <xmm6=%xmm6,<xmm12=%xmm12
10557pxor %xmm6,%xmm12
10558
10559# qhasm: xmm13 ^= xmm3
10560# asm 1: pxor <xmm3=int6464#4,<xmm13=int6464#14
10561# asm 2: pxor <xmm3=%xmm3,<xmm13=%xmm13
10562pxor %xmm3,%xmm13
10563
10564# qhasm: xmm11 ^= xmm5
10565# asm 1: pxor <xmm5=int6464#6,<xmm11=int6464#12
10566# asm 2: pxor <xmm5=%xmm5,<xmm11=%xmm11
10567pxor %xmm5,%xmm11
10568
10569# qhasm: xmm14 ^= xmm7
10570# asm 1: pxor <xmm7=int6464#8,<xmm14=int6464#15
10571# asm 2: pxor <xmm7=%xmm7,<xmm14=%xmm14
10572pxor %xmm7,%xmm14
10573
10574# qhasm: xmm15 ^= xmm2
10575# asm 1: pxor <xmm2=int6464#3,<xmm15=int6464#16
10576# asm 2: pxor <xmm2=%xmm2,<xmm15=%xmm15
10577pxor %xmm2,%xmm15
10578
10579# qhasm: xmm12 ^= xmm5
10580# asm 1: pxor <xmm5=int6464#6,<xmm12=int6464#13
10581# asm 2: pxor <xmm5=%xmm5,<xmm12=%xmm12
10582pxor %xmm5,%xmm12
10583
10584# qhasm: xmm0 = shuffle dwords of xmm0 by 0x4E
10585# asm 1: pshufd $0x4E,<xmm0=int6464#1,>xmm0=int6464#1
10586# asm 2: pshufd $0x4E,<xmm0=%xmm0,>xmm0=%xmm0
10587pshufd $0x4E,%xmm0,%xmm0
10588
10589# qhasm: xmm1 = shuffle dwords of xmm1 by 0x4E
10590# asm 1: pshufd $0x4E,<xmm1=int6464#2,>xmm1=int6464#2
10591# asm 2: pshufd $0x4E,<xmm1=%xmm1,>xmm1=%xmm1
10592pshufd $0x4E,%xmm1,%xmm1
10593
10594# qhasm: xmm4 = shuffle dwords of xmm4 by 0x4E
10595# asm 1: pshufd $0x4E,<xmm4=int6464#5,>xmm4=int6464#5
10596# asm 2: pshufd $0x4E,<xmm4=%xmm4,>xmm4=%xmm4
10597pshufd $0x4E,%xmm4,%xmm4
10598
10599# qhasm: xmm6 = shuffle dwords of xmm6 by 0x4E
10600# asm 1: pshufd $0x4E,<xmm6=int6464#7,>xmm6=int6464#7
10601# asm 2: pshufd $0x4E,<xmm6=%xmm6,>xmm6=%xmm6
10602pshufd $0x4E,%xmm6,%xmm6
10603
10604# qhasm: xmm3 = shuffle dwords of xmm3 by 0x4E
10605# asm 1: pshufd $0x4E,<xmm3=int6464#4,>xmm3=int6464#4
10606# asm 2: pshufd $0x4E,<xmm3=%xmm3,>xmm3=%xmm3
10607pshufd $0x4E,%xmm3,%xmm3
10608
10609# qhasm: xmm7 = shuffle dwords of xmm7 by 0x4E
10610# asm 1: pshufd $0x4E,<xmm7=int6464#8,>xmm7=int6464#8
10611# asm 2: pshufd $0x4E,<xmm7=%xmm7,>xmm7=%xmm7
10612pshufd $0x4E,%xmm7,%xmm7
10613
10614# qhasm: xmm2 = shuffle dwords of xmm2 by 0x4E
10615# asm 1: pshufd $0x4E,<xmm2=int6464#3,>xmm2=int6464#3
10616# asm 2: pshufd $0x4E,<xmm2=%xmm2,>xmm2=%xmm2
10617pshufd $0x4E,%xmm2,%xmm2
10618
10619# qhasm: xmm5 = shuffle dwords of xmm5 by 0x4E
10620# asm 1: pshufd $0x4E,<xmm5=int6464#6,>xmm5=int6464#6
10621# asm 2: pshufd $0x4E,<xmm5=%xmm5,>xmm5=%xmm5
10622pshufd $0x4E,%xmm5,%xmm5
10623
10624# qhasm: xmm8 ^= xmm0
10625# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
10626# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
10627pxor %xmm0,%xmm8
10628
10629# qhasm: xmm9 ^= xmm1
10630# asm 1: pxor <xmm1=int6464#2,<xmm9=int6464#10
10631# asm 2: pxor <xmm1=%xmm1,<xmm9=%xmm9
10632pxor %xmm1,%xmm9
10633
10634# qhasm: xmm10 ^= xmm4
10635# asm 1: pxor <xmm4=int6464#5,<xmm10=int6464#11
10636# asm 2: pxor <xmm4=%xmm4,<xmm10=%xmm10
10637pxor %xmm4,%xmm10
10638
10639# qhasm: xmm11 ^= xmm6
10640# asm 1: pxor <xmm6=int6464#7,<xmm11=int6464#12
10641# asm 2: pxor <xmm6=%xmm6,<xmm11=%xmm11
10642pxor %xmm6,%xmm11
10643
10644# qhasm: xmm12 ^= xmm3
10645# asm 1: pxor <xmm3=int6464#4,<xmm12=int6464#13
10646# asm 2: pxor <xmm3=%xmm3,<xmm12=%xmm12
10647pxor %xmm3,%xmm12
10648
10649# qhasm: xmm13 ^= xmm7
10650# asm 1: pxor <xmm7=int6464#8,<xmm13=int6464#14
10651# asm 2: pxor <xmm7=%xmm7,<xmm13=%xmm13
10652pxor %xmm7,%xmm13
10653
10654# qhasm: xmm14 ^= xmm2
10655# asm 1: pxor <xmm2=int6464#3,<xmm14=int6464#15
10656# asm 2: pxor <xmm2=%xmm2,<xmm14=%xmm14
10657pxor %xmm2,%xmm14
10658
10659# qhasm: xmm15 ^= xmm5
10660# asm 1: pxor <xmm5=int6464#6,<xmm15=int6464#16
10661# asm 2: pxor <xmm5=%xmm5,<xmm15=%xmm15
10662pxor %xmm5,%xmm15
10663
10664# qhasm: xmm8 ^= *(int128 *)(c + 1152)
10665# asm 1: pxor 1152(<c=int64#5),<xmm8=int6464#9
10666# asm 2: pxor 1152(<c=%r8),<xmm8=%xmm8
10667pxor 1152(%r8),%xmm8
10668
10669# qhasm: shuffle bytes of xmm8 by SRM0
10670# asm 1: pshufb SRM0,<xmm8=int6464#9
10671# asm 2: pshufb SRM0,<xmm8=%xmm8
10672pshufb SRM0,%xmm8
10673
10674# qhasm: xmm9 ^= *(int128 *)(c + 1168)
10675# asm 1: pxor 1168(<c=int64#5),<xmm9=int6464#10
10676# asm 2: pxor 1168(<c=%r8),<xmm9=%xmm9
10677pxor 1168(%r8),%xmm9
10678
10679# qhasm: shuffle bytes of xmm9 by SRM0
10680# asm 1: pshufb SRM0,<xmm9=int6464#10
10681# asm 2: pshufb SRM0,<xmm9=%xmm9
10682pshufb SRM0,%xmm9
10683
10684# qhasm: xmm10 ^= *(int128 *)(c + 1184)
10685# asm 1: pxor 1184(<c=int64#5),<xmm10=int6464#11
10686# asm 2: pxor 1184(<c=%r8),<xmm10=%xmm10
10687pxor 1184(%r8),%xmm10
10688
10689# qhasm: shuffle bytes of xmm10 by SRM0
10690# asm 1: pshufb SRM0,<xmm10=int6464#11
10691# asm 2: pshufb SRM0,<xmm10=%xmm10
10692pshufb SRM0,%xmm10
10693
10694# qhasm: xmm11 ^= *(int128 *)(c + 1200)
10695# asm 1: pxor 1200(<c=int64#5),<xmm11=int6464#12
10696# asm 2: pxor 1200(<c=%r8),<xmm11=%xmm11
10697pxor 1200(%r8),%xmm11
10698
10699# qhasm: shuffle bytes of xmm11 by SRM0
10700# asm 1: pshufb SRM0,<xmm11=int6464#12
10701# asm 2: pshufb SRM0,<xmm11=%xmm11
10702pshufb SRM0,%xmm11
10703
10704# qhasm: xmm12 ^= *(int128 *)(c + 1216)
10705# asm 1: pxor 1216(<c=int64#5),<xmm12=int6464#13
10706# asm 2: pxor 1216(<c=%r8),<xmm12=%xmm12
10707pxor 1216(%r8),%xmm12
10708
10709# qhasm: shuffle bytes of xmm12 by SRM0
10710# asm 1: pshufb SRM0,<xmm12=int6464#13
10711# asm 2: pshufb SRM0,<xmm12=%xmm12
10712pshufb SRM0,%xmm12
10713
10714# qhasm: xmm13 ^= *(int128 *)(c + 1232)
10715# asm 1: pxor 1232(<c=int64#5),<xmm13=int6464#14
10716# asm 2: pxor 1232(<c=%r8),<xmm13=%xmm13
10717pxor 1232(%r8),%xmm13
10718
10719# qhasm: shuffle bytes of xmm13 by SRM0
10720# asm 1: pshufb SRM0,<xmm13=int6464#14
10721# asm 2: pshufb SRM0,<xmm13=%xmm13
10722pshufb SRM0,%xmm13
10723
10724# qhasm: xmm14 ^= *(int128 *)(c + 1248)
10725# asm 1: pxor 1248(<c=int64#5),<xmm14=int6464#15
10726# asm 2: pxor 1248(<c=%r8),<xmm14=%xmm14
10727pxor 1248(%r8),%xmm14
10728
10729# qhasm: shuffle bytes of xmm14 by SRM0
10730# asm 1: pshufb SRM0,<xmm14=int6464#15
10731# asm 2: pshufb SRM0,<xmm14=%xmm14
10732pshufb SRM0,%xmm14
10733
10734# qhasm: xmm15 ^= *(int128 *)(c + 1264)
10735# asm 1: pxor 1264(<c=int64#5),<xmm15=int6464#16
10736# asm 2: pxor 1264(<c=%r8),<xmm15=%xmm15
10737pxor 1264(%r8),%xmm15
10738
10739# qhasm: shuffle bytes of xmm15 by SRM0
10740# asm 1: pshufb SRM0,<xmm15=int6464#16
10741# asm 2: pshufb SRM0,<xmm15=%xmm15
10742pshufb SRM0,%xmm15
10743
10744# qhasm: xmm13 ^= xmm14
10745# asm 1: pxor <xmm14=int6464#15,<xmm13=int6464#14
10746# asm 2: pxor <xmm14=%xmm14,<xmm13=%xmm13
10747pxor %xmm14,%xmm13
10748
10749# qhasm: xmm10 ^= xmm9
10750# asm 1: pxor <xmm9=int6464#10,<xmm10=int6464#11
10751# asm 2: pxor <xmm9=%xmm9,<xmm10=%xmm10
10752pxor %xmm9,%xmm10
10753
10754# qhasm: xmm13 ^= xmm8
10755# asm 1: pxor <xmm8=int6464#9,<xmm13=int6464#14
10756# asm 2: pxor <xmm8=%xmm8,<xmm13=%xmm13
10757pxor %xmm8,%xmm13
10758
10759# qhasm: xmm14 ^= xmm10
10760# asm 1: pxor <xmm10=int6464#11,<xmm14=int6464#15
10761# asm 2: pxor <xmm10=%xmm10,<xmm14=%xmm14
10762pxor %xmm10,%xmm14
10763
10764# qhasm: xmm11 ^= xmm8
10765# asm 1: pxor <xmm8=int6464#9,<xmm11=int6464#12
10766# asm 2: pxor <xmm8=%xmm8,<xmm11=%xmm11
10767pxor %xmm8,%xmm11
10768
10769# qhasm: xmm14 ^= xmm11
10770# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
10771# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
10772pxor %xmm11,%xmm14
10773
10774# qhasm: xmm11 ^= xmm15
10775# asm 1: pxor <xmm15=int6464#16,<xmm11=int6464#12
10776# asm 2: pxor <xmm15=%xmm15,<xmm11=%xmm11
10777pxor %xmm15,%xmm11
10778
10779# qhasm: xmm11 ^= xmm12
10780# asm 1: pxor <xmm12=int6464#13,<xmm11=int6464#12
10781# asm 2: pxor <xmm12=%xmm12,<xmm11=%xmm11
10782pxor %xmm12,%xmm11
10783
10784# qhasm: xmm15 ^= xmm13
10785# asm 1: pxor <xmm13=int6464#14,<xmm15=int6464#16
10786# asm 2: pxor <xmm13=%xmm13,<xmm15=%xmm15
10787pxor %xmm13,%xmm15
10788
10789# qhasm: xmm11 ^= xmm9
10790# asm 1: pxor <xmm9=int6464#10,<xmm11=int6464#12
10791# asm 2: pxor <xmm9=%xmm9,<xmm11=%xmm11
10792pxor %xmm9,%xmm11
10793
10794# qhasm: xmm12 ^= xmm13
10795# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
10796# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
10797pxor %xmm13,%xmm12
10798
10799# qhasm: xmm10 ^= xmm15
10800# asm 1: pxor <xmm15=int6464#16,<xmm10=int6464#11
10801# asm 2: pxor <xmm15=%xmm15,<xmm10=%xmm10
10802pxor %xmm15,%xmm10
10803
10804# qhasm: xmm9 ^= xmm13
10805# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
10806# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
10807pxor %xmm13,%xmm9
10808
10809# qhasm: xmm3 = xmm15
10810# asm 1: movdqa <xmm15=int6464#16,>xmm3=int6464#1
10811# asm 2: movdqa <xmm15=%xmm15,>xmm3=%xmm0
10812movdqa %xmm15,%xmm0
10813
10814# qhasm: xmm2 = xmm9
10815# asm 1: movdqa <xmm9=int6464#10,>xmm2=int6464#2
10816# asm 2: movdqa <xmm9=%xmm9,>xmm2=%xmm1
10817movdqa %xmm9,%xmm1
10818
10819# qhasm: xmm1 = xmm13
10820# asm 1: movdqa <xmm13=int6464#14,>xmm1=int6464#3
10821# asm 2: movdqa <xmm13=%xmm13,>xmm1=%xmm2
10822movdqa %xmm13,%xmm2
10823
10824# qhasm: xmm5 = xmm10
10825# asm 1: movdqa <xmm10=int6464#11,>xmm5=int6464#4
10826# asm 2: movdqa <xmm10=%xmm10,>xmm5=%xmm3
10827movdqa %xmm10,%xmm3
10828
10829# qhasm: xmm4 = xmm14
10830# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#5
10831# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm4
10832movdqa %xmm14,%xmm4
10833
10834# qhasm: xmm3 ^= xmm12
10835# asm 1: pxor <xmm12=int6464#13,<xmm3=int6464#1
10836# asm 2: pxor <xmm12=%xmm12,<xmm3=%xmm0
10837pxor %xmm12,%xmm0
10838
10839# qhasm: xmm2 ^= xmm10
10840# asm 1: pxor <xmm10=int6464#11,<xmm2=int6464#2
10841# asm 2: pxor <xmm10=%xmm10,<xmm2=%xmm1
10842pxor %xmm10,%xmm1
10843
10844# qhasm: xmm1 ^= xmm11
10845# asm 1: pxor <xmm11=int6464#12,<xmm1=int6464#3
10846# asm 2: pxor <xmm11=%xmm11,<xmm1=%xmm2
10847pxor %xmm11,%xmm2
10848
10849# qhasm: xmm5 ^= xmm12
10850# asm 1: pxor <xmm12=int6464#13,<xmm5=int6464#4
10851# asm 2: pxor <xmm12=%xmm12,<xmm5=%xmm3
10852pxor %xmm12,%xmm3
10853
10854# qhasm: xmm4 ^= xmm8
10855# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#5
10856# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm4
10857pxor %xmm8,%xmm4
10858
10859# qhasm: xmm6 = xmm3
10860# asm 1: movdqa <xmm3=int6464#1,>xmm6=int6464#6
10861# asm 2: movdqa <xmm3=%xmm0,>xmm6=%xmm5
10862movdqa %xmm0,%xmm5
10863
10864# qhasm: xmm0 = xmm2
10865# asm 1: movdqa <xmm2=int6464#2,>xmm0=int6464#7
10866# asm 2: movdqa <xmm2=%xmm1,>xmm0=%xmm6
10867movdqa %xmm1,%xmm6
10868
10869# qhasm: xmm7 = xmm3
10870# asm 1: movdqa <xmm3=int6464#1,>xmm7=int6464#8
10871# asm 2: movdqa <xmm3=%xmm0,>xmm7=%xmm7
10872movdqa %xmm0,%xmm7
10873
10874# qhasm: xmm2 |= xmm1
10875# asm 1: por <xmm1=int6464#3,<xmm2=int6464#2
10876# asm 2: por <xmm1=%xmm2,<xmm2=%xmm1
10877por %xmm2,%xmm1
10878
10879# qhasm: xmm3 |= xmm4
10880# asm 1: por <xmm4=int6464#5,<xmm3=int6464#1
10881# asm 2: por <xmm4=%xmm4,<xmm3=%xmm0
10882por %xmm4,%xmm0
10883
10884# qhasm: xmm7 ^= xmm0
10885# asm 1: pxor <xmm0=int6464#7,<xmm7=int6464#8
10886# asm 2: pxor <xmm0=%xmm6,<xmm7=%xmm7
10887pxor %xmm6,%xmm7
10888
10889# qhasm: xmm6 &= xmm4
10890# asm 1: pand <xmm4=int6464#5,<xmm6=int6464#6
10891# asm 2: pand <xmm4=%xmm4,<xmm6=%xmm5
10892pand %xmm4,%xmm5
10893
10894# qhasm: xmm0 &= xmm1
10895# asm 1: pand <xmm1=int6464#3,<xmm0=int6464#7
10896# asm 2: pand <xmm1=%xmm2,<xmm0=%xmm6
10897pand %xmm2,%xmm6
10898
10899# qhasm: xmm4 ^= xmm1
10900# asm 1: pxor <xmm1=int6464#3,<xmm4=int6464#5
10901# asm 2: pxor <xmm1=%xmm2,<xmm4=%xmm4
10902pxor %xmm2,%xmm4
10903
10904# qhasm: xmm7 &= xmm4
10905# asm 1: pand <xmm4=int6464#5,<xmm7=int6464#8
10906# asm 2: pand <xmm4=%xmm4,<xmm7=%xmm7
10907pand %xmm4,%xmm7
10908
10909# qhasm: xmm4 = xmm11
10910# asm 1: movdqa <xmm11=int6464#12,>xmm4=int6464#3
10911# asm 2: movdqa <xmm11=%xmm11,>xmm4=%xmm2
10912movdqa %xmm11,%xmm2
10913
10914# qhasm: xmm4 ^= xmm8
10915# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#3
10916# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm2
10917pxor %xmm8,%xmm2
10918
10919# qhasm: xmm5 &= xmm4
10920# asm 1: pand <xmm4=int6464#3,<xmm5=int6464#4
10921# asm 2: pand <xmm4=%xmm2,<xmm5=%xmm3
10922pand %xmm2,%xmm3
10923
10924# qhasm: xmm3 ^= xmm5
10925# asm 1: pxor <xmm5=int6464#4,<xmm3=int6464#1
10926# asm 2: pxor <xmm5=%xmm3,<xmm3=%xmm0
10927pxor %xmm3,%xmm0
10928
10929# qhasm: xmm2 ^= xmm5
10930# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
10931# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
10932pxor %xmm3,%xmm1
10933
10934# qhasm: xmm5 = xmm15
10935# asm 1: movdqa <xmm15=int6464#16,>xmm5=int6464#3
10936# asm 2: movdqa <xmm15=%xmm15,>xmm5=%xmm2
10937movdqa %xmm15,%xmm2
10938
10939# qhasm: xmm5 ^= xmm9
10940# asm 1: pxor <xmm9=int6464#10,<xmm5=int6464#3
10941# asm 2: pxor <xmm9=%xmm9,<xmm5=%xmm2
10942pxor %xmm9,%xmm2
10943
10944# qhasm: xmm4 = xmm13
10945# asm 1: movdqa <xmm13=int6464#14,>xmm4=int6464#4
10946# asm 2: movdqa <xmm13=%xmm13,>xmm4=%xmm3
10947movdqa %xmm13,%xmm3
10948
10949# qhasm: xmm1 = xmm5
10950# asm 1: movdqa <xmm5=int6464#3,>xmm1=int6464#5
10951# asm 2: movdqa <xmm5=%xmm2,>xmm1=%xmm4
10952movdqa %xmm2,%xmm4
10953
10954# qhasm: xmm4 ^= xmm14
10955# asm 1: pxor <xmm14=int6464#15,<xmm4=int6464#4
10956# asm 2: pxor <xmm14=%xmm14,<xmm4=%xmm3
10957pxor %xmm14,%xmm3
10958
10959# qhasm: xmm1 |= xmm4
10960# asm 1: por <xmm4=int6464#4,<xmm1=int6464#5
10961# asm 2: por <xmm4=%xmm3,<xmm1=%xmm4
10962por %xmm3,%xmm4
10963
10964# qhasm: xmm5 &= xmm4
10965# asm 1: pand <xmm4=int6464#4,<xmm5=int6464#3
10966# asm 2: pand <xmm4=%xmm3,<xmm5=%xmm2
10967pand %xmm3,%xmm2
10968
10969# qhasm: xmm0 ^= xmm5
10970# asm 1: pxor <xmm5=int6464#3,<xmm0=int6464#7
10971# asm 2: pxor <xmm5=%xmm2,<xmm0=%xmm6
10972pxor %xmm2,%xmm6
10973
10974# qhasm: xmm3 ^= xmm7
10975# asm 1: pxor <xmm7=int6464#8,<xmm3=int6464#1
10976# asm 2: pxor <xmm7=%xmm7,<xmm3=%xmm0
10977pxor %xmm7,%xmm0
10978
10979# qhasm: xmm2 ^= xmm6
10980# asm 1: pxor <xmm6=int6464#6,<xmm2=int6464#2
10981# asm 2: pxor <xmm6=%xmm5,<xmm2=%xmm1
10982pxor %xmm5,%xmm1
10983
10984# qhasm: xmm1 ^= xmm7
10985# asm 1: pxor <xmm7=int6464#8,<xmm1=int6464#5
10986# asm 2: pxor <xmm7=%xmm7,<xmm1=%xmm4
10987pxor %xmm7,%xmm4
10988
10989# qhasm: xmm0 ^= xmm6
10990# asm 1: pxor <xmm6=int6464#6,<xmm0=int6464#7
10991# asm 2: pxor <xmm6=%xmm5,<xmm0=%xmm6
10992pxor %xmm5,%xmm6
10993
10994# qhasm: xmm1 ^= xmm6
10995# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
10996# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
10997pxor %xmm5,%xmm4
10998
10999# qhasm: xmm4 = xmm10
11000# asm 1: movdqa <xmm10=int6464#11,>xmm4=int6464#3
11001# asm 2: movdqa <xmm10=%xmm10,>xmm4=%xmm2
11002movdqa %xmm10,%xmm2
11003
11004# qhasm: xmm5 = xmm12
11005# asm 1: movdqa <xmm12=int6464#13,>xmm5=int6464#4
11006# asm 2: movdqa <xmm12=%xmm12,>xmm5=%xmm3
11007movdqa %xmm12,%xmm3
11008
11009# qhasm: xmm6 = xmm9
11010# asm 1: movdqa <xmm9=int6464#10,>xmm6=int6464#6
11011# asm 2: movdqa <xmm9=%xmm9,>xmm6=%xmm5
11012movdqa %xmm9,%xmm5
11013
11014# qhasm: xmm7 = xmm15
11015# asm 1: movdqa <xmm15=int6464#16,>xmm7=int6464#8
11016# asm 2: movdqa <xmm15=%xmm15,>xmm7=%xmm7
11017movdqa %xmm15,%xmm7
11018
11019# qhasm: xmm4 &= xmm11
11020# asm 1: pand <xmm11=int6464#12,<xmm4=int6464#3
11021# asm 2: pand <xmm11=%xmm11,<xmm4=%xmm2
11022pand %xmm11,%xmm2
11023
11024# qhasm: xmm5 &= xmm8
11025# asm 1: pand <xmm8=int6464#9,<xmm5=int6464#4
11026# asm 2: pand <xmm8=%xmm8,<xmm5=%xmm3
11027pand %xmm8,%xmm3
11028
11029# qhasm: xmm6 &= xmm13
11030# asm 1: pand <xmm13=int6464#14,<xmm6=int6464#6
11031# asm 2: pand <xmm13=%xmm13,<xmm6=%xmm5
11032pand %xmm13,%xmm5
11033
11034# qhasm: xmm7 |= xmm14
11035# asm 1: por <xmm14=int6464#15,<xmm7=int6464#8
11036# asm 2: por <xmm14=%xmm14,<xmm7=%xmm7
11037por %xmm14,%xmm7
11038
11039# qhasm: xmm3 ^= xmm4
11040# asm 1: pxor <xmm4=int6464#3,<xmm3=int6464#1
11041# asm 2: pxor <xmm4=%xmm2,<xmm3=%xmm0
11042pxor %xmm2,%xmm0
11043
11044# qhasm: xmm2 ^= xmm5
11045# asm 1: pxor <xmm5=int6464#4,<xmm2=int6464#2
11046# asm 2: pxor <xmm5=%xmm3,<xmm2=%xmm1
11047pxor %xmm3,%xmm1
11048
11049# qhasm: xmm1 ^= xmm6
11050# asm 1: pxor <xmm6=int6464#6,<xmm1=int6464#5
11051# asm 2: pxor <xmm6=%xmm5,<xmm1=%xmm4
11052pxor %xmm5,%xmm4
11053
11054# qhasm: xmm0 ^= xmm7
11055# asm 1: pxor <xmm7=int6464#8,<xmm0=int6464#7
11056# asm 2: pxor <xmm7=%xmm7,<xmm0=%xmm6
11057pxor %xmm7,%xmm6
11058
11059# qhasm: xmm4 = xmm3
11060# asm 1: movdqa <xmm3=int6464#1,>xmm4=int6464#3
11061# asm 2: movdqa <xmm3=%xmm0,>xmm4=%xmm2
11062movdqa %xmm0,%xmm2
11063
11064# qhasm: xmm4 ^= xmm2
11065# asm 1: pxor <xmm2=int6464#2,<xmm4=int6464#3
11066# asm 2: pxor <xmm2=%xmm1,<xmm4=%xmm2
11067pxor %xmm1,%xmm2
11068
11069# qhasm: xmm3 &= xmm1
11070# asm 1: pand <xmm1=int6464#5,<xmm3=int6464#1
11071# asm 2: pand <xmm1=%xmm4,<xmm3=%xmm0
11072pand %xmm4,%xmm0
11073
11074# qhasm: xmm6 = xmm0
11075# asm 1: movdqa <xmm0=int6464#7,>xmm6=int6464#4
11076# asm 2: movdqa <xmm0=%xmm6,>xmm6=%xmm3
11077movdqa %xmm6,%xmm3
11078
11079# qhasm: xmm6 ^= xmm3
11080# asm 1: pxor <xmm3=int6464#1,<xmm6=int6464#4
11081# asm 2: pxor <xmm3=%xmm0,<xmm6=%xmm3
11082pxor %xmm0,%xmm3
11083
11084# qhasm: xmm7 = xmm4
11085# asm 1: movdqa <xmm4=int6464#3,>xmm7=int6464#6
11086# asm 2: movdqa <xmm4=%xmm2,>xmm7=%xmm5
11087movdqa %xmm2,%xmm5
11088
11089# qhasm: xmm7 &= xmm6
11090# asm 1: pand <xmm6=int6464#4,<xmm7=int6464#6
11091# asm 2: pand <xmm6=%xmm3,<xmm7=%xmm5
11092pand %xmm3,%xmm5
11093
11094# qhasm: xmm7 ^= xmm2
11095# asm 1: pxor <xmm2=int6464#2,<xmm7=int6464#6
11096# asm 2: pxor <xmm2=%xmm1,<xmm7=%xmm5
11097pxor %xmm1,%xmm5
11098
11099# qhasm: xmm5 = xmm1
11100# asm 1: movdqa <xmm1=int6464#5,>xmm5=int6464#8
11101# asm 2: movdqa <xmm1=%xmm4,>xmm5=%xmm7
11102movdqa %xmm4,%xmm7
11103
11104# qhasm: xmm5 ^= xmm0
11105# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
11106# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
11107pxor %xmm6,%xmm7
11108
11109# qhasm: xmm3 ^= xmm2
11110# asm 1: pxor <xmm2=int6464#2,<xmm3=int6464#1
11111# asm 2: pxor <xmm2=%xmm1,<xmm3=%xmm0
11112pxor %xmm1,%xmm0
11113
11114# qhasm: xmm5 &= xmm3
11115# asm 1: pand <xmm3=int6464#1,<xmm5=int6464#8
11116# asm 2: pand <xmm3=%xmm0,<xmm5=%xmm7
11117pand %xmm0,%xmm7
11118
11119# qhasm: xmm5 ^= xmm0
11120# asm 1: pxor <xmm0=int6464#7,<xmm5=int6464#8
11121# asm 2: pxor <xmm0=%xmm6,<xmm5=%xmm7
11122pxor %xmm6,%xmm7
11123
11124# qhasm: xmm1 ^= xmm5
11125# asm 1: pxor <xmm5=int6464#8,<xmm1=int6464#5
11126# asm 2: pxor <xmm5=%xmm7,<xmm1=%xmm4
11127pxor %xmm7,%xmm4
11128
11129# qhasm: xmm2 = xmm6
11130# asm 1: movdqa <xmm6=int6464#4,>xmm2=int6464#1
11131# asm 2: movdqa <xmm6=%xmm3,>xmm2=%xmm0
11132movdqa %xmm3,%xmm0
11133
11134# qhasm: xmm2 ^= xmm5
11135# asm 1: pxor <xmm5=int6464#8,<xmm2=int6464#1
11136# asm 2: pxor <xmm5=%xmm7,<xmm2=%xmm0
11137pxor %xmm7,%xmm0
11138
11139# qhasm: xmm2 &= xmm0
11140# asm 1: pand <xmm0=int6464#7,<xmm2=int6464#1
11141# asm 2: pand <xmm0=%xmm6,<xmm2=%xmm0
11142pand %xmm6,%xmm0
11143
11144# qhasm: xmm1 ^= xmm2
11145# asm 1: pxor <xmm2=int6464#1,<xmm1=int6464#5
11146# asm 2: pxor <xmm2=%xmm0,<xmm1=%xmm4
11147pxor %xmm0,%xmm4
11148
11149# qhasm: xmm6 ^= xmm2
11150# asm 1: pxor <xmm2=int6464#1,<xmm6=int6464#4
11151# asm 2: pxor <xmm2=%xmm0,<xmm6=%xmm3
11152pxor %xmm0,%xmm3
11153
11154# qhasm: xmm6 &= xmm7
11155# asm 1: pand <xmm7=int6464#6,<xmm6=int6464#4
11156# asm 2: pand <xmm7=%xmm5,<xmm6=%xmm3
11157pand %xmm5,%xmm3
11158
11159# qhasm: xmm6 ^= xmm4
11160# asm 1: pxor <xmm4=int6464#3,<xmm6=int6464#4
11161# asm 2: pxor <xmm4=%xmm2,<xmm6=%xmm3
11162pxor %xmm2,%xmm3
11163
11164# qhasm: xmm4 = xmm14
11165# asm 1: movdqa <xmm14=int6464#15,>xmm4=int6464#1
11166# asm 2: movdqa <xmm14=%xmm14,>xmm4=%xmm0
11167movdqa %xmm14,%xmm0
11168
11169# qhasm: xmm0 = xmm13
11170# asm 1: movdqa <xmm13=int6464#14,>xmm0=int6464#2
11171# asm 2: movdqa <xmm13=%xmm13,>xmm0=%xmm1
11172movdqa %xmm13,%xmm1
11173
11174# qhasm: xmm2 = xmm7
11175# asm 1: movdqa <xmm7=int6464#6,>xmm2=int6464#3
11176# asm 2: movdqa <xmm7=%xmm5,>xmm2=%xmm2
11177movdqa %xmm5,%xmm2
11178
11179# qhasm: xmm2 ^= xmm6
11180# asm 1: pxor <xmm6=int6464#4,<xmm2=int6464#3
11181# asm 2: pxor <xmm6=%xmm3,<xmm2=%xmm2
11182pxor %xmm3,%xmm2
11183
11184# qhasm: xmm2 &= xmm14
11185# asm 1: pand <xmm14=int6464#15,<xmm2=int6464#3
11186# asm 2: pand <xmm14=%xmm14,<xmm2=%xmm2
11187pand %xmm14,%xmm2
11188
11189# qhasm: xmm14 ^= xmm13
11190# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
11191# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
11192pxor %xmm13,%xmm14
11193
11194# qhasm: xmm14 &= xmm6
11195# asm 1: pand <xmm6=int6464#4,<xmm14=int6464#15
11196# asm 2: pand <xmm6=%xmm3,<xmm14=%xmm14
11197pand %xmm3,%xmm14
11198
11199# qhasm: xmm13 &= xmm7
11200# asm 1: pand <xmm7=int6464#6,<xmm13=int6464#14
11201# asm 2: pand <xmm7=%xmm5,<xmm13=%xmm13
11202pand %xmm5,%xmm13
11203
11204# qhasm: xmm14 ^= xmm13
11205# asm 1: pxor <xmm13=int6464#14,<xmm14=int6464#15
11206# asm 2: pxor <xmm13=%xmm13,<xmm14=%xmm14
11207pxor %xmm13,%xmm14
11208
11209# qhasm: xmm13 ^= xmm2
11210# asm 1: pxor <xmm2=int6464#3,<xmm13=int6464#14
11211# asm 2: pxor <xmm2=%xmm2,<xmm13=%xmm13
11212pxor %xmm2,%xmm13
11213
11214# qhasm: xmm4 ^= xmm8
11215# asm 1: pxor <xmm8=int6464#9,<xmm4=int6464#1
11216# asm 2: pxor <xmm8=%xmm8,<xmm4=%xmm0
11217pxor %xmm8,%xmm0
11218
11219# qhasm: xmm0 ^= xmm11
11220# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#2
11221# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm1
11222pxor %xmm11,%xmm1
11223
11224# qhasm: xmm7 ^= xmm5
11225# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
11226# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
11227pxor %xmm7,%xmm5
11228
11229# qhasm: xmm6 ^= xmm1
11230# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
11231# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
11232pxor %xmm4,%xmm3
11233
11234# qhasm: xmm3 = xmm7
11235# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11236# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11237movdqa %xmm5,%xmm2
11238
11239# qhasm: xmm3 ^= xmm6
11240# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11241# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11242pxor %xmm3,%xmm2
11243
11244# qhasm: xmm3 &= xmm4
11245# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
11246# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
11247pand %xmm0,%xmm2
11248
11249# qhasm: xmm4 ^= xmm0
11250# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
11251# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
11252pxor %xmm1,%xmm0
11253
11254# qhasm: xmm4 &= xmm6
11255# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
11256# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
11257pand %xmm3,%xmm0
11258
11259# qhasm: xmm0 &= xmm7
11260# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
11261# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
11262pand %xmm5,%xmm1
11263
11264# qhasm: xmm0 ^= xmm4
11265# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
11266# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
11267pxor %xmm0,%xmm1
11268
11269# qhasm: xmm4 ^= xmm3
11270# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
11271# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
11272pxor %xmm2,%xmm0
11273
11274# qhasm: xmm2 = xmm5
11275# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
11276# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
11277movdqa %xmm7,%xmm2
11278
11279# qhasm: xmm2 ^= xmm1
11280# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
11281# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
11282pxor %xmm4,%xmm2
11283
11284# qhasm: xmm2 &= xmm8
11285# asm 1: pand <xmm8=int6464#9,<xmm2=int6464#3
11286# asm 2: pand <xmm8=%xmm8,<xmm2=%xmm2
11287pand %xmm8,%xmm2
11288
11289# qhasm: xmm8 ^= xmm11
11290# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
11291# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
11292pxor %xmm11,%xmm8
11293
11294# qhasm: xmm8 &= xmm1
11295# asm 1: pand <xmm1=int6464#5,<xmm8=int6464#9
11296# asm 2: pand <xmm1=%xmm4,<xmm8=%xmm8
11297pand %xmm4,%xmm8
11298
11299# qhasm: xmm11 &= xmm5
11300# asm 1: pand <xmm5=int6464#8,<xmm11=int6464#12
11301# asm 2: pand <xmm5=%xmm7,<xmm11=%xmm11
11302pand %xmm7,%xmm11
11303
11304# qhasm: xmm8 ^= xmm11
11305# asm 1: pxor <xmm11=int6464#12,<xmm8=int6464#9
11306# asm 2: pxor <xmm11=%xmm11,<xmm8=%xmm8
11307pxor %xmm11,%xmm8
11308
11309# qhasm: xmm11 ^= xmm2
11310# asm 1: pxor <xmm2=int6464#3,<xmm11=int6464#12
11311# asm 2: pxor <xmm2=%xmm2,<xmm11=%xmm11
11312pxor %xmm2,%xmm11
11313
11314# qhasm: xmm14 ^= xmm4
11315# asm 1: pxor <xmm4=int6464#1,<xmm14=int6464#15
11316# asm 2: pxor <xmm4=%xmm0,<xmm14=%xmm14
11317pxor %xmm0,%xmm14
11318
11319# qhasm: xmm8 ^= xmm4
11320# asm 1: pxor <xmm4=int6464#1,<xmm8=int6464#9
11321# asm 2: pxor <xmm4=%xmm0,<xmm8=%xmm8
11322pxor %xmm0,%xmm8
11323
11324# qhasm: xmm13 ^= xmm0
11325# asm 1: pxor <xmm0=int6464#2,<xmm13=int6464#14
11326# asm 2: pxor <xmm0=%xmm1,<xmm13=%xmm13
11327pxor %xmm1,%xmm13
11328
11329# qhasm: xmm11 ^= xmm0
11330# asm 1: pxor <xmm0=int6464#2,<xmm11=int6464#12
11331# asm 2: pxor <xmm0=%xmm1,<xmm11=%xmm11
11332pxor %xmm1,%xmm11
11333
11334# qhasm: xmm4 = xmm15
11335# asm 1: movdqa <xmm15=int6464#16,>xmm4=int6464#1
11336# asm 2: movdqa <xmm15=%xmm15,>xmm4=%xmm0
11337movdqa %xmm15,%xmm0
11338
11339# qhasm: xmm0 = xmm9
11340# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#2
11341# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm1
11342movdqa %xmm9,%xmm1
11343
11344# qhasm: xmm4 ^= xmm12
11345# asm 1: pxor <xmm12=int6464#13,<xmm4=int6464#1
11346# asm 2: pxor <xmm12=%xmm12,<xmm4=%xmm0
11347pxor %xmm12,%xmm0
11348
11349# qhasm: xmm0 ^= xmm10
11350# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#2
11351# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm1
11352pxor %xmm10,%xmm1
11353
11354# qhasm: xmm3 = xmm7
11355# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11356# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11357movdqa %xmm5,%xmm2
11358
11359# qhasm: xmm3 ^= xmm6
11360# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11361# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11362pxor %xmm3,%xmm2
11363
11364# qhasm: xmm3 &= xmm4
11365# asm 1: pand <xmm4=int6464#1,<xmm3=int6464#3
11366# asm 2: pand <xmm4=%xmm0,<xmm3=%xmm2
11367pand %xmm0,%xmm2
11368
11369# qhasm: xmm4 ^= xmm0
11370# asm 1: pxor <xmm0=int6464#2,<xmm4=int6464#1
11371# asm 2: pxor <xmm0=%xmm1,<xmm4=%xmm0
11372pxor %xmm1,%xmm0
11373
11374# qhasm: xmm4 &= xmm6
11375# asm 1: pand <xmm6=int6464#4,<xmm4=int6464#1
11376# asm 2: pand <xmm6=%xmm3,<xmm4=%xmm0
11377pand %xmm3,%xmm0
11378
11379# qhasm: xmm0 &= xmm7
11380# asm 1: pand <xmm7=int6464#6,<xmm0=int6464#2
11381# asm 2: pand <xmm7=%xmm5,<xmm0=%xmm1
11382pand %xmm5,%xmm1
11383
11384# qhasm: xmm0 ^= xmm4
11385# asm 1: pxor <xmm4=int6464#1,<xmm0=int6464#2
11386# asm 2: pxor <xmm4=%xmm0,<xmm0=%xmm1
11387pxor %xmm0,%xmm1
11388
11389# qhasm: xmm4 ^= xmm3
11390# asm 1: pxor <xmm3=int6464#3,<xmm4=int6464#1
11391# asm 2: pxor <xmm3=%xmm2,<xmm4=%xmm0
11392pxor %xmm2,%xmm0
11393
11394# qhasm: xmm2 = xmm5
11395# asm 1: movdqa <xmm5=int6464#8,>xmm2=int6464#3
11396# asm 2: movdqa <xmm5=%xmm7,>xmm2=%xmm2
11397movdqa %xmm7,%xmm2
11398
11399# qhasm: xmm2 ^= xmm1
11400# asm 1: pxor <xmm1=int6464#5,<xmm2=int6464#3
11401# asm 2: pxor <xmm1=%xmm4,<xmm2=%xmm2
11402pxor %xmm4,%xmm2
11403
11404# qhasm: xmm2 &= xmm12
11405# asm 1: pand <xmm12=int6464#13,<xmm2=int6464#3
11406# asm 2: pand <xmm12=%xmm12,<xmm2=%xmm2
11407pand %xmm12,%xmm2
11408
11409# qhasm: xmm12 ^= xmm10
11410# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
11411# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
11412pxor %xmm10,%xmm12
11413
11414# qhasm: xmm12 &= xmm1
11415# asm 1: pand <xmm1=int6464#5,<xmm12=int6464#13
11416# asm 2: pand <xmm1=%xmm4,<xmm12=%xmm12
11417pand %xmm4,%xmm12
11418
11419# qhasm: xmm10 &= xmm5
11420# asm 1: pand <xmm5=int6464#8,<xmm10=int6464#11
11421# asm 2: pand <xmm5=%xmm7,<xmm10=%xmm10
11422pand %xmm7,%xmm10
11423
11424# qhasm: xmm12 ^= xmm10
11425# asm 1: pxor <xmm10=int6464#11,<xmm12=int6464#13
11426# asm 2: pxor <xmm10=%xmm10,<xmm12=%xmm12
11427pxor %xmm10,%xmm12
11428
11429# qhasm: xmm10 ^= xmm2
11430# asm 1: pxor <xmm2=int6464#3,<xmm10=int6464#11
11431# asm 2: pxor <xmm2=%xmm2,<xmm10=%xmm10
11432pxor %xmm2,%xmm10
11433
11434# qhasm: xmm7 ^= xmm5
11435# asm 1: pxor <xmm5=int6464#8,<xmm7=int6464#6
11436# asm 2: pxor <xmm5=%xmm7,<xmm7=%xmm5
11437pxor %xmm7,%xmm5
11438
11439# qhasm: xmm6 ^= xmm1
11440# asm 1: pxor <xmm1=int6464#5,<xmm6=int6464#4
11441# asm 2: pxor <xmm1=%xmm4,<xmm6=%xmm3
11442pxor %xmm4,%xmm3
11443
11444# qhasm: xmm3 = xmm7
11445# asm 1: movdqa <xmm7=int6464#6,>xmm3=int6464#3
11446# asm 2: movdqa <xmm7=%xmm5,>xmm3=%xmm2
11447movdqa %xmm5,%xmm2
11448
11449# qhasm: xmm3 ^= xmm6
11450# asm 1: pxor <xmm6=int6464#4,<xmm3=int6464#3
11451# asm 2: pxor <xmm6=%xmm3,<xmm3=%xmm2
11452pxor %xmm3,%xmm2
11453
11454# qhasm: xmm3 &= xmm15
11455# asm 1: pand <xmm15=int6464#16,<xmm3=int6464#3
11456# asm 2: pand <xmm15=%xmm15,<xmm3=%xmm2
11457pand %xmm15,%xmm2
11458
11459# qhasm: xmm15 ^= xmm9
11460# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
11461# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
11462pxor %xmm9,%xmm15
11463
11464# qhasm: xmm15 &= xmm6
11465# asm 1: pand <xmm6=int6464#4,<xmm15=int6464#16
11466# asm 2: pand <xmm6=%xmm3,<xmm15=%xmm15
11467pand %xmm3,%xmm15
11468
11469# qhasm: xmm9 &= xmm7
11470# asm 1: pand <xmm7=int6464#6,<xmm9=int6464#10
11471# asm 2: pand <xmm7=%xmm5,<xmm9=%xmm9
11472pand %xmm5,%xmm9
11473
11474# qhasm: xmm15 ^= xmm9
11475# asm 1: pxor <xmm9=int6464#10,<xmm15=int6464#16
11476# asm 2: pxor <xmm9=%xmm9,<xmm15=%xmm15
11477pxor %xmm9,%xmm15
11478
11479# qhasm: xmm9 ^= xmm3
11480# asm 1: pxor <xmm3=int6464#3,<xmm9=int6464#10
11481# asm 2: pxor <xmm3=%xmm2,<xmm9=%xmm9
11482pxor %xmm2,%xmm9
11483
11484# qhasm: xmm15 ^= xmm4
11485# asm 1: pxor <xmm4=int6464#1,<xmm15=int6464#16
11486# asm 2: pxor <xmm4=%xmm0,<xmm15=%xmm15
11487pxor %xmm0,%xmm15
11488
11489# qhasm: xmm12 ^= xmm4
11490# asm 1: pxor <xmm4=int6464#1,<xmm12=int6464#13
11491# asm 2: pxor <xmm4=%xmm0,<xmm12=%xmm12
11492pxor %xmm0,%xmm12
11493
11494# qhasm: xmm9 ^= xmm0
11495# asm 1: pxor <xmm0=int6464#2,<xmm9=int6464#10
11496# asm 2: pxor <xmm0=%xmm1,<xmm9=%xmm9
11497pxor %xmm1,%xmm9
11498
11499# qhasm: xmm10 ^= xmm0
11500# asm 1: pxor <xmm0=int6464#2,<xmm10=int6464#11
11501# asm 2: pxor <xmm0=%xmm1,<xmm10=%xmm10
11502pxor %xmm1,%xmm10
11503
11504# qhasm: xmm15 ^= xmm8
11505# asm 1: pxor <xmm8=int6464#9,<xmm15=int6464#16
11506# asm 2: pxor <xmm8=%xmm8,<xmm15=%xmm15
11507pxor %xmm8,%xmm15
11508
11509# qhasm: xmm9 ^= xmm14
11510# asm 1: pxor <xmm14=int6464#15,<xmm9=int6464#10
11511# asm 2: pxor <xmm14=%xmm14,<xmm9=%xmm9
11512pxor %xmm14,%xmm9
11513
11514# qhasm: xmm12 ^= xmm15
11515# asm 1: pxor <xmm15=int6464#16,<xmm12=int6464#13
11516# asm 2: pxor <xmm15=%xmm15,<xmm12=%xmm12
11517pxor %xmm15,%xmm12
11518
11519# qhasm: xmm14 ^= xmm8
11520# asm 1: pxor <xmm8=int6464#9,<xmm14=int6464#15
11521# asm 2: pxor <xmm8=%xmm8,<xmm14=%xmm14
11522pxor %xmm8,%xmm14
11523
11524# qhasm: xmm8 ^= xmm9
11525# asm 1: pxor <xmm9=int6464#10,<xmm8=int6464#9
11526# asm 2: pxor <xmm9=%xmm9,<xmm8=%xmm8
11527pxor %xmm9,%xmm8
11528
11529# qhasm: xmm9 ^= xmm13
11530# asm 1: pxor <xmm13=int6464#14,<xmm9=int6464#10
11531# asm 2: pxor <xmm13=%xmm13,<xmm9=%xmm9
11532pxor %xmm13,%xmm9
11533
11534# qhasm: xmm13 ^= xmm10
11535# asm 1: pxor <xmm10=int6464#11,<xmm13=int6464#14
11536# asm 2: pxor <xmm10=%xmm10,<xmm13=%xmm13
11537pxor %xmm10,%xmm13
11538
11539# qhasm: xmm12 ^= xmm13
11540# asm 1: pxor <xmm13=int6464#14,<xmm12=int6464#13
11541# asm 2: pxor <xmm13=%xmm13,<xmm12=%xmm12
11542pxor %xmm13,%xmm12
11543
11544# qhasm: xmm10 ^= xmm11
11545# asm 1: pxor <xmm11=int6464#12,<xmm10=int6464#11
11546# asm 2: pxor <xmm11=%xmm11,<xmm10=%xmm10
11547pxor %xmm11,%xmm10
11548
11549# qhasm: xmm11 ^= xmm13
11550# asm 1: pxor <xmm13=int6464#14,<xmm11=int6464#12
11551# asm 2: pxor <xmm13=%xmm13,<xmm11=%xmm11
11552pxor %xmm13,%xmm11
11553
11554# qhasm: xmm14 ^= xmm11
11555# asm 1: pxor <xmm11=int6464#12,<xmm14=int6464#15
11556# asm 2: pxor <xmm11=%xmm11,<xmm14=%xmm14
11557pxor %xmm11,%xmm14
11558
11559# qhasm: xmm8 ^= *(int128 *)(c + 1280)
11560# asm 1: pxor 1280(<c=int64#5),<xmm8=int6464#9
11561# asm 2: pxor 1280(<c=%r8),<xmm8=%xmm8
11562pxor 1280(%r8),%xmm8
11563
11564# qhasm: xmm9 ^= *(int128 *)(c + 1296)
11565# asm 1: pxor 1296(<c=int64#5),<xmm9=int6464#10
11566# asm 2: pxor 1296(<c=%r8),<xmm9=%xmm9
11567pxor 1296(%r8),%xmm9
11568
11569# qhasm: xmm12 ^= *(int128 *)(c + 1312)
11570# asm 1: pxor 1312(<c=int64#5),<xmm12=int6464#13
11571# asm 2: pxor 1312(<c=%r8),<xmm12=%xmm12
11572pxor 1312(%r8),%xmm12
11573
11574# qhasm: xmm14 ^= *(int128 *)(c + 1328)
11575# asm 1: pxor 1328(<c=int64#5),<xmm14=int6464#15
11576# asm 2: pxor 1328(<c=%r8),<xmm14=%xmm14
11577pxor 1328(%r8),%xmm14
11578
11579# qhasm: xmm11 ^= *(int128 *)(c + 1344)
11580# asm 1: pxor 1344(<c=int64#5),<xmm11=int6464#12
11581# asm 2: pxor 1344(<c=%r8),<xmm11=%xmm11
11582pxor 1344(%r8),%xmm11
11583
11584# qhasm: xmm15 ^= *(int128 *)(c + 1360)
11585# asm 1: pxor 1360(<c=int64#5),<xmm15=int6464#16
11586# asm 2: pxor 1360(<c=%r8),<xmm15=%xmm15
11587pxor 1360(%r8),%xmm15
11588
11589# qhasm: xmm10 ^= *(int128 *)(c + 1376)
11590# asm 1: pxor 1376(<c=int64#5),<xmm10=int6464#11
11591# asm 2: pxor 1376(<c=%r8),<xmm10=%xmm10
11592pxor 1376(%r8),%xmm10
11593
11594# qhasm: xmm13 ^= *(int128 *)(c + 1392)
11595# asm 1: pxor 1392(<c=int64#5),<xmm13=int6464#14
11596# asm 2: pxor 1392(<c=%r8),<xmm13=%xmm13
11597pxor 1392(%r8),%xmm13
11598
11599# qhasm: xmm0 = xmm10
11600# asm 1: movdqa <xmm10=int6464#11,>xmm0=int6464#1
11601# asm 2: movdqa <xmm10=%xmm10,>xmm0=%xmm0
11602movdqa %xmm10,%xmm0
11603
11604# qhasm: uint6464 xmm0 >>= 1
11605# asm 1: psrlq $1,<xmm0=int6464#1
11606# asm 2: psrlq $1,<xmm0=%xmm0
11607psrlq $1,%xmm0
11608
11609# qhasm: xmm0 ^= xmm13
11610# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11611# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11612pxor %xmm13,%xmm0
11613
11614# qhasm: xmm0 &= BS0
11615# asm 1: pand BS0,<xmm0=int6464#1
11616# asm 2: pand BS0,<xmm0=%xmm0
11617pand BS0,%xmm0
11618
11619# qhasm: xmm13 ^= xmm0
11620# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11621# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11622pxor %xmm0,%xmm13
11623
11624# qhasm: uint6464 xmm0 <<= 1
11625# asm 1: psllq $1,<xmm0=int6464#1
11626# asm 2: psllq $1,<xmm0=%xmm0
11627psllq $1,%xmm0
11628
11629# qhasm: xmm10 ^= xmm0
11630# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11631# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11632pxor %xmm0,%xmm10
11633
11634# qhasm: xmm0 = xmm11
11635# asm 1: movdqa <xmm11=int6464#12,>xmm0=int6464#1
11636# asm 2: movdqa <xmm11=%xmm11,>xmm0=%xmm0
11637movdqa %xmm11,%xmm0
11638
11639# qhasm: uint6464 xmm0 >>= 1
11640# asm 1: psrlq $1,<xmm0=int6464#1
11641# asm 2: psrlq $1,<xmm0=%xmm0
11642psrlq $1,%xmm0
11643
11644# qhasm: xmm0 ^= xmm15
11645# asm 1: pxor <xmm15=int6464#16,<xmm0=int6464#1
11646# asm 2: pxor <xmm15=%xmm15,<xmm0=%xmm0
11647pxor %xmm15,%xmm0
11648
11649# qhasm: xmm0 &= BS0
11650# asm 1: pand BS0,<xmm0=int6464#1
11651# asm 2: pand BS0,<xmm0=%xmm0
11652pand BS0,%xmm0
11653
11654# qhasm: xmm15 ^= xmm0
11655# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11656# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11657pxor %xmm0,%xmm15
11658
11659# qhasm: uint6464 xmm0 <<= 1
11660# asm 1: psllq $1,<xmm0=int6464#1
11661# asm 2: psllq $1,<xmm0=%xmm0
11662psllq $1,%xmm0
11663
11664# qhasm: xmm11 ^= xmm0
11665# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
11666# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
11667pxor %xmm0,%xmm11
11668
11669# qhasm: xmm0 = xmm12
11670# asm 1: movdqa <xmm12=int6464#13,>xmm0=int6464#1
11671# asm 2: movdqa <xmm12=%xmm12,>xmm0=%xmm0
11672movdqa %xmm12,%xmm0
11673
11674# qhasm: uint6464 xmm0 >>= 1
11675# asm 1: psrlq $1,<xmm0=int6464#1
11676# asm 2: psrlq $1,<xmm0=%xmm0
11677psrlq $1,%xmm0
11678
11679# qhasm: xmm0 ^= xmm14
11680# asm 1: pxor <xmm14=int6464#15,<xmm0=int6464#1
11681# asm 2: pxor <xmm14=%xmm14,<xmm0=%xmm0
11682pxor %xmm14,%xmm0
11683
11684# qhasm: xmm0 &= BS0
11685# asm 1: pand BS0,<xmm0=int6464#1
11686# asm 2: pand BS0,<xmm0=%xmm0
11687pand BS0,%xmm0
11688
11689# qhasm: xmm14 ^= xmm0
11690# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11691# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11692pxor %xmm0,%xmm14
11693
11694# qhasm: uint6464 xmm0 <<= 1
11695# asm 1: psllq $1,<xmm0=int6464#1
11696# asm 2: psllq $1,<xmm0=%xmm0
11697psllq $1,%xmm0
11698
11699# qhasm: xmm12 ^= xmm0
11700# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11701# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11702pxor %xmm0,%xmm12
11703
11704# qhasm: xmm0 = xmm8
11705# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11706# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11707movdqa %xmm8,%xmm0
11708
11709# qhasm: uint6464 xmm0 >>= 1
11710# asm 1: psrlq $1,<xmm0=int6464#1
11711# asm 2: psrlq $1,<xmm0=%xmm0
11712psrlq $1,%xmm0
11713
11714# qhasm: xmm0 ^= xmm9
11715# asm 1: pxor <xmm9=int6464#10,<xmm0=int6464#1
11716# asm 2: pxor <xmm9=%xmm9,<xmm0=%xmm0
11717pxor %xmm9,%xmm0
11718
11719# qhasm: xmm0 &= BS0
11720# asm 1: pand BS0,<xmm0=int6464#1
11721# asm 2: pand BS0,<xmm0=%xmm0
11722pand BS0,%xmm0
11723
11724# qhasm: xmm9 ^= xmm0
11725# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11726# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11727pxor %xmm0,%xmm9
11728
11729# qhasm: uint6464 xmm0 <<= 1
11730# asm 1: psllq $1,<xmm0=int6464#1
11731# asm 2: psllq $1,<xmm0=%xmm0
11732psllq $1,%xmm0
11733
11734# qhasm: xmm8 ^= xmm0
11735# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
11736# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
11737pxor %xmm0,%xmm8
11738
11739# qhasm: xmm0 = xmm15
11740# asm 1: movdqa <xmm15=int6464#16,>xmm0=int6464#1
11741# asm 2: movdqa <xmm15=%xmm15,>xmm0=%xmm0
11742movdqa %xmm15,%xmm0
11743
11744# qhasm: uint6464 xmm0 >>= 2
11745# asm 1: psrlq $2,<xmm0=int6464#1
11746# asm 2: psrlq $2,<xmm0=%xmm0
11747psrlq $2,%xmm0
11748
11749# qhasm: xmm0 ^= xmm13
11750# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11751# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11752pxor %xmm13,%xmm0
11753
11754# qhasm: xmm0 &= BS1
11755# asm 1: pand BS1,<xmm0=int6464#1
11756# asm 2: pand BS1,<xmm0=%xmm0
11757pand BS1,%xmm0
11758
11759# qhasm: xmm13 ^= xmm0
11760# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11761# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11762pxor %xmm0,%xmm13
11763
11764# qhasm: uint6464 xmm0 <<= 2
11765# asm 1: psllq $2,<xmm0=int6464#1
11766# asm 2: psllq $2,<xmm0=%xmm0
11767psllq $2,%xmm0
11768
11769# qhasm: xmm15 ^= xmm0
11770# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11771# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11772pxor %xmm0,%xmm15
11773
11774# qhasm: xmm0 = xmm11
11775# asm 1: movdqa <xmm11=int6464#12,>xmm0=int6464#1
11776# asm 2: movdqa <xmm11=%xmm11,>xmm0=%xmm0
11777movdqa %xmm11,%xmm0
11778
11779# qhasm: uint6464 xmm0 >>= 2
11780# asm 1: psrlq $2,<xmm0=int6464#1
11781# asm 2: psrlq $2,<xmm0=%xmm0
11782psrlq $2,%xmm0
11783
11784# qhasm: xmm0 ^= xmm10
11785# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#1
11786# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm0
11787pxor %xmm10,%xmm0
11788
11789# qhasm: xmm0 &= BS1
11790# asm 1: pand BS1,<xmm0=int6464#1
11791# asm 2: pand BS1,<xmm0=%xmm0
11792pand BS1,%xmm0
11793
11794# qhasm: xmm10 ^= xmm0
11795# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11796# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11797pxor %xmm0,%xmm10
11798
11799# qhasm: uint6464 xmm0 <<= 2
11800# asm 1: psllq $2,<xmm0=int6464#1
11801# asm 2: psllq $2,<xmm0=%xmm0
11802psllq $2,%xmm0
11803
11804# qhasm: xmm11 ^= xmm0
11805# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
11806# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
11807pxor %xmm0,%xmm11
11808
11809# qhasm: xmm0 = xmm9
11810# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#1
11811# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm0
11812movdqa %xmm9,%xmm0
11813
11814# qhasm: uint6464 xmm0 >>= 2
11815# asm 1: psrlq $2,<xmm0=int6464#1
11816# asm 2: psrlq $2,<xmm0=%xmm0
11817psrlq $2,%xmm0
11818
11819# qhasm: xmm0 ^= xmm14
11820# asm 1: pxor <xmm14=int6464#15,<xmm0=int6464#1
11821# asm 2: pxor <xmm14=%xmm14,<xmm0=%xmm0
11822pxor %xmm14,%xmm0
11823
11824# qhasm: xmm0 &= BS1
11825# asm 1: pand BS1,<xmm0=int6464#1
11826# asm 2: pand BS1,<xmm0=%xmm0
11827pand BS1,%xmm0
11828
11829# qhasm: xmm14 ^= xmm0
11830# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11831# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11832pxor %xmm0,%xmm14
11833
11834# qhasm: uint6464 xmm0 <<= 2
11835# asm 1: psllq $2,<xmm0=int6464#1
11836# asm 2: psllq $2,<xmm0=%xmm0
11837psllq $2,%xmm0
11838
11839# qhasm: xmm9 ^= xmm0
11840# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11841# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11842pxor %xmm0,%xmm9
11843
11844# qhasm: xmm0 = xmm8
11845# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11846# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11847movdqa %xmm8,%xmm0
11848
11849# qhasm: uint6464 xmm0 >>= 2
11850# asm 1: psrlq $2,<xmm0=int6464#1
11851# asm 2: psrlq $2,<xmm0=%xmm0
11852psrlq $2,%xmm0
11853
11854# qhasm: xmm0 ^= xmm12
11855# asm 1: pxor <xmm12=int6464#13,<xmm0=int6464#1
11856# asm 2: pxor <xmm12=%xmm12,<xmm0=%xmm0
11857pxor %xmm12,%xmm0
11858
11859# qhasm: xmm0 &= BS1
11860# asm 1: pand BS1,<xmm0=int6464#1
11861# asm 2: pand BS1,<xmm0=%xmm0
11862pand BS1,%xmm0
11863
11864# qhasm: xmm12 ^= xmm0
11865# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11866# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11867pxor %xmm0,%xmm12
11868
11869# qhasm: uint6464 xmm0 <<= 2
11870# asm 1: psllq $2,<xmm0=int6464#1
11871# asm 2: psllq $2,<xmm0=%xmm0
11872psllq $2,%xmm0
11873
11874# qhasm: xmm8 ^= xmm0
11875# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
11876# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
11877pxor %xmm0,%xmm8
11878
11879# qhasm: xmm0 = xmm14
11880# asm 1: movdqa <xmm14=int6464#15,>xmm0=int6464#1
11881# asm 2: movdqa <xmm14=%xmm14,>xmm0=%xmm0
11882movdqa %xmm14,%xmm0
11883
11884# qhasm: uint6464 xmm0 >>= 4
11885# asm 1: psrlq $4,<xmm0=int6464#1
11886# asm 2: psrlq $4,<xmm0=%xmm0
11887psrlq $4,%xmm0
11888
11889# qhasm: xmm0 ^= xmm13
11890# asm 1: pxor <xmm13=int6464#14,<xmm0=int6464#1
11891# asm 2: pxor <xmm13=%xmm13,<xmm0=%xmm0
11892pxor %xmm13,%xmm0
11893
11894# qhasm: xmm0 &= BS2
11895# asm 1: pand BS2,<xmm0=int6464#1
11896# asm 2: pand BS2,<xmm0=%xmm0
11897pand BS2,%xmm0
11898
11899# qhasm: xmm13 ^= xmm0
11900# asm 1: pxor <xmm0=int6464#1,<xmm13=int6464#14
11901# asm 2: pxor <xmm0=%xmm0,<xmm13=%xmm13
11902pxor %xmm0,%xmm13
11903
11904# qhasm: uint6464 xmm0 <<= 4
11905# asm 1: psllq $4,<xmm0=int6464#1
11906# asm 2: psllq $4,<xmm0=%xmm0
11907psllq $4,%xmm0
11908
11909# qhasm: xmm14 ^= xmm0
11910# asm 1: pxor <xmm0=int6464#1,<xmm14=int6464#15
11911# asm 2: pxor <xmm0=%xmm0,<xmm14=%xmm14
11912pxor %xmm0,%xmm14
11913
11914# qhasm: xmm0 = xmm12
11915# asm 1: movdqa <xmm12=int6464#13,>xmm0=int6464#1
11916# asm 2: movdqa <xmm12=%xmm12,>xmm0=%xmm0
11917movdqa %xmm12,%xmm0
11918
11919# qhasm: uint6464 xmm0 >>= 4
11920# asm 1: psrlq $4,<xmm0=int6464#1
11921# asm 2: psrlq $4,<xmm0=%xmm0
11922psrlq $4,%xmm0
11923
11924# qhasm: xmm0 ^= xmm10
11925# asm 1: pxor <xmm10=int6464#11,<xmm0=int6464#1
11926# asm 2: pxor <xmm10=%xmm10,<xmm0=%xmm0
11927pxor %xmm10,%xmm0
11928
11929# qhasm: xmm0 &= BS2
11930# asm 1: pand BS2,<xmm0=int6464#1
11931# asm 2: pand BS2,<xmm0=%xmm0
11932pand BS2,%xmm0
11933
11934# qhasm: xmm10 ^= xmm0
11935# asm 1: pxor <xmm0=int6464#1,<xmm10=int6464#11
11936# asm 2: pxor <xmm0=%xmm0,<xmm10=%xmm10
11937pxor %xmm0,%xmm10
11938
11939# qhasm: uint6464 xmm0 <<= 4
11940# asm 1: psllq $4,<xmm0=int6464#1
11941# asm 2: psllq $4,<xmm0=%xmm0
11942psllq $4,%xmm0
11943
11944# qhasm: xmm12 ^= xmm0
11945# asm 1: pxor <xmm0=int6464#1,<xmm12=int6464#13
11946# asm 2: pxor <xmm0=%xmm0,<xmm12=%xmm12
11947pxor %xmm0,%xmm12
11948
11949# qhasm: xmm0 = xmm9
11950# asm 1: movdqa <xmm9=int6464#10,>xmm0=int6464#1
11951# asm 2: movdqa <xmm9=%xmm9,>xmm0=%xmm0
11952movdqa %xmm9,%xmm0
11953
11954# qhasm: uint6464 xmm0 >>= 4
11955# asm 1: psrlq $4,<xmm0=int6464#1
11956# asm 2: psrlq $4,<xmm0=%xmm0
11957psrlq $4,%xmm0
11958
11959# qhasm: xmm0 ^= xmm15
11960# asm 1: pxor <xmm15=int6464#16,<xmm0=int6464#1
11961# asm 2: pxor <xmm15=%xmm15,<xmm0=%xmm0
11962pxor %xmm15,%xmm0
11963
11964# qhasm: xmm0 &= BS2
11965# asm 1: pand BS2,<xmm0=int6464#1
11966# asm 2: pand BS2,<xmm0=%xmm0
11967pand BS2,%xmm0
11968
11969# qhasm: xmm15 ^= xmm0
11970# asm 1: pxor <xmm0=int6464#1,<xmm15=int6464#16
11971# asm 2: pxor <xmm0=%xmm0,<xmm15=%xmm15
11972pxor %xmm0,%xmm15
11973
11974# qhasm: uint6464 xmm0 <<= 4
11975# asm 1: psllq $4,<xmm0=int6464#1
11976# asm 2: psllq $4,<xmm0=%xmm0
11977psllq $4,%xmm0
11978
11979# qhasm: xmm9 ^= xmm0
11980# asm 1: pxor <xmm0=int6464#1,<xmm9=int6464#10
11981# asm 2: pxor <xmm0=%xmm0,<xmm9=%xmm9
11982pxor %xmm0,%xmm9
11983
11984# qhasm: xmm0 = xmm8
11985# asm 1: movdqa <xmm8=int6464#9,>xmm0=int6464#1
11986# asm 2: movdqa <xmm8=%xmm8,>xmm0=%xmm0
11987movdqa %xmm8,%xmm0
11988
11989# qhasm: uint6464 xmm0 >>= 4
11990# asm 1: psrlq $4,<xmm0=int6464#1
11991# asm 2: psrlq $4,<xmm0=%xmm0
11992psrlq $4,%xmm0
11993
11994# qhasm: xmm0 ^= xmm11
11995# asm 1: pxor <xmm11=int6464#12,<xmm0=int6464#1
11996# asm 2: pxor <xmm11=%xmm11,<xmm0=%xmm0
11997pxor %xmm11,%xmm0
11998
11999# qhasm: xmm0 &= BS2
12000# asm 1: pand BS2,<xmm0=int6464#1
12001# asm 2: pand BS2,<xmm0=%xmm0
12002pand BS2,%xmm0
12003
12004# qhasm: xmm11 ^= xmm0
12005# asm 1: pxor <xmm0=int6464#1,<xmm11=int6464#12
12006# asm 2: pxor <xmm0=%xmm0,<xmm11=%xmm11
12007pxor %xmm0,%xmm11
12008
12009# qhasm: uint6464 xmm0 <<= 4
12010# asm 1: psllq $4,<xmm0=int6464#1
12011# asm 2: psllq $4,<xmm0=%xmm0
12012psllq $4,%xmm0
12013
12014# qhasm: xmm8 ^= xmm0
12015# asm 1: pxor <xmm0=int6464#1,<xmm8=int6464#9
12016# asm 2: pxor <xmm0=%xmm0,<xmm8=%xmm8
12017pxor %xmm0,%xmm8
12018
12019# qhasm: unsigned<? =? len-128
12020# asm 1: cmp $128,<len=int64#3
12021# asm 2: cmp $128,<len=%rdx
12022cmp $128,%rdx
12023# comment:fp stack unchanged by jump
12024
12025# qhasm: goto partial if unsigned<
12026jb ._partial
12027# comment:fp stack unchanged by jump
12028
12029# qhasm: goto full if =
12030je ._full
12031
12032# qhasm: tmp = *(uint32 *)(np + 12)
12033# asm 1: movl 12(<np=int64#4),>tmp=int64#6d
12034# asm 2: movl 12(<np=%rcx),>tmp=%r9d
12035movl 12(%rcx),%r9d
12036
12037# qhasm: (uint32) bswap tmp
12038# asm 1: bswap <tmp=int64#6d
12039# asm 2: bswap <tmp=%r9d
12040bswap %r9d
12041
12042# qhasm: tmp += 8
12043# asm 1: add $8,<tmp=int64#6
12044# asm 2: add $8,<tmp=%r9
12045add $8,%r9
12046
12047# qhasm: (uint32) bswap tmp
12048# asm 1: bswap <tmp=int64#6d
12049# asm 2: bswap <tmp=%r9d
12050bswap %r9d
12051
12052# qhasm: *(uint32 *)(np + 12) = tmp
12053# asm 1: movl <tmp=int64#6d,12(<np=int64#4)
12054# asm 2: movl <tmp=%r9d,12(<np=%rcx)
12055movl %r9d,12(%rcx)
12056
12057# qhasm: xmm8 ^= *(int128 *)(inp + 0)
12058# asm 1: pxor 0(<inp=int64#2),<xmm8=int6464#9
12059# asm 2: pxor 0(<inp=%rsi),<xmm8=%xmm8
12060pxor 0(%rsi),%xmm8
12061
12062# qhasm: xmm9 ^= *(int128 *)(inp + 16)
12063# asm 1: pxor 16(<inp=int64#2),<xmm9=int6464#10
12064# asm 2: pxor 16(<inp=%rsi),<xmm9=%xmm9
12065pxor 16(%rsi),%xmm9
12066
12067# qhasm: xmm12 ^= *(int128 *)(inp + 32)
12068# asm 1: pxor 32(<inp=int64#2),<xmm12=int6464#13
12069# asm 2: pxor 32(<inp=%rsi),<xmm12=%xmm12
12070pxor 32(%rsi),%xmm12
12071
12072# qhasm: xmm14 ^= *(int128 *)(inp + 48)
12073# asm 1: pxor 48(<inp=int64#2),<xmm14=int6464#15
12074# asm 2: pxor 48(<inp=%rsi),<xmm14=%xmm14
12075pxor 48(%rsi),%xmm14
12076
12077# qhasm: xmm11 ^= *(int128 *)(inp + 64)
12078# asm 1: pxor 64(<inp=int64#2),<xmm11=int6464#12
12079# asm 2: pxor 64(<inp=%rsi),<xmm11=%xmm11
12080pxor 64(%rsi),%xmm11
12081
12082# qhasm: xmm15 ^= *(int128 *)(inp + 80)
12083# asm 1: pxor 80(<inp=int64#2),<xmm15=int6464#16
12084# asm 2: pxor 80(<inp=%rsi),<xmm15=%xmm15
12085pxor 80(%rsi),%xmm15
12086
12087# qhasm: xmm10 ^= *(int128 *)(inp + 96)
12088# asm 1: pxor 96(<inp=int64#2),<xmm10=int6464#11
12089# asm 2: pxor 96(<inp=%rsi),<xmm10=%xmm10
12090pxor 96(%rsi),%xmm10
12091
12092# qhasm: xmm13 ^= *(int128 *)(inp + 112)
12093# asm 1: pxor 112(<inp=int64#2),<xmm13=int6464#14
12094# asm 2: pxor 112(<inp=%rsi),<xmm13=%xmm13
12095pxor 112(%rsi),%xmm13
12096
12097# qhasm: *(int128 *) (outp + 0) = xmm8
12098# asm 1: movdqa <xmm8=int6464#9,0(<outp=int64#1)
12099# asm 2: movdqa <xmm8=%xmm8,0(<outp=%rdi)
12100movdqa %xmm8,0(%rdi)
12101
12102# qhasm: *(int128 *) (outp + 16) = xmm9
12103# asm 1: movdqa <xmm9=int6464#10,16(<outp=int64#1)
12104# asm 2: movdqa <xmm9=%xmm9,16(<outp=%rdi)
12105movdqa %xmm9,16(%rdi)
12106
12107# qhasm: *(int128 *) (outp + 32) = xmm12
12108# asm 1: movdqa <xmm12=int6464#13,32(<outp=int64#1)
12109# asm 2: movdqa <xmm12=%xmm12,32(<outp=%rdi)
12110movdqa %xmm12,32(%rdi)
12111
12112# qhasm: *(int128 *) (outp + 48) = xmm14
12113# asm 1: movdqa <xmm14=int6464#15,48(<outp=int64#1)
12114# asm 2: movdqa <xmm14=%xmm14,48(<outp=%rdi)
12115movdqa %xmm14,48(%rdi)
12116
12117# qhasm: *(int128 *) (outp + 64) = xmm11
12118# asm 1: movdqa <xmm11=int6464#12,64(<outp=int64#1)
12119# asm 2: movdqa <xmm11=%xmm11,64(<outp=%rdi)
12120movdqa %xmm11,64(%rdi)
12121
12122# qhasm: *(int128 *) (outp + 80) = xmm15
12123# asm 1: movdqa <xmm15=int6464#16,80(<outp=int64#1)
12124# asm 2: movdqa <xmm15=%xmm15,80(<outp=%rdi)
12125movdqa %xmm15,80(%rdi)
12126
12127# qhasm: *(int128 *) (outp + 96) = xmm10
12128# asm 1: movdqa <xmm10=int6464#11,96(<outp=int64#1)
12129# asm 2: movdqa <xmm10=%xmm10,96(<outp=%rdi)
12130movdqa %xmm10,96(%rdi)
12131
12132# qhasm: *(int128 *) (outp + 112) = xmm13
12133# asm 1: movdqa <xmm13=int6464#14,112(<outp=int64#1)
12134# asm 2: movdqa <xmm13=%xmm13,112(<outp=%rdi)
12135movdqa %xmm13,112(%rdi)
12136
12137# qhasm: len -= 128
12138# asm 1: sub $128,<len=int64#3
12139# asm 2: sub $128,<len=%rdx
12140sub $128,%rdx
12141
12142# qhasm: inp += 128
12143# asm 1: add $128,<inp=int64#2
12144# asm 2: add $128,<inp=%rsi
12145add $128,%rsi
12146
12147# qhasm: outp += 128
12148# asm 1: add $128,<outp=int64#1
12149# asm 2: add $128,<outp=%rdi
12150add $128,%rdi
12151# comment:fp stack unchanged by jump
12152
12153# qhasm: goto enc_block
12154jmp ._enc_block
12155
12156# qhasm: partial:
12157._partial:
12158
12159# qhasm: lensav = len
12160# asm 1: mov <len=int64#3,>lensav=int64#5
12161# asm 2: mov <len=%rdx,>lensav=%r8
12162mov %rdx,%r8
12163
12164# qhasm: (uint32) len >>= 4
12165# asm 1: shr $4,<len=int64#3d
12166# asm 2: shr $4,<len=%edx
12167shr $4,%edx
12168
12169# qhasm: tmp = *(uint32 *)(np + 12)
12170# asm 1: movl 12(<np=int64#4),>tmp=int64#6d
12171# asm 2: movl 12(<np=%rcx),>tmp=%r9d
12172movl 12(%rcx),%r9d
12173
12174# qhasm: (uint32) bswap tmp
12175# asm 1: bswap <tmp=int64#6d
12176# asm 2: bswap <tmp=%r9d
12177bswap %r9d
12178
12179# qhasm: tmp += len
12180# asm 1: add <len=int64#3,<tmp=int64#6
12181# asm 2: add <len=%rdx,<tmp=%r9
12182add %rdx,%r9
12183
12184# qhasm: (uint32) bswap tmp
12185# asm 1: bswap <tmp=int64#6d
12186# asm 2: bswap <tmp=%r9d
12187bswap %r9d
12188
12189# qhasm: *(uint32 *)(np + 12) = tmp
12190# asm 1: movl <tmp=int64#6d,12(<np=int64#4)
12191# asm 2: movl <tmp=%r9d,12(<np=%rcx)
12192movl %r9d,12(%rcx)
12193
12194# qhasm: blp = &bl
12195# asm 1: leaq <bl=stack1024#1,>blp=int64#3
12196# asm 2: leaq <bl=32(%rsp),>blp=%rdx
12197leaq 32(%rsp),%rdx
12198
12199# qhasm: *(int128 *)(blp + 0) = xmm8
12200# asm 1: movdqa <xmm8=int6464#9,0(<blp=int64#3)
12201# asm 2: movdqa <xmm8=%xmm8,0(<blp=%rdx)
12202movdqa %xmm8,0(%rdx)
12203
12204# qhasm: *(int128 *)(blp + 16) = xmm9
12205# asm 1: movdqa <xmm9=int6464#10,16(<blp=int64#3)
12206# asm 2: movdqa <xmm9=%xmm9,16(<blp=%rdx)
12207movdqa %xmm9,16(%rdx)
12208
12209# qhasm: *(int128 *)(blp + 32) = xmm12
12210# asm 1: movdqa <xmm12=int6464#13,32(<blp=int64#3)
12211# asm 2: movdqa <xmm12=%xmm12,32(<blp=%rdx)
12212movdqa %xmm12,32(%rdx)
12213
12214# qhasm: *(int128 *)(blp + 48) = xmm14
12215# asm 1: movdqa <xmm14=int6464#15,48(<blp=int64#3)
12216# asm 2: movdqa <xmm14=%xmm14,48(<blp=%rdx)
12217movdqa %xmm14,48(%rdx)
12218
12219# qhasm: *(int128 *)(blp + 64) = xmm11
12220# asm 1: movdqa <xmm11=int6464#12,64(<blp=int64#3)
12221# asm 2: movdqa <xmm11=%xmm11,64(<blp=%rdx)
12222movdqa %xmm11,64(%rdx)
12223
12224# qhasm: *(int128 *)(blp + 80) = xmm15
12225# asm 1: movdqa <xmm15=int6464#16,80(<blp=int64#3)
12226# asm 2: movdqa <xmm15=%xmm15,80(<blp=%rdx)
12227movdqa %xmm15,80(%rdx)
12228
12229# qhasm: *(int128 *)(blp + 96) = xmm10
12230# asm 1: movdqa <xmm10=int6464#11,96(<blp=int64#3)
12231# asm 2: movdqa <xmm10=%xmm10,96(<blp=%rdx)
12232movdqa %xmm10,96(%rdx)
12233
12234# qhasm: *(int128 *)(blp + 112) = xmm13
12235# asm 1: movdqa <xmm13=int6464#14,112(<blp=int64#3)
12236# asm 2: movdqa <xmm13=%xmm13,112(<blp=%rdx)
12237movdqa %xmm13,112(%rdx)
12238
12239# qhasm: bytes:
12240._bytes:
12241
12242# qhasm: =? lensav-0
12243# asm 1: cmp $0,<lensav=int64#5
12244# asm 2: cmp $0,<lensav=%r8
12245cmp $0,%r8
12246# comment:fp stack unchanged by jump
12247
12248# qhasm: goto end if =
12249je ._end
12250
12251# qhasm: b = *(uint8 *)(blp + 0)
12252# asm 1: movzbq 0(<blp=int64#3),>b=int64#4
12253# asm 2: movzbq 0(<blp=%rdx),>b=%rcx
12254movzbq 0(%rdx),%rcx
12255
12256# qhasm: (uint8) b ^= *(uint8 *)(inp + 0)
12257# asm 1: xorb 0(<inp=int64#2),<b=int64#4b
12258# asm 2: xorb 0(<inp=%rsi),<b=%cl
12259xorb 0(%rsi),%cl
12260
12261# qhasm: *(uint8 *)(outp + 0) = b
12262# asm 1: movb <b=int64#4b,0(<outp=int64#1)
12263# asm 2: movb <b=%cl,0(<outp=%rdi)
12264movb %cl,0(%rdi)
12265
12266# qhasm: blp += 1
12267# asm 1: add $1,<blp=int64#3
12268# asm 2: add $1,<blp=%rdx
12269add $1,%rdx
12270
12271# qhasm: inp +=1
12272# asm 1: add $1,<inp=int64#2
12273# asm 2: add $1,<inp=%rsi
12274add $1,%rsi
12275
12276# qhasm: outp +=1
12277# asm 1: add $1,<outp=int64#1
12278# asm 2: add $1,<outp=%rdi
12279add $1,%rdi
12280
12281# qhasm: lensav -= 1
12282# asm 1: sub $1,<lensav=int64#5
12283# asm 2: sub $1,<lensav=%r8
12284sub $1,%r8
12285# comment:fp stack unchanged by jump
12286
12287# qhasm: goto bytes
12288jmp ._bytes
12289
12290# qhasm: full:
12291._full:
12292
12293# qhasm: tmp = *(uint32 *)(np + 12)
12294# asm 1: movl 12(<np=int64#4),>tmp=int64#3d
12295# asm 2: movl 12(<np=%rcx),>tmp=%edx
12296movl 12(%rcx),%edx
12297
12298# qhasm: (uint32) bswap tmp
12299# asm 1: bswap <tmp=int64#3d
12300# asm 2: bswap <tmp=%edx
12301bswap %edx
12302
12303# qhasm: tmp += 8
12304# asm 1: add $8,<tmp=int64#3
12305# asm 2: add $8,<tmp=%rdx
12306add $8,%rdx
12307
12308# qhasm: (uint32) bswap tmp
12309# asm 1: bswap <tmp=int64#3d
12310# asm 2: bswap <tmp=%edx
12311bswap %edx
12312
12313# qhasm: *(uint32 *)(np + 12) = tmp
12314# asm 1: movl <tmp=int64#3d,12(<np=int64#4)
12315# asm 2: movl <tmp=%edx,12(<np=%rcx)
12316movl %edx,12(%rcx)
12317
12318# qhasm: xmm8 ^= *(int128 *)(inp + 0)
12319# asm 1: pxor 0(<inp=int64#2),<xmm8=int6464#9
12320# asm 2: pxor 0(<inp=%rsi),<xmm8=%xmm8
12321pxor 0(%rsi),%xmm8
12322
12323# qhasm: xmm9 ^= *(int128 *)(inp + 16)
12324# asm 1: pxor 16(<inp=int64#2),<xmm9=int6464#10
12325# asm 2: pxor 16(<inp=%rsi),<xmm9=%xmm9
12326pxor 16(%rsi),%xmm9
12327
12328# qhasm: xmm12 ^= *(int128 *)(inp + 32)
12329# asm 1: pxor 32(<inp=int64#2),<xmm12=int6464#13
12330# asm 2: pxor 32(<inp=%rsi),<xmm12=%xmm12
12331pxor 32(%rsi),%xmm12
12332
12333# qhasm: xmm14 ^= *(int128 *)(inp + 48)
12334# asm 1: pxor 48(<inp=int64#2),<xmm14=int6464#15
12335# asm 2: pxor 48(<inp=%rsi),<xmm14=%xmm14
12336pxor 48(%rsi),%xmm14
12337
12338# qhasm: xmm11 ^= *(int128 *)(inp + 64)
12339# asm 1: pxor 64(<inp=int64#2),<xmm11=int6464#12
12340# asm 2: pxor 64(<inp=%rsi),<xmm11=%xmm11
12341pxor 64(%rsi),%xmm11
12342
12343# qhasm: xmm15 ^= *(int128 *)(inp + 80)
12344# asm 1: pxor 80(<inp=int64#2),<xmm15=int6464#16
12345# asm 2: pxor 80(<inp=%rsi),<xmm15=%xmm15
12346pxor 80(%rsi),%xmm15
12347
12348# qhasm: xmm10 ^= *(int128 *)(inp + 96)
12349# asm 1: pxor 96(<inp=int64#2),<xmm10=int6464#11
12350# asm 2: pxor 96(<inp=%rsi),<xmm10=%xmm10
12351pxor 96(%rsi),%xmm10
12352
12353# qhasm: xmm13 ^= *(int128 *)(inp + 112)
12354# asm 1: pxor 112(<inp=int64#2),<xmm13=int6464#14
12355# asm 2: pxor 112(<inp=%rsi),<xmm13=%xmm13
12356pxor 112(%rsi),%xmm13
12357
12358# qhasm: *(int128 *) (outp + 0) = xmm8
12359# asm 1: movdqa <xmm8=int6464#9,0(<outp=int64#1)
12360# asm 2: movdqa <xmm8=%xmm8,0(<outp=%rdi)
12361movdqa %xmm8,0(%rdi)
12362
12363# qhasm: *(int128 *) (outp + 16) = xmm9
12364# asm 1: movdqa <xmm9=int6464#10,16(<outp=int64#1)
12365# asm 2: movdqa <xmm9=%xmm9,16(<outp=%rdi)
12366movdqa %xmm9,16(%rdi)
12367
12368# qhasm: *(int128 *) (outp + 32) = xmm12
12369# asm 1: movdqa <xmm12=int6464#13,32(<outp=int64#1)
12370# asm 2: movdqa <xmm12=%xmm12,32(<outp=%rdi)
12371movdqa %xmm12,32(%rdi)
12372
12373# qhasm: *(int128 *) (outp + 48) = xmm14
12374# asm 1: movdqa <xmm14=int6464#15,48(<outp=int64#1)
12375# asm 2: movdqa <xmm14=%xmm14,48(<outp=%rdi)
12376movdqa %xmm14,48(%rdi)
12377
12378# qhasm: *(int128 *) (outp + 64) = xmm11
12379# asm 1: movdqa <xmm11=int6464#12,64(<outp=int64#1)
12380# asm 2: movdqa <xmm11=%xmm11,64(<outp=%rdi)
12381movdqa %xmm11,64(%rdi)
12382
12383# qhasm: *(int128 *) (outp + 80) = xmm15
12384# asm 1: movdqa <xmm15=int6464#16,80(<outp=int64#1)
12385# asm 2: movdqa <xmm15=%xmm15,80(<outp=%rdi)
12386movdqa %xmm15,80(%rdi)
12387
12388# qhasm: *(int128 *) (outp + 96) = xmm10
12389# asm 1: movdqa <xmm10=int6464#11,96(<outp=int64#1)
12390# asm 2: movdqa <xmm10=%xmm10,96(<outp=%rdi)
12391movdqa %xmm10,96(%rdi)
12392
12393# qhasm: *(int128 *) (outp + 112) = xmm13
12394# asm 1: movdqa <xmm13=int6464#14,112(<outp=int64#1)
12395# asm 2: movdqa <xmm13=%xmm13,112(<outp=%rdi)
12396movdqa %xmm13,112(%rdi)
12397# comment:fp stack unchanged by fallthrough
12398
12399# qhasm: end:
12400._end:
12401
12402# qhasm: leave
12403add %r11,%rsp
12404mov %rdi,%rax
12405mov %rsi,%rdx
12406xor %rax,%rax
12407ret
diff --git a/nacl/crypto_stream/aes128ctr/portable/afternm.c b/nacl/crypto_stream/aes128ctr/portable/afternm.c
new file mode 100644
index 00000000..93c96e42
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/afternm.c
@@ -0,0 +1,158 @@
1/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper
2 * Date: 2009-03-19
3 * Public domain */
4
5#include "int128.h"
6#include "common.h"
7#include "consts.h"
8#include "crypto_stream.h"
9
10int crypto_stream_afternm(unsigned char *outp, unsigned long long len, const unsigned char *noncep, const unsigned char *c)
11{
12
13 int128 xmm0;
14 int128 xmm1;
15 int128 xmm2;
16 int128 xmm3;
17 int128 xmm4;
18 int128 xmm5;
19 int128 xmm6;
20 int128 xmm7;
21
22 int128 xmm8;
23 int128 xmm9;
24 int128 xmm10;
25 int128 xmm11;
26 int128 xmm12;
27 int128 xmm13;
28 int128 xmm14;
29 int128 xmm15;
30
31 int128 nonce_stack;
32 unsigned long long lensav;
33 unsigned char bl[128];
34 unsigned char *blp;
35 unsigned char b;
36
37 uint32 tmp;
38
39 /* Copy nonce on the stack */
40 copy2(&nonce_stack, (int128 *) (noncep + 0));
41 unsigned char *np = (unsigned char *)&nonce_stack;
42
43 enc_block:
44
45 xmm0 = *(int128 *) (np + 0);
46 copy2(&xmm1, &xmm0);
47 shufb(&xmm1, SWAP32);
48 copy2(&xmm2, &xmm1);
49 copy2(&xmm3, &xmm1);
50 copy2(&xmm4, &xmm1);
51 copy2(&xmm5, &xmm1);
52 copy2(&xmm6, &xmm1);
53 copy2(&xmm7, &xmm1);
54
55 add_uint32_big(&xmm1, 1);
56 add_uint32_big(&xmm2, 2);
57 add_uint32_big(&xmm3, 3);
58 add_uint32_big(&xmm4, 4);
59 add_uint32_big(&xmm5, 5);
60 add_uint32_big(&xmm6, 6);
61 add_uint32_big(&xmm7, 7);
62
63 shufb(&xmm0, M0);
64 shufb(&xmm1, M0SWAP);
65 shufb(&xmm2, M0SWAP);
66 shufb(&xmm3, M0SWAP);
67 shufb(&xmm4, M0SWAP);
68 shufb(&xmm5, M0SWAP);
69 shufb(&xmm6, M0SWAP);
70 shufb(&xmm7, M0SWAP);
71
72 bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8)
73
74 aesround( 1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
75 aesround( 2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
76 aesround( 3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
77 aesround( 4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
78 aesround( 5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
79 aesround( 6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
80 aesround( 7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
81 aesround( 8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
82 aesround( 9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
83 lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
84
85 bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0)
86
87 if(len < 128) goto partial;
88 if(len == 128) goto full;
89
90 tmp = load32_bigendian(np + 12);
91 tmp += 8;
92 store32_bigendian(np + 12, tmp);
93
94 *(int128 *) (outp + 0) = xmm8;
95 *(int128 *) (outp + 16) = xmm9;
96 *(int128 *) (outp + 32) = xmm12;
97 *(int128 *) (outp + 48) = xmm14;
98 *(int128 *) (outp + 64) = xmm11;
99 *(int128 *) (outp + 80) = xmm15;
100 *(int128 *) (outp + 96) = xmm10;
101 *(int128 *) (outp + 112) = xmm13;
102
103 len -= 128;
104 outp += 128;
105
106 goto enc_block;
107
108 partial:
109
110 lensav = len;
111 len >>= 4;
112
113 tmp = load32_bigendian(np + 12);
114 tmp += len;
115 store32_bigendian(np + 12, tmp);
116
117 blp = bl;
118 *(int128 *)(blp + 0) = xmm8;
119 *(int128 *)(blp + 16) = xmm9;
120 *(int128 *)(blp + 32) = xmm12;
121 *(int128 *)(blp + 48) = xmm14;
122 *(int128 *)(blp + 64) = xmm11;
123 *(int128 *)(blp + 80) = xmm15;
124 *(int128 *)(blp + 96) = xmm10;
125 *(int128 *)(blp + 112) = xmm13;
126
127 bytes:
128
129 if(lensav == 0) goto end;
130
131 b = blp[0];
132 *(unsigned char *)(outp + 0) = b;
133
134 blp += 1;
135 outp +=1;
136 lensav -= 1;
137
138 goto bytes;
139
140 full:
141
142 tmp = load32_bigendian(np + 12);
143 tmp += 8;
144 store32_bigendian(np + 12, tmp);
145
146 *(int128 *) (outp + 0) = xmm8;
147 *(int128 *) (outp + 16) = xmm9;
148 *(int128 *) (outp + 32) = xmm12;
149 *(int128 *) (outp + 48) = xmm14;
150 *(int128 *) (outp + 64) = xmm11;
151 *(int128 *) (outp + 80) = xmm15;
152 *(int128 *) (outp + 96) = xmm10;
153 *(int128 *) (outp + 112) = xmm13;
154
155 end:
156 return 0;
157
158}
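For reference, the portable afternm.c above consumes the 1408-byte expanded key that beforenm.c (below) writes into `c`; api.h names this size CRYPTO_BEFORENMBYTES. A minimal caller sketch in C, assuming the externally namespaced NaCl names crypto_stream_aes128ctr_beforenm / crypto_stream_aes128ctr_afternm and the generated crypto_stream_aes128ctr.h header (these names are assumptions, not shown in this diff):

    #include "crypto_stream_aes128ctr.h"

    unsigned char k[16];     /* CRYPTO_KEYBYTES: AES-128 key, filled by the caller */
    unsigned char n[16];     /* CRYPTO_NONCEBYTES: last 4 bytes act as a big-endian block counter */
    unsigned char d[1408];   /* CRYPTO_BEFORENMBYTES: bitsliced round keys */
    unsigned char ks[4096];  /* keystream output */

    crypto_stream_aes128ctr_beforenm(d, k);               /* one-time key expansion (beforenm.c) */
    crypto_stream_aes128ctr_afternm(ks, sizeof ks, n, d); /* per-message keystream (afternm.c) */

The expanded key can be reused across calls; only the nonce and its embedded counter change.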
diff --git a/nacl/crypto_stream/aes128ctr/portable/api.h b/nacl/crypto_stream/aes128ctr/portable/api.h
new file mode 100644
index 00000000..62fc8d88
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/api.h
@@ -0,0 +1,3 @@
1#define CRYPTO_KEYBYTES 16
2#define CRYPTO_NONCEBYTES 16
3#define CRYPTO_BEFORENMBYTES 1408
diff --git a/nacl/crypto_stream/aes128ctr/portable/beforenm.c b/nacl/crypto_stream/aes128ctr/portable/beforenm.c
new file mode 100644
index 00000000..8fa2673d
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/beforenm.c
@@ -0,0 +1,59 @@
1/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper
2 * Date: 2009-03-19
3 * Public domain */
4
5#include "consts.h"
6#include "int128.h"
7#include "common.h"
8#include "crypto_stream.h"
9
10int crypto_stream_beforenm(unsigned char *c, const unsigned char *k)
11{
12
13 /*
14 int64 x0;
15 int64 x1;
16 int64 x2;
17 int64 x3;
18 int64 e;
19 int64 q0;
20 int64 q1;
21 int64 q2;
22 int64 q3;
23 */
24
25 int128 xmm0;
26 int128 xmm1;
27 int128 xmm2;
28 int128 xmm3;
29 int128 xmm4;
30 int128 xmm5;
31 int128 xmm6;
32 int128 xmm7;
33 int128 xmm8;
34 int128 xmm9;
35 int128 xmm10;
36 int128 xmm11;
37 int128 xmm12;
38 int128 xmm13;
39 int128 xmm14;
40 int128 xmm15;
41 int128 t;
42
43 bitslicekey0(k, c)
44
45 keyexpbs1(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
46 keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm1);, 2,c)
47 keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm6);, 3,c)
48 keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 4,c)
49
50 keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 5,c)
51 keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm5);, 6,c)
52 keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 7,c)
53 keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm7);, 8,c)
54
55 keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm0); xor_rcon(&xmm1); xor_rcon(&xmm6); xor_rcon(&xmm3);, 9,c)
56 keyexpbs10(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
57
58 return 0;
59}
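For scale: bitslicekey0 stores round key 0 at offsets 0..112 of c, keyexpbs1 and keyexpbs store round key i at i*128, and keyexpbs10 stores the final round key at 1280..1392, so the schedule holds 11 round keys of 8 bitsliced 128-bit words each: 11 * 8 * 16 = 1408 bytes, matching CRYPTO_BEFORENMBYTES in api.h.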
diff --git a/nacl/crypto_stream/aes128ctr/portable/common.c b/nacl/crypto_stream/aes128ctr/portable/common.c
new file mode 100644
index 00000000..14a28cc6
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/common.c
@@ -0,0 +1,64 @@
1#include "common.h"
2
3uint32 load32_bigendian(const unsigned char *x)
4{
5 return
6 (uint32) (x[3]) \
7 | (((uint32) (x[2])) << 8) \
8 | (((uint32) (x[1])) << 16) \
9 | (((uint32) (x[0])) << 24)
10 ;
11}
12
13void store32_bigendian(unsigned char *x,uint32 u)
14{
15 x[3] = u; u >>= 8;
16 x[2] = u; u >>= 8;
17 x[1] = u; u >>= 8;
18 x[0] = u;
19}
20
21uint32 load32_littleendian(const unsigned char *x)
22{
23 return
24 (uint32) (x[0]) \
25 | (((uint32) (x[1])) << 8) \
26 | (((uint32) (x[2])) << 16) \
27 | (((uint32) (x[3])) << 24)
28 ;
29}
30
31void store32_littleendian(unsigned char *x,uint32 u)
32{
33 x[0] = u; u >>= 8;
34 x[1] = u; u >>= 8;
35 x[2] = u; u >>= 8;
36 x[3] = u;
37}
38
39
40uint64 load64_littleendian(const unsigned char *x)
41{
42 return
43 (uint64) (x[0]) \
44 | (((uint64) (x[1])) << 8) \
45 | (((uint64) (x[2])) << 16) \
46 | (((uint64) (x[3])) << 24)
47 | (((uint64) (x[4])) << 32)
48 | (((uint64) (x[5])) << 40)
49 | (((uint64) (x[6])) << 48)
50 | (((uint64) (x[7])) << 56)
51 ;
52}
53
54void store64_littleendian(unsigned char *x,uint64 u)
55{
56 x[0] = u; u >>= 8;
57 x[1] = u; u >>= 8;
58 x[2] = u; u >>= 8;
59 x[3] = u; u >>= 8;
60 x[4] = u; u >>= 8;
61 x[5] = u; u >>= 8;
62 x[6] = u; u >>= 8;
63 x[7] = u;
64}
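The load32_bigendian / store32_bigendian pair above is what the portable implementation uses to step the block counter kept in the last four nonce bytes; it is the C counterpart of the bswap / add / bswap sequence in the core2 assembly. A small sketch (ctr_add is a hypothetical helper, not part of the library):

    #include "common.h"

    /* Advance the 32-bit big-endian block counter stored in n[12..15]. */
    static void ctr_add(unsigned char n[16], uint32 blocks)
    {
        uint32 c = load32_bigendian(n + 12);
        store32_bigendian(n + 12, c + blocks);
    }

afternm.c does exactly this with blocks = 8 after each full 128-byte batch, and with the remaining block count on the partial path.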
diff --git a/nacl/crypto_stream/aes128ctr/portable/common.h b/nacl/crypto_stream/aes128ctr/portable/common.h
new file mode 100644
index 00000000..0f723332
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/common.h
@@ -0,0 +1,788 @@
1/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper
2 Date: 2009-03-19
3 Public domain */
4#ifndef COMMON_H
5#define COMMON_H
6
7#include "types.h"
8
9#define load32_bigendian crypto_stream_aes128ctr_portable_load32_bigendian
10uint32 load32_bigendian(const unsigned char *x);
11
12#define store32_bigendian crypto_stream_aes128ctr_portable_store32_bigendian
13void store32_bigendian(unsigned char *x,uint32 u);
14
15#define load32_littleendian crypto_stream_aes128ctr_portable_load32_littleendian
16uint32 load32_littleendian(const unsigned char *x);
17
18#define store32_littleendian crypto_stream_aes128ctr_portable_store32_littleendian
19void store32_littleendian(unsigned char *x,uint32 u);
20
21#define load64_littleendian crypto_stream_aes128ctr_portable_load64_littleendian
22uint64 load64_littleendian(const unsigned char *x);
23
24#define store64_littleendian crypto_stream_aes128ctr_portable_store64_littleendian
25void store64_littleendian(unsigned char *x,uint64 u);
26
27/* Macros required only for key expansion */
28
29#define keyexpbs1(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \
30 rotbyte(&b0);\
31 rotbyte(&b1);\
32 rotbyte(&b2);\
33 rotbyte(&b3);\
34 rotbyte(&b4);\
35 rotbyte(&b5);\
36 rotbyte(&b6);\
37 rotbyte(&b7);\
38 ;\
39 sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\
40 ;\
41 xor_rcon(&b0);\
42 shufb(&b0, EXPB0);\
43 shufb(&b1, EXPB0);\
44 shufb(&b4, EXPB0);\
45 shufb(&b6, EXPB0);\
46 shufb(&b3, EXPB0);\
47 shufb(&b7, EXPB0);\
48 shufb(&b2, EXPB0);\
49 shufb(&b5, EXPB0);\
50 shufb(&b0, EXPB0);\
51 ;\
52 t0 = *(int128 *)(bskey + 0);\
53 t1 = *(int128 *)(bskey + 16);\
54 t2 = *(int128 *)(bskey + 32);\
55 t3 = *(int128 *)(bskey + 48);\
56 t4 = *(int128 *)(bskey + 64);\
57 t5 = *(int128 *)(bskey + 80);\
58 t6 = *(int128 *)(bskey + 96);\
59 t7 = *(int128 *)(bskey + 112);\
60 ;\
61 xor2(&b0, &t0);\
62 xor2(&b1, &t1);\
63 xor2(&b4, &t2);\
64 xor2(&b6, &t3);\
65 xor2(&b3, &t4);\
66 xor2(&b7, &t5);\
67 xor2(&b2, &t6);\
68 xor2(&b5, &t7);\
69 ;\
70 rshift32_littleendian(&t0, 8);\
71 rshift32_littleendian(&t1, 8);\
72 rshift32_littleendian(&t2, 8);\
73 rshift32_littleendian(&t3, 8);\
74 rshift32_littleendian(&t4, 8);\
75 rshift32_littleendian(&t5, 8);\
76 rshift32_littleendian(&t6, 8);\
77 rshift32_littleendian(&t7, 8);\
78 ;\
79 xor2(&b0, &t0);\
80 xor2(&b1, &t1);\
81 xor2(&b4, &t2);\
82 xor2(&b6, &t3);\
83 xor2(&b3, &t4);\
84 xor2(&b7, &t5);\
85 xor2(&b2, &t6);\
86 xor2(&b5, &t7);\
87 ;\
88 rshift32_littleendian(&t0, 8);\
89 rshift32_littleendian(&t1, 8);\
90 rshift32_littleendian(&t2, 8);\
91 rshift32_littleendian(&t3, 8);\
92 rshift32_littleendian(&t4, 8);\
93 rshift32_littleendian(&t5, 8);\
94 rshift32_littleendian(&t6, 8);\
95 rshift32_littleendian(&t7, 8);\
96 ;\
97 xor2(&b0, &t0);\
98 xor2(&b1, &t1);\
99 xor2(&b4, &t2);\
100 xor2(&b6, &t3);\
101 xor2(&b3, &t4);\
102 xor2(&b7, &t5);\
103 xor2(&b2, &t6);\
104 xor2(&b5, &t7);\
105 ;\
106 rshift32_littleendian(&t0, 8);\
107 rshift32_littleendian(&t1, 8);\
108 rshift32_littleendian(&t2, 8);\
109 rshift32_littleendian(&t3, 8);\
110 rshift32_littleendian(&t4, 8);\
111 rshift32_littleendian(&t5, 8);\
112 rshift32_littleendian(&t6, 8);\
113 rshift32_littleendian(&t7, 8);\
114 ;\
115 xor2(&b0, &t0);\
116 xor2(&b1, &t1);\
117 xor2(&b4, &t2);\
118 xor2(&b6, &t3);\
119 xor2(&b3, &t4);\
120 xor2(&b7, &t5);\
121 xor2(&b2, &t6);\
122 xor2(&b5, &t7);\
123 ;\
124 *(int128 *)(bskey + 128) = b0;\
125 *(int128 *)(bskey + 144) = b1;\
126 *(int128 *)(bskey + 160) = b4;\
127 *(int128 *)(bskey + 176) = b6;\
128 *(int128 *)(bskey + 192) = b3;\
129 *(int128 *)(bskey + 208) = b7;\
130 *(int128 *)(bskey + 224) = b2;\
131 *(int128 *)(bskey + 240) = b5;\
132
133#define keyexpbs10(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) ;\
134 toggle(&b0);\
135 toggle(&b1);\
136 toggle(&b5);\
137 toggle(&b6);\
138 rotbyte(&b0);\
139 rotbyte(&b1);\
140 rotbyte(&b2);\
141 rotbyte(&b3);\
142 rotbyte(&b4);\
143 rotbyte(&b5);\
144 rotbyte(&b6);\
145 rotbyte(&b7);\
146 ;\
147 sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\
148 ;\
149 xor_rcon(&b1);\
150 xor_rcon(&b4);\
151 xor_rcon(&b3);\
152 xor_rcon(&b7);\
153 shufb(&b0, EXPB0);\
154 shufb(&b1, EXPB0);\
155 shufb(&b4, EXPB0);\
156 shufb(&b6, EXPB0);\
157 shufb(&b3, EXPB0);\
158 shufb(&b7, EXPB0);\
159 shufb(&b2, EXPB0);\
160 shufb(&b5, EXPB0);\
161 ;\
162 t0 = *(int128 *)(bskey + 9 * 128 + 0);\
163 t1 = *(int128 *)(bskey + 9 * 128 + 16);\
164 t2 = *(int128 *)(bskey + 9 * 128 + 32);\
165 t3 = *(int128 *)(bskey + 9 * 128 + 48);\
166 t4 = *(int128 *)(bskey + 9 * 128 + 64);\
167 t5 = *(int128 *)(bskey + 9 * 128 + 80);\
168 t6 = *(int128 *)(bskey + 9 * 128 + 96);\
169 t7 = *(int128 *)(bskey + 9 * 128 + 112);\
170 ;\
171 toggle(&t0);\
172 toggle(&t1);\
173 toggle(&t5);\
174 toggle(&t6);\
175 ;\
176 xor2(&b0, &t0);\
177 xor2(&b1, &t1);\
178 xor2(&b4, &t2);\
179 xor2(&b6, &t3);\
180 xor2(&b3, &t4);\
181 xor2(&b7, &t5);\
182 xor2(&b2, &t6);\
183 xor2(&b5, &t7);\
184 ;\
185 rshift32_littleendian(&t0, 8);\
186 rshift32_littleendian(&t1, 8);\
187 rshift32_littleendian(&t2, 8);\
188 rshift32_littleendian(&t3, 8);\
189 rshift32_littleendian(&t4, 8);\
190 rshift32_littleendian(&t5, 8);\
191 rshift32_littleendian(&t6, 8);\
192 rshift32_littleendian(&t7, 8);\
193 ;\
194 xor2(&b0, &t0);\
195 xor2(&b1, &t1);\
196 xor2(&b4, &t2);\
197 xor2(&b6, &t3);\
198 xor2(&b3, &t4);\
199 xor2(&b7, &t5);\
200 xor2(&b2, &t6);\
201 xor2(&b5, &t7);\
202 ;\
203 rshift32_littleendian(&t0, 8);\
204 rshift32_littleendian(&t1, 8);\
205 rshift32_littleendian(&t2, 8);\
206 rshift32_littleendian(&t3, 8);\
207 rshift32_littleendian(&t4, 8);\
208 rshift32_littleendian(&t5, 8);\
209 rshift32_littleendian(&t6, 8);\
210 rshift32_littleendian(&t7, 8);\
211 ;\
212 xor2(&b0, &t0);\
213 xor2(&b1, &t1);\
214 xor2(&b4, &t2);\
215 xor2(&b6, &t3);\
216 xor2(&b3, &t4);\
217 xor2(&b7, &t5);\
218 xor2(&b2, &t6);\
219 xor2(&b5, &t7);\
220 ;\
221 rshift32_littleendian(&t0, 8);\
222 rshift32_littleendian(&t1, 8);\
223 rshift32_littleendian(&t2, 8);\
224 rshift32_littleendian(&t3, 8);\
225 rshift32_littleendian(&t4, 8);\
226 rshift32_littleendian(&t5, 8);\
227 rshift32_littleendian(&t6, 8);\
228 rshift32_littleendian(&t7, 8);\
229 ;\
230 xor2(&b0, &t0);\
231 xor2(&b1, &t1);\
232 xor2(&b4, &t2);\
233 xor2(&b6, &t3);\
234 xor2(&b3, &t4);\
235 xor2(&b7, &t5);\
236 xor2(&b2, &t6);\
237 xor2(&b5, &t7);\
238 ;\
239 shufb(&b0, M0);\
240 shufb(&b1, M0);\
241 shufb(&b2, M0);\
242 shufb(&b3, M0);\
243 shufb(&b4, M0);\
244 shufb(&b5, M0);\
245 shufb(&b6, M0);\
246 shufb(&b7, M0);\
247 ;\
248 *(int128 *)(bskey + 1280) = b0;\
249 *(int128 *)(bskey + 1296) = b1;\
250 *(int128 *)(bskey + 1312) = b4;\
251 *(int128 *)(bskey + 1328) = b6;\
252 *(int128 *)(bskey + 1344) = b3;\
253 *(int128 *)(bskey + 1360) = b7;\
254 *(int128 *)(bskey + 1376) = b2;\
255 *(int128 *)(bskey + 1392) = b5;\
256
257
258#define keyexpbs(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, rcon, i, bskey) \
259 toggle(&b0);\
260 toggle(&b1);\
261 toggle(&b5);\
262 toggle(&b6);\
263 rotbyte(&b0);\
264 rotbyte(&b1);\
265 rotbyte(&b2);\
266 rotbyte(&b3);\
267 rotbyte(&b4);\
268 rotbyte(&b5);\
269 rotbyte(&b6);\
270 rotbyte(&b7);\
271 ;\
272 sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\
273 ;\
274 rcon;\
275 shufb(&b0, EXPB0);\
276 shufb(&b1, EXPB0);\
277 shufb(&b4, EXPB0);\
278 shufb(&b6, EXPB0);\
279 shufb(&b3, EXPB0);\
280 shufb(&b7, EXPB0);\
281 shufb(&b2, EXPB0);\
282 shufb(&b5, EXPB0);\
283 ;\
284 t0 = *(int128 *)(bskey + (i-1) * 128 + 0);\
285 t1 = *(int128 *)(bskey + (i-1) * 128 + 16);\
286 t2 = *(int128 *)(bskey + (i-1) * 128 + 32);\
287 t3 = *(int128 *)(bskey + (i-1) * 128 + 48);\
288 t4 = *(int128 *)(bskey + (i-1) * 128 + 64);\
289 t5 = *(int128 *)(bskey + (i-1) * 128 + 80);\
290 t6 = *(int128 *)(bskey + (i-1) * 128 + 96);\
291 t7 = *(int128 *)(bskey + (i-1) * 128 + 112);\
292 ;\
293 toggle(&t0);\
294 toggle(&t1);\
295 toggle(&t5);\
296 toggle(&t6);\
297 ;\
298 xor2(&b0, &t0);\
299 xor2(&b1, &t1);\
300 xor2(&b4, &t2);\
301 xor2(&b6, &t3);\
302 xor2(&b3, &t4);\
303 xor2(&b7, &t5);\
304 xor2(&b2, &t6);\
305 xor2(&b5, &t7);\
306 ;\
307 rshift32_littleendian(&t0, 8);\
308 rshift32_littleendian(&t1, 8);\
309 rshift32_littleendian(&t2, 8);\
310 rshift32_littleendian(&t3, 8);\
311 rshift32_littleendian(&t4, 8);\
312 rshift32_littleendian(&t5, 8);\
313 rshift32_littleendian(&t6, 8);\
314 rshift32_littleendian(&t7, 8);\
315 ;\
316 xor2(&b0, &t0);\
317 xor2(&b1, &t1);\
318 xor2(&b4, &t2);\
319 xor2(&b6, &t3);\
320 xor2(&b3, &t4);\
321 xor2(&b7, &t5);\
322 xor2(&b2, &t6);\
323 xor2(&b5, &t7);\
324 ;\
325 rshift32_littleendian(&t0, 8);\
326 rshift32_littleendian(&t1, 8);\
327 rshift32_littleendian(&t2, 8);\
328 rshift32_littleendian(&t3, 8);\
329 rshift32_littleendian(&t4, 8);\
330 rshift32_littleendian(&t5, 8);\
331 rshift32_littleendian(&t6, 8);\
332 rshift32_littleendian(&t7, 8);\
333 ;\
334 xor2(&b0, &t0);\
335 xor2(&b1, &t1);\
336 xor2(&b4, &t2);\
337 xor2(&b6, &t3);\
338 xor2(&b3, &t4);\
339 xor2(&b7, &t5);\
340 xor2(&b2, &t6);\
341 xor2(&b5, &t7);\
342 ;\
343 rshift32_littleendian(&t0, 8);\
344 rshift32_littleendian(&t1, 8);\
345 rshift32_littleendian(&t2, 8);\
346 rshift32_littleendian(&t3, 8);\
347 rshift32_littleendian(&t4, 8);\
348 rshift32_littleendian(&t5, 8);\
349 rshift32_littleendian(&t6, 8);\
350 rshift32_littleendian(&t7, 8);\
351 ;\
352 xor2(&b0, &t0);\
353 xor2(&b1, &t1);\
354 xor2(&b4, &t2);\
355 xor2(&b6, &t3);\
356 xor2(&b3, &t4);\
357 xor2(&b7, &t5);\
358 xor2(&b2, &t6);\
359 xor2(&b5, &t7);\
360 ;\
361 *(int128 *)(bskey + i*128 + 0) = b0;\
362 *(int128 *)(bskey + i*128 + 16) = b1;\
363 *(int128 *)(bskey + i*128 + 32) = b4;\
364 *(int128 *)(bskey + i*128 + 48) = b6;\
365 *(int128 *)(bskey + i*128 + 64) = b3;\
366 *(int128 *)(bskey + i*128 + 80) = b7;\
367 *(int128 *)(bskey + i*128 + 96) = b2;\
368 *(int128 *)(bskey + i*128 + 112) = b5;\
369
370/* Macros used in multiple contexts */
371
372#define bitslicekey0(key, bskey) \
373 xmm0 = *(int128 *) (key + 0);\
374 shufb(&xmm0, M0);\
375 copy2(&xmm1, &xmm0);\
376 copy2(&xmm2, &xmm0);\
377 copy2(&xmm3, &xmm0);\
378 copy2(&xmm4, &xmm0);\
379 copy2(&xmm5, &xmm0);\
380 copy2(&xmm6, &xmm0);\
381 copy2(&xmm7, &xmm0);\
382 ;\
383 bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\
384 ;\
385 *(int128 *) (bskey + 0) = xmm0;\
386 *(int128 *) (bskey + 16) = xmm1;\
387 *(int128 *) (bskey + 32) = xmm2;\
388 *(int128 *) (bskey + 48) = xmm3;\
389 *(int128 *) (bskey + 64) = xmm4;\
390 *(int128 *) (bskey + 80) = xmm5;\
391 *(int128 *) (bskey + 96) = xmm6;\
392 *(int128 *) (bskey + 112) = xmm7;\
393
394
395#define bitslicekey10(key, bskey) \
396 xmm0 = *(int128 *) (key + 0);\
397 copy2(xmm1, xmm0);\
398 copy2(xmm2, xmm0);\
399 copy2(xmm3, xmm0);\
400 copy2(xmm4, xmm0);\
401 copy2(xmm5, xmm0);\
402 copy2(xmm6, xmm0);\
403 copy2(xmm7, xmm0);\
404 ;\
405 bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\
406 ;\
407 toggle(&xmm6);\
408 toggle(&xmm5);\
409 toggle(&xmm1);\
410 toggle(&xmm0);\
411 ;\
412 *(int128 *) (bskey + 0 + 1280) = xmm0;\
413 *(int128 *) (bskey + 16 + 1280) = xmm1;\
414 *(int128 *) (bskey + 32 + 1280) = xmm2;\
415 *(int128 *) (bskey + 48 + 1280) = xmm3;\
416 *(int128 *) (bskey + 64 + 1280) = xmm4;\
417 *(int128 *) (bskey + 80 + 1280) = xmm5;\
418 *(int128 *) (bskey + 96 + 1280) = xmm6;\
419 *(int128 *) (bskey + 112 + 1280) = xmm7;\
420
421
422#define bitslicekey(i,key,bskey) \
423 xmm0 = *(int128 *) (key + 0);\
424 shufb(&xmm0, M0);\
425 copy2(&xmm1, &xmm0);\
426 copy2(&xmm2, &xmm0);\
427 copy2(&xmm3, &xmm0);\
428 copy2(&xmm4, &xmm0);\
429 copy2(&xmm5, &xmm0);\
430 copy2(&xmm6, &xmm0);\
431 copy2(&xmm7, &xmm0);\
432 ;\
433 bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\
434 ;\
435 toggle(&xmm6);\
436 toggle(&xmm5);\
437 toggle(&xmm1);\
438 toggle(&xmm0);\
439 ;\
440 *(int128 *) (bskey + 0 + 128*i) = xmm0;\
441 *(int128 *) (bskey + 16 + 128*i) = xmm1;\
442 *(int128 *) (bskey + 32 + 128*i) = xmm2;\
443 *(int128 *) (bskey + 48 + 128*i) = xmm3;\
444 *(int128 *) (bskey + 64 + 128*i) = xmm4;\
445 *(int128 *) (bskey + 80 + 128*i) = xmm5;\
446 *(int128 *) (bskey + 96 + 128*i) = xmm6;\
447 *(int128 *) (bskey + 112 + 128*i) = xmm7;\
448
449
450#define bitslice(x0, x1, x2, x3, x4, x5, x6, x7, t) \
451 swapmove(x0, x1, 1, BS0, t);\
452 swapmove(x2, x3, 1, BS0, t);\
453 swapmove(x4, x5, 1, BS0, t);\
454 swapmove(x6, x7, 1, BS0, t);\
455 ;\
456 swapmove(x0, x2, 2, BS1, t);\
457 swapmove(x1, x3, 2, BS1, t);\
458 swapmove(x4, x6, 2, BS1, t);\
459 swapmove(x5, x7, 2, BS1, t);\
460 ;\
461 swapmove(x0, x4, 4, BS2, t);\
462 swapmove(x1, x5, 4, BS2, t);\
463 swapmove(x2, x6, 4, BS2, t);\
464 swapmove(x3, x7, 4, BS2, t);\
465
466
467#define swapmove(a, b, n, m, t) \
468 copy2(&t, &b);\
469 rshift64_littleendian(&t, n);\
470 xor2(&t, &a);\
471 and2(&t, &m);\
472 xor2(&a, &t);\
473 lshift64_littleendian(&t, n);\
474 xor2(&b, &t);
475
476#define rotbyte(x) \
477 shufb(x, ROTB) /* TODO: Make faster */
478
479
480/* Macros used for encryption (and decryption) */
481
482#define shiftrows(x0, x1, x2, x3, x4, x5, x6, x7, i, M, bskey) \
483 xor2(&x0, (int128 *)(bskey + 128*(i-1) + 0));\
484 shufb(&x0, M);\
485 xor2(&x1, (int128 *)(bskey + 128*(i-1) + 16));\
486 shufb(&x1, M);\
487 xor2(&x2, (int128 *)(bskey + 128*(i-1) + 32));\
488 shufb(&x2, M);\
489 xor2(&x3, (int128 *)(bskey + 128*(i-1) + 48));\
490 shufb(&x3, M);\
491 xor2(&x4, (int128 *)(bskey + 128*(i-1) + 64));\
492 shufb(&x4, M);\
493 xor2(&x5, (int128 *)(bskey + 128*(i-1) + 80));\
494 shufb(&x5, M);\
495 xor2(&x6, (int128 *)(bskey + 128*(i-1) + 96));\
496 shufb(&x6, M);\
497 xor2(&x7, (int128 *)(bskey + 128*(i-1) + 112));\
498 shufb(&x7, M);\
499
500
501#define mixcolumns(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, t7) \
502 shufd(&t0, &x0, 0x93);\
503 shufd(&t1, &x1, 0x93);\
504 shufd(&t2, &x2, 0x93);\
505 shufd(&t3, &x3, 0x93);\
506 shufd(&t4, &x4, 0x93);\
507 shufd(&t5, &x5, 0x93);\
508 shufd(&t6, &x6, 0x93);\
509 shufd(&t7, &x7, 0x93);\
510 ;\
511 xor2(&x0, &t0);\
512 xor2(&x1, &t1);\
513 xor2(&x2, &t2);\
514 xor2(&x3, &t3);\
515 xor2(&x4, &t4);\
516 xor2(&x5, &t5);\
517 xor2(&x6, &t6);\
518 xor2(&x7, &t7);\
519 ;\
520 xor2(&t0, &x7);\
521 xor2(&t1, &x0);\
522 xor2(&t2, &x1);\
523 xor2(&t1, &x7);\
524 xor2(&t3, &x2);\
525 xor2(&t4, &x3);\
526 xor2(&t5, &x4);\
527 xor2(&t3, &x7);\
528 xor2(&t6, &x5);\
529 xor2(&t7, &x6);\
530 xor2(&t4, &x7);\
531 ;\
532 shufd(&x0, &x0, 0x4e);\
533 shufd(&x1, &x1, 0x4e);\
534 shufd(&x2, &x2, 0x4e);\
535 shufd(&x3, &x3, 0x4e);\
536 shufd(&x4, &x4, 0x4e);\
537 shufd(&x5, &x5, 0x4e);\
538 shufd(&x6, &x6, 0x4e);\
539 shufd(&x7, &x7, 0x4e);\
540 ;\
541 xor2(&t0, &x0);\
542 xor2(&t1, &x1);\
543 xor2(&t2, &x2);\
544 xor2(&t3, &x3);\
545 xor2(&t4, &x4);\
546 xor2(&t5, &x5);\
547 xor2(&t6, &x6);\
548 xor2(&t7, &x7);\
549
550
551#define aesround(i, b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \
552 shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, i, SR, bskey);\
553 sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\
554 mixcolumns(b0, b1, b4, b6, b3, b7, b2, b5, t0, t1, t2, t3, t4, t5, t6, t7);\
555
556
557#define lastround(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \
558 shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, 10, SRM0, bskey);\
559 sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\
560 xor2(&b0,(int128 *)(bskey + 128*10));\
561 xor2(&b1,(int128 *)(bskey + 128*10+16));\
562 xor2(&b4,(int128 *)(bskey + 128*10+32));\
563 xor2(&b6,(int128 *)(bskey + 128*10+48));\
564 xor2(&b3,(int128 *)(bskey + 128*10+64));\
565 xor2(&b7,(int128 *)(bskey + 128*10+80));\
566 xor2(&b2,(int128 *)(bskey + 128*10+96));\
567 xor2(&b5,(int128 *)(bskey + 128*10+112));\
568
569
570#define sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, s0, s1, s2, s3) \
571 InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7); \
572 Inv_GF256(b6, b5, b0, b3, b7, b1, b4, b2, t0, t1, t2, t3, s0, s1, s2, s3); \
573 OutBasisChange(b7, b1, b4, b2, b6, b5, b0, b3); \
574
575
576#define InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \
577 xor2(&b5, &b6);\
578 xor2(&b2, &b1);\
579 xor2(&b5, &b0);\
580 xor2(&b6, &b2);\
581 xor2(&b3, &b0);\
582 ;\
583 xor2(&b6, &b3);\
584 xor2(&b3, &b7);\
585 xor2(&b3, &b4);\
586 xor2(&b7, &b5);\
587 xor2(&b3, &b1);\
588 ;\
589 xor2(&b4, &b5);\
590 xor2(&b2, &b7);\
591 xor2(&b1, &b5);\
592
593#define OutBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \
594 xor2(&b0, &b6);\
595 xor2(&b1, &b4);\
596 xor2(&b2, &b0);\
597 xor2(&b4, &b6);\
598 xor2(&b6, &b1);\
599 ;\
600 xor2(&b1, &b5);\
601 xor2(&b5, &b3);\
602 xor2(&b2, &b5);\
603 xor2(&b3, &b7);\
604 xor2(&b7, &b5);\
605 ;\
606 xor2(&b4, &b7);\
607
608#define Mul_GF4(x0, x1, y0, y1, t0) \
609 copy2(&t0, &y0);\
610 xor2(&t0, &y1);\
611 and2(&t0, &x0);\
612 xor2(&x0, &x1);\
613 and2(&x0, &y1);\
614 and2(&x1, &y0);\
615 xor2(&x0, &x1);\
616 xor2(&x1, &t0);\
617
618#define Mul_GF4_N(x0, x1, y0, y1, t0) \
619 copy2(&t0, &y0);\
620 xor2(&t0, &y1);\
621 and2(&t0, &x0);\
622 xor2(&x0, &x1);\
623 and2(&x0, &y1);\
624 and2(&x1, &y0);\
625 xor2(&x1, &x0);\
626 xor2(&x0, &t0);\
627
628#define Mul_GF4_2(x0, x1, x2, x3, y0, y1, t0, t1) \
 629 copy2(&t0, &y0);\
630 xor2(&t0, &y1);\
631 copy2(&t1, &t0);\
632 and2(&t0, &x0);\
633 and2(&t1, &x2);\
634 xor2(&x0, &x1);\
635 xor2(&x2, &x3);\
636 and2(&x0, &y1);\
637 and2(&x2, &y1);\
638 and2(&x1, &y0);\
639 and2(&x3, &y0);\
640 xor2(&x0, &x1);\
641 xor2(&x2, &x3);\
642 xor2(&x1, &t0);\
643 xor2(&x3, &t1);\
644
645#define Mul_GF16(x0, x1, x2, x3, y0, y1, y2, y3, t0, t1, t2, t3) \
646 copy2(&t0, &x0);\
647 copy2(&t1, &x1);\
648 Mul_GF4(x0, x1, y0, y1, t2);\
649 xor2(&t0, &x2);\
650 xor2(&t1, &x3);\
651 xor2(&y0, &y2);\
652 xor2(&y1, &y3);\
653 Mul_GF4_N(t0, t1, y0, y1, t2);\
654 Mul_GF4(x2, x3, y2, y3, t3);\
655 ;\
656 xor2(&x0, &t0);\
657 xor2(&x2, &t0);\
658 xor2(&x1, &t1);\
659 xor2(&x3, &t1);\
660
661#define Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, t0, t1, t2, t3) \
662 copy2(&t0, &x0);\
663 copy2(&t1, &x1);\
664 Mul_GF4(x0, x1, y0, y1, t2);\
665 xor2(&t0, &x2);\
666 xor2(&t1, &x3);\
667 xor2(&y0, &y2);\
668 xor2(&y1, &y3);\
669 Mul_GF4_N(t0, t1, y0, y1, t3);\
670 Mul_GF4(x2, x3, y2, y3, t2);\
671 ;\
672 xor2(&x0, &t0);\
673 xor2(&x2, &t0);\
674 xor2(&x1, &t1);\
675 xor2(&x3, &t1);\
676 ;\
677 copy2(&t0, &x4);\
678 copy2(&t1, &x5);\
679 xor2(&t0, &x6);\
680 xor2(&t1, &x7);\
681 Mul_GF4_N(t0, t1, y0, y1, t3);\
682 Mul_GF4(x6, x7, y2, y3, t2);\
683 xor2(&y0, &y2);\
684 xor2(&y1, &y3);\
685 Mul_GF4(x4, x5, y0, y1, t3);\
686 ;\
687 xor2(&x4, &t0);\
688 xor2(&x6, &t0);\
689 xor2(&x5, &t1);\
690 xor2(&x7, &t1);\
691
692#define Inv_GF16(x0, x1, x2, x3, t0, t1, t2, t3) \
693 copy2(&t0, &x1);\
694 copy2(&t1, &x0);\
695 and2(&t0, &x3);\
696 or2(&t1, &x2);\
697 copy2(&t2, &x1);\
698 copy2(&t3, &x0);\
699 or2(&t2, &x2);\
700 or2(&t3, &x3);\
701 xor2(&t2, &t3);\
702 ;\
703 xor2(&t0, &t2);\
704 xor2(&t1, &t2);\
705 ;\
706 Mul_GF4_2(x0, x1, x2, x3, t1, t0, t2, t3);\
707
708
709#define Inv_GF256(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, s0, s1, s2, s3) \
710 copy2(&t3, &x4);\
711 copy2(&t2, &x5);\
712 copy2(&t1, &x1);\
713 copy2(&s1, &x7);\
714 copy2(&s0, &x0);\
715 ;\
716 xor2(&t3, &x6);\
717 xor2(&t2, &x7);\
718 xor2(&t1, &x3);\
719 xor2(&s1, &x6);\
720 xor2(&s0, &x2);\
721 ;\
722 copy2(&s2, &t3);\
723 copy2(&t0, &t2);\
724 copy2(&s3, &t3);\
725 ;\
726 or2(&t2, &t1);\
727 or2(&t3, &s0);\
728 xor2(&s3, &t0);\
729 and2(&s2, &s0);\
730 and2(&t0, &t1);\
731 xor2(&s0, &t1);\
732 and2(&s3, &s0);\
733 copy2(&s0, &x3);\
734 xor2(&s0, &x2);\
735 and2(&s1, &s0);\
736 xor2(&t3, &s1);\
737 xor2(&t2, &s1);\
738 copy2(&s1, &x4);\
739 xor2(&s1, &x5);\
740 copy2(&s0, &x1);\
741 copy2(&t1, &s1);\
742 xor2(&s0, &x0);\
743 or2(&t1, &s0);\
744 and2(&s1, &s0);\
745 xor2(&t0, &s1);\
746 xor2(&t3, &s3);\
747 xor2(&t2, &s2);\
748 xor2(&t1, &s3);\
749 xor2(&t0, &s2);\
750 xor2(&t1, &s2);\
751 copy2(&s0, &x7);\
752 copy2(&s1, &x6);\
753 copy2(&s2, &x5);\
754 copy2(&s3, &x4);\
755 and2(&s0, &x3);\
756 and2(&s1, &x2);\
757 and2(&s2, &x1);\
758 or2(&s3, &x0);\
759 xor2(&t3, &s0);\
760 xor2(&t2, &s1);\
761 xor2(&t1, &s2);\
762 xor2(&t0, &s3);\
763 ;\
764 copy2(&s0, &t3);\
765 xor2(&s0, &t2);\
766 and2(&t3, &t1);\
767 copy2(&s2, &t0);\
768 xor2(&s2, &t3);\
769 copy2(&s3, &s0);\
770 and2(&s3, &s2);\
771 xor2(&s3, &t2);\
772 copy2(&s1, &t1);\
773 xor2(&s1, &t0);\
774 xor2(&t3, &t2);\
775 and2(&s1, &t3);\
776 xor2(&s1, &t0);\
777 xor2(&t1, &s1);\
778 copy2(&t2, &s2);\
779 xor2(&t2, &s1);\
780 and2(&t2, &t0);\
781 xor2(&t1, &t2);\
782 xor2(&s2, &t2);\
783 and2(&s2, &s3);\
784 xor2(&s2, &s0);\
785 ;\
786 Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, s3, s2, s1, t1, s0, t0, t2, t3);\
787
788#endif
diff --git a/nacl/crypto_stream/aes128ctr/portable/consts.c b/nacl/crypto_stream/aes128ctr/portable/consts.c
new file mode 100644
index 00000000..ed2835db
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/consts.c
@@ -0,0 +1,14 @@
1#include "consts.h"
2
3const unsigned char ROTB[16] = {0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08};
4const unsigned char M0[16] = {0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00};
5const unsigned char EXPB0[16] = {0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07, 0x0b, 0x0b, 0x0b, 0x0b, 0x0f, 0x0f, 0x0f, 0x0f};
6
7const unsigned char SWAP32[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c};
8const unsigned char M0SWAP[16] = {0x0c, 0x08, 0x04, 0x00, 0x0d, 0x09, 0x05, 0x01, 0x0e, 0x0a, 0x06, 0x02, 0x0f, 0x0b, 0x07, 0x03};
9const unsigned char SR[16] = {0x01, 0x02, 0x03, 0x00, 0x06, 0x07, 0x04, 0x05, 0x0b, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x0f};
10const unsigned char SRM0[16] = {0x0f, 0x0a, 0x05, 0x00, 0x0e, 0x09, 0x04, 0x03, 0x0d, 0x08, 0x07, 0x02, 0x0c, 0x0b, 0x06, 0x01};
11
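/* BS0, BS1 and BS2 mask alternating bits, bit pairs and nibbles; they are
   presumably the swap masks used by the bitslice/unbitslice macros to
   interleave eight 16-byte blocks into eight bit slices. */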
12const int128 BS0 = {0x5555555555555555ULL, 0x5555555555555555ULL};
13const int128 BS1 = {0x3333333333333333ULL, 0x3333333333333333ULL};
14const int128 BS2 = {0x0f0f0f0f0f0f0f0fULL, 0x0f0f0f0f0f0f0f0fULL};
diff --git a/nacl/crypto_stream/aes128ctr/portable/consts.h b/nacl/crypto_stream/aes128ctr/portable/consts.h
new file mode 100644
index 00000000..4c50360b
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/consts.h
@@ -0,0 +1,28 @@
1#ifndef CONSTS_H
2#define CONSTS_H
3
4#include "int128.h"
5
6#define ROTB crypto_stream_aes128ctr_portable_ROTB
7#define M0 crypto_stream_aes128ctr_portable_M0
8#define EXPB0 crypto_stream_aes128ctr_portable_EXPB0
9#define SWAP32 crypto_stream_aes128ctr_portable_SWAP32
10#define M0SWAP crypto_stream_aes128ctr_portable_M0SWAP
11#define SR crypto_stream_aes128ctr_portable_SR
12#define SRM0 crypto_stream_aes128ctr_portable_SRM0
13#define BS0 crypto_stream_aes128ctr_portable_BS0
14#define BS1 crypto_stream_aes128ctr_portable_BS1
15#define BS2 crypto_stream_aes128ctr_portable_BS2
16
17extern const unsigned char ROTB[16];
18extern const unsigned char M0[16];
19extern const unsigned char EXPB0[16];
20extern const unsigned char SWAP32[16];
21extern const unsigned char M0SWAP[16];
22extern const unsigned char SR[16];
23extern const unsigned char SRM0[16];
24extern const int128 BS0;
25extern const int128 BS1;
26extern const int128 BS2;
27
28#endif
diff --git a/nacl/crypto_stream/aes128ctr/portable/int128.c b/nacl/crypto_stream/aes128ctr/portable/int128.c
new file mode 100644
index 00000000..25894d42
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/int128.c
@@ -0,0 +1,128 @@
1#include "int128.h"
2#include "common.h"
3
4void xor2(int128 *r, const int128 *x)
5{
6 r->a ^= x->a;
7 r->b ^= x->b;
8}
9
10void and2(int128 *r, const int128 *x)
11{
12 r->a &= x->a;
13 r->b &= x->b;
14}
15
16void or2(int128 *r, const int128 *x)
17{
18 r->a |= x->a;
19 r->b |= x->b;
20}
21
22void copy2(int128 *r, const int128 *x)
23{
24 r->a = x->a;
25 r->b = x->b;
26}
27
28void shufb(int128 *r, const unsigned char *l)
29{
30 int128 t;
31 copy2(&t,r);
32 unsigned char *cr = (unsigned char *)r;
33 unsigned char *ct = (unsigned char *)&t;
34 cr[0] = ct[l[0]];
35 cr[1] = ct[l[1]];
36 cr[2] = ct[l[2]];
37 cr[3] = ct[l[3]];
38 cr[4] = ct[l[4]];
39 cr[5] = ct[l[5]];
40 cr[6] = ct[l[6]];
41 cr[7] = ct[l[7]];
42 cr[8] = ct[l[8]];
43 cr[9] = ct[l[9]];
44 cr[10] = ct[l[10]];
45 cr[11] = ct[l[11]];
46 cr[12] = ct[l[12]];
47 cr[13] = ct[l[13]];
48 cr[14] = ct[l[14]];
49 cr[15] = ct[l[15]];
50}
51
52void shufd(int128 *r, const int128 *x, const unsigned int c)
53{
54 int128 t;
55 uint32 *tp = (uint32 *)&t;
56 uint32 *xp = (uint32 *)x;
57 tp[0] = xp[c&3];
58 tp[1] = xp[(c>>2)&3];
59 tp[2] = xp[(c>>4)&3];
60 tp[3] = xp[(c>>6)&3];
61 copy2(r,&t);
62}
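/* shufb permutes the 16 bytes of *r according to the index table l, and
   shufd picks 32-bit words of *x by the four 2-bit fields of c -- plain-C
   versions of the x86 pshufb and pshufd shuffles. */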
63
64void rshift32_littleendian(int128 *r, const unsigned int n)
65{
66 unsigned char *rp = (unsigned char *)r;
67 uint32 t;
68 t = load32_littleendian(rp);
69 t >>= n;
70 store32_littleendian(rp, t);
71 t = load32_littleendian(rp+4);
72 t >>= n;
73 store32_littleendian(rp+4, t);
74 t = load32_littleendian(rp+8);
75 t >>= n;
76 store32_littleendian(rp+8, t);
77 t = load32_littleendian(rp+12);
78 t >>= n;
79 store32_littleendian(rp+12, t);
80}
81
82void rshift64_littleendian(int128 *r, const unsigned int n)
83{
84 unsigned char *rp = (unsigned char *)r;
85 uint64 t;
86 t = load64_littleendian(rp);
87 t >>= n;
88 store64_littleendian(rp, t);
89 t = load64_littleendian(rp+8);
90 t >>= n;
91 store64_littleendian(rp+8, t);
92}
93
94void lshift64_littleendian(int128 *r, const unsigned int n)
95{
96 unsigned char *rp = (unsigned char *)r;
97 uint64 t;
98 t = load64_littleendian(rp);
99 t <<= n;
100 store64_littleendian(rp, t);
101 t = load64_littleendian(rp+8);
102 t <<= n;
103 store64_littleendian(rp+8, t);
104}
105
106void toggle(int128 *r)
107{
108 r->a ^= 0xffffffffffffffffULL;
109 r->b ^= 0xffffffffffffffffULL;
110}
111
112void xor_rcon(int128 *r)
113{
114 unsigned char *rp = (unsigned char *)r;
115 uint32 t;
116 t = load32_littleendian(rp+12);
117 t ^= 0xffffffff;
118 store32_littleendian(rp+12, t);
119}
120
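/* Adds x to the 32-bit little-endian word in the last four bytes of *r;
   the stream code uses this to derive the counters of the eight parallel
   blocks (see xor_afternm.c). */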
121void add_uint32_big(int128 *r, uint32 x)
122{
123 unsigned char *rp = (unsigned char *)r;
124 uint32 t;
125 t = load32_littleendian(rp+12);
126 t += x;
127 store32_littleendian(rp+12, t);
128}
diff --git a/nacl/crypto_stream/aes128ctr/portable/int128.h b/nacl/crypto_stream/aes128ctr/portable/int128.h
new file mode 100644
index 00000000..7099e5b1
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/int128.h
@@ -0,0 +1,47 @@
1#ifndef INT128_H
2#define INT128_H
3
4#include "common.h"
5
6typedef struct{
7 unsigned long long a;
8 unsigned long long b;
9} int128;
10
11#define xor2 crypto_stream_aes128ctr_portable_xor2
12void xor2(int128 *r, const int128 *x);
13
14#define and2 crypto_stream_aes128ctr_portable_and2
15void and2(int128 *r, const int128 *x);
16
17#define or2 crypto_stream_aes128ctr_portable_or2
18void or2(int128 *r, const int128 *x);
19
20#define copy2 crypto_stream_aes128ctr_portable_copy2
21void copy2(int128 *r, const int128 *x);
22
23#define shufb crypto_stream_aes128ctr_portable_shufb
24void shufb(int128 *r, const unsigned char *l);
25
26#define shufd crypto_stream_aes128ctr_portable_shufd
27void shufd(int128 *r, const int128 *x, const unsigned int c);
28
29#define rshift32_littleendian crypto_stream_aes128ctr_portable_rshift32_littleendian
30void rshift32_littleendian(int128 *r, const unsigned int n);
31
32#define rshift64_littleendian crypto_stream_aes128ctr_portable_rshift64_littleendian
33void rshift64_littleendian(int128 *r, const unsigned int n);
34
35#define lshift64_littleendian crypto_stream_aes128ctr_portable_lshift64_littleendian
36void lshift64_littleendian(int128 *r, const unsigned int n);
37
38#define toggle crypto_stream_aes128ctr_portable_toggle
39void toggle(int128 *r);
40
41#define xor_rcon crypto_stream_aes128ctr_portable_xor_rcon
42void xor_rcon(int128 *r);
43
44#define add_uint32_big crypto_stream_aes128ctr_portable_add_uint32_big
45void add_uint32_big(int128 *r, uint32 x);
46
47#endif
diff --git a/nacl/crypto_stream/aes128ctr/portable/stream.c b/nacl/crypto_stream/aes128ctr/portable/stream.c
new file mode 100644
index 00000000..963fa8c1
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/stream.c
@@ -0,0 +1,28 @@
1#include "crypto_stream.h"
2
3int crypto_stream(
4 unsigned char *out,
5 unsigned long long outlen,
6 const unsigned char *n,
7 const unsigned char *k
8 )
9{
10 unsigned char d[crypto_stream_BEFORENMBYTES];
11 crypto_stream_beforenm(d, k);
12 crypto_stream_afternm(out, outlen, n, d);
13 return 0;
14}
15
16int crypto_stream_xor(
17 unsigned char *out,
18 const unsigned char *in,
19 unsigned long long inlen,
20 const unsigned char *n,
21 const unsigned char *k
22 )
23{
24 unsigned char d[crypto_stream_BEFORENMBYTES];
25 crypto_stream_beforenm(d, k);
26 crypto_stream_xor_afternm(out, in, inlen, n, d);
27 return 0;
28}
diff --git a/nacl/crypto_stream/aes128ctr/portable/types.h b/nacl/crypto_stream/aes128ctr/portable/types.h
new file mode 100644
index 00000000..6aa502fc
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/types.h
@@ -0,0 +1,10 @@
1#ifndef TYPES_H
2#define TYPES_H
3
4#include "crypto_uint32.h"
5typedef crypto_uint32 uint32;
6
7#include "crypto_uint64.h"
8typedef crypto_uint64 uint64;
9
10#endif
diff --git a/nacl/crypto_stream/aes128ctr/portable/xor_afternm.c b/nacl/crypto_stream/aes128ctr/portable/xor_afternm.c
new file mode 100644
index 00000000..f2ff8ff6
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/portable/xor_afternm.c
@@ -0,0 +1,180 @@
1/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper
2 * Date: 2009-03-19
3 * Public domain */
4
5#include <stdio.h>
6#include "int128.h"
7#include "common.h"
8#include "consts.h"
9#include "crypto_stream.h"
10
11int crypto_stream_xor_afternm(unsigned char *outp, const unsigned char *inp, unsigned long long len, const unsigned char *noncep, const unsigned char *c)
12{
13
14 int128 xmm0;
15 int128 xmm1;
16 int128 xmm2;
17 int128 xmm3;
18 int128 xmm4;
19 int128 xmm5;
20 int128 xmm6;
21 int128 xmm7;
22
23 int128 xmm8;
24 int128 xmm9;
25 int128 xmm10;
26 int128 xmm11;
27 int128 xmm12;
28 int128 xmm13;
29 int128 xmm14;
30 int128 xmm15;
31
32 int128 nonce_stack;
33 unsigned long long lensav;
34 unsigned char bl[128];
35 unsigned char *blp;
36 unsigned char b;
37
38 uint32 tmp;
39
40 /* Copy nonce on the stack */
41 copy2(&nonce_stack, (int128 *) (noncep + 0));
42 unsigned char *np = (unsigned char *)&nonce_stack;
43
44 enc_block:
45
46 xmm0 = *(int128 *) (np + 0);
47 copy2(&xmm1, &xmm0);
48 shufb(&xmm1, SWAP32);
49 copy2(&xmm2, &xmm1);
50 copy2(&xmm3, &xmm1);
51 copy2(&xmm4, &xmm1);
52 copy2(&xmm5, &xmm1);
53 copy2(&xmm6, &xmm1);
54 copy2(&xmm7, &xmm1);
55
56 add_uint32_big(&xmm1, 1);
57 add_uint32_big(&xmm2, 2);
58 add_uint32_big(&xmm3, 3);
59 add_uint32_big(&xmm4, 4);
60 add_uint32_big(&xmm5, 5);
61 add_uint32_big(&xmm6, 6);
62 add_uint32_big(&xmm7, 7);
63
64 shufb(&xmm0, M0);
65 shufb(&xmm1, M0SWAP);
66 shufb(&xmm2, M0SWAP);
67 shufb(&xmm3, M0SWAP);
68 shufb(&xmm4, M0SWAP);
69 shufb(&xmm5, M0SWAP);
70 shufb(&xmm6, M0SWAP);
71 shufb(&xmm7, M0SWAP);
72
73 bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8)
74
75 aesround( 1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
76 aesround( 2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
77 aesround( 3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
78 aesround( 4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
79 aesround( 5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
80 aesround( 6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
81 aesround( 7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
82 aesround( 8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
83 aesround( 9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c)
84 lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c)
85
86 bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0)
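  /* After un-bitslicing, the eight 16-byte keystream blocks sit, in order,
     in xmm8, xmm9, xmm12, xmm14, xmm11, xmm15, xmm10 and xmm13; the XORs
     and stores below depend on exactly this ordering. */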
87
88 if(len < 128) goto partial;
89 if(len == 128) goto full;
90
91 tmp = load32_bigendian(np + 12);
92 tmp += 8;
93 store32_bigendian(np + 12, tmp);
94
95 xor2(&xmm8, (int128 *)(inp + 0));
96 xor2(&xmm9, (int128 *)(inp + 16));
97 xor2(&xmm12, (int128 *)(inp + 32));
98 xor2(&xmm14, (int128 *)(inp + 48));
99 xor2(&xmm11, (int128 *)(inp + 64));
100 xor2(&xmm15, (int128 *)(inp + 80));
101 xor2(&xmm10, (int128 *)(inp + 96));
102 xor2(&xmm13, (int128 *)(inp + 112));
103
104 *(int128 *) (outp + 0) = xmm8;
105 *(int128 *) (outp + 16) = xmm9;
106 *(int128 *) (outp + 32) = xmm12;
107 *(int128 *) (outp + 48) = xmm14;
108 *(int128 *) (outp + 64) = xmm11;
109 *(int128 *) (outp + 80) = xmm15;
110 *(int128 *) (outp + 96) = xmm10;
111 *(int128 *) (outp + 112) = xmm13;
112
113 len -= 128;
114 inp += 128;
115 outp += 128;
116
117 goto enc_block;
118
119 partial:
120
121 lensav = len;
122 len >>= 4;
123
124 tmp = load32_bigendian(np + 12);
125 tmp += len;
126 store32_bigendian(np + 12, tmp);
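  /* len was shifted right by 4 above, so the big-endian counter in the last
     nonce word advances by the number of complete 16-byte blocks contained
     in this final partial chunk. */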
127
128 blp = bl;
129 *(int128 *)(blp + 0) = xmm8;
130 *(int128 *)(blp + 16) = xmm9;
131 *(int128 *)(blp + 32) = xmm12;
132 *(int128 *)(blp + 48) = xmm14;
133 *(int128 *)(blp + 64) = xmm11;
134 *(int128 *)(blp + 80) = xmm15;
135 *(int128 *)(blp + 96) = xmm10;
136 *(int128 *)(blp + 112) = xmm13;
137
138 bytes:
139
140 if(lensav == 0) goto end;
141
142 b = blp[0];
143 b ^= *(unsigned char *)(inp + 0);
144 *(unsigned char *)(outp + 0) = b;
145
146 blp += 1;
 147 inp += 1;
 148 outp += 1;
149 lensav -= 1;
150
151 goto bytes;
152
153 full:
154
155 tmp = load32_bigendian(np + 12);
156 tmp += 8;
157 store32_bigendian(np + 12, tmp);
158
159 xor2(&xmm8, (int128 *)(inp + 0));
160 xor2(&xmm9, (int128 *)(inp + 16));
161 xor2(&xmm12, (int128 *)(inp + 32));
162 xor2(&xmm14, (int128 *)(inp + 48));
163 xor2(&xmm11, (int128 *)(inp + 64));
164 xor2(&xmm15, (int128 *)(inp + 80));
165 xor2(&xmm10, (int128 *)(inp + 96));
166 xor2(&xmm13, (int128 *)(inp + 112));
167
168 *(int128 *) (outp + 0) = xmm8;
169 *(int128 *) (outp + 16) = xmm9;
170 *(int128 *) (outp + 32) = xmm12;
171 *(int128 *) (outp + 48) = xmm14;
172 *(int128 *) (outp + 64) = xmm11;
173 *(int128 *) (outp + 80) = xmm15;
174 *(int128 *) (outp + 96) = xmm10;
175 *(int128 *) (outp + 112) = xmm13;
176
177 end:
178 return 0;
179
180}
diff --git a/nacl/crypto_stream/aes128ctr/used b/nacl/crypto_stream/aes128ctr/used
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/aes128ctr/used
diff --git a/nacl/crypto_stream/measure.c b/nacl/crypto_stream/measure.c
new file mode 100644
index 00000000..ff3ab610
--- /dev/null
+++ b/nacl/crypto_stream/measure.c
@@ -0,0 +1,73 @@
1#include <stdlib.h>
2#include "randombytes.h"
3#include "cpucycles.h"
4#include "crypto_stream.h"
5
6extern void printentry(long long,const char *,long long *,long long);
7extern unsigned char *alignedcalloc(unsigned long long);
8extern const char *primitiveimplementation;
9extern const char *implementationversion;
10extern const char *sizenames[];
11extern const long long sizes[];
12extern void allocate(void);
13extern void measure(void);
14
15const char *primitiveimplementation = crypto_stream_IMPLEMENTATION;
16const char *implementationversion = crypto_stream_VERSION;
17const char *sizenames[] = { "keybytes", "noncebytes", 0 };
18const long long sizes[] = { crypto_stream_KEYBYTES, crypto_stream_NONCEBYTES };
19
20#define MAXTEST_BYTES 4096
21#ifdef SUPERCOP
22#define MGAP 8192
23#else
24#define MGAP 8
25#endif
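/* mlen grows by 1 + mlen/MGAP each iteration, so the large SUPERCOP value
   samples message lengths densely while the default of 8 keeps standalone
   runs quick. */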
26
27static unsigned char *k;
28static unsigned char *n;
29static unsigned char *m;
30static unsigned char *c;
31
32void preallocate(void)
33{
34}
35
36void allocate(void)
37{
38 k = alignedcalloc(crypto_stream_KEYBYTES);
39 n = alignedcalloc(crypto_stream_NONCEBYTES);
40 m = alignedcalloc(MAXTEST_BYTES);
41 c = alignedcalloc(MAXTEST_BYTES);
42}
43
44#define TIMINGS 15
45static long long cycles[TIMINGS + 1];
46
47void measure(void)
48{
49 int i;
50 int loop;
51 int mlen;
52
53 for (loop = 0;loop < LOOPS;++loop) {
54 for (mlen = 0;mlen <= MAXTEST_BYTES;mlen += 1 + mlen / MGAP) {
55 randombytes(k,crypto_stream_KEYBYTES);
56 randombytes(n,crypto_stream_NONCEBYTES);
57 randombytes(m,mlen);
58 randombytes(c,mlen);
59 for (i = 0;i <= TIMINGS;++i) {
60 cycles[i] = cpucycles();
61 crypto_stream(c,mlen,n,k);
62 }
63 for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i];
64 printentry(mlen,"cycles",cycles,TIMINGS);
65 for (i = 0;i <= TIMINGS;++i) {
66 cycles[i] = cpucycles();
67 crypto_stream_xor(c,m,mlen,n,k);
68 }
69 for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i];
70 printentry(mlen,"xor_cycles",cycles,TIMINGS);
71 }
72 }
73}
diff --git a/nacl/crypto_stream/salsa20/amd64_xmm6/api.h b/nacl/crypto_stream/salsa20/amd64_xmm6/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/amd64_xmm6/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa20/amd64_xmm6/implementors b/nacl/crypto_stream/salsa20/amd64_xmm6/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/amd64_xmm6/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa20/amd64_xmm6/stream.s b/nacl/crypto_stream/salsa20/amd64_xmm6/stream.s
new file mode 100644
index 00000000..82a897f7
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/amd64_xmm6/stream.s
@@ -0,0 +1,4823 @@
1
2# qhasm: int64 r11_caller
3
4# qhasm: int64 r12_caller
5
6# qhasm: int64 r13_caller
7
8# qhasm: int64 r14_caller
9
10# qhasm: int64 r15_caller
11
12# qhasm: int64 rbx_caller
13
14# qhasm: int64 rbp_caller
15
16# qhasm: caller r11_caller
17
18# qhasm: caller r12_caller
19
20# qhasm: caller r13_caller
21
22# qhasm: caller r14_caller
23
24# qhasm: caller r15_caller
25
26# qhasm: caller rbx_caller
27
28# qhasm: caller rbp_caller
29
30# qhasm: stack64 r11_stack
31
32# qhasm: stack64 r12_stack
33
34# qhasm: stack64 r13_stack
35
36# qhasm: stack64 r14_stack
37
38# qhasm: stack64 r15_stack
39
40# qhasm: stack64 rbx_stack
41
42# qhasm: stack64 rbp_stack
43
44# qhasm: int64 a
45
46# qhasm: int64 arg1
47
48# qhasm: int64 arg2
49
50# qhasm: int64 arg3
51
52# qhasm: int64 arg4
53
54# qhasm: int64 arg5
55
56# qhasm: input arg1
57
58# qhasm: input arg2
59
60# qhasm: input arg3
61
62# qhasm: input arg4
63
64# qhasm: input arg5
65
66# qhasm: int64 k
67
68# qhasm: int64 kbits
69
70# qhasm: int64 iv
71
72# qhasm: int64 i
73
74# qhasm: stack128 x0
75
76# qhasm: stack128 x1
77
78# qhasm: stack128 x2
79
80# qhasm: stack128 x3
81
82# qhasm: int64 m
83
84# qhasm: int64 out
85
86# qhasm: int64 bytes
87
88# qhasm: stack32 eax_stack
89
90# qhasm: stack32 ebx_stack
91
92# qhasm: stack32 esi_stack
93
94# qhasm: stack32 edi_stack
95
96# qhasm: stack32 ebp_stack
97
98# qhasm: int6464 diag0
99
100# qhasm: int6464 diag1
101
102# qhasm: int6464 diag2
103
104# qhasm: int6464 diag3
105
106# qhasm: int6464 a0
107
108# qhasm: int6464 a1
109
110# qhasm: int6464 a2
111
112# qhasm: int6464 a3
113
114# qhasm: int6464 a4
115
116# qhasm: int6464 a5
117
118# qhasm: int6464 a6
119
120# qhasm: int6464 a7
121
122# qhasm: int6464 b0
123
124# qhasm: int6464 b1
125
126# qhasm: int6464 b2
127
128# qhasm: int6464 b3
129
130# qhasm: int6464 b4
131
132# qhasm: int6464 b5
133
134# qhasm: int6464 b6
135
136# qhasm: int6464 b7
137
138# qhasm: int6464 z0
139
140# qhasm: int6464 z1
141
142# qhasm: int6464 z2
143
144# qhasm: int6464 z3
145
146# qhasm: int6464 z4
147
148# qhasm: int6464 z5
149
150# qhasm: int6464 z6
151
152# qhasm: int6464 z7
153
154# qhasm: int6464 z8
155
156# qhasm: int6464 z9
157
158# qhasm: int6464 z10
159
160# qhasm: int6464 z11
161
162# qhasm: int6464 z12
163
164# qhasm: int6464 z13
165
166# qhasm: int6464 z14
167
168# qhasm: int6464 z15
169
170# qhasm: stack128 z0_stack
171
172# qhasm: stack128 z1_stack
173
174# qhasm: stack128 z2_stack
175
176# qhasm: stack128 z3_stack
177
178# qhasm: stack128 z4_stack
179
180# qhasm: stack128 z5_stack
181
182# qhasm: stack128 z6_stack
183
184# qhasm: stack128 z7_stack
185
186# qhasm: stack128 z8_stack
187
188# qhasm: stack128 z9_stack
189
190# qhasm: stack128 z10_stack
191
192# qhasm: stack128 z11_stack
193
194# qhasm: stack128 z12_stack
195
196# qhasm: stack128 z13_stack
197
198# qhasm: stack128 z14_stack
199
200# qhasm: stack128 z15_stack
201
202# qhasm: int6464 y0
203
204# qhasm: int6464 y1
205
206# qhasm: int6464 y2
207
208# qhasm: int6464 y3
209
210# qhasm: int6464 y4
211
212# qhasm: int6464 y5
213
214# qhasm: int6464 y6
215
216# qhasm: int6464 y7
217
218# qhasm: int6464 y8
219
220# qhasm: int6464 y9
221
222# qhasm: int6464 y10
223
224# qhasm: int6464 y11
225
226# qhasm: int6464 y12
227
228# qhasm: int6464 y13
229
230# qhasm: int6464 y14
231
232# qhasm: int6464 y15
233
234# qhasm: int6464 r0
235
236# qhasm: int6464 r1
237
238# qhasm: int6464 r2
239
240# qhasm: int6464 r3
241
242# qhasm: int6464 r4
243
244# qhasm: int6464 r5
245
246# qhasm: int6464 r6
247
248# qhasm: int6464 r7
249
250# qhasm: int6464 r8
251
252# qhasm: int6464 r9
253
254# qhasm: int6464 r10
255
256# qhasm: int6464 r11
257
258# qhasm: int6464 r12
259
260# qhasm: int6464 r13
261
262# qhasm: int6464 r14
263
264# qhasm: int6464 r15
265
266# qhasm: stack128 orig0
267
268# qhasm: stack128 orig1
269
270# qhasm: stack128 orig2
271
272# qhasm: stack128 orig3
273
274# qhasm: stack128 orig4
275
276# qhasm: stack128 orig5
277
278# qhasm: stack128 orig6
279
280# qhasm: stack128 orig7
281
282# qhasm: stack128 orig8
283
284# qhasm: stack128 orig9
285
286# qhasm: stack128 orig10
287
288# qhasm: stack128 orig11
289
290# qhasm: stack128 orig12
291
292# qhasm: stack128 orig13
293
294# qhasm: stack128 orig14
295
296# qhasm: stack128 orig15
297
298# qhasm: int64 in0
299
300# qhasm: int64 in1
301
302# qhasm: int64 in2
303
304# qhasm: int64 in3
305
306# qhasm: int64 in4
307
308# qhasm: int64 in5
309
310# qhasm: int64 in6
311
312# qhasm: int64 in7
313
314# qhasm: int64 in8
315
316# qhasm: int64 in9
317
318# qhasm: int64 in10
319
320# qhasm: int64 in11
321
322# qhasm: int64 in12
323
324# qhasm: int64 in13
325
326# qhasm: int64 in14
327
328# qhasm: int64 in15
329
330# qhasm: stack512 tmp
331
332# qhasm: int64 ctarget
333
334# qhasm: stack64 bytes_backup
335
336# qhasm: enter crypto_stream_salsa20_amd64_xmm6
337.text
338.p2align 5
339.globl _crypto_stream_salsa20_amd64_xmm6
340.globl crypto_stream_salsa20_amd64_xmm6
341_crypto_stream_salsa20_amd64_xmm6:
342crypto_stream_salsa20_amd64_xmm6:
343mov %rsp,%r11
344and $31,%r11
345add $480,%r11
346sub %r11,%rsp
347
348# qhasm: r11_stack = r11_caller
349# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
350# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
351movq %r11,352(%rsp)
352
353# qhasm: r12_stack = r12_caller
354# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
355# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
356movq %r12,360(%rsp)
357
358# qhasm: r13_stack = r13_caller
359# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
360# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
361movq %r13,368(%rsp)
362
363# qhasm: r14_stack = r14_caller
364# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
365# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
366movq %r14,376(%rsp)
367
368# qhasm: r15_stack = r15_caller
369# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
370# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
371movq %r15,384(%rsp)
372
373# qhasm: rbx_stack = rbx_caller
374# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
375# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
376movq %rbx,392(%rsp)
377
378# qhasm: rbp_stack = rbp_caller
379# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
380# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
381movq %rbp,400(%rsp)
382
383# qhasm: bytes = arg2
384# asm 1: mov <arg2=int64#2,>bytes=int64#6
385# asm 2: mov <arg2=%rsi,>bytes=%r9
386mov %rsi,%r9
387
388# qhasm: out = arg1
389# asm 1: mov <arg1=int64#1,>out=int64#1
390# asm 2: mov <arg1=%rdi,>out=%rdi
391mov %rdi,%rdi
392
393# qhasm: m = out
394# asm 1: mov <out=int64#1,>m=int64#2
395# asm 2: mov <out=%rdi,>m=%rsi
396mov %rdi,%rsi
397
398# qhasm: iv = arg3
399# asm 1: mov <arg3=int64#3,>iv=int64#3
400# asm 2: mov <arg3=%rdx,>iv=%rdx
401mov %rdx,%rdx
402
403# qhasm: k = arg4
404# asm 1: mov <arg4=int64#4,>k=int64#8
405# asm 2: mov <arg4=%rcx,>k=%r10
406mov %rcx,%r10
407
408# qhasm: unsigned>? bytes - 0
409# asm 1: cmp $0,<bytes=int64#6
410# asm 2: cmp $0,<bytes=%r9
411cmp $0,%r9
412# comment:fp stack unchanged by jump
413
414# qhasm: goto done if !unsigned>
415jbe ._done
416
417# qhasm: a = 0
418# asm 1: mov $0,>a=int64#7
419# asm 2: mov $0,>a=%rax
420mov $0,%rax
421
422# qhasm: i = bytes
423# asm 1: mov <bytes=int64#6,>i=int64#4
424# asm 2: mov <bytes=%r9,>i=%rcx
425mov %r9,%rcx
426
427# qhasm: while (i) { *out++ = a; --i }
428rep stosb
429
430# qhasm: out -= bytes
431# asm 1: sub <bytes=int64#6,<out=int64#1
432# asm 2: sub <bytes=%r9,<out=%rdi
433sub %r9,%rdi
434# comment:fp stack unchanged by jump
435
436# qhasm: goto start
437jmp ._start
438
439# qhasm: enter crypto_stream_salsa20_amd64_xmm6_xor
440.text
441.p2align 5
442.globl _crypto_stream_salsa20_amd64_xmm6_xor
443.globl crypto_stream_salsa20_amd64_xmm6_xor
444_crypto_stream_salsa20_amd64_xmm6_xor:
445crypto_stream_salsa20_amd64_xmm6_xor:
446mov %rsp,%r11
447and $31,%r11
448add $480,%r11
449sub %r11,%rsp
450
451# qhasm: r11_stack = r11_caller
452# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
453# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
454movq %r11,352(%rsp)
455
456# qhasm: r12_stack = r12_caller
457# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
458# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
459movq %r12,360(%rsp)
460
461# qhasm: r13_stack = r13_caller
462# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
463# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
464movq %r13,368(%rsp)
465
466# qhasm: r14_stack = r14_caller
467# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
468# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
469movq %r14,376(%rsp)
470
471# qhasm: r15_stack = r15_caller
472# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
473# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
474movq %r15,384(%rsp)
475
476# qhasm: rbx_stack = rbx_caller
477# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
478# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
479movq %rbx,392(%rsp)
480
481# qhasm: rbp_stack = rbp_caller
482# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
483# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
484movq %rbp,400(%rsp)
485
486# qhasm: out = arg1
487# asm 1: mov <arg1=int64#1,>out=int64#1
488# asm 2: mov <arg1=%rdi,>out=%rdi
489mov %rdi,%rdi
490
491# qhasm: m = arg2
492# asm 1: mov <arg2=int64#2,>m=int64#2
493# asm 2: mov <arg2=%rsi,>m=%rsi
494mov %rsi,%rsi
495
496# qhasm: bytes = arg3
497# asm 1: mov <arg3=int64#3,>bytes=int64#6
498# asm 2: mov <arg3=%rdx,>bytes=%r9
499mov %rdx,%r9
500
501# qhasm: iv = arg4
502# asm 1: mov <arg4=int64#4,>iv=int64#3
503# asm 2: mov <arg4=%rcx,>iv=%rdx
504mov %rcx,%rdx
505
506# qhasm: k = arg5
507# asm 1: mov <arg5=int64#5,>k=int64#8
508# asm 2: mov <arg5=%r8,>k=%r10
509mov %r8,%r10
510
511# qhasm: unsigned>? bytes - 0
512# asm 1: cmp $0,<bytes=int64#6
513# asm 2: cmp $0,<bytes=%r9
514cmp $0,%r9
515# comment:fp stack unchanged by jump
516
517# qhasm: goto done if !unsigned>
518jbe ._done
519# comment:fp stack unchanged by fallthrough
520
521# qhasm: start:
522._start:
523
524# qhasm: in12 = *(uint32 *) (k + 20)
525# asm 1: movl 20(<k=int64#8),>in12=int64#4d
526# asm 2: movl 20(<k=%r10),>in12=%ecx
527movl 20(%r10),%ecx
528
529# qhasm: in1 = *(uint32 *) (k + 0)
530# asm 1: movl 0(<k=int64#8),>in1=int64#5d
531# asm 2: movl 0(<k=%r10),>in1=%r8d
532movl 0(%r10),%r8d
533
534# qhasm: in6 = *(uint32 *) (iv + 0)
535# asm 1: movl 0(<iv=int64#3),>in6=int64#7d
536# asm 2: movl 0(<iv=%rdx),>in6=%eax
537movl 0(%rdx),%eax
538
539# qhasm: in11 = *(uint32 *) (k + 16)
540# asm 1: movl 16(<k=int64#8),>in11=int64#9d
541# asm 2: movl 16(<k=%r10),>in11=%r11d
542movl 16(%r10),%r11d
543
544# qhasm: ((uint32 *)&x1)[0] = in12
545# asm 1: movl <in12=int64#4d,>x1=stack128#1
546# asm 2: movl <in12=%ecx,>x1=0(%rsp)
547movl %ecx,0(%rsp)
548
549# qhasm: ((uint32 *)&x1)[1] = in1
550# asm 1: movl <in1=int64#5d,4+<x1=stack128#1
551# asm 2: movl <in1=%r8d,4+<x1=0(%rsp)
552movl %r8d,4+0(%rsp)
553
554# qhasm: ((uint32 *)&x1)[2] = in6
555# asm 1: movl <in6=int64#7d,8+<x1=stack128#1
556# asm 2: movl <in6=%eax,8+<x1=0(%rsp)
557movl %eax,8+0(%rsp)
558
559# qhasm: ((uint32 *)&x1)[3] = in11
560# asm 1: movl <in11=int64#9d,12+<x1=stack128#1
561# asm 2: movl <in11=%r11d,12+<x1=0(%rsp)
562movl %r11d,12+0(%rsp)
563
564# qhasm: in8 = 0
565# asm 1: mov $0,>in8=int64#4
566# asm 2: mov $0,>in8=%rcx
567mov $0,%rcx
568
569# qhasm: in13 = *(uint32 *) (k + 24)
570# asm 1: movl 24(<k=int64#8),>in13=int64#5d
571# asm 2: movl 24(<k=%r10),>in13=%r8d
572movl 24(%r10),%r8d
573
574# qhasm: in2 = *(uint32 *) (k + 4)
575# asm 1: movl 4(<k=int64#8),>in2=int64#7d
576# asm 2: movl 4(<k=%r10),>in2=%eax
577movl 4(%r10),%eax
578
579# qhasm: in7 = *(uint32 *) (iv + 4)
580# asm 1: movl 4(<iv=int64#3),>in7=int64#3d
581# asm 2: movl 4(<iv=%rdx),>in7=%edx
582movl 4(%rdx),%edx
583
584# qhasm: ((uint32 *)&x2)[0] = in8
585# asm 1: movl <in8=int64#4d,>x2=stack128#2
586# asm 2: movl <in8=%ecx,>x2=16(%rsp)
587movl %ecx,16(%rsp)
588
589# qhasm: ((uint32 *)&x2)[1] = in13
590# asm 1: movl <in13=int64#5d,4+<x2=stack128#2
591# asm 2: movl <in13=%r8d,4+<x2=16(%rsp)
592movl %r8d,4+16(%rsp)
593
594# qhasm: ((uint32 *)&x2)[2] = in2
595# asm 1: movl <in2=int64#7d,8+<x2=stack128#2
596# asm 2: movl <in2=%eax,8+<x2=16(%rsp)
597movl %eax,8+16(%rsp)
598
599# qhasm: ((uint32 *)&x2)[3] = in7
600# asm 1: movl <in7=int64#3d,12+<x2=stack128#2
601# asm 2: movl <in7=%edx,12+<x2=16(%rsp)
602movl %edx,12+16(%rsp)
603
604# qhasm: in4 = *(uint32 *) (k + 12)
605# asm 1: movl 12(<k=int64#8),>in4=int64#3d
606# asm 2: movl 12(<k=%r10),>in4=%edx
607movl 12(%r10),%edx
608
609# qhasm: in9 = 0
610# asm 1: mov $0,>in9=int64#4
611# asm 2: mov $0,>in9=%rcx
612mov $0,%rcx
613
614# qhasm: in14 = *(uint32 *) (k + 28)
615# asm 1: movl 28(<k=int64#8),>in14=int64#5d
616# asm 2: movl 28(<k=%r10),>in14=%r8d
617movl 28(%r10),%r8d
618
619# qhasm: in3 = *(uint32 *) (k + 8)
620# asm 1: movl 8(<k=int64#8),>in3=int64#7d
621# asm 2: movl 8(<k=%r10),>in3=%eax
622movl 8(%r10),%eax
623
624# qhasm: ((uint32 *)&x3)[0] = in4
625# asm 1: movl <in4=int64#3d,>x3=stack128#3
626# asm 2: movl <in4=%edx,>x3=32(%rsp)
627movl %edx,32(%rsp)
628
629# qhasm: ((uint32 *)&x3)[1] = in9
630# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
631# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
632movl %ecx,4+32(%rsp)
633
634# qhasm: ((uint32 *)&x3)[2] = in14
635# asm 1: movl <in14=int64#5d,8+<x3=stack128#3
636# asm 2: movl <in14=%r8d,8+<x3=32(%rsp)
637movl %r8d,8+32(%rsp)
638
639# qhasm: ((uint32 *)&x3)[3] = in3
640# asm 1: movl <in3=int64#7d,12+<x3=stack128#3
641# asm 2: movl <in3=%eax,12+<x3=32(%rsp)
642movl %eax,12+32(%rsp)
643
644# qhasm: in0 = 1634760805
645# asm 1: mov $1634760805,>in0=int64#3
646# asm 2: mov $1634760805,>in0=%rdx
647mov $1634760805,%rdx
648
649# qhasm: in5 = 857760878
650# asm 1: mov $857760878,>in5=int64#4
651# asm 2: mov $857760878,>in5=%rcx
652mov $857760878,%rcx
653
654# qhasm: in10 = 2036477234
655# asm 1: mov $2036477234,>in10=int64#5
656# asm 2: mov $2036477234,>in10=%r8
657mov $2036477234,%r8
658
659# qhasm: in15 = 1797285236
660# asm 1: mov $1797285236,>in15=int64#7
661# asm 2: mov $1797285236,>in15=%rax
662mov $1797285236,%rax
663
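# note: 1634760805, 857760878, 2036477234 and 1797285236 are the four
# little-endian words of the Salsa20 constant "expand 32-byte k".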
664# qhasm: ((uint32 *)&x0)[0] = in0
665# asm 1: movl <in0=int64#3d,>x0=stack128#4
666# asm 2: movl <in0=%edx,>x0=48(%rsp)
667movl %edx,48(%rsp)
668
669# qhasm: ((uint32 *)&x0)[1] = in5
670# asm 1: movl <in5=int64#4d,4+<x0=stack128#4
671# asm 2: movl <in5=%ecx,4+<x0=48(%rsp)
672movl %ecx,4+48(%rsp)
673
674# qhasm: ((uint32 *)&x0)[2] = in10
675# asm 1: movl <in10=int64#5d,8+<x0=stack128#4
676# asm 2: movl <in10=%r8d,8+<x0=48(%rsp)
677movl %r8d,8+48(%rsp)
678
679# qhasm: ((uint32 *)&x0)[3] = in15
680# asm 1: movl <in15=int64#7d,12+<x0=stack128#4
681# asm 2: movl <in15=%eax,12+<x0=48(%rsp)
682movl %eax,12+48(%rsp)
683
684# qhasm: unsigned<? bytes - 256
685# asm 1: cmp $256,<bytes=int64#6
686# asm 2: cmp $256,<bytes=%r9
687cmp $256,%r9
688# comment:fp stack unchanged by jump
689
690# qhasm: goto bytesbetween1and255 if unsigned<
691jb ._bytesbetween1and255
692
693# qhasm: z0 = x0
694# asm 1: movdqa <x0=stack128#4,>z0=int6464#1
695# asm 2: movdqa <x0=48(%rsp),>z0=%xmm0
696movdqa 48(%rsp),%xmm0
697
698# qhasm: z5 = z0[1,1,1,1]
699# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
700# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
701pshufd $0x55,%xmm0,%xmm1
702
703# qhasm: z10 = z0[2,2,2,2]
704# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
705# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
706pshufd $0xaa,%xmm0,%xmm2
707
708# qhasm: z15 = z0[3,3,3,3]
709# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
710# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
711pshufd $0xff,%xmm0,%xmm3
712
713# qhasm: z0 = z0[0,0,0,0]
714# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
715# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
716pshufd $0x00,%xmm0,%xmm0
717
718# qhasm: orig5 = z5
719# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
720# asm 2: movdqa <z5=%xmm1,>orig5=64(%rsp)
721movdqa %xmm1,64(%rsp)
722
723# qhasm: orig10 = z10
724# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
725# asm 2: movdqa <z10=%xmm2,>orig10=80(%rsp)
726movdqa %xmm2,80(%rsp)
727
728# qhasm: orig15 = z15
729# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
730# asm 2: movdqa <z15=%xmm3,>orig15=96(%rsp)
731movdqa %xmm3,96(%rsp)
732
733# qhasm: orig0 = z0
734# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
735# asm 2: movdqa <z0=%xmm0,>orig0=112(%rsp)
736movdqa %xmm0,112(%rsp)
737
738# qhasm: z1 = x1
739# asm 1: movdqa <x1=stack128#1,>z1=int6464#1
740# asm 2: movdqa <x1=0(%rsp),>z1=%xmm0
741movdqa 0(%rsp),%xmm0
742
743# qhasm: z6 = z1[2,2,2,2]
744# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
745# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
746pshufd $0xaa,%xmm0,%xmm1
747
748# qhasm: z11 = z1[3,3,3,3]
749# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
750# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
751pshufd $0xff,%xmm0,%xmm2
752
753# qhasm: z12 = z1[0,0,0,0]
754# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
755# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
756pshufd $0x00,%xmm0,%xmm3
757
758# qhasm: z1 = z1[1,1,1,1]
759# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
760# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
761pshufd $0x55,%xmm0,%xmm0
762
763# qhasm: orig6 = z6
764# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
765# asm 2: movdqa <z6=%xmm1,>orig6=128(%rsp)
766movdqa %xmm1,128(%rsp)
767
768# qhasm: orig11 = z11
769# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
770# asm 2: movdqa <z11=%xmm2,>orig11=144(%rsp)
771movdqa %xmm2,144(%rsp)
772
773# qhasm: orig12 = z12
774# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
775# asm 2: movdqa <z12=%xmm3,>orig12=160(%rsp)
776movdqa %xmm3,160(%rsp)
777
778# qhasm: orig1 = z1
779# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
780# asm 2: movdqa <z1=%xmm0,>orig1=176(%rsp)
781movdqa %xmm0,176(%rsp)
782
783# qhasm: z2 = x2
784# asm 1: movdqa <x2=stack128#2,>z2=int6464#1
785# asm 2: movdqa <x2=16(%rsp),>z2=%xmm0
786movdqa 16(%rsp),%xmm0
787
788# qhasm: z7 = z2[3,3,3,3]
789# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
790# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
791pshufd $0xff,%xmm0,%xmm1
792
793# qhasm: z13 = z2[1,1,1,1]
794# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
795# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
796pshufd $0x55,%xmm0,%xmm2
797
798# qhasm: z2 = z2[2,2,2,2]
799# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
800# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
801pshufd $0xaa,%xmm0,%xmm0
802
803# qhasm: orig7 = z7
804# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
805# asm 2: movdqa <z7=%xmm1,>orig7=192(%rsp)
806movdqa %xmm1,192(%rsp)
807
808# qhasm: orig13 = z13
809# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
810# asm 2: movdqa <z13=%xmm2,>orig13=208(%rsp)
811movdqa %xmm2,208(%rsp)
812
813# qhasm: orig2 = z2
814# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
815# asm 2: movdqa <z2=%xmm0,>orig2=224(%rsp)
816movdqa %xmm0,224(%rsp)
817
818# qhasm: z3 = x3
819# asm 1: movdqa <x3=stack128#3,>z3=int6464#1
820# asm 2: movdqa <x3=32(%rsp),>z3=%xmm0
821movdqa 32(%rsp),%xmm0
822
823# qhasm: z4 = z3[0,0,0,0]
824# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
825# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
826pshufd $0x00,%xmm0,%xmm1
827
828# qhasm: z14 = z3[2,2,2,2]
829# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
830# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
831pshufd $0xaa,%xmm0,%xmm2
832
833# qhasm: z3 = z3[3,3,3,3]
834# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
835# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
836pshufd $0xff,%xmm0,%xmm0
837
838# qhasm: orig4 = z4
839# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
840# asm 2: movdqa <z4=%xmm1,>orig4=240(%rsp)
841movdqa %xmm1,240(%rsp)
842
843# qhasm: orig14 = z14
844# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
845# asm 2: movdqa <z14=%xmm2,>orig14=256(%rsp)
846movdqa %xmm2,256(%rsp)
847
848# qhasm: orig3 = z3
849# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
850# asm 2: movdqa <z3=%xmm0,>orig3=272(%rsp)
851movdqa %xmm0,272(%rsp)
852
853# qhasm: bytesatleast256:
854._bytesatleast256:
855
856# qhasm: in8 = ((uint32 *)&x2)[0]
857# asm 1: movl <x2=stack128#2,>in8=int64#3d
858# asm 2: movl <x2=16(%rsp),>in8=%edx
859movl 16(%rsp),%edx
860
861# qhasm: in9 = ((uint32 *)&x3)[1]
862# asm 1: movl 4+<x3=stack128#3,>in9=int64#4d
863# asm 2: movl 4+<x3=32(%rsp),>in9=%ecx
864movl 4+32(%rsp),%ecx
865
866# qhasm: ((uint32 *) &orig8)[0] = in8
867# asm 1: movl <in8=int64#3d,>orig8=stack128#19
868# asm 2: movl <in8=%edx,>orig8=288(%rsp)
869movl %edx,288(%rsp)
870
871# qhasm: ((uint32 *) &orig9)[0] = in9
872# asm 1: movl <in9=int64#4d,>orig9=stack128#20
873# asm 2: movl <in9=%ecx,>orig9=304(%rsp)
874movl %ecx,304(%rsp)
875
876# qhasm: in8 += 1
877# asm 1: add $1,<in8=int64#3
878# asm 2: add $1,<in8=%rdx
879add $1,%rdx
880
881# qhasm: in9 <<= 32
882# asm 1: shl $32,<in9=int64#4
883# asm 2: shl $32,<in9=%rcx
884shl $32,%rcx
885
886# qhasm: in8 += in9
887# asm 1: add <in9=int64#4,<in8=int64#3
888# asm 2: add <in9=%rcx,<in8=%rdx
889add %rcx,%rdx
890
891# qhasm: in9 = in8
892# asm 1: mov <in8=int64#3,>in9=int64#4
893# asm 2: mov <in8=%rdx,>in9=%rcx
894mov %rdx,%rcx
895
896# qhasm: (uint64) in9 >>= 32
897# asm 1: shr $32,<in9=int64#4
898# asm 2: shr $32,<in9=%rcx
899shr $32,%rcx
900
901# qhasm: ((uint32 *) &orig8)[1] = in8
902# asm 1: movl <in8=int64#3d,4+<orig8=stack128#19
903# asm 2: movl <in8=%edx,4+<orig8=288(%rsp)
904movl %edx,4+288(%rsp)
905
906# qhasm: ((uint32 *) &orig9)[1] = in9
907# asm 1: movl <in9=int64#4d,4+<orig9=stack128#20
908# asm 2: movl <in9=%ecx,4+<orig9=304(%rsp)
909movl %ecx,4+304(%rsp)
910
911# qhasm: in8 += 1
912# asm 1: add $1,<in8=int64#3
913# asm 2: add $1,<in8=%rdx
914add $1,%rdx
915
916# qhasm: in9 <<= 32
917# asm 1: shl $32,<in9=int64#4
918# asm 2: shl $32,<in9=%rcx
919shl $32,%rcx
920
921# qhasm: in8 += in9
922# asm 1: add <in9=int64#4,<in8=int64#3
923# asm 2: add <in9=%rcx,<in8=%rdx
924add %rcx,%rdx
925
926# qhasm: in9 = in8
927# asm 1: mov <in8=int64#3,>in9=int64#4
928# asm 2: mov <in8=%rdx,>in9=%rcx
929mov %rdx,%rcx
930
931# qhasm: (uint64) in9 >>= 32
932# asm 1: shr $32,<in9=int64#4
933# asm 2: shr $32,<in9=%rcx
934shr $32,%rcx
935
936# qhasm: ((uint32 *) &orig8)[2] = in8
937# asm 1: movl <in8=int64#3d,8+<orig8=stack128#19
938# asm 2: movl <in8=%edx,8+<orig8=288(%rsp)
939movl %edx,8+288(%rsp)
940
941# qhasm: ((uint32 *) &orig9)[2] = in9
942# asm 1: movl <in9=int64#4d,8+<orig9=stack128#20
943# asm 2: movl <in9=%ecx,8+<orig9=304(%rsp)
944movl %ecx,8+304(%rsp)
945
946# qhasm: in8 += 1
947# asm 1: add $1,<in8=int64#3
948# asm 2: add $1,<in8=%rdx
949add $1,%rdx
950
951# qhasm: in9 <<= 32
952# asm 1: shl $32,<in9=int64#4
953# asm 2: shl $32,<in9=%rcx
954shl $32,%rcx
955
956# qhasm: in8 += in9
957# asm 1: add <in9=int64#4,<in8=int64#3
958# asm 2: add <in9=%rcx,<in8=%rdx
959add %rcx,%rdx
960
961# qhasm: in9 = in8
962# asm 1: mov <in8=int64#3,>in9=int64#4
963# asm 2: mov <in8=%rdx,>in9=%rcx
964mov %rdx,%rcx
965
966# qhasm: (uint64) in9 >>= 32
967# asm 1: shr $32,<in9=int64#4
968# asm 2: shr $32,<in9=%rcx
969shr $32,%rcx
970
971# qhasm: ((uint32 *) &orig8)[3] = in8
972# asm 1: movl <in8=int64#3d,12+<orig8=stack128#19
973# asm 2: movl <in8=%edx,12+<orig8=288(%rsp)
974movl %edx,12+288(%rsp)
975
976# qhasm: ((uint32 *) &orig9)[3] = in9
977# asm 1: movl <in9=int64#4d,12+<orig9=stack128#20
978# asm 2: movl <in9=%ecx,12+<orig9=304(%rsp)
979movl %ecx,12+304(%rsp)
980
981# qhasm: in8 += 1
982# asm 1: add $1,<in8=int64#3
983# asm 2: add $1,<in8=%rdx
984add $1,%rdx
985
986# qhasm: in9 <<= 32
987# asm 1: shl $32,<in9=int64#4
988# asm 2: shl $32,<in9=%rcx
989shl $32,%rcx
990
991# qhasm: in8 += in9
992# asm 1: add <in9=int64#4,<in8=int64#3
993# asm 2: add <in9=%rcx,<in8=%rdx
994add %rcx,%rdx
995
996# qhasm: in9 = in8
997# asm 1: mov <in8=int64#3,>in9=int64#4
998# asm 2: mov <in8=%rdx,>in9=%rcx
999mov %rdx,%rcx
1000
1001# qhasm: (uint64) in9 >>= 32
1002# asm 1: shr $32,<in9=int64#4
1003# asm 2: shr $32,<in9=%rcx
1004shr $32,%rcx
1005
1006# qhasm: ((uint32 *)&x2)[0] = in8
1007# asm 1: movl <in8=int64#3d,>x2=stack128#2
1008# asm 2: movl <in8=%edx,>x2=16(%rsp)
1009movl %edx,16(%rsp)
1010
1011# qhasm: ((uint32 *)&x3)[1] = in9
1012# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
1013# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
1014movl %ecx,4+32(%rsp)
1015
1016# qhasm: bytes_backup = bytes
1017# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
1018# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
1019movq %r9,408(%rsp)
1020
1021# qhasm: i = 20
1022# asm 1: mov $20,>i=int64#3
1023# asm 2: mov $20,>i=%rdx
1024mov $20,%rdx
1025
1026# qhasm: z5 = orig5
1027# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
1028# asm 2: movdqa <orig5=64(%rsp),>z5=%xmm0
1029movdqa 64(%rsp),%xmm0
1030
1031# qhasm: z10 = orig10
1032# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
1033# asm 2: movdqa <orig10=80(%rsp),>z10=%xmm1
1034movdqa 80(%rsp),%xmm1
1035
1036# qhasm: z15 = orig15
1037# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
1038# asm 2: movdqa <orig15=96(%rsp),>z15=%xmm2
1039movdqa 96(%rsp),%xmm2
1040
1041# qhasm: z14 = orig14
1042# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
1043# asm 2: movdqa <orig14=256(%rsp),>z14=%xmm3
1044movdqa 256(%rsp),%xmm3
1045
1046# qhasm: z3 = orig3
1047# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
1048# asm 2: movdqa <orig3=272(%rsp),>z3=%xmm4
1049movdqa 272(%rsp),%xmm4
1050
1051# qhasm: z6 = orig6
1052# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
1053# asm 2: movdqa <orig6=128(%rsp),>z6=%xmm5
1054movdqa 128(%rsp),%xmm5
1055
1056# qhasm: z11 = orig11
1057# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
1058# asm 2: movdqa <orig11=144(%rsp),>z11=%xmm6
1059movdqa 144(%rsp),%xmm6
1060
1061# qhasm: z1 = orig1
1062# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
1063# asm 2: movdqa <orig1=176(%rsp),>z1=%xmm7
1064movdqa 176(%rsp),%xmm7
1065
1066# qhasm: z7 = orig7
1067# asm 1: movdqa <orig7=stack128#13,>z7=int6464#9
1068# asm 2: movdqa <orig7=192(%rsp),>z7=%xmm8
1069movdqa 192(%rsp),%xmm8
1070
1071# qhasm: z13 = orig13
1072# asm 1: movdqa <orig13=stack128#14,>z13=int6464#10
1073# asm 2: movdqa <orig13=208(%rsp),>z13=%xmm9
1074movdqa 208(%rsp),%xmm9
1075
1076# qhasm: z2 = orig2
1077# asm 1: movdqa <orig2=stack128#15,>z2=int6464#11
1078# asm 2: movdqa <orig2=224(%rsp),>z2=%xmm10
1079movdqa 224(%rsp),%xmm10
1080
1081# qhasm: z9 = orig9
1082# asm 1: movdqa <orig9=stack128#20,>z9=int6464#12
1083# asm 2: movdqa <orig9=304(%rsp),>z9=%xmm11
1084movdqa 304(%rsp),%xmm11
1085
1086# qhasm: z0 = orig0
1087# asm 1: movdqa <orig0=stack128#8,>z0=int6464#13
1088# asm 2: movdqa <orig0=112(%rsp),>z0=%xmm12
1089movdqa 112(%rsp),%xmm12
1090
1091# qhasm: z12 = orig12
1092# asm 1: movdqa <orig12=stack128#11,>z12=int6464#14
1093# asm 2: movdqa <orig12=160(%rsp),>z12=%xmm13
1094movdqa 160(%rsp),%xmm13
1095
1096# qhasm: z4 = orig4
1097# asm 1: movdqa <orig4=stack128#16,>z4=int6464#15
1098# asm 2: movdqa <orig4=240(%rsp),>z4=%xmm14
1099movdqa 240(%rsp),%xmm14
1100
1101# qhasm: z8 = orig8
1102# asm 1: movdqa <orig8=stack128#19,>z8=int6464#16
1103# asm 2: movdqa <orig8=288(%rsp),>z8=%xmm15
1104movdqa 288(%rsp),%xmm15
1105
1106# qhasm: mainloop1:
1107._mainloop1:
1108
1109# qhasm: z10_stack = z10
1110# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1111# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1112movdqa %xmm1,320(%rsp)
1113
1114# qhasm: z15_stack = z15
1115# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1116# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1117movdqa %xmm2,336(%rsp)
1118
1119# qhasm: y4 = z12
1120# asm 1: movdqa <z12=int6464#14,>y4=int6464#2
1121# asm 2: movdqa <z12=%xmm13,>y4=%xmm1
1122movdqa %xmm13,%xmm1
1123
1124# qhasm: uint32323232 y4 += z0
1125# asm 1: paddd <z0=int6464#13,<y4=int6464#2
1126# asm 2: paddd <z0=%xmm12,<y4=%xmm1
1127paddd %xmm12,%xmm1
1128
1129# qhasm: r4 = y4
1130# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1131# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1132movdqa %xmm1,%xmm2
1133
1134# qhasm: uint32323232 y4 <<= 7
1135# asm 1: pslld $7,<y4=int6464#2
1136# asm 2: pslld $7,<y4=%xmm1
1137pslld $7,%xmm1
1138
1139# qhasm: z4 ^= y4
1140# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1141# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1142pxor %xmm1,%xmm14
1143
1144# qhasm: uint32323232 r4 >>= 25
1145# asm 1: psrld $25,<r4=int6464#3
1146# asm 2: psrld $25,<r4=%xmm2
1147psrld $25,%xmm2
1148
1149# qhasm: z4 ^= r4
1150# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1151# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1152pxor %xmm2,%xmm14
1153
1154# qhasm: y9 = z1
1155# asm 1: movdqa <z1=int6464#8,>y9=int6464#2
1156# asm 2: movdqa <z1=%xmm7,>y9=%xmm1
1157movdqa %xmm7,%xmm1
1158
1159# qhasm: uint32323232 y9 += z5
1160# asm 1: paddd <z5=int6464#1,<y9=int6464#2
1161# asm 2: paddd <z5=%xmm0,<y9=%xmm1
1162paddd %xmm0,%xmm1
1163
1164# qhasm: r9 = y9
1165# asm 1: movdqa <y9=int6464#2,>r9=int6464#3
1166# asm 2: movdqa <y9=%xmm1,>r9=%xmm2
1167movdqa %xmm1,%xmm2
1168
1169# qhasm: uint32323232 y9 <<= 7
1170# asm 1: pslld $7,<y9=int6464#2
1171# asm 2: pslld $7,<y9=%xmm1
1172pslld $7,%xmm1
1173
1174# qhasm: z9 ^= y9
1175# asm 1: pxor <y9=int6464#2,<z9=int6464#12
1176# asm 2: pxor <y9=%xmm1,<z9=%xmm11
1177pxor %xmm1,%xmm11
1178
1179# qhasm: uint32323232 r9 >>= 25
1180# asm 1: psrld $25,<r9=int6464#3
1181# asm 2: psrld $25,<r9=%xmm2
1182psrld $25,%xmm2
1183
1184# qhasm: z9 ^= r9
1185# asm 1: pxor <r9=int6464#3,<z9=int6464#12
1186# asm 2: pxor <r9=%xmm2,<z9=%xmm11
1187pxor %xmm2,%xmm11
1188
1189# qhasm: y8 = z0
1190# asm 1: movdqa <z0=int6464#13,>y8=int6464#2
1191# asm 2: movdqa <z0=%xmm12,>y8=%xmm1
1192movdqa %xmm12,%xmm1
1193
1194# qhasm: uint32323232 y8 += z4
1195# asm 1: paddd <z4=int6464#15,<y8=int6464#2
1196# asm 2: paddd <z4=%xmm14,<y8=%xmm1
1197paddd %xmm14,%xmm1
1198
1199# qhasm: r8 = y8
1200# asm 1: movdqa <y8=int6464#2,>r8=int6464#3
1201# asm 2: movdqa <y8=%xmm1,>r8=%xmm2
1202movdqa %xmm1,%xmm2
1203
1204# qhasm: uint32323232 y8 <<= 9
1205# asm 1: pslld $9,<y8=int6464#2
1206# asm 2: pslld $9,<y8=%xmm1
1207pslld $9,%xmm1
1208
1209# qhasm: z8 ^= y8
1210# asm 1: pxor <y8=int6464#2,<z8=int6464#16
1211# asm 2: pxor <y8=%xmm1,<z8=%xmm15
1212pxor %xmm1,%xmm15
1213
1214# qhasm: uint32323232 r8 >>= 23
1215# asm 1: psrld $23,<r8=int6464#3
1216# asm 2: psrld $23,<r8=%xmm2
1217psrld $23,%xmm2
1218
1219# qhasm: z8 ^= r8
1220# asm 1: pxor <r8=int6464#3,<z8=int6464#16
1221# asm 2: pxor <r8=%xmm2,<z8=%xmm15
1222pxor %xmm2,%xmm15
1223
1224# qhasm: y13 = z5
1225# asm 1: movdqa <z5=int6464#1,>y13=int6464#2
1226# asm 2: movdqa <z5=%xmm0,>y13=%xmm1
1227movdqa %xmm0,%xmm1
1228
1229# qhasm: uint32323232 y13 += z9
1230# asm 1: paddd <z9=int6464#12,<y13=int6464#2
1231# asm 2: paddd <z9=%xmm11,<y13=%xmm1
1232paddd %xmm11,%xmm1
1233
1234# qhasm: r13 = y13
1235# asm 1: movdqa <y13=int6464#2,>r13=int6464#3
1236# asm 2: movdqa <y13=%xmm1,>r13=%xmm2
1237movdqa %xmm1,%xmm2
1238
1239# qhasm: uint32323232 y13 <<= 9
1240# asm 1: pslld $9,<y13=int6464#2
1241# asm 2: pslld $9,<y13=%xmm1
1242pslld $9,%xmm1
1243
1244# qhasm: z13 ^= y13
1245# asm 1: pxor <y13=int6464#2,<z13=int6464#10
1246# asm 2: pxor <y13=%xmm1,<z13=%xmm9
1247pxor %xmm1,%xmm9
1248
1249# qhasm: uint32323232 r13 >>= 23
1250# asm 1: psrld $23,<r13=int6464#3
1251# asm 2: psrld $23,<r13=%xmm2
1252psrld $23,%xmm2
1253
1254# qhasm: z13 ^= r13
1255# asm 1: pxor <r13=int6464#3,<z13=int6464#10
1256# asm 2: pxor <r13=%xmm2,<z13=%xmm9
1257pxor %xmm2,%xmm9
1258
1259# qhasm: y12 = z4
1260# asm 1: movdqa <z4=int6464#15,>y12=int6464#2
1261# asm 2: movdqa <z4=%xmm14,>y12=%xmm1
1262movdqa %xmm14,%xmm1
1263
1264# qhasm: uint32323232 y12 += z8
1265# asm 1: paddd <z8=int6464#16,<y12=int6464#2
1266# asm 2: paddd <z8=%xmm15,<y12=%xmm1
1267paddd %xmm15,%xmm1
1268
1269# qhasm: r12 = y12
1270# asm 1: movdqa <y12=int6464#2,>r12=int6464#3
1271# asm 2: movdqa <y12=%xmm1,>r12=%xmm2
1272movdqa %xmm1,%xmm2
1273
1274# qhasm: uint32323232 y12 <<= 13
1275# asm 1: pslld $13,<y12=int6464#2
1276# asm 2: pslld $13,<y12=%xmm1
1277pslld $13,%xmm1
1278
1279# qhasm: z12 ^= y12
1280# asm 1: pxor <y12=int6464#2,<z12=int6464#14
1281# asm 2: pxor <y12=%xmm1,<z12=%xmm13
1282pxor %xmm1,%xmm13
1283
1284# qhasm: uint32323232 r12 >>= 19
1285# asm 1: psrld $19,<r12=int6464#3
1286# asm 2: psrld $19,<r12=%xmm2
1287psrld $19,%xmm2
1288
1289# qhasm: z12 ^= r12
1290# asm 1: pxor <r12=int6464#3,<z12=int6464#14
1291# asm 2: pxor <r12=%xmm2,<z12=%xmm13
1292pxor %xmm2,%xmm13
1293
1294# qhasm: y1 = z9
1295# asm 1: movdqa <z9=int6464#12,>y1=int6464#2
1296# asm 2: movdqa <z9=%xmm11,>y1=%xmm1
1297movdqa %xmm11,%xmm1
1298
1299# qhasm: uint32323232 y1 += z13
1300# asm 1: paddd <z13=int6464#10,<y1=int6464#2
1301# asm 2: paddd <z13=%xmm9,<y1=%xmm1
1302paddd %xmm9,%xmm1
1303
1304# qhasm: r1 = y1
1305# asm 1: movdqa <y1=int6464#2,>r1=int6464#3
1306# asm 2: movdqa <y1=%xmm1,>r1=%xmm2
1307movdqa %xmm1,%xmm2
1308
1309# qhasm: uint32323232 y1 <<= 13
1310# asm 1: pslld $13,<y1=int6464#2
1311# asm 2: pslld $13,<y1=%xmm1
1312pslld $13,%xmm1
1313
1314# qhasm: z1 ^= y1
1315# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1316# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1317pxor %xmm1,%xmm7
1318
1319# qhasm: uint32323232 r1 >>= 19
1320# asm 1: psrld $19,<r1=int6464#3
1321# asm 2: psrld $19,<r1=%xmm2
1322psrld $19,%xmm2
1323
1324# qhasm: z1 ^= r1
1325# asm 1: pxor <r1=int6464#3,<z1=int6464#8
1326# asm 2: pxor <r1=%xmm2,<z1=%xmm7
1327pxor %xmm2,%xmm7
1328
1329# qhasm: y0 = z8
1330# asm 1: movdqa <z8=int6464#16,>y0=int6464#2
1331# asm 2: movdqa <z8=%xmm15,>y0=%xmm1
1332movdqa %xmm15,%xmm1
1333
1334# qhasm: uint32323232 y0 += z12
1335# asm 1: paddd <z12=int6464#14,<y0=int6464#2
1336# asm 2: paddd <z12=%xmm13,<y0=%xmm1
1337paddd %xmm13,%xmm1
1338
1339# qhasm: r0 = y0
1340# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1341# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1342movdqa %xmm1,%xmm2
1343
1344# qhasm: uint32323232 y0 <<= 18
1345# asm 1: pslld $18,<y0=int6464#2
1346# asm 2: pslld $18,<y0=%xmm1
1347pslld $18,%xmm1
1348
1349# qhasm: z0 ^= y0
1350# asm 1: pxor <y0=int6464#2,<z0=int6464#13
1351# asm 2: pxor <y0=%xmm1,<z0=%xmm12
1352pxor %xmm1,%xmm12
1353
1354# qhasm: uint32323232 r0 >>= 14
1355# asm 1: psrld $14,<r0=int6464#3
1356# asm 2: psrld $14,<r0=%xmm2
1357psrld $14,%xmm2
1358
1359# qhasm: z0 ^= r0
1360# asm 1: pxor <r0=int6464#3,<z0=int6464#13
1361# asm 2: pxor <r0=%xmm2,<z0=%xmm12
1362pxor %xmm2,%xmm12
1363
1364# qhasm: z10 = z10_stack
1365# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1366# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1367movdqa 320(%rsp),%xmm1
1368
1369# qhasm: z0_stack = z0
1370# asm 1: movdqa <z0=int6464#13,>z0_stack=stack128#21
1371# asm 2: movdqa <z0=%xmm12,>z0_stack=320(%rsp)
1372movdqa %xmm12,320(%rsp)
1373
1374# qhasm: y5 = z13
1375# asm 1: movdqa <z13=int6464#10,>y5=int6464#3
1376# asm 2: movdqa <z13=%xmm9,>y5=%xmm2
1377movdqa %xmm9,%xmm2
1378
1379# qhasm: uint32323232 y5 += z1
1380# asm 1: paddd <z1=int6464#8,<y5=int6464#3
1381# asm 2: paddd <z1=%xmm7,<y5=%xmm2
1382paddd %xmm7,%xmm2
1383
1384# qhasm: r5 = y5
1385# asm 1: movdqa <y5=int6464#3,>r5=int6464#13
1386# asm 2: movdqa <y5=%xmm2,>r5=%xmm12
1387movdqa %xmm2,%xmm12
1388
1389# qhasm: uint32323232 y5 <<= 18
1390# asm 1: pslld $18,<y5=int6464#3
1391# asm 2: pslld $18,<y5=%xmm2
1392pslld $18,%xmm2
1393
1394# qhasm: z5 ^= y5
1395# asm 1: pxor <y5=int6464#3,<z5=int6464#1
1396# asm 2: pxor <y5=%xmm2,<z5=%xmm0
1397pxor %xmm2,%xmm0
1398
1399# qhasm: uint32323232 r5 >>= 14
1400# asm 1: psrld $14,<r5=int6464#13
1401# asm 2: psrld $14,<r5=%xmm12
1402psrld $14,%xmm12
1403
1404# qhasm: z5 ^= r5
1405# asm 1: pxor <r5=int6464#13,<z5=int6464#1
1406# asm 2: pxor <r5=%xmm12,<z5=%xmm0
1407pxor %xmm12,%xmm0
1408
1409# qhasm: y14 = z6
1410# asm 1: movdqa <z6=int6464#6,>y14=int6464#3
1411# asm 2: movdqa <z6=%xmm5,>y14=%xmm2
1412movdqa %xmm5,%xmm2
1413
1414# qhasm: uint32323232 y14 += z10
1415# asm 1: paddd <z10=int6464#2,<y14=int6464#3
1416# asm 2: paddd <z10=%xmm1,<y14=%xmm2
1417paddd %xmm1,%xmm2
1418
1419# qhasm: r14 = y14
1420# asm 1: movdqa <y14=int6464#3,>r14=int6464#13
1421# asm 2: movdqa <y14=%xmm2,>r14=%xmm12
1422movdqa %xmm2,%xmm12
1423
1424# qhasm: uint32323232 y14 <<= 7
1425# asm 1: pslld $7,<y14=int6464#3
1426# asm 2: pslld $7,<y14=%xmm2
1427pslld $7,%xmm2
1428
1429# qhasm: z14 ^= y14
1430# asm 1: pxor <y14=int6464#3,<z14=int6464#4
1431# asm 2: pxor <y14=%xmm2,<z14=%xmm3
1432pxor %xmm2,%xmm3
1433
1434# qhasm: uint32323232 r14 >>= 25
1435# asm 1: psrld $25,<r14=int6464#13
1436# asm 2: psrld $25,<r14=%xmm12
1437psrld $25,%xmm12
1438
1439# qhasm: z14 ^= r14
1440# asm 1: pxor <r14=int6464#13,<z14=int6464#4
1441# asm 2: pxor <r14=%xmm12,<z14=%xmm3
1442pxor %xmm12,%xmm3
1443
1444# qhasm: z15 = z15_stack
1445# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
1446# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
1447movdqa 336(%rsp),%xmm2
1448
1449# qhasm: z5_stack = z5
1450# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#22
1451# asm 2: movdqa <z5=%xmm0,>z5_stack=336(%rsp)
1452movdqa %xmm0,336(%rsp)
1453
1454# qhasm: y3 = z11
1455# asm 1: movdqa <z11=int6464#7,>y3=int6464#1
1456# asm 2: movdqa <z11=%xmm6,>y3=%xmm0
1457movdqa %xmm6,%xmm0
1458
1459# qhasm: uint32323232 y3 += z15
1460# asm 1: paddd <z15=int6464#3,<y3=int6464#1
1461# asm 2: paddd <z15=%xmm2,<y3=%xmm0
1462paddd %xmm2,%xmm0
1463
1464# qhasm: r3 = y3
1465# asm 1: movdqa <y3=int6464#1,>r3=int6464#13
1466# asm 2: movdqa <y3=%xmm0,>r3=%xmm12
1467movdqa %xmm0,%xmm12
1468
1469# qhasm: uint32323232 y3 <<= 7
1470# asm 1: pslld $7,<y3=int6464#1
1471# asm 2: pslld $7,<y3=%xmm0
1472pslld $7,%xmm0
1473
1474# qhasm: z3 ^= y3
1475# asm 1: pxor <y3=int6464#1,<z3=int6464#5
1476# asm 2: pxor <y3=%xmm0,<z3=%xmm4
1477pxor %xmm0,%xmm4
1478
1479# qhasm: uint32323232 r3 >>= 25
1480# asm 1: psrld $25,<r3=int6464#13
1481# asm 2: psrld $25,<r3=%xmm12
1482psrld $25,%xmm12
1483
1484# qhasm: z3 ^= r3
1485# asm 1: pxor <r3=int6464#13,<z3=int6464#5
1486# asm 2: pxor <r3=%xmm12,<z3=%xmm4
1487pxor %xmm12,%xmm4
1488
1489# qhasm: y2 = z10
1490# asm 1: movdqa <z10=int6464#2,>y2=int6464#1
1491# asm 2: movdqa <z10=%xmm1,>y2=%xmm0
1492movdqa %xmm1,%xmm0
1493
1494# qhasm: uint32323232 y2 += z14
1495# asm 1: paddd <z14=int6464#4,<y2=int6464#1
1496# asm 2: paddd <z14=%xmm3,<y2=%xmm0
1497paddd %xmm3,%xmm0
1498
1499# qhasm: r2 = y2
1500# asm 1: movdqa <y2=int6464#1,>r2=int6464#13
1501# asm 2: movdqa <y2=%xmm0,>r2=%xmm12
1502movdqa %xmm0,%xmm12
1503
1504# qhasm: uint32323232 y2 <<= 9
1505# asm 1: pslld $9,<y2=int6464#1
1506# asm 2: pslld $9,<y2=%xmm0
1507pslld $9,%xmm0
1508
1509# qhasm: z2 ^= y2
1510# asm 1: pxor <y2=int6464#1,<z2=int6464#11
1511# asm 2: pxor <y2=%xmm0,<z2=%xmm10
1512pxor %xmm0,%xmm10
1513
1514# qhasm: uint32323232 r2 >>= 23
1515# asm 1: psrld $23,<r2=int6464#13
1516# asm 2: psrld $23,<r2=%xmm12
1517psrld $23,%xmm12
1518
1519# qhasm: z2 ^= r2
1520# asm 1: pxor <r2=int6464#13,<z2=int6464#11
1521# asm 2: pxor <r2=%xmm12,<z2=%xmm10
1522pxor %xmm12,%xmm10
1523
1524# qhasm: y7 = z15
1525# asm 1: movdqa <z15=int6464#3,>y7=int6464#1
1526# asm 2: movdqa <z15=%xmm2,>y7=%xmm0
1527movdqa %xmm2,%xmm0
1528
1529# qhasm: uint32323232 y7 += z3
1530# asm 1: paddd <z3=int6464#5,<y7=int6464#1
1531# asm 2: paddd <z3=%xmm4,<y7=%xmm0
1532paddd %xmm4,%xmm0
1533
1534# qhasm: r7 = y7
1535# asm 1: movdqa <y7=int6464#1,>r7=int6464#13
1536# asm 2: movdqa <y7=%xmm0,>r7=%xmm12
1537movdqa %xmm0,%xmm12
1538
1539# qhasm: uint32323232 y7 <<= 9
1540# asm 1: pslld $9,<y7=int6464#1
1541# asm 2: pslld $9,<y7=%xmm0
1542pslld $9,%xmm0
1543
1544# qhasm: z7 ^= y7
1545# asm 1: pxor <y7=int6464#1,<z7=int6464#9
1546# asm 2: pxor <y7=%xmm0,<z7=%xmm8
1547pxor %xmm0,%xmm8
1548
1549# qhasm: uint32323232 r7 >>= 23
1550# asm 1: psrld $23,<r7=int6464#13
1551# asm 2: psrld $23,<r7=%xmm12
1552psrld $23,%xmm12
1553
1554# qhasm: z7 ^= r7
1555# asm 1: pxor <r7=int6464#13,<z7=int6464#9
1556# asm 2: pxor <r7=%xmm12,<z7=%xmm8
1557pxor %xmm12,%xmm8
1558
1559# qhasm: y6 = z14
1560# asm 1: movdqa <z14=int6464#4,>y6=int6464#1
1561# asm 2: movdqa <z14=%xmm3,>y6=%xmm0
1562movdqa %xmm3,%xmm0
1563
1564# qhasm: uint32323232 y6 += z2
1565# asm 1: paddd <z2=int6464#11,<y6=int6464#1
1566# asm 2: paddd <z2=%xmm10,<y6=%xmm0
1567paddd %xmm10,%xmm0
1568
1569# qhasm: r6 = y6
1570# asm 1: movdqa <y6=int6464#1,>r6=int6464#13
1571# asm 2: movdqa <y6=%xmm0,>r6=%xmm12
1572movdqa %xmm0,%xmm12
1573
1574# qhasm: uint32323232 y6 <<= 13
1575# asm 1: pslld $13,<y6=int6464#1
1576# asm 2: pslld $13,<y6=%xmm0
1577pslld $13,%xmm0
1578
1579# qhasm: z6 ^= y6
1580# asm 1: pxor <y6=int6464#1,<z6=int6464#6
1581# asm 2: pxor <y6=%xmm0,<z6=%xmm5
1582pxor %xmm0,%xmm5
1583
1584# qhasm: uint32323232 r6 >>= 19
1585# asm 1: psrld $19,<r6=int6464#13
1586# asm 2: psrld $19,<r6=%xmm12
1587psrld $19,%xmm12
1588
1589# qhasm: z6 ^= r6
1590# asm 1: pxor <r6=int6464#13,<z6=int6464#6
1591# asm 2: pxor <r6=%xmm12,<z6=%xmm5
1592pxor %xmm12,%xmm5
1593
1594# qhasm: y11 = z3
1595# asm 1: movdqa <z3=int6464#5,>y11=int6464#1
1596# asm 2: movdqa <z3=%xmm4,>y11=%xmm0
1597movdqa %xmm4,%xmm0
1598
1599# qhasm: uint32323232 y11 += z7
1600# asm 1: paddd <z7=int6464#9,<y11=int6464#1
1601# asm 2: paddd <z7=%xmm8,<y11=%xmm0
1602paddd %xmm8,%xmm0
1603
1604# qhasm: r11 = y11
1605# asm 1: movdqa <y11=int6464#1,>r11=int6464#13
1606# asm 2: movdqa <y11=%xmm0,>r11=%xmm12
1607movdqa %xmm0,%xmm12
1608
1609# qhasm: uint32323232 y11 <<= 13
1610# asm 1: pslld $13,<y11=int6464#1
1611# asm 2: pslld $13,<y11=%xmm0
1612pslld $13,%xmm0
1613
1614# qhasm: z11 ^= y11
1615# asm 1: pxor <y11=int6464#1,<z11=int6464#7
1616# asm 2: pxor <y11=%xmm0,<z11=%xmm6
1617pxor %xmm0,%xmm6
1618
1619# qhasm: uint32323232 r11 >>= 19
1620# asm 1: psrld $19,<r11=int6464#13
1621# asm 2: psrld $19,<r11=%xmm12
1622psrld $19,%xmm12
1623
1624# qhasm: z11 ^= r11
1625# asm 1: pxor <r11=int6464#13,<z11=int6464#7
1626# asm 2: pxor <r11=%xmm12,<z11=%xmm6
1627pxor %xmm12,%xmm6
1628
1629# qhasm: y10 = z2
1630# asm 1: movdqa <z2=int6464#11,>y10=int6464#1
1631# asm 2: movdqa <z2=%xmm10,>y10=%xmm0
1632movdqa %xmm10,%xmm0
1633
1634# qhasm: uint32323232 y10 += z6
1635# asm 1: paddd <z6=int6464#6,<y10=int6464#1
1636# asm 2: paddd <z6=%xmm5,<y10=%xmm0
1637paddd %xmm5,%xmm0
1638
1639# qhasm: r10 = y10
1640# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
1641# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
1642movdqa %xmm0,%xmm12
1643
1644# qhasm: uint32323232 y10 <<= 18
1645# asm 1: pslld $18,<y10=int6464#1
1646# asm 2: pslld $18,<y10=%xmm0
1647pslld $18,%xmm0
1648
1649# qhasm: z10 ^= y10
1650# asm 1: pxor <y10=int6464#1,<z10=int6464#2
1651# asm 2: pxor <y10=%xmm0,<z10=%xmm1
1652pxor %xmm0,%xmm1
1653
1654# qhasm: uint32323232 r10 >>= 14
1655# asm 1: psrld $14,<r10=int6464#13
1656# asm 2: psrld $14,<r10=%xmm12
1657psrld $14,%xmm12
1658
1659# qhasm: z10 ^= r10
1660# asm 1: pxor <r10=int6464#13,<z10=int6464#2
1661# asm 2: pxor <r10=%xmm12,<z10=%xmm1
1662pxor %xmm12,%xmm1
1663
1664# qhasm: z0 = z0_stack
1665# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#1
1666# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm0
1667movdqa 320(%rsp),%xmm0
1668
1669# qhasm: z10_stack = z10
1670# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1671# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1672movdqa %xmm1,320(%rsp)
1673
1674# qhasm: y1 = z3
1675# asm 1: movdqa <z3=int6464#5,>y1=int6464#2
1676# asm 2: movdqa <z3=%xmm4,>y1=%xmm1
1677movdqa %xmm4,%xmm1
1678
1679# qhasm: uint32323232 y1 += z0
1680# asm 1: paddd <z0=int6464#1,<y1=int6464#2
1681# asm 2: paddd <z0=%xmm0,<y1=%xmm1
1682paddd %xmm0,%xmm1
1683
1684# qhasm: r1 = y1
1685# asm 1: movdqa <y1=int6464#2,>r1=int6464#13
1686# asm 2: movdqa <y1=%xmm1,>r1=%xmm12
1687movdqa %xmm1,%xmm12
1688
1689# qhasm: uint32323232 y1 <<= 7
1690# asm 1: pslld $7,<y1=int6464#2
1691# asm 2: pslld $7,<y1=%xmm1
1692pslld $7,%xmm1
1693
1694# qhasm: z1 ^= y1
1695# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1696# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1697pxor %xmm1,%xmm7
1698
1699# qhasm: uint32323232 r1 >>= 25
1700# asm 1: psrld $25,<r1=int6464#13
1701# asm 2: psrld $25,<r1=%xmm12
1702psrld $25,%xmm12
1703
1704# qhasm: z1 ^= r1
1705# asm 1: pxor <r1=int6464#13,<z1=int6464#8
1706# asm 2: pxor <r1=%xmm12,<z1=%xmm7
1707pxor %xmm12,%xmm7
1708
1709# qhasm: y15 = z7
1710# asm 1: movdqa <z7=int6464#9,>y15=int6464#2
1711# asm 2: movdqa <z7=%xmm8,>y15=%xmm1
1712movdqa %xmm8,%xmm1
1713
1714# qhasm: uint32323232 y15 += z11
1715# asm 1: paddd <z11=int6464#7,<y15=int6464#2
1716# asm 2: paddd <z11=%xmm6,<y15=%xmm1
1717paddd %xmm6,%xmm1
1718
1719# qhasm: r15 = y15
1720# asm 1: movdqa <y15=int6464#2,>r15=int6464#13
1721# asm 2: movdqa <y15=%xmm1,>r15=%xmm12
1722movdqa %xmm1,%xmm12
1723
1724# qhasm: uint32323232 y15 <<= 18
1725# asm 1: pslld $18,<y15=int6464#2
1726# asm 2: pslld $18,<y15=%xmm1
1727pslld $18,%xmm1
1728
1729# qhasm: z15 ^= y15
1730# asm 1: pxor <y15=int6464#2,<z15=int6464#3
1731# asm 2: pxor <y15=%xmm1,<z15=%xmm2
1732pxor %xmm1,%xmm2
1733
1734# qhasm: uint32323232 r15 >>= 14
1735# asm 1: psrld $14,<r15=int6464#13
1736# asm 2: psrld $14,<r15=%xmm12
1737psrld $14,%xmm12
1738
1739# qhasm: z15 ^= r15
1740# asm 1: pxor <r15=int6464#13,<z15=int6464#3
1741# asm 2: pxor <r15=%xmm12,<z15=%xmm2
1742pxor %xmm12,%xmm2
1743
1744# qhasm: z5 = z5_stack
1745# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#13
1746# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm12
1747movdqa 336(%rsp),%xmm12
1748
1749# qhasm: z15_stack = z15
1750# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1751# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1752movdqa %xmm2,336(%rsp)
1753
1754# qhasm: y6 = z4
1755# asm 1: movdqa <z4=int6464#15,>y6=int6464#2
1756# asm 2: movdqa <z4=%xmm14,>y6=%xmm1
1757movdqa %xmm14,%xmm1
1758
1759# qhasm: uint32323232 y6 += z5
1760# asm 1: paddd <z5=int6464#13,<y6=int6464#2
1761# asm 2: paddd <z5=%xmm12,<y6=%xmm1
1762paddd %xmm12,%xmm1
1763
1764# qhasm: r6 = y6
1765# asm 1: movdqa <y6=int6464#2,>r6=int6464#3
1766# asm 2: movdqa <y6=%xmm1,>r6=%xmm2
1767movdqa %xmm1,%xmm2
1768
1769# qhasm: uint32323232 y6 <<= 7
1770# asm 1: pslld $7,<y6=int6464#2
1771# asm 2: pslld $7,<y6=%xmm1
1772pslld $7,%xmm1
1773
1774# qhasm: z6 ^= y6
1775# asm 1: pxor <y6=int6464#2,<z6=int6464#6
1776# asm 2: pxor <y6=%xmm1,<z6=%xmm5
1777pxor %xmm1,%xmm5
1778
1779# qhasm: uint32323232 r6 >>= 25
1780# asm 1: psrld $25,<r6=int6464#3
1781# asm 2: psrld $25,<r6=%xmm2
1782psrld $25,%xmm2
1783
1784# qhasm: z6 ^= r6
1785# asm 1: pxor <r6=int6464#3,<z6=int6464#6
1786# asm 2: pxor <r6=%xmm2,<z6=%xmm5
1787pxor %xmm2,%xmm5
1788
1789# qhasm: y2 = z0
1790# asm 1: movdqa <z0=int6464#1,>y2=int6464#2
1791# asm 2: movdqa <z0=%xmm0,>y2=%xmm1
1792movdqa %xmm0,%xmm1
1793
1794# qhasm: uint32323232 y2 += z1
1795# asm 1: paddd <z1=int6464#8,<y2=int6464#2
1796# asm 2: paddd <z1=%xmm7,<y2=%xmm1
1797paddd %xmm7,%xmm1
1798
1799# qhasm: r2 = y2
1800# asm 1: movdqa <y2=int6464#2,>r2=int6464#3
1801# asm 2: movdqa <y2=%xmm1,>r2=%xmm2
1802movdqa %xmm1,%xmm2
1803
1804# qhasm: uint32323232 y2 <<= 9
1805# asm 1: pslld $9,<y2=int6464#2
1806# asm 2: pslld $9,<y2=%xmm1
1807pslld $9,%xmm1
1808
1809# qhasm: z2 ^= y2
1810# asm 1: pxor <y2=int6464#2,<z2=int6464#11
1811# asm 2: pxor <y2=%xmm1,<z2=%xmm10
1812pxor %xmm1,%xmm10
1813
1814# qhasm: uint32323232 r2 >>= 23
1815# asm 1: psrld $23,<r2=int6464#3
1816# asm 2: psrld $23,<r2=%xmm2
1817psrld $23,%xmm2
1818
1819# qhasm: z2 ^= r2
1820# asm 1: pxor <r2=int6464#3,<z2=int6464#11
1821# asm 2: pxor <r2=%xmm2,<z2=%xmm10
1822pxor %xmm2,%xmm10
1823
1824# qhasm: y7 = z5
1825# asm 1: movdqa <z5=int6464#13,>y7=int6464#2
1826# asm 2: movdqa <z5=%xmm12,>y7=%xmm1
1827movdqa %xmm12,%xmm1
1828
1829# qhasm: uint32323232 y7 += z6
1830# asm 1: paddd <z6=int6464#6,<y7=int6464#2
1831# asm 2: paddd <z6=%xmm5,<y7=%xmm1
1832paddd %xmm5,%xmm1
1833
1834# qhasm: r7 = y7
1835# asm 1: movdqa <y7=int6464#2,>r7=int6464#3
1836# asm 2: movdqa <y7=%xmm1,>r7=%xmm2
1837movdqa %xmm1,%xmm2
1838
1839# qhasm: uint32323232 y7 <<= 9
1840# asm 1: pslld $9,<y7=int6464#2
1841# asm 2: pslld $9,<y7=%xmm1
1842pslld $9,%xmm1
1843
1844# qhasm: z7 ^= y7
1845# asm 1: pxor <y7=int6464#2,<z7=int6464#9
1846# asm 2: pxor <y7=%xmm1,<z7=%xmm8
1847pxor %xmm1,%xmm8
1848
1849# qhasm: uint32323232 r7 >>= 23
1850# asm 1: psrld $23,<r7=int6464#3
1851# asm 2: psrld $23,<r7=%xmm2
1852psrld $23,%xmm2
1853
1854# qhasm: z7 ^= r7
1855# asm 1: pxor <r7=int6464#3,<z7=int6464#9
1856# asm 2: pxor <r7=%xmm2,<z7=%xmm8
1857pxor %xmm2,%xmm8
1858
1859# qhasm: y3 = z1
1860# asm 1: movdqa <z1=int6464#8,>y3=int6464#2
1861# asm 2: movdqa <z1=%xmm7,>y3=%xmm1
1862movdqa %xmm7,%xmm1
1863
1864# qhasm: uint32323232 y3 += z2
1865# asm 1: paddd <z2=int6464#11,<y3=int6464#2
1866# asm 2: paddd <z2=%xmm10,<y3=%xmm1
1867paddd %xmm10,%xmm1
1868
1869# qhasm: r3 = y3
1870# asm 1: movdqa <y3=int6464#2,>r3=int6464#3
1871# asm 2: movdqa <y3=%xmm1,>r3=%xmm2
1872movdqa %xmm1,%xmm2
1873
1874# qhasm: uint32323232 y3 <<= 13
1875# asm 1: pslld $13,<y3=int6464#2
1876# asm 2: pslld $13,<y3=%xmm1
1877pslld $13,%xmm1
1878
1879# qhasm: z3 ^= y3
1880# asm 1: pxor <y3=int6464#2,<z3=int6464#5
1881# asm 2: pxor <y3=%xmm1,<z3=%xmm4
1882pxor %xmm1,%xmm4
1883
1884# qhasm: uint32323232 r3 >>= 19
1885# asm 1: psrld $19,<r3=int6464#3
1886# asm 2: psrld $19,<r3=%xmm2
1887psrld $19,%xmm2
1888
1889# qhasm: z3 ^= r3
1890# asm 1: pxor <r3=int6464#3,<z3=int6464#5
1891# asm 2: pxor <r3=%xmm2,<z3=%xmm4
1892pxor %xmm2,%xmm4
1893
1894# qhasm: y4 = z6
1895# asm 1: movdqa <z6=int6464#6,>y4=int6464#2
1896# asm 2: movdqa <z6=%xmm5,>y4=%xmm1
1897movdqa %xmm5,%xmm1
1898
1899# qhasm: uint32323232 y4 += z7
1900# asm 1: paddd <z7=int6464#9,<y4=int6464#2
1901# asm 2: paddd <z7=%xmm8,<y4=%xmm1
1902paddd %xmm8,%xmm1
1903
1904# qhasm: r4 = y4
1905# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1906# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1907movdqa %xmm1,%xmm2
1908
1909# qhasm: uint32323232 y4 <<= 13
1910# asm 1: pslld $13,<y4=int6464#2
1911# asm 2: pslld $13,<y4=%xmm1
1912pslld $13,%xmm1
1913
1914# qhasm: z4 ^= y4
1915# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1916# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1917pxor %xmm1,%xmm14
1918
1919# qhasm: uint32323232 r4 >>= 19
1920# asm 1: psrld $19,<r4=int6464#3
1921# asm 2: psrld $19,<r4=%xmm2
1922psrld $19,%xmm2
1923
1924# qhasm: z4 ^= r4
1925# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1926# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1927pxor %xmm2,%xmm14
1928
1929# qhasm: y0 = z2
1930# asm 1: movdqa <z2=int6464#11,>y0=int6464#2
1931# asm 2: movdqa <z2=%xmm10,>y0=%xmm1
1932movdqa %xmm10,%xmm1
1933
1934# qhasm: uint32323232 y0 += z3
1935# asm 1: paddd <z3=int6464#5,<y0=int6464#2
1936# asm 2: paddd <z3=%xmm4,<y0=%xmm1
1937paddd %xmm4,%xmm1
1938
1939# qhasm: r0 = y0
1940# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1941# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1942movdqa %xmm1,%xmm2
1943
1944# qhasm: uint32323232 y0 <<= 18
1945# asm 1: pslld $18,<y0=int6464#2
1946# asm 2: pslld $18,<y0=%xmm1
1947pslld $18,%xmm1
1948
1949# qhasm: z0 ^= y0
1950# asm 1: pxor <y0=int6464#2,<z0=int6464#1
1951# asm 2: pxor <y0=%xmm1,<z0=%xmm0
1952pxor %xmm1,%xmm0
1953
1954# qhasm: uint32323232 r0 >>= 14
1955# asm 1: psrld $14,<r0=int6464#3
1956# asm 2: psrld $14,<r0=%xmm2
1957psrld $14,%xmm2
1958
1959# qhasm: z0 ^= r0
1960# asm 1: pxor <r0=int6464#3,<z0=int6464#1
1961# asm 2: pxor <r0=%xmm2,<z0=%xmm0
1962pxor %xmm2,%xmm0
1963
1964# qhasm: z10 = z10_stack
1965# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1966# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1967movdqa 320(%rsp),%xmm1
1968
1969# qhasm: z0_stack = z0
1970# asm 1: movdqa <z0=int6464#1,>z0_stack=stack128#21
1971# asm 2: movdqa <z0=%xmm0,>z0_stack=320(%rsp)
1972movdqa %xmm0,320(%rsp)
1973
1974# qhasm: y5 = z7
1975# asm 1: movdqa <z7=int6464#9,>y5=int6464#1
1976# asm 2: movdqa <z7=%xmm8,>y5=%xmm0
1977movdqa %xmm8,%xmm0
1978
1979# qhasm: uint32323232 y5 += z4
1980# asm 1: paddd <z4=int6464#15,<y5=int6464#1
1981# asm 2: paddd <z4=%xmm14,<y5=%xmm0
1982paddd %xmm14,%xmm0
1983
1984# qhasm: r5 = y5
1985# asm 1: movdqa <y5=int6464#1,>r5=int6464#3
1986# asm 2: movdqa <y5=%xmm0,>r5=%xmm2
1987movdqa %xmm0,%xmm2
1988
1989# qhasm: uint32323232 y5 <<= 18
1990# asm 1: pslld $18,<y5=int6464#1
1991# asm 2: pslld $18,<y5=%xmm0
1992pslld $18,%xmm0
1993
1994# qhasm: z5 ^= y5
1995# asm 1: pxor <y5=int6464#1,<z5=int6464#13
1996# asm 2: pxor <y5=%xmm0,<z5=%xmm12
1997pxor %xmm0,%xmm12
1998
1999# qhasm: uint32323232 r5 >>= 14
2000# asm 1: psrld $14,<r5=int6464#3
2001# asm 2: psrld $14,<r5=%xmm2
2002psrld $14,%xmm2
2003
2004# qhasm: z5 ^= r5
2005# asm 1: pxor <r5=int6464#3,<z5=int6464#13
2006# asm 2: pxor <r5=%xmm2,<z5=%xmm12
2007pxor %xmm2,%xmm12
2008
2009# qhasm: y11 = z9
2010# asm 1: movdqa <z9=int6464#12,>y11=int6464#1
2011# asm 2: movdqa <z9=%xmm11,>y11=%xmm0
2012movdqa %xmm11,%xmm0
2013
2014# qhasm: uint32323232 y11 += z10
2015# asm 1: paddd <z10=int6464#2,<y11=int6464#1
2016# asm 2: paddd <z10=%xmm1,<y11=%xmm0
2017paddd %xmm1,%xmm0
2018
2019# qhasm: r11 = y11
2020# asm 1: movdqa <y11=int6464#1,>r11=int6464#3
2021# asm 2: movdqa <y11=%xmm0,>r11=%xmm2
2022movdqa %xmm0,%xmm2
2023
2024# qhasm: uint32323232 y11 <<= 7
2025# asm 1: pslld $7,<y11=int6464#1
2026# asm 2: pslld $7,<y11=%xmm0
2027pslld $7,%xmm0
2028
2029# qhasm: z11 ^= y11
2030# asm 1: pxor <y11=int6464#1,<z11=int6464#7
2031# asm 2: pxor <y11=%xmm0,<z11=%xmm6
2032pxor %xmm0,%xmm6
2033
2034# qhasm: uint32323232 r11 >>= 25
2035# asm 1: psrld $25,<r11=int6464#3
2036# asm 2: psrld $25,<r11=%xmm2
2037psrld $25,%xmm2
2038
2039# qhasm: z11 ^= r11
2040# asm 1: pxor <r11=int6464#3,<z11=int6464#7
2041# asm 2: pxor <r11=%xmm2,<z11=%xmm6
2042pxor %xmm2,%xmm6
2043
2044# qhasm: z15 = z15_stack
2045# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
2046# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
2047movdqa 336(%rsp),%xmm2
2048
2049# qhasm: z5_stack = z5
2050# asm 1: movdqa <z5=int6464#13,>z5_stack=stack128#22
2051# asm 2: movdqa <z5=%xmm12,>z5_stack=336(%rsp)
2052movdqa %xmm12,336(%rsp)
2053
2054# qhasm: y12 = z14
2055# asm 1: movdqa <z14=int6464#4,>y12=int6464#1
2056# asm 2: movdqa <z14=%xmm3,>y12=%xmm0
2057movdqa %xmm3,%xmm0
2058
2059# qhasm: uint32323232 y12 += z15
2060# asm 1: paddd <z15=int6464#3,<y12=int6464#1
2061# asm 2: paddd <z15=%xmm2,<y12=%xmm0
2062paddd %xmm2,%xmm0
2063
2064# qhasm: r12 = y12
2065# asm 1: movdqa <y12=int6464#1,>r12=int6464#13
2066# asm 2: movdqa <y12=%xmm0,>r12=%xmm12
2067movdqa %xmm0,%xmm12
2068
2069# qhasm: uint32323232 y12 <<= 7
2070# asm 1: pslld $7,<y12=int6464#1
2071# asm 2: pslld $7,<y12=%xmm0
2072pslld $7,%xmm0
2073
2074# qhasm: z12 ^= y12
2075# asm 1: pxor <y12=int6464#1,<z12=int6464#14
2076# asm 2: pxor <y12=%xmm0,<z12=%xmm13
2077pxor %xmm0,%xmm13
2078
2079# qhasm: uint32323232 r12 >>= 25
2080# asm 1: psrld $25,<r12=int6464#13
2081# asm 2: psrld $25,<r12=%xmm12
2082psrld $25,%xmm12
2083
2084# qhasm: z12 ^= r12
2085# asm 1: pxor <r12=int6464#13,<z12=int6464#14
2086# asm 2: pxor <r12=%xmm12,<z12=%xmm13
2087pxor %xmm12,%xmm13
2088
2089# qhasm: y8 = z10
2090# asm 1: movdqa <z10=int6464#2,>y8=int6464#1
2091# asm 2: movdqa <z10=%xmm1,>y8=%xmm0
2092movdqa %xmm1,%xmm0
2093
2094# qhasm: uint32323232 y8 += z11
2095# asm 1: paddd <z11=int6464#7,<y8=int6464#1
2096# asm 2: paddd <z11=%xmm6,<y8=%xmm0
2097paddd %xmm6,%xmm0
2098
2099# qhasm: r8 = y8
2100# asm 1: movdqa <y8=int6464#1,>r8=int6464#13
2101# asm 2: movdqa <y8=%xmm0,>r8=%xmm12
2102movdqa %xmm0,%xmm12
2103
2104# qhasm: uint32323232 y8 <<= 9
2105# asm 1: pslld $9,<y8=int6464#1
2106# asm 2: pslld $9,<y8=%xmm0
2107pslld $9,%xmm0
2108
2109# qhasm: z8 ^= y8
2110# asm 1: pxor <y8=int6464#1,<z8=int6464#16
2111# asm 2: pxor <y8=%xmm0,<z8=%xmm15
2112pxor %xmm0,%xmm15
2113
2114# qhasm: uint32323232 r8 >>= 23
2115# asm 1: psrld $23,<r8=int6464#13
2116# asm 2: psrld $23,<r8=%xmm12
2117psrld $23,%xmm12
2118
2119# qhasm: z8 ^= r8
2120# asm 1: pxor <r8=int6464#13,<z8=int6464#16
2121# asm 2: pxor <r8=%xmm12,<z8=%xmm15
2122pxor %xmm12,%xmm15
2123
2124# qhasm: y13 = z15
2125# asm 1: movdqa <z15=int6464#3,>y13=int6464#1
2126# asm 2: movdqa <z15=%xmm2,>y13=%xmm0
2127movdqa %xmm2,%xmm0
2128
2129# qhasm: uint32323232 y13 += z12
2130# asm 1: paddd <z12=int6464#14,<y13=int6464#1
2131# asm 2: paddd <z12=%xmm13,<y13=%xmm0
2132paddd %xmm13,%xmm0
2133
2134# qhasm: r13 = y13
2135# asm 1: movdqa <y13=int6464#1,>r13=int6464#13
2136# asm 2: movdqa <y13=%xmm0,>r13=%xmm12
2137movdqa %xmm0,%xmm12
2138
2139# qhasm: uint32323232 y13 <<= 9
2140# asm 1: pslld $9,<y13=int6464#1
2141# asm 2: pslld $9,<y13=%xmm0
2142pslld $9,%xmm0
2143
2144# qhasm: z13 ^= y13
2145# asm 1: pxor <y13=int6464#1,<z13=int6464#10
2146# asm 2: pxor <y13=%xmm0,<z13=%xmm9
2147pxor %xmm0,%xmm9
2148
2149# qhasm: uint32323232 r13 >>= 23
2150# asm 1: psrld $23,<r13=int6464#13
2151# asm 2: psrld $23,<r13=%xmm12
2152psrld $23,%xmm12
2153
2154# qhasm: z13 ^= r13
2155# asm 1: pxor <r13=int6464#13,<z13=int6464#10
2156# asm 2: pxor <r13=%xmm12,<z13=%xmm9
2157pxor %xmm12,%xmm9
2158
2159# qhasm: y9 = z11
2160# asm 1: movdqa <z11=int6464#7,>y9=int6464#1
2161# asm 2: movdqa <z11=%xmm6,>y9=%xmm0
2162movdqa %xmm6,%xmm0
2163
2164# qhasm: uint32323232 y9 += z8
2165# asm 1: paddd <z8=int6464#16,<y9=int6464#1
2166# asm 2: paddd <z8=%xmm15,<y9=%xmm0
2167paddd %xmm15,%xmm0
2168
2169# qhasm: r9 = y9
2170# asm 1: movdqa <y9=int6464#1,>r9=int6464#13
2171# asm 2: movdqa <y9=%xmm0,>r9=%xmm12
2172movdqa %xmm0,%xmm12
2173
2174# qhasm: uint32323232 y9 <<= 13
2175# asm 1: pslld $13,<y9=int6464#1
2176# asm 2: pslld $13,<y9=%xmm0
2177pslld $13,%xmm0
2178
2179# qhasm: z9 ^= y9
2180# asm 1: pxor <y9=int6464#1,<z9=int6464#12
2181# asm 2: pxor <y9=%xmm0,<z9=%xmm11
2182pxor %xmm0,%xmm11
2183
2184# qhasm: uint32323232 r9 >>= 19
2185# asm 1: psrld $19,<r9=int6464#13
2186# asm 2: psrld $19,<r9=%xmm12
2187psrld $19,%xmm12
2188
2189# qhasm: z9 ^= r9
2190# asm 1: pxor <r9=int6464#13,<z9=int6464#12
2191# asm 2: pxor <r9=%xmm12,<z9=%xmm11
2192pxor %xmm12,%xmm11
2193
2194# qhasm: y14 = z12
2195# asm 1: movdqa <z12=int6464#14,>y14=int6464#1
2196# asm 2: movdqa <z12=%xmm13,>y14=%xmm0
2197movdqa %xmm13,%xmm0
2198
2199# qhasm: uint32323232 y14 += z13
2200# asm 1: paddd <z13=int6464#10,<y14=int6464#1
2201# asm 2: paddd <z13=%xmm9,<y14=%xmm0
2202paddd %xmm9,%xmm0
2203
2204# qhasm: r14 = y14
2205# asm 1: movdqa <y14=int6464#1,>r14=int6464#13
2206# asm 2: movdqa <y14=%xmm0,>r14=%xmm12
2207movdqa %xmm0,%xmm12
2208
2209# qhasm: uint32323232 y14 <<= 13
2210# asm 1: pslld $13,<y14=int6464#1
2211# asm 2: pslld $13,<y14=%xmm0
2212pslld $13,%xmm0
2213
2214# qhasm: z14 ^= y14
2215# asm 1: pxor <y14=int6464#1,<z14=int6464#4
2216# asm 2: pxor <y14=%xmm0,<z14=%xmm3
2217pxor %xmm0,%xmm3
2218
2219# qhasm: uint32323232 r14 >>= 19
2220# asm 1: psrld $19,<r14=int6464#13
2221# asm 2: psrld $19,<r14=%xmm12
2222psrld $19,%xmm12
2223
2224# qhasm: z14 ^= r14
2225# asm 1: pxor <r14=int6464#13,<z14=int6464#4
2226# asm 2: pxor <r14=%xmm12,<z14=%xmm3
2227pxor %xmm12,%xmm3
2228
2229# qhasm: y10 = z8
2230# asm 1: movdqa <z8=int6464#16,>y10=int6464#1
2231# asm 2: movdqa <z8=%xmm15,>y10=%xmm0
2232movdqa %xmm15,%xmm0
2233
2234# qhasm: uint32323232 y10 += z9
2235# asm 1: paddd <z9=int6464#12,<y10=int6464#1
2236# asm 2: paddd <z9=%xmm11,<y10=%xmm0
2237paddd %xmm11,%xmm0
2238
2239# qhasm: r10 = y10
2240# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
2241# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
2242movdqa %xmm0,%xmm12
2243
2244# qhasm: uint32323232 y10 <<= 18
2245# asm 1: pslld $18,<y10=int6464#1
2246# asm 2: pslld $18,<y10=%xmm0
2247pslld $18,%xmm0
2248
2249# qhasm: z10 ^= y10
2250# asm 1: pxor <y10=int6464#1,<z10=int6464#2
2251# asm 2: pxor <y10=%xmm0,<z10=%xmm1
2252pxor %xmm0,%xmm1
2253
2254# qhasm: uint32323232 r10 >>= 14
2255# asm 1: psrld $14,<r10=int6464#13
2256# asm 2: psrld $14,<r10=%xmm12
2257psrld $14,%xmm12
2258
2259# qhasm: z10 ^= r10
2260# asm 1: pxor <r10=int6464#13,<z10=int6464#2
2261# asm 2: pxor <r10=%xmm12,<z10=%xmm1
2262pxor %xmm12,%xmm1
2263
2264# qhasm: y15 = z13
2265# asm 1: movdqa <z13=int6464#10,>y15=int6464#1
2266# asm 2: movdqa <z13=%xmm9,>y15=%xmm0
2267movdqa %xmm9,%xmm0
2268
2269# qhasm: uint32323232 y15 += z14
2270# asm 1: paddd <z14=int6464#4,<y15=int6464#1
2271# asm 2: paddd <z14=%xmm3,<y15=%xmm0
2272paddd %xmm3,%xmm0
2273
2274# qhasm: r15 = y15
2275# asm 1: movdqa <y15=int6464#1,>r15=int6464#13
2276# asm 2: movdqa <y15=%xmm0,>r15=%xmm12
2277movdqa %xmm0,%xmm12
2278
2279# qhasm: uint32323232 y15 <<= 18
2280# asm 1: pslld $18,<y15=int6464#1
2281# asm 2: pslld $18,<y15=%xmm0
2282pslld $18,%xmm0
2283
2284# qhasm: z15 ^= y15
2285# asm 1: pxor <y15=int6464#1,<z15=int6464#3
2286# asm 2: pxor <y15=%xmm0,<z15=%xmm2
2287pxor %xmm0,%xmm2
2288
2289# qhasm: uint32323232 r15 >>= 14
2290# asm 1: psrld $14,<r15=int6464#13
2291# asm 2: psrld $14,<r15=%xmm12
2292psrld $14,%xmm12
2293
2294# qhasm: z15 ^= r15
2295# asm 1: pxor <r15=int6464#13,<z15=int6464#3
2296# asm 2: pxor <r15=%xmm12,<z15=%xmm2
2297pxor %xmm12,%xmm2
2298
2299# qhasm: z0 = z0_stack
2300# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#13
2301# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm12
2302movdqa 320(%rsp),%xmm12
2303
2304# qhasm: z5 = z5_stack
2305# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#1
2306# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm0
2307movdqa 336(%rsp),%xmm0
2308
2309# qhasm: unsigned>? i -= 2
2310# asm 1: sub $2,<i=int64#3
2311# asm 2: sub $2,<i=%rdx
2312sub $2,%rdx
2313# comment:fp stack unchanged by jump
2314
2315# qhasm: goto mainloop1 if unsigned>
2316ja ._mainloop1
2317
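# Each pass through mainloop1 performs two rounds of the core, which is why
# the round counter i is decreased by 2.  Once it reaches zero we fall
# through to the output phase: add the original input words (orig0..orig15)
# back into the state -- the Salsa20 feedforward -- then xor the message
# with the resulting keystream and store it to out.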
2318# qhasm: uint32323232 z0 += orig0
2319# asm 1: paddd <orig0=stack128#8,<z0=int6464#13
2320# asm 2: paddd <orig0=112(%rsp),<z0=%xmm12
2321paddd 112(%rsp),%xmm12
2322
2323# qhasm: uint32323232 z1 += orig1
2324# asm 1: paddd <orig1=stack128#12,<z1=int6464#8
2325# asm 2: paddd <orig1=176(%rsp),<z1=%xmm7
2326paddd 176(%rsp),%xmm7
2327
2328# qhasm: uint32323232 z2 += orig2
2329# asm 1: paddd <orig2=stack128#15,<z2=int6464#11
2330# asm 2: paddd <orig2=224(%rsp),<z2=%xmm10
2331paddd 224(%rsp),%xmm10
2332
2333# qhasm: uint32323232 z3 += orig3
2334# asm 1: paddd <orig3=stack128#18,<z3=int6464#5
2335# asm 2: paddd <orig3=272(%rsp),<z3=%xmm4
2336paddd 272(%rsp),%xmm4
2337
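# Each xmm register holds the same state word for four consecutive 64-byte
# blocks, one per 32-bit lane.  movd extracts the lane for the current
# block and pshufd $0x39 (the qhasm "<<<= 96") rotates the next lane into
# position, so the four copies of words 0..3 are xored with the message at
# offsets 0, 64, 128 and 192.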
2338# qhasm: in0 = z0
2339# asm 1: movd <z0=int6464#13,>in0=int64#3
2340# asm 2: movd <z0=%xmm12,>in0=%rdx
2341movd %xmm12,%rdx
2342
2343# qhasm: in1 = z1
2344# asm 1: movd <z1=int6464#8,>in1=int64#4
2345# asm 2: movd <z1=%xmm7,>in1=%rcx
2346movd %xmm7,%rcx
2347
2348# qhasm: in2 = z2
2349# asm 1: movd <z2=int6464#11,>in2=int64#5
2350# asm 2: movd <z2=%xmm10,>in2=%r8
2351movd %xmm10,%r8
2352
2353# qhasm: in3 = z3
2354# asm 1: movd <z3=int6464#5,>in3=int64#6
2355# asm 2: movd <z3=%xmm4,>in3=%r9
2356movd %xmm4,%r9
2357
2358# qhasm: z0 <<<= 96
2359# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2360# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2361pshufd $0x39,%xmm12,%xmm12
2362
2363# qhasm: z1 <<<= 96
2364# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2365# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2366pshufd $0x39,%xmm7,%xmm7
2367
2368# qhasm: z2 <<<= 96
2369# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2370# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2371pshufd $0x39,%xmm10,%xmm10
2372
2373# qhasm: z3 <<<= 96
2374# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2375# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2376pshufd $0x39,%xmm4,%xmm4
2377
2378# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
2379# asm 1: xorl 0(<m=int64#2),<in0=int64#3d
2380# asm 2: xorl 0(<m=%rsi),<in0=%edx
2381xorl 0(%rsi),%edx
2382
2383# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
2384# asm 1: xorl 4(<m=int64#2),<in1=int64#4d
2385# asm 2: xorl 4(<m=%rsi),<in1=%ecx
2386xorl 4(%rsi),%ecx
2387
2388# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
2389# asm 1: xorl 8(<m=int64#2),<in2=int64#5d
2390# asm 2: xorl 8(<m=%rsi),<in2=%r8d
2391xorl 8(%rsi),%r8d
2392
2393# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
2394# asm 1: xorl 12(<m=int64#2),<in3=int64#6d
2395# asm 2: xorl 12(<m=%rsi),<in3=%r9d
2396xorl 12(%rsi),%r9d
2397
2398# qhasm: *(uint32 *) (out + 0) = in0
2399# asm 1: movl <in0=int64#3d,0(<out=int64#1)
2400# asm 2: movl <in0=%edx,0(<out=%rdi)
2401movl %edx,0(%rdi)
2402
2403# qhasm: *(uint32 *) (out + 4) = in1
2404# asm 1: movl <in1=int64#4d,4(<out=int64#1)
2405# asm 2: movl <in1=%ecx,4(<out=%rdi)
2406movl %ecx,4(%rdi)
2407
2408# qhasm: *(uint32 *) (out + 8) = in2
2409# asm 1: movl <in2=int64#5d,8(<out=int64#1)
2410# asm 2: movl <in2=%r8d,8(<out=%rdi)
2411movl %r8d,8(%rdi)
2412
2413# qhasm: *(uint32 *) (out + 12) = in3
2414# asm 1: movl <in3=int64#6d,12(<out=int64#1)
2415# asm 2: movl <in3=%r9d,12(<out=%rdi)
2416movl %r9d,12(%rdi)
2417
2418# qhasm: in0 = z0
2419# asm 1: movd <z0=int6464#13,>in0=int64#3
2420# asm 2: movd <z0=%xmm12,>in0=%rdx
2421movd %xmm12,%rdx
2422
2423# qhasm: in1 = z1
2424# asm 1: movd <z1=int6464#8,>in1=int64#4
2425# asm 2: movd <z1=%xmm7,>in1=%rcx
2426movd %xmm7,%rcx
2427
2428# qhasm: in2 = z2
2429# asm 1: movd <z2=int6464#11,>in2=int64#5
2430# asm 2: movd <z2=%xmm10,>in2=%r8
2431movd %xmm10,%r8
2432
2433# qhasm: in3 = z3
2434# asm 1: movd <z3=int6464#5,>in3=int64#6
2435# asm 2: movd <z3=%xmm4,>in3=%r9
2436movd %xmm4,%r9
2437
2438# qhasm: z0 <<<= 96
2439# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2440# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2441pshufd $0x39,%xmm12,%xmm12
2442
2443# qhasm: z1 <<<= 96
2444# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2445# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2446pshufd $0x39,%xmm7,%xmm7
2447
2448# qhasm: z2 <<<= 96
2449# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2450# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2451pshufd $0x39,%xmm10,%xmm10
2452
2453# qhasm: z3 <<<= 96
2454# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2455# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2456pshufd $0x39,%xmm4,%xmm4
2457
2458# qhasm: (uint32) in0 ^= *(uint32 *) (m + 64)
2459# asm 1: xorl 64(<m=int64#2),<in0=int64#3d
2460# asm 2: xorl 64(<m=%rsi),<in0=%edx
2461xorl 64(%rsi),%edx
2462
2463# qhasm: (uint32) in1 ^= *(uint32 *) (m + 68)
2464# asm 1: xorl 68(<m=int64#2),<in1=int64#4d
2465# asm 2: xorl 68(<m=%rsi),<in1=%ecx
2466xorl 68(%rsi),%ecx
2467
2468# qhasm: (uint32) in2 ^= *(uint32 *) (m + 72)
2469# asm 1: xorl 72(<m=int64#2),<in2=int64#5d
2470# asm 2: xorl 72(<m=%rsi),<in2=%r8d
2471xorl 72(%rsi),%r8d
2472
2473# qhasm: (uint32) in3 ^= *(uint32 *) (m + 76)
2474# asm 1: xorl 76(<m=int64#2),<in3=int64#6d
2475# asm 2: xorl 76(<m=%rsi),<in3=%r9d
2476xorl 76(%rsi),%r9d
2477
2478# qhasm: *(uint32 *) (out + 64) = in0
2479# asm 1: movl <in0=int64#3d,64(<out=int64#1)
2480# asm 2: movl <in0=%edx,64(<out=%rdi)
2481movl %edx,64(%rdi)
2482
2483# qhasm: *(uint32 *) (out + 68) = in1
2484# asm 1: movl <in1=int64#4d,68(<out=int64#1)
2485# asm 2: movl <in1=%ecx,68(<out=%rdi)
2486movl %ecx,68(%rdi)
2487
2488# qhasm: *(uint32 *) (out + 72) = in2
2489# asm 1: movl <in2=int64#5d,72(<out=int64#1)
2490# asm 2: movl <in2=%r8d,72(<out=%rdi)
2491movl %r8d,72(%rdi)
2492
2493# qhasm: *(uint32 *) (out + 76) = in3
2494# asm 1: movl <in3=int64#6d,76(<out=int64#1)
2495# asm 2: movl <in3=%r9d,76(<out=%rdi)
2496movl %r9d,76(%rdi)
2497
2498# qhasm: in0 = z0
2499# asm 1: movd <z0=int6464#13,>in0=int64#3
2500# asm 2: movd <z0=%xmm12,>in0=%rdx
2501movd %xmm12,%rdx
2502
2503# qhasm: in1 = z1
2504# asm 1: movd <z1=int6464#8,>in1=int64#4
2505# asm 2: movd <z1=%xmm7,>in1=%rcx
2506movd %xmm7,%rcx
2507
2508# qhasm: in2 = z2
2509# asm 1: movd <z2=int6464#11,>in2=int64#5
2510# asm 2: movd <z2=%xmm10,>in2=%r8
2511movd %xmm10,%r8
2512
2513# qhasm: in3 = z3
2514# asm 1: movd <z3=int6464#5,>in3=int64#6
2515# asm 2: movd <z3=%xmm4,>in3=%r9
2516movd %xmm4,%r9
2517
2518# qhasm: z0 <<<= 96
2519# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2520# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2521pshufd $0x39,%xmm12,%xmm12
2522
2523# qhasm: z1 <<<= 96
2524# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2525# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2526pshufd $0x39,%xmm7,%xmm7
2527
2528# qhasm: z2 <<<= 96
2529# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2530# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2531pshufd $0x39,%xmm10,%xmm10
2532
2533# qhasm: z3 <<<= 96
2534# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2535# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2536pshufd $0x39,%xmm4,%xmm4
2537
2538# qhasm: (uint32) in0 ^= *(uint32 *) (m + 128)
2539# asm 1: xorl 128(<m=int64#2),<in0=int64#3d
2540# asm 2: xorl 128(<m=%rsi),<in0=%edx
2541xorl 128(%rsi),%edx
2542
2543# qhasm: (uint32) in1 ^= *(uint32 *) (m + 132)
2544# asm 1: xorl 132(<m=int64#2),<in1=int64#4d
2545# asm 2: xorl 132(<m=%rsi),<in1=%ecx
2546xorl 132(%rsi),%ecx
2547
2548# qhasm: (uint32) in2 ^= *(uint32 *) (m + 136)
2549# asm 1: xorl 136(<m=int64#2),<in2=int64#5d
2550# asm 2: xorl 136(<m=%rsi),<in2=%r8d
2551xorl 136(%rsi),%r8d
2552
2553# qhasm: (uint32) in3 ^= *(uint32 *) (m + 140)
2554# asm 1: xorl 140(<m=int64#2),<in3=int64#6d
2555# asm 2: xorl 140(<m=%rsi),<in3=%r9d
2556xorl 140(%rsi),%r9d
2557
2558# qhasm: *(uint32 *) (out + 128) = in0
2559# asm 1: movl <in0=int64#3d,128(<out=int64#1)
2560# asm 2: movl <in0=%edx,128(<out=%rdi)
2561movl %edx,128(%rdi)
2562
2563# qhasm: *(uint32 *) (out + 132) = in1
2564# asm 1: movl <in1=int64#4d,132(<out=int64#1)
2565# asm 2: movl <in1=%ecx,132(<out=%rdi)
2566movl %ecx,132(%rdi)
2567
2568# qhasm: *(uint32 *) (out + 136) = in2
2569# asm 1: movl <in2=int64#5d,136(<out=int64#1)
2570# asm 2: movl <in2=%r8d,136(<out=%rdi)
2571movl %r8d,136(%rdi)
2572
2573# qhasm: *(uint32 *) (out + 140) = in3
2574# asm 1: movl <in3=int64#6d,140(<out=int64#1)
2575# asm 2: movl <in3=%r9d,140(<out=%rdi)
2576movl %r9d,140(%rdi)
2577
2578# qhasm: in0 = z0
2579# asm 1: movd <z0=int6464#13,>in0=int64#3
2580# asm 2: movd <z0=%xmm12,>in0=%rdx
2581movd %xmm12,%rdx
2582
2583# qhasm: in1 = z1
2584# asm 1: movd <z1=int6464#8,>in1=int64#4
2585# asm 2: movd <z1=%xmm7,>in1=%rcx
2586movd %xmm7,%rcx
2587
2588# qhasm: in2 = z2
2589# asm 1: movd <z2=int6464#11,>in2=int64#5
2590# asm 2: movd <z2=%xmm10,>in2=%r8
2591movd %xmm10,%r8
2592
2593# qhasm: in3 = z3
2594# asm 1: movd <z3=int6464#5,>in3=int64#6
2595# asm 2: movd <z3=%xmm4,>in3=%r9
2596movd %xmm4,%r9
2597
2598# qhasm: (uint32) in0 ^= *(uint32 *) (m + 192)
2599# asm 1: xorl 192(<m=int64#2),<in0=int64#3d
2600# asm 2: xorl 192(<m=%rsi),<in0=%edx
2601xorl 192(%rsi),%edx
2602
2603# qhasm: (uint32) in1 ^= *(uint32 *) (m + 196)
2604# asm 1: xorl 196(<m=int64#2),<in1=int64#4d
2605# asm 2: xorl 196(<m=%rsi),<in1=%ecx
2606xorl 196(%rsi),%ecx
2607
2608# qhasm: (uint32) in2 ^= *(uint32 *) (m + 200)
2609# asm 1: xorl 200(<m=int64#2),<in2=int64#5d
2610# asm 2: xorl 200(<m=%rsi),<in2=%r8d
2611xorl 200(%rsi),%r8d
2612
2613# qhasm: (uint32) in3 ^= *(uint32 *) (m + 204)
2614# asm 1: xorl 204(<m=int64#2),<in3=int64#6d
2615# asm 2: xorl 204(<m=%rsi),<in3=%r9d
2616xorl 204(%rsi),%r9d
2617
2618# qhasm: *(uint32 *) (out + 192) = in0
2619# asm 1: movl <in0=int64#3d,192(<out=int64#1)
2620# asm 2: movl <in0=%edx,192(<out=%rdi)
2621movl %edx,192(%rdi)
2622
2623# qhasm: *(uint32 *) (out + 196) = in1
2624# asm 1: movl <in1=int64#4d,196(<out=int64#1)
2625# asm 2: movl <in1=%ecx,196(<out=%rdi)
2626movl %ecx,196(%rdi)
2627
2628# qhasm: *(uint32 *) (out + 200) = in2
2629# asm 1: movl <in2=int64#5d,200(<out=int64#1)
2630# asm 2: movl <in2=%r8d,200(<out=%rdi)
2631movl %r8d,200(%rdi)
2632
2633# qhasm: *(uint32 *) (out + 204) = in3
2634# asm 1: movl <in3=int64#6d,204(<out=int64#1)
2635# asm 2: movl <in3=%r9d,204(<out=%rdi)
2636movl %r9d,204(%rdi)
2637
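# Words 4..7 (bytes 16..31 of each block): same feedforward and lane
# extraction, xored with the message at offsets 16, 80, 144 and 208.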
2638# qhasm: uint32323232 z4 += orig4
2639# asm 1: paddd <orig4=stack128#16,<z4=int6464#15
2640# asm 2: paddd <orig4=240(%rsp),<z4=%xmm14
2641paddd 240(%rsp),%xmm14
2642
2643# qhasm: uint32323232 z5 += orig5
2644# asm 1: paddd <orig5=stack128#5,<z5=int6464#1
2645# asm 2: paddd <orig5=64(%rsp),<z5=%xmm0
2646paddd 64(%rsp),%xmm0
2647
2648# qhasm: uint32323232 z6 += orig6
2649# asm 1: paddd <orig6=stack128#9,<z6=int6464#6
2650# asm 2: paddd <orig6=128(%rsp),<z6=%xmm5
2651paddd 128(%rsp),%xmm5
2652
2653# qhasm: uint32323232 z7 += orig7
2654# asm 1: paddd <orig7=stack128#13,<z7=int6464#9
2655# asm 2: paddd <orig7=192(%rsp),<z7=%xmm8
2656paddd 192(%rsp),%xmm8
2657
2658# qhasm: in4 = z4
2659# asm 1: movd <z4=int6464#15,>in4=int64#3
2660# asm 2: movd <z4=%xmm14,>in4=%rdx
2661movd %xmm14,%rdx
2662
2663# qhasm: in5 = z5
2664# asm 1: movd <z5=int6464#1,>in5=int64#4
2665# asm 2: movd <z5=%xmm0,>in5=%rcx
2666movd %xmm0,%rcx
2667
2668# qhasm: in6 = z6
2669# asm 1: movd <z6=int6464#6,>in6=int64#5
2670# asm 2: movd <z6=%xmm5,>in6=%r8
2671movd %xmm5,%r8
2672
2673# qhasm: in7 = z7
2674# asm 1: movd <z7=int6464#9,>in7=int64#6
2675# asm 2: movd <z7=%xmm8,>in7=%r9
2676movd %xmm8,%r9
2677
2678# qhasm: z4 <<<= 96
2679# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2680# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2681pshufd $0x39,%xmm14,%xmm14
2682
2683# qhasm: z5 <<<= 96
2684# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2685# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2686pshufd $0x39,%xmm0,%xmm0
2687
2688# qhasm: z6 <<<= 96
2689# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2690# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2691pshufd $0x39,%xmm5,%xmm5
2692
2693# qhasm: z7 <<<= 96
2694# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2695# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2696pshufd $0x39,%xmm8,%xmm8
2697
2698# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
2699# asm 1: xorl 16(<m=int64#2),<in4=int64#3d
2700# asm 2: xorl 16(<m=%rsi),<in4=%edx
2701xorl 16(%rsi),%edx
2702
2703# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
2704# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
2705# asm 2: xorl 20(<m=%rsi),<in5=%ecx
2706xorl 20(%rsi),%ecx
2707
2708# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
2709# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
2710# asm 2: xorl 24(<m=%rsi),<in6=%r8d
2711xorl 24(%rsi),%r8d
2712
2713# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
2714# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
2715# asm 2: xorl 28(<m=%rsi),<in7=%r9d
2716xorl 28(%rsi),%r9d
2717
2718# qhasm: *(uint32 *) (out + 16) = in4
2719# asm 1: movl <in4=int64#3d,16(<out=int64#1)
2720# asm 2: movl <in4=%edx,16(<out=%rdi)
2721movl %edx,16(%rdi)
2722
2723# qhasm: *(uint32 *) (out + 20) = in5
2724# asm 1: movl <in5=int64#4d,20(<out=int64#1)
2725# asm 2: movl <in5=%ecx,20(<out=%rdi)
2726movl %ecx,20(%rdi)
2727
2728# qhasm: *(uint32 *) (out + 24) = in6
2729# asm 1: movl <in6=int64#5d,24(<out=int64#1)
2730# asm 2: movl <in6=%r8d,24(<out=%rdi)
2731movl %r8d,24(%rdi)
2732
2733# qhasm: *(uint32 *) (out + 28) = in7
2734# asm 1: movl <in7=int64#6d,28(<out=int64#1)
2735# asm 2: movl <in7=%r9d,28(<out=%rdi)
2736movl %r9d,28(%rdi)
2737
2738# qhasm: in4 = z4
2739# asm 1: movd <z4=int6464#15,>in4=int64#3
2740# asm 2: movd <z4=%xmm14,>in4=%rdx
2741movd %xmm14,%rdx
2742
2743# qhasm: in5 = z5
2744# asm 1: movd <z5=int6464#1,>in5=int64#4
2745# asm 2: movd <z5=%xmm0,>in5=%rcx
2746movd %xmm0,%rcx
2747
2748# qhasm: in6 = z6
2749# asm 1: movd <z6=int6464#6,>in6=int64#5
2750# asm 2: movd <z6=%xmm5,>in6=%r8
2751movd %xmm5,%r8
2752
2753# qhasm: in7 = z7
2754# asm 1: movd <z7=int6464#9,>in7=int64#6
2755# asm 2: movd <z7=%xmm8,>in7=%r9
2756movd %xmm8,%r9
2757
2758# qhasm: z4 <<<= 96
2759# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2760# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2761pshufd $0x39,%xmm14,%xmm14
2762
2763# qhasm: z5 <<<= 96
2764# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2765# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2766pshufd $0x39,%xmm0,%xmm0
2767
2768# qhasm: z6 <<<= 96
2769# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2770# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2771pshufd $0x39,%xmm5,%xmm5
2772
2773# qhasm: z7 <<<= 96
2774# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2775# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2776pshufd $0x39,%xmm8,%xmm8
2777
2778# qhasm: (uint32) in4 ^= *(uint32 *) (m + 80)
2779# asm 1: xorl 80(<m=int64#2),<in4=int64#3d
2780# asm 2: xorl 80(<m=%rsi),<in4=%edx
2781xorl 80(%rsi),%edx
2782
2783# qhasm: (uint32) in5 ^= *(uint32 *) (m + 84)
2784# asm 1: xorl 84(<m=int64#2),<in5=int64#4d
2785# asm 2: xorl 84(<m=%rsi),<in5=%ecx
2786xorl 84(%rsi),%ecx
2787
2788# qhasm: (uint32) in6 ^= *(uint32 *) (m + 88)
2789# asm 1: xorl 88(<m=int64#2),<in6=int64#5d
2790# asm 2: xorl 88(<m=%rsi),<in6=%r8d
2791xorl 88(%rsi),%r8d
2792
2793# qhasm: (uint32) in7 ^= *(uint32 *) (m + 92)
2794# asm 1: xorl 92(<m=int64#2),<in7=int64#6d
2795# asm 2: xorl 92(<m=%rsi),<in7=%r9d
2796xorl 92(%rsi),%r9d
2797
2798# qhasm: *(uint32 *) (out + 80) = in4
2799# asm 1: movl <in4=int64#3d,80(<out=int64#1)
2800# asm 2: movl <in4=%edx,80(<out=%rdi)
2801movl %edx,80(%rdi)
2802
2803# qhasm: *(uint32 *) (out + 84) = in5
2804# asm 1: movl <in5=int64#4d,84(<out=int64#1)
2805# asm 2: movl <in5=%ecx,84(<out=%rdi)
2806movl %ecx,84(%rdi)
2807
2808# qhasm: *(uint32 *) (out + 88) = in6
2809# asm 1: movl <in6=int64#5d,88(<out=int64#1)
2810# asm 2: movl <in6=%r8d,88(<out=%rdi)
2811movl %r8d,88(%rdi)
2812
2813# qhasm: *(uint32 *) (out + 92) = in7
2814# asm 1: movl <in7=int64#6d,92(<out=int64#1)
2815# asm 2: movl <in7=%r9d,92(<out=%rdi)
2816movl %r9d,92(%rdi)
2817
2818# qhasm: in4 = z4
2819# asm 1: movd <z4=int6464#15,>in4=int64#3
2820# asm 2: movd <z4=%xmm14,>in4=%rdx
2821movd %xmm14,%rdx
2822
2823# qhasm: in5 = z5
2824# asm 1: movd <z5=int6464#1,>in5=int64#4
2825# asm 2: movd <z5=%xmm0,>in5=%rcx
2826movd %xmm0,%rcx
2827
2828# qhasm: in6 = z6
2829# asm 1: movd <z6=int6464#6,>in6=int64#5
2830# asm 2: movd <z6=%xmm5,>in6=%r8
2831movd %xmm5,%r8
2832
2833# qhasm: in7 = z7
2834# asm 1: movd <z7=int6464#9,>in7=int64#6
2835# asm 2: movd <z7=%xmm8,>in7=%r9
2836movd %xmm8,%r9
2837
2838# qhasm: z4 <<<= 96
2839# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2840# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2841pshufd $0x39,%xmm14,%xmm14
2842
2843# qhasm: z5 <<<= 96
2844# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2845# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2846pshufd $0x39,%xmm0,%xmm0
2847
2848# qhasm: z6 <<<= 96
2849# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2850# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2851pshufd $0x39,%xmm5,%xmm5
2852
2853# qhasm: z7 <<<= 96
2854# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2855# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2856pshufd $0x39,%xmm8,%xmm8
2857
2858# qhasm: (uint32) in4 ^= *(uint32 *) (m + 144)
2859# asm 1: xorl 144(<m=int64#2),<in4=int64#3d
2860# asm 2: xorl 144(<m=%rsi),<in4=%edx
2861xorl 144(%rsi),%edx
2862
2863# qhasm: (uint32) in5 ^= *(uint32 *) (m + 148)
2864# asm 1: xorl 148(<m=int64#2),<in5=int64#4d
2865# asm 2: xorl 148(<m=%rsi),<in5=%ecx
2866xorl 148(%rsi),%ecx
2867
2868# qhasm: (uint32) in6 ^= *(uint32 *) (m + 152)
2869# asm 1: xorl 152(<m=int64#2),<in6=int64#5d
2870# asm 2: xorl 152(<m=%rsi),<in6=%r8d
2871xorl 152(%rsi),%r8d
2872
2873# qhasm: (uint32) in7 ^= *(uint32 *) (m + 156)
2874# asm 1: xorl 156(<m=int64#2),<in7=int64#6d
2875# asm 2: xorl 156(<m=%rsi),<in7=%r9d
2876xorl 156(%rsi),%r9d
2877
2878# qhasm: *(uint32 *) (out + 144) = in4
2879# asm 1: movl <in4=int64#3d,144(<out=int64#1)
2880# asm 2: movl <in4=%edx,144(<out=%rdi)
2881movl %edx,144(%rdi)
2882
2883# qhasm: *(uint32 *) (out + 148) = in5
2884# asm 1: movl <in5=int64#4d,148(<out=int64#1)
2885# asm 2: movl <in5=%ecx,148(<out=%rdi)
2886movl %ecx,148(%rdi)
2887
2888# qhasm: *(uint32 *) (out + 152) = in6
2889# asm 1: movl <in6=int64#5d,152(<out=int64#1)
2890# asm 2: movl <in6=%r8d,152(<out=%rdi)
2891movl %r8d,152(%rdi)
2892
2893# qhasm: *(uint32 *) (out + 156) = in7
2894# asm 1: movl <in7=int64#6d,156(<out=int64#1)
2895# asm 2: movl <in7=%r9d,156(<out=%rdi)
2896movl %r9d,156(%rdi)
2897
2898# qhasm: in4 = z4
2899# asm 1: movd <z4=int6464#15,>in4=int64#3
2900# asm 2: movd <z4=%xmm14,>in4=%rdx
2901movd %xmm14,%rdx
2902
2903# qhasm: in5 = z5
2904# asm 1: movd <z5=int6464#1,>in5=int64#4
2905# asm 2: movd <z5=%xmm0,>in5=%rcx
2906movd %xmm0,%rcx
2907
2908# qhasm: in6 = z6
2909# asm 1: movd <z6=int6464#6,>in6=int64#5
2910# asm 2: movd <z6=%xmm5,>in6=%r8
2911movd %xmm5,%r8
2912
2913# qhasm: in7 = z7
2914# asm 1: movd <z7=int6464#9,>in7=int64#6
2915# asm 2: movd <z7=%xmm8,>in7=%r9
2916movd %xmm8,%r9
2917
2918# qhasm: (uint32) in4 ^= *(uint32 *) (m + 208)
2919# asm 1: xorl 208(<m=int64#2),<in4=int64#3d
2920# asm 2: xorl 208(<m=%rsi),<in4=%edx
2921xorl 208(%rsi),%edx
2922
2923# qhasm: (uint32) in5 ^= *(uint32 *) (m + 212)
2924# asm 1: xorl 212(<m=int64#2),<in5=int64#4d
2925# asm 2: xorl 212(<m=%rsi),<in5=%ecx
2926xorl 212(%rsi),%ecx
2927
2928# qhasm: (uint32) in6 ^= *(uint32 *) (m + 216)
2929# asm 1: xorl 216(<m=int64#2),<in6=int64#5d
2930# asm 2: xorl 216(<m=%rsi),<in6=%r8d
2931xorl 216(%rsi),%r8d
2932
2933# qhasm: (uint32) in7 ^= *(uint32 *) (m + 220)
2934# asm 1: xorl 220(<m=int64#2),<in7=int64#6d
2935# asm 2: xorl 220(<m=%rsi),<in7=%r9d
2936xorl 220(%rsi),%r9d
2937
2938# qhasm: *(uint32 *) (out + 208) = in4
2939# asm 1: movl <in4=int64#3d,208(<out=int64#1)
2940# asm 2: movl <in4=%edx,208(<out=%rdi)
2941movl %edx,208(%rdi)
2942
2943# qhasm: *(uint32 *) (out + 212) = in5
2944# asm 1: movl <in5=int64#4d,212(<out=int64#1)
2945# asm 2: movl <in5=%ecx,212(<out=%rdi)
2946movl %ecx,212(%rdi)
2947
2948# qhasm: *(uint32 *) (out + 216) = in6
2949# asm 1: movl <in6=int64#5d,216(<out=int64#1)
2950# asm 2: movl <in6=%r8d,216(<out=%rdi)
2951movl %r8d,216(%rdi)
2952
2953# qhasm: *(uint32 *) (out + 220) = in7
2954# asm 1: movl <in7=int64#6d,220(<out=int64#1)
2955# asm 2: movl <in7=%r9d,220(<out=%rdi)
2956movl %r9d,220(%rdi)
2957
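# Words 8..11 (bytes 32..47 of each block), xored with the message at
# offsets 32, 96, 160 and 224.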
2958# qhasm: uint32323232 z8 += orig8
2959# asm 1: paddd <orig8=stack128#19,<z8=int6464#16
2960# asm 2: paddd <orig8=288(%rsp),<z8=%xmm15
2961paddd 288(%rsp),%xmm15
2962
2963# qhasm: uint32323232 z9 += orig9
2964# asm 1: paddd <orig9=stack128#20,<z9=int6464#12
2965# asm 2: paddd <orig9=304(%rsp),<z9=%xmm11
2966paddd 304(%rsp),%xmm11
2967
2968# qhasm: uint32323232 z10 += orig10
2969# asm 1: paddd <orig10=stack128#6,<z10=int6464#2
2970# asm 2: paddd <orig10=80(%rsp),<z10=%xmm1
2971paddd 80(%rsp),%xmm1
2972
2973# qhasm: uint32323232 z11 += orig11
2974# asm 1: paddd <orig11=stack128#10,<z11=int6464#7
2975# asm 2: paddd <orig11=144(%rsp),<z11=%xmm6
2976paddd 144(%rsp),%xmm6
2977
2978# qhasm: in8 = z8
2979# asm 1: movd <z8=int6464#16,>in8=int64#3
2980# asm 2: movd <z8=%xmm15,>in8=%rdx
2981movd %xmm15,%rdx
2982
2983# qhasm: in9 = z9
2984# asm 1: movd <z9=int6464#12,>in9=int64#4
2985# asm 2: movd <z9=%xmm11,>in9=%rcx
2986movd %xmm11,%rcx
2987
2988# qhasm: in10 = z10
2989# asm 1: movd <z10=int6464#2,>in10=int64#5
2990# asm 2: movd <z10=%xmm1,>in10=%r8
2991movd %xmm1,%r8
2992
2993# qhasm: in11 = z11
2994# asm 1: movd <z11=int6464#7,>in11=int64#6
2995# asm 2: movd <z11=%xmm6,>in11=%r9
2996movd %xmm6,%r9
2997
2998# qhasm: z8 <<<= 96
2999# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3000# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3001pshufd $0x39,%xmm15,%xmm15
3002
3003# qhasm: z9 <<<= 96
3004# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3005# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3006pshufd $0x39,%xmm11,%xmm11
3007
3008# qhasm: z10 <<<= 96
3009# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3010# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3011pshufd $0x39,%xmm1,%xmm1
3012
3013# qhasm: z11 <<<= 96
3014# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3015# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3016pshufd $0x39,%xmm6,%xmm6
3017
3018# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
3019# asm 1: xorl 32(<m=int64#2),<in8=int64#3d
3020# asm 2: xorl 32(<m=%rsi),<in8=%edx
3021xorl 32(%rsi),%edx
3022
3023# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
3024# asm 1: xorl 36(<m=int64#2),<in9=int64#4d
3025# asm 2: xorl 36(<m=%rsi),<in9=%ecx
3026xorl 36(%rsi),%ecx
3027
3028# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
3029# asm 1: xorl 40(<m=int64#2),<in10=int64#5d
3030# asm 2: xorl 40(<m=%rsi),<in10=%r8d
3031xorl 40(%rsi),%r8d
3032
3033# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
3034# asm 1: xorl 44(<m=int64#2),<in11=int64#6d
3035# asm 2: xorl 44(<m=%rsi),<in11=%r9d
3036xorl 44(%rsi),%r9d
3037
3038# qhasm: *(uint32 *) (out + 32) = in8
3039# asm 1: movl <in8=int64#3d,32(<out=int64#1)
3040# asm 2: movl <in8=%edx,32(<out=%rdi)
3041movl %edx,32(%rdi)
3042
3043# qhasm: *(uint32 *) (out + 36) = in9
3044# asm 1: movl <in9=int64#4d,36(<out=int64#1)
3045# asm 2: movl <in9=%ecx,36(<out=%rdi)
3046movl %ecx,36(%rdi)
3047
3048# qhasm: *(uint32 *) (out + 40) = in10
3049# asm 1: movl <in10=int64#5d,40(<out=int64#1)
3050# asm 2: movl <in10=%r8d,40(<out=%rdi)
3051movl %r8d,40(%rdi)
3052
3053# qhasm: *(uint32 *) (out + 44) = in11
3054# asm 1: movl <in11=int64#6d,44(<out=int64#1)
3055# asm 2: movl <in11=%r9d,44(<out=%rdi)
3056movl %r9d,44(%rdi)
3057
3058# qhasm: in8 = z8
3059# asm 1: movd <z8=int6464#16,>in8=int64#3
3060# asm 2: movd <z8=%xmm15,>in8=%rdx
3061movd %xmm15,%rdx
3062
3063# qhasm: in9 = z9
3064# asm 1: movd <z9=int6464#12,>in9=int64#4
3065# asm 2: movd <z9=%xmm11,>in9=%rcx
3066movd %xmm11,%rcx
3067
3068# qhasm: in10 = z10
3069# asm 1: movd <z10=int6464#2,>in10=int64#5
3070# asm 2: movd <z10=%xmm1,>in10=%r8
3071movd %xmm1,%r8
3072
3073# qhasm: in11 = z11
3074# asm 1: movd <z11=int6464#7,>in11=int64#6
3075# asm 2: movd <z11=%xmm6,>in11=%r9
3076movd %xmm6,%r9
3077
3078# qhasm: z8 <<<= 96
3079# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3080# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3081pshufd $0x39,%xmm15,%xmm15
3082
3083# qhasm: z9 <<<= 96
3084# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3085# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3086pshufd $0x39,%xmm11,%xmm11
3087
3088# qhasm: z10 <<<= 96
3089# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3090# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3091pshufd $0x39,%xmm1,%xmm1
3092
3093# qhasm: z11 <<<= 96
3094# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3095# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3096pshufd $0x39,%xmm6,%xmm6
3097
3098# qhasm: (uint32) in8 ^= *(uint32 *) (m + 96)
3099# asm 1: xorl 96(<m=int64#2),<in8=int64#3d
3100# asm 2: xorl 96(<m=%rsi),<in8=%edx
3101xorl 96(%rsi),%edx
3102
3103# qhasm: (uint32) in9 ^= *(uint32 *) (m + 100)
3104# asm 1: xorl 100(<m=int64#2),<in9=int64#4d
3105# asm 2: xorl 100(<m=%rsi),<in9=%ecx
3106xorl 100(%rsi),%ecx
3107
3108# qhasm: (uint32) in10 ^= *(uint32 *) (m + 104)
3109# asm 1: xorl 104(<m=int64#2),<in10=int64#5d
3110# asm 2: xorl 104(<m=%rsi),<in10=%r8d
3111xorl 104(%rsi),%r8d
3112
3113# qhasm: (uint32) in11 ^= *(uint32 *) (m + 108)
3114# asm 1: xorl 108(<m=int64#2),<in11=int64#6d
3115# asm 2: xorl 108(<m=%rsi),<in11=%r9d
3116xorl 108(%rsi),%r9d
3117
3118# qhasm: *(uint32 *) (out + 96) = in8
3119# asm 1: movl <in8=int64#3d,96(<out=int64#1)
3120# asm 2: movl <in8=%edx,96(<out=%rdi)
3121movl %edx,96(%rdi)
3122
3123# qhasm: *(uint32 *) (out + 100) = in9
3124# asm 1: movl <in9=int64#4d,100(<out=int64#1)
3125# asm 2: movl <in9=%ecx,100(<out=%rdi)
3126movl %ecx,100(%rdi)
3127
3128# qhasm: *(uint32 *) (out + 104) = in10
3129# asm 1: movl <in10=int64#5d,104(<out=int64#1)
3130# asm 2: movl <in10=%r8d,104(<out=%rdi)
3131movl %r8d,104(%rdi)
3132
3133# qhasm: *(uint32 *) (out + 108) = in11
3134# asm 1: movl <in11=int64#6d,108(<out=int64#1)
3135# asm 2: movl <in11=%r9d,108(<out=%rdi)
3136movl %r9d,108(%rdi)
3137
3138# qhasm: in8 = z8
3139# asm 1: movd <z8=int6464#16,>in8=int64#3
3140# asm 2: movd <z8=%xmm15,>in8=%rdx
3141movd %xmm15,%rdx
3142
3143# qhasm: in9 = z9
3144# asm 1: movd <z9=int6464#12,>in9=int64#4
3145# asm 2: movd <z9=%xmm11,>in9=%rcx
3146movd %xmm11,%rcx
3147
3148# qhasm: in10 = z10
3149# asm 1: movd <z10=int6464#2,>in10=int64#5
3150# asm 2: movd <z10=%xmm1,>in10=%r8
3151movd %xmm1,%r8
3152
3153# qhasm: in11 = z11
3154# asm 1: movd <z11=int6464#7,>in11=int64#6
3155# asm 2: movd <z11=%xmm6,>in11=%r9
3156movd %xmm6,%r9
3157
3158# qhasm: z8 <<<= 96
3159# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3160# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3161pshufd $0x39,%xmm15,%xmm15
3162
3163# qhasm: z9 <<<= 96
3164# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3165# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3166pshufd $0x39,%xmm11,%xmm11
3167
3168# qhasm: z10 <<<= 96
3169# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3170# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3171pshufd $0x39,%xmm1,%xmm1
3172
3173# qhasm: z11 <<<= 96
3174# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3175# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3176pshufd $0x39,%xmm6,%xmm6
3177
3178# qhasm: (uint32) in8 ^= *(uint32 *) (m + 160)
3179# asm 1: xorl 160(<m=int64#2),<in8=int64#3d
3180# asm 2: xorl 160(<m=%rsi),<in8=%edx
3181xorl 160(%rsi),%edx
3182
3183# qhasm: (uint32) in9 ^= *(uint32 *) (m + 164)
3184# asm 1: xorl 164(<m=int64#2),<in9=int64#4d
3185# asm 2: xorl 164(<m=%rsi),<in9=%ecx
3186xorl 164(%rsi),%ecx
3187
3188# qhasm: (uint32) in10 ^= *(uint32 *) (m + 168)
3189# asm 1: xorl 168(<m=int64#2),<in10=int64#5d
3190# asm 2: xorl 168(<m=%rsi),<in10=%r8d
3191xorl 168(%rsi),%r8d
3192
3193# qhasm: (uint32) in11 ^= *(uint32 *) (m + 172)
3194# asm 1: xorl 172(<m=int64#2),<in11=int64#6d
3195# asm 2: xorl 172(<m=%rsi),<in11=%r9d
3196xorl 172(%rsi),%r9d
3197
3198# qhasm: *(uint32 *) (out + 160) = in8
3199# asm 1: movl <in8=int64#3d,160(<out=int64#1)
3200# asm 2: movl <in8=%edx,160(<out=%rdi)
3201movl %edx,160(%rdi)
3202
3203# qhasm: *(uint32 *) (out + 164) = in9
3204# asm 1: movl <in9=int64#4d,164(<out=int64#1)
3205# asm 2: movl <in9=%ecx,164(<out=%rdi)
3206movl %ecx,164(%rdi)
3207
3208# qhasm: *(uint32 *) (out + 168) = in10
3209# asm 1: movl <in10=int64#5d,168(<out=int64#1)
3210# asm 2: movl <in10=%r8d,168(<out=%rdi)
3211movl %r8d,168(%rdi)
3212
3213# qhasm: *(uint32 *) (out + 172) = in11
3214# asm 1: movl <in11=int64#6d,172(<out=int64#1)
3215# asm 2: movl <in11=%r9d,172(<out=%rdi)
3216movl %r9d,172(%rdi)
3217
3218# qhasm: in8 = z8
3219# asm 1: movd <z8=int6464#16,>in8=int64#3
3220# asm 2: movd <z8=%xmm15,>in8=%rdx
3221movd %xmm15,%rdx
3222
3223# qhasm: in9 = z9
3224# asm 1: movd <z9=int6464#12,>in9=int64#4
3225# asm 2: movd <z9=%xmm11,>in9=%rcx
3226movd %xmm11,%rcx
3227
3228# qhasm: in10 = z10
3229# asm 1: movd <z10=int6464#2,>in10=int64#5
3230# asm 2: movd <z10=%xmm1,>in10=%r8
3231movd %xmm1,%r8
3232
3233# qhasm: in11 = z11
3234# asm 1: movd <z11=int6464#7,>in11=int64#6
3235# asm 2: movd <z11=%xmm6,>in11=%r9
3236movd %xmm6,%r9
3237
3238# qhasm: (uint32) in8 ^= *(uint32 *) (m + 224)
3239# asm 1: xorl 224(<m=int64#2),<in8=int64#3d
3240# asm 2: xorl 224(<m=%rsi),<in8=%edx
3241xorl 224(%rsi),%edx
3242
3243# qhasm: (uint32) in9 ^= *(uint32 *) (m + 228)
3244# asm 1: xorl 228(<m=int64#2),<in9=int64#4d
3245# asm 2: xorl 228(<m=%rsi),<in9=%ecx
3246xorl 228(%rsi),%ecx
3247
3248# qhasm: (uint32) in10 ^= *(uint32 *) (m + 232)
3249# asm 1: xorl 232(<m=int64#2),<in10=int64#5d
3250# asm 2: xorl 232(<m=%rsi),<in10=%r8d
3251xorl 232(%rsi),%r8d
3252
3253# qhasm: (uint32) in11 ^= *(uint32 *) (m + 236)
3254# asm 1: xorl 236(<m=int64#2),<in11=int64#6d
3255# asm 2: xorl 236(<m=%rsi),<in11=%r9d
3256xorl 236(%rsi),%r9d
3257
3258# qhasm: *(uint32 *) (out + 224) = in8
3259# asm 1: movl <in8=int64#3d,224(<out=int64#1)
3260# asm 2: movl <in8=%edx,224(<out=%rdi)
3261movl %edx,224(%rdi)
3262
3263# qhasm: *(uint32 *) (out + 228) = in9
3264# asm 1: movl <in9=int64#4d,228(<out=int64#1)
3265# asm 2: movl <in9=%ecx,228(<out=%rdi)
3266movl %ecx,228(%rdi)
3267
3268# qhasm: *(uint32 *) (out + 232) = in10
3269# asm 1: movl <in10=int64#5d,232(<out=int64#1)
3270# asm 2: movl <in10=%r8d,232(<out=%rdi)
3271movl %r8d,232(%rdi)
3272
3273# qhasm: *(uint32 *) (out + 236) = in11
3274# asm 1: movl <in11=int64#6d,236(<out=int64#1)
3275# asm 2: movl <in11=%r9d,236(<out=%rdi)
3276movl %r9d,236(%rdi)
3277
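# Words 12..15 (bytes 48..63 of each block), xored with the message at
# offset 48 for the first block, 112 for the second, and so on for the
# remaining two blocks.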
3278# qhasm: uint32323232 z12 += orig12
3279# asm 1: paddd <orig12=stack128#11,<z12=int6464#14
3280# asm 2: paddd <orig12=160(%rsp),<z12=%xmm13
3281paddd 160(%rsp),%xmm13
3282
3283# qhasm: uint32323232 z13 += orig13
3284# asm 1: paddd <orig13=stack128#14,<z13=int6464#10
3285# asm 2: paddd <orig13=208(%rsp),<z13=%xmm9
3286paddd 208(%rsp),%xmm9
3287
3288# qhasm: uint32323232 z14 += orig14
3289# asm 1: paddd <orig14=stack128#17,<z14=int6464#4
3290# asm 2: paddd <orig14=256(%rsp),<z14=%xmm3
3291paddd 256(%rsp),%xmm3
3292
3293# qhasm: uint32323232 z15 += orig15
3294# asm 1: paddd <orig15=stack128#7,<z15=int6464#3
3295# asm 2: paddd <orig15=96(%rsp),<z15=%xmm2
3296paddd 96(%rsp),%xmm2
3297
3298# qhasm: in12 = z12
3299# asm 1: movd <z12=int6464#14,>in12=int64#3
3300# asm 2: movd <z12=%xmm13,>in12=%rdx
3301movd %xmm13,%rdx
3302
3303# qhasm: in13 = z13
3304# asm 1: movd <z13=int6464#10,>in13=int64#4
3305# asm 2: movd <z13=%xmm9,>in13=%rcx
3306movd %xmm9,%rcx
3307
3308# qhasm: in14 = z14
3309# asm 1: movd <z14=int6464#4,>in14=int64#5
3310# asm 2: movd <z14=%xmm3,>in14=%r8
3311movd %xmm3,%r8
3312
3313# qhasm: in15 = z15
3314# asm 1: movd <z15=int6464#3,>in15=int64#6
3315# asm 2: movd <z15=%xmm2,>in15=%r9
3316movd %xmm2,%r9
3317
3318# qhasm: z12 <<<= 96
3319# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3320# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3321pshufd $0x39,%xmm13,%xmm13
3322
3323# qhasm: z13 <<<= 96
3324# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3325# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3326pshufd $0x39,%xmm9,%xmm9
3327
3328# qhasm: z14 <<<= 96
3329# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3330# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3331pshufd $0x39,%xmm3,%xmm3
3332
3333# qhasm: z15 <<<= 96
3334# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3335# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3336pshufd $0x39,%xmm2,%xmm2
3337
3338# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
3339# asm 1: xorl 48(<m=int64#2),<in12=int64#3d
3340# asm 2: xorl 48(<m=%rsi),<in12=%edx
3341xorl 48(%rsi),%edx
3342
3343# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
3344# asm 1: xorl 52(<m=int64#2),<in13=int64#4d
3345# asm 2: xorl 52(<m=%rsi),<in13=%ecx
3346xorl 52(%rsi),%ecx
3347
3348# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
3349# asm 1: xorl 56(<m=int64#2),<in14=int64#5d
3350# asm 2: xorl 56(<m=%rsi),<in14=%r8d
3351xorl 56(%rsi),%r8d
3352
3353# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
3354# asm 1: xorl 60(<m=int64#2),<in15=int64#6d
3355# asm 2: xorl 60(<m=%rsi),<in15=%r9d
3356xorl 60(%rsi),%r9d
3357
3358# qhasm: *(uint32 *) (out + 48) = in12
3359# asm 1: movl <in12=int64#3d,48(<out=int64#1)
3360# asm 2: movl <in12=%edx,48(<out=%rdi)
3361movl %edx,48(%rdi)
3362
3363# qhasm: *(uint32 *) (out + 52) = in13
3364# asm 1: movl <in13=int64#4d,52(<out=int64#1)
3365# asm 2: movl <in13=%ecx,52(<out=%rdi)
3366movl %ecx,52(%rdi)
3367
3368# qhasm: *(uint32 *) (out + 56) = in14
3369# asm 1: movl <in14=int64#5d,56(<out=int64#1)
3370# asm 2: movl <in14=%r8d,56(<out=%rdi)
3371movl %r8d,56(%rdi)
3372
3373# qhasm: *(uint32 *) (out + 60) = in15
3374# asm 1: movl <in15=int64#6d,60(<out=int64#1)
3375# asm 2: movl <in15=%r9d,60(<out=%rdi)
3376movl %r9d,60(%rdi)
3377
3378# qhasm: in12 = z12
3379# asm 1: movd <z12=int6464#14,>in12=int64#3
3380# asm 2: movd <z12=%xmm13,>in12=%rdx
3381movd %xmm13,%rdx
3382
3383# qhasm: in13 = z13
3384# asm 1: movd <z13=int6464#10,>in13=int64#4
3385# asm 2: movd <z13=%xmm9,>in13=%rcx
3386movd %xmm9,%rcx
3387
3388# qhasm: in14 = z14
3389# asm 1: movd <z14=int6464#4,>in14=int64#5
3390# asm 2: movd <z14=%xmm3,>in14=%r8
3391movd %xmm3,%r8
3392
3393# qhasm: in15 = z15
3394# asm 1: movd <z15=int6464#3,>in15=int64#6
3395# asm 2: movd <z15=%xmm2,>in15=%r9
3396movd %xmm2,%r9
3397
3398# qhasm: z12 <<<= 96
3399# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3400# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3401pshufd $0x39,%xmm13,%xmm13
3402
3403# qhasm: z13 <<<= 96
3404# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3405# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3406pshufd $0x39,%xmm9,%xmm9
3407
3408# qhasm: z14 <<<= 96
3409# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3410# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3411pshufd $0x39,%xmm3,%xmm3
3412
3413# qhasm: z15 <<<= 96
3414# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3415# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3416pshufd $0x39,%xmm2,%xmm2
3417
3418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 112)
3419# asm 1: xorl 112(<m=int64#2),<in12=int64#3d
3420# asm 2: xorl 112(<m=%rsi),<in12=%edx
3421xorl 112(%rsi),%edx
3422
3423# qhasm: (uint32) in13 ^= *(uint32 *) (m + 116)
3424# asm 1: xorl 116(<m=int64#2),<in13=int64#4d
3425# asm 2: xorl 116(<m=%rsi),<in13=%ecx
3426xorl 116(%rsi),%ecx
3427
3428# qhasm: (uint32) in14 ^= *(uint32 *) (m + 120)
3429# asm 1: xorl 120(<m=int64#2),<in14=int64#5d
3430# asm 2: xorl 120(<m=%rsi),<in14=%r8d
3431xorl 120(%rsi),%r8d
3432
3433# qhasm: (uint32) in15 ^= *(uint32 *) (m + 124)
3434# asm 1: xorl 124(<m=int64#2),<in15=int64#6d
3435# asm 2: xorl 124(<m=%rsi),<in15=%r9d
3436xorl 124(%rsi),%r9d
3437
3438# qhasm: *(uint32 *) (out + 112) = in12
3439# asm 1: movl <in12=int64#3d,112(<out=int64#1)
3440# asm 2: movl <in12=%edx,112(<out=%rdi)
3441movl %edx,112(%rdi)
3442
3443# qhasm: *(uint32 *) (out + 116) = in13
3444# asm 1: movl <in13=int64#4d,116(<out=int64#1)
3445# asm 2: movl <in13=%ecx,116(<out=%rdi)
3446movl %ecx,116(%rdi)
3447
3448# qhasm: *(uint32 *) (out + 120) = in14
3449# asm 1: movl <in14=int64#5d,120(<out=int64#1)
3450# asm 2: movl <in14=%r8d,120(<out=%rdi)
3451movl %r8d,120(%rdi)
3452
3453# qhasm: *(uint32 *) (out + 124) = in15
3454# asm 1: movl <in15=int64#6d,124(<out=int64#1)
3455# asm 2: movl <in15=%r9d,124(<out=%rdi)
3456movl %r9d,124(%rdi)
3457
3458# qhasm: in12 = z12
3459# asm 1: movd <z12=int6464#14,>in12=int64#3
3460# asm 2: movd <z12=%xmm13,>in12=%rdx
3461movd %xmm13,%rdx
3462
3463# qhasm: in13 = z13
3464# asm 1: movd <z13=int6464#10,>in13=int64#4
3465# asm 2: movd <z13=%xmm9,>in13=%rcx
3466movd %xmm9,%rcx
3467
3468# qhasm: in14 = z14
3469# asm 1: movd <z14=int6464#4,>in14=int64#5
3470# asm 2: movd <z14=%xmm3,>in14=%r8
3471movd %xmm3,%r8
3472
3473# qhasm: in15 = z15
3474# asm 1: movd <z15=int6464#3,>in15=int64#6
3475# asm 2: movd <z15=%xmm2,>in15=%r9
3476movd %xmm2,%r9
3477
3478# qhasm: z12 <<<= 96
3479# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3480# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3481pshufd $0x39,%xmm13,%xmm13
3482
3483# qhasm: z13 <<<= 96
3484# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3485# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3486pshufd $0x39,%xmm9,%xmm9
3487
3488# qhasm: z14 <<<= 96
3489# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3490# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3491pshufd $0x39,%xmm3,%xmm3
3492
3493# qhasm: z15 <<<= 96
3494# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3495# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3496pshufd $0x39,%xmm2,%xmm2
3497
3498# qhasm: (uint32) in12 ^= *(uint32 *) (m + 176)
3499# asm 1: xorl 176(<m=int64#2),<in12=int64#3d
3500# asm 2: xorl 176(<m=%rsi),<in12=%edx
3501xorl 176(%rsi),%edx
3502
3503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 180)
3504# asm 1: xorl 180(<m=int64#2),<in13=int64#4d
3505# asm 2: xorl 180(<m=%rsi),<in13=%ecx
3506xorl 180(%rsi),%ecx
3507
3508# qhasm: (uint32) in14 ^= *(uint32 *) (m + 184)
3509# asm 1: xorl 184(<m=int64#2),<in14=int64#5d
3510# asm 2: xorl 184(<m=%rsi),<in14=%r8d
3511xorl 184(%rsi),%r8d
3512
3513# qhasm: (uint32) in15 ^= *(uint32 *) (m + 188)
3514# asm 1: xorl 188(<m=int64#2),<in15=int64#6d
3515# asm 2: xorl 188(<m=%rsi),<in15=%r9d
3516xorl 188(%rsi),%r9d
3517
3518# qhasm: *(uint32 *) (out + 176) = in12
3519# asm 1: movl <in12=int64#3d,176(<out=int64#1)
3520# asm 2: movl <in12=%edx,176(<out=%rdi)
3521movl %edx,176(%rdi)
3522
3523# qhasm: *(uint32 *) (out + 180) = in13
3524# asm 1: movl <in13=int64#4d,180(<out=int64#1)
3525# asm 2: movl <in13=%ecx,180(<out=%rdi)
3526movl %ecx,180(%rdi)
3527
3528# qhasm: *(uint32 *) (out + 184) = in14
3529# asm 1: movl <in14=int64#5d,184(<out=int64#1)
3530# asm 2: movl <in14=%r8d,184(<out=%rdi)
3531movl %r8d,184(%rdi)
3532
3533# qhasm: *(uint32 *) (out + 188) = in15
3534# asm 1: movl <in15=int64#6d,188(<out=int64#1)
3535# asm 2: movl <in15=%r9d,188(<out=%rdi)
3536movl %r9d,188(%rdi)
3537
3538# qhasm: in12 = z12
3539# asm 1: movd <z12=int6464#14,>in12=int64#3
3540# asm 2: movd <z12=%xmm13,>in12=%rdx
3541movd %xmm13,%rdx
3542
3543# qhasm: in13 = z13
3544# asm 1: movd <z13=int6464#10,>in13=int64#4
3545# asm 2: movd <z13=%xmm9,>in13=%rcx
3546movd %xmm9,%rcx
3547
3548# qhasm: in14 = z14
3549# asm 1: movd <z14=int6464#4,>in14=int64#5
3550# asm 2: movd <z14=%xmm3,>in14=%r8
3551movd %xmm3,%r8
3552
3553# qhasm: in15 = z15
3554# asm 1: movd <z15=int6464#3,>in15=int64#6
3555# asm 2: movd <z15=%xmm2,>in15=%r9
3556movd %xmm2,%r9
3557
3558# qhasm: (uint32) in12 ^= *(uint32 *) (m + 240)
3559# asm 1: xorl 240(<m=int64#2),<in12=int64#3d
3560# asm 2: xorl 240(<m=%rsi),<in12=%edx
3561xorl 240(%rsi),%edx
3562
3563# qhasm: (uint32) in13 ^= *(uint32 *) (m + 244)
3564# asm 1: xorl 244(<m=int64#2),<in13=int64#4d
3565# asm 2: xorl 244(<m=%rsi),<in13=%ecx
3566xorl 244(%rsi),%ecx
3567
3568# qhasm: (uint32) in14 ^= *(uint32 *) (m + 248)
3569# asm 1: xorl 248(<m=int64#2),<in14=int64#5d
3570# asm 2: xorl 248(<m=%rsi),<in14=%r8d
3571xorl 248(%rsi),%r8d
3572
3573# qhasm: (uint32) in15 ^= *(uint32 *) (m + 252)
3574# asm 1: xorl 252(<m=int64#2),<in15=int64#6d
3575# asm 2: xorl 252(<m=%rsi),<in15=%r9d
3576xorl 252(%rsi),%r9d
3577
3578# qhasm: *(uint32 *) (out + 240) = in12
3579# asm 1: movl <in12=int64#3d,240(<out=int64#1)
3580# asm 2: movl <in12=%edx,240(<out=%rdi)
3581movl %edx,240(%rdi)
3582
3583# qhasm: *(uint32 *) (out + 244) = in13
3584# asm 1: movl <in13=int64#4d,244(<out=int64#1)
3585# asm 2: movl <in13=%ecx,244(<out=%rdi)
3586movl %ecx,244(%rdi)
3587
3588# qhasm: *(uint32 *) (out + 248) = in14
3589# asm 1: movl <in14=int64#5d,248(<out=int64#1)
3590# asm 2: movl <in14=%r8d,248(<out=%rdi)
3591movl %r8d,248(%rdi)
3592
3593# qhasm: *(uint32 *) (out + 252) = in15
3594# asm 1: movl <in15=int64#6d,252(<out=int64#1)
3595# asm 2: movl <in15=%r9d,252(<out=%rdi)
3596movl %r9d,252(%rdi)
3597
3598# qhasm: bytes = bytes_backup
3599# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
3600# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
3601movq 408(%rsp),%r9
3602
3603# qhasm: bytes -= 256
3604# asm 1: sub $256,<bytes=int64#6
3605# asm 2: sub $256,<bytes=%r9
3606sub $256,%r9
3607
3608# qhasm: m += 256
3609# asm 1: add $256,<m=int64#2
3610# asm 2: add $256,<m=%rsi
3611add $256,%rsi
3612
3613# qhasm: out += 256
3614# asm 1: add $256,<out=int64#1
3615# asm 2: add $256,<out=%rdi
3616add $256,%rdi
3617
3618# qhasm: unsigned<? bytes - 256
3619# asm 1: cmp $256,<bytes=int64#6
3620# asm 2: cmp $256,<bytes=%r9
3621cmp $256,%r9
3622# comment:fp stack unchanged by jump
3623
3624# qhasm: goto bytesatleast256 if !unsigned<
3625jae ._bytesatleast256
3626
3627# qhasm: unsigned>? bytes - 0
3628# asm 1: cmp $0,<bytes=int64#6
3629# asm 2: cmp $0,<bytes=%r9
3630cmp $0,%r9
3631# comment:fp stack unchanged by jump
3632
3633# qhasm: goto done if !unsigned>
3634jbe ._done
3635# comment:fp stack unchanged by fallthrough
3636
3637# qhasm: bytesbetween1and255:
3638._bytesbetween1and255:
3639
3640# qhasm: unsigned<? bytes - 64
3641# asm 1: cmp $64,<bytes=int64#6
3642# asm 2: cmp $64,<bytes=%r9
3643cmp $64,%r9
3644# comment:fp stack unchanged by jump
3645
3646# qhasm: goto nocopy if !unsigned<
3647jae ._nocopy
3648
3649# qhasm: ctarget = out
3650# asm 1: mov <out=int64#1,>ctarget=int64#3
3651# asm 2: mov <out=%rdi,>ctarget=%rdx
3652mov %rdi,%rdx
3653
3654# qhasm: out = &tmp
3655# asm 1: leaq <tmp=stack512#1,>out=int64#1
3656# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3657leaq 416(%rsp),%rdi
3658
3659# qhasm: i = bytes
3660# asm 1: mov <bytes=int64#6,>i=int64#4
3661# asm 2: mov <bytes=%r9,>i=%rcx
3662mov %r9,%rcx
3663
3664# qhasm: while (i) { *out++ = *m++; --i }
3665rep movsb
3666
3667# qhasm: out = &tmp
3668# asm 1: leaq <tmp=stack512#1,>out=int64#1
3669# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3670leaq 416(%rsp),%rdi
3671
3672# qhasm: m = &tmp
3673# asm 1: leaq <tmp=stack512#1,>m=int64#2
3674# asm 2: leaq <tmp=416(%rsp),>m=%rsi
3675leaq 416(%rsp),%rsi
3676# comment:fp stack unchanged by fallthrough
3677
3678# qhasm: nocopy:
3679._nocopy:
3680
3681# qhasm: bytes_backup = bytes
3682# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
3683# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
3684movq %r9,408(%rsp)
3685
3686# qhasm: diag0 = x0
3687# asm 1: movdqa <x0=stack128#4,>diag0=int6464#1
3688# asm 2: movdqa <x0=48(%rsp),>diag0=%xmm0
3689movdqa 48(%rsp),%xmm0
3690
3691# qhasm: diag1 = x1
3692# asm 1: movdqa <x1=stack128#1,>diag1=int6464#2
3693# asm 2: movdqa <x1=0(%rsp),>diag1=%xmm1
3694movdqa 0(%rsp),%xmm1
3695
3696# qhasm: diag2 = x2
3697# asm 1: movdqa <x2=stack128#2,>diag2=int6464#3
3698# asm 2: movdqa <x2=16(%rsp),>diag2=%xmm2
3699movdqa 16(%rsp),%xmm2
3700
3701# qhasm: diag3 = x3
3702# asm 1: movdqa <x3=stack128#3,>diag3=int6464#4
3703# asm 2: movdqa <x3=32(%rsp),>diag3=%xmm3
3704movdqa 32(%rsp),%xmm3
3705
3706# qhasm: a0 = diag1
3707# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3708# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3709movdqa %xmm1,%xmm4
3710
3711# qhasm: i = 20
3712# asm 1: mov $20,>i=int64#4
3713# asm 2: mov $20,>i=%rcx
3714mov $20,%rcx
3715
3716# qhasm: mainloop2:
3717._mainloop2:
3718
3719# qhasm: uint32323232 a0 += diag0
3720# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
3721# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
3722paddd %xmm0,%xmm4
3723
3724# qhasm: a1 = diag0
3725# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
3726# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
3727movdqa %xmm0,%xmm5
3728
3729# qhasm: b0 = a0
3730# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
3731# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
3732movdqa %xmm4,%xmm6
3733
3734# qhasm: uint32323232 a0 <<= 7
3735# asm 1: pslld $7,<a0=int6464#5
3736# asm 2: pslld $7,<a0=%xmm4
3737pslld $7,%xmm4
3738
3739# qhasm: uint32323232 b0 >>= 25
3740# asm 1: psrld $25,<b0=int6464#7
3741# asm 2: psrld $25,<b0=%xmm6
3742psrld $25,%xmm6
3743
3744# qhasm: diag3 ^= a0
3745# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
3746# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
3747pxor %xmm4,%xmm3
3748
3749# qhasm: diag3 ^= b0
3750# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
3751# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
3752pxor %xmm6,%xmm3
3753
3754# qhasm: uint32323232 a1 += diag3
3755# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
3756# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
3757paddd %xmm3,%xmm5
3758
3759# qhasm: a2 = diag3
3760# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
3761# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
3762movdqa %xmm3,%xmm4
3763
3764# qhasm: b1 = a1
3765# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
3766# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
3767movdqa %xmm5,%xmm6
3768
3769# qhasm: uint32323232 a1 <<= 9
3770# asm 1: pslld $9,<a1=int6464#6
3771# asm 2: pslld $9,<a1=%xmm5
3772pslld $9,%xmm5
3773
3774# qhasm: uint32323232 b1 >>= 23
3775# asm 1: psrld $23,<b1=int6464#7
3776# asm 2: psrld $23,<b1=%xmm6
3777psrld $23,%xmm6
3778
3779# qhasm: diag2 ^= a1
3780# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
3781# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
3782pxor %xmm5,%xmm2
3783
3784# qhasm: diag3 <<<= 32
3785# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
3786# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
3787pshufd $0x93,%xmm3,%xmm3
3788
3789# qhasm: diag2 ^= b1
3790# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
3791# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
3792pxor %xmm6,%xmm2
3793
3794# qhasm: uint32323232 a2 += diag2
3795# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
3796# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
3797paddd %xmm2,%xmm4
3798
3799# qhasm: a3 = diag2
3800# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
3801# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
3802movdqa %xmm2,%xmm5
3803
3804# qhasm: b2 = a2
3805# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
3806# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
3807movdqa %xmm4,%xmm6
3808
3809# qhasm: uint32323232 a2 <<= 13
3810# asm 1: pslld $13,<a2=int6464#5
3811# asm 2: pslld $13,<a2=%xmm4
3812pslld $13,%xmm4
3813
3814# qhasm: uint32323232 b2 >>= 19
3815# asm 1: psrld $19,<b2=int6464#7
3816# asm 2: psrld $19,<b2=%xmm6
3817psrld $19,%xmm6
3818
3819# qhasm: diag1 ^= a2
3820# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
3821# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
3822pxor %xmm4,%xmm1
3823
3824# qhasm: diag2 <<<= 64
3825# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3826# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3827pshufd $0x4e,%xmm2,%xmm2
3828
3829# qhasm: diag1 ^= b2
3830# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
3831# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
3832pxor %xmm6,%xmm1
3833
3834# qhasm: uint32323232 a3 += diag1
3835# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
3836# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
3837paddd %xmm1,%xmm5
3838
3839# qhasm: a4 = diag3
3840# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
3841# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
3842movdqa %xmm3,%xmm4
3843
3844# qhasm: b3 = a3
3845# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
3846# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
3847movdqa %xmm5,%xmm6
3848
3849# qhasm: uint32323232 a3 <<= 18
3850# asm 1: pslld $18,<a3=int6464#6
3851# asm 2: pslld $18,<a3=%xmm5
3852pslld $18,%xmm5
3853
3854# qhasm: uint32323232 b3 >>= 14
3855# asm 1: psrld $14,<b3=int6464#7
3856# asm 2: psrld $14,<b3=%xmm6
3857psrld $14,%xmm6
3858
3859# qhasm: diag0 ^= a3
3860# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
3861# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
3862pxor %xmm5,%xmm0
3863
3864# qhasm: diag1 <<<= 96
3865# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
3866# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
3867pshufd $0x39,%xmm1,%xmm1
3868
3869# qhasm: diag0 ^= b3
3870# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
3871# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
3872pxor %xmm6,%xmm0
3873
3874# qhasm: uint32323232 a4 += diag0
3875# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
3876# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
3877paddd %xmm0,%xmm4
3878
3879# qhasm: a5 = diag0
3880# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
3881# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
3882movdqa %xmm0,%xmm5
3883
3884# qhasm: b4 = a4
3885# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
3886# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
3887movdqa %xmm4,%xmm6
3888
3889# qhasm: uint32323232 a4 <<= 7
3890# asm 1: pslld $7,<a4=int6464#5
3891# asm 2: pslld $7,<a4=%xmm4
3892pslld $7,%xmm4
3893
3894# qhasm: uint32323232 b4 >>= 25
3895# asm 1: psrld $25,<b4=int6464#7
3896# asm 2: psrld $25,<b4=%xmm6
3897psrld $25,%xmm6
3898
3899# qhasm: diag1 ^= a4
3900# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
3901# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
3902pxor %xmm4,%xmm1
3903
3904# qhasm: diag1 ^= b4
3905# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
3906# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
3907pxor %xmm6,%xmm1
3908
3909# qhasm: uint32323232 a5 += diag1
3910# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
3911# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
3912paddd %xmm1,%xmm5
3913
3914# qhasm: a6 = diag1
3915# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
3916# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
3917movdqa %xmm1,%xmm4
3918
3919# qhasm: b5 = a5
3920# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
3921# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
3922movdqa %xmm5,%xmm6
3923
3924# qhasm: uint32323232 a5 <<= 9
3925# asm 1: pslld $9,<a5=int6464#6
3926# asm 2: pslld $9,<a5=%xmm5
3927pslld $9,%xmm5
3928
3929# qhasm: uint32323232 b5 >>= 23
3930# asm 1: psrld $23,<b5=int6464#7
3931# asm 2: psrld $23,<b5=%xmm6
3932psrld $23,%xmm6
3933
3934# qhasm: diag2 ^= a5
3935# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
3936# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
3937pxor %xmm5,%xmm2
3938
3939# qhasm: diag1 <<<= 32
3940# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
3941# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
3942pshufd $0x93,%xmm1,%xmm1
3943
3944# qhasm: diag2 ^= b5
3945# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
3946# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
3947pxor %xmm6,%xmm2
3948
3949# qhasm: uint32323232 a6 += diag2
3950# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
3951# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
3952paddd %xmm2,%xmm4
3953
3954# qhasm: a7 = diag2
3955# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
3956# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
3957movdqa %xmm2,%xmm5
3958
3959# qhasm: b6 = a6
3960# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
3961# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
3962movdqa %xmm4,%xmm6
3963
3964# qhasm: uint32323232 a6 <<= 13
3965# asm 1: pslld $13,<a6=int6464#5
3966# asm 2: pslld $13,<a6=%xmm4
3967pslld $13,%xmm4
3968
3969# qhasm: uint32323232 b6 >>= 19
3970# asm 1: psrld $19,<b6=int6464#7
3971# asm 2: psrld $19,<b6=%xmm6
3972psrld $19,%xmm6
3973
3974# qhasm: diag3 ^= a6
3975# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
3976# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
3977pxor %xmm4,%xmm3
3978
3979# qhasm: diag2 <<<= 64
3980# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3981# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3982pshufd $0x4e,%xmm2,%xmm2
3983
3984# qhasm: diag3 ^= b6
3985# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
3986# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
3987pxor %xmm6,%xmm3
3988
3989# qhasm: uint32323232 a7 += diag3
3990# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
3991# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
3992paddd %xmm3,%xmm5
3993
3994# qhasm: a0 = diag1
3995# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3996# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3997movdqa %xmm1,%xmm4
3998
3999# qhasm: b7 = a7
4000# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4001# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4002movdqa %xmm5,%xmm6
4003
4004# qhasm: uint32323232 a7 <<= 18
4005# asm 1: pslld $18,<a7=int6464#6
4006# asm 2: pslld $18,<a7=%xmm5
4007pslld $18,%xmm5
4008
4009# qhasm: uint32323232 b7 >>= 14
4010# asm 1: psrld $14,<b7=int6464#7
4011# asm 2: psrld $14,<b7=%xmm6
4012psrld $14,%xmm6
4013
4014# qhasm: diag0 ^= a7
4015# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4016# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4017pxor %xmm5,%xmm0
4018
4019# qhasm: diag3 <<<= 96
4020# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4021# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4022pshufd $0x39,%xmm3,%xmm3
4023
4024# qhasm: diag0 ^= b7
4025# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4026# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4027pxor %xmm6,%xmm0
4028
4029# qhasm: uint32323232 a0 += diag0
4030# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4031# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4032paddd %xmm0,%xmm4
4033
4034# qhasm: a1 = diag0
4035# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4036# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4037movdqa %xmm0,%xmm5
4038
4039# qhasm: b0 = a0
4040# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4041# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4042movdqa %xmm4,%xmm6
4043
4044# qhasm: uint32323232 a0 <<= 7
4045# asm 1: pslld $7,<a0=int6464#5
4046# asm 2: pslld $7,<a0=%xmm4
4047pslld $7,%xmm4
4048
4049# qhasm: uint32323232 b0 >>= 25
4050# asm 1: psrld $25,<b0=int6464#7
4051# asm 2: psrld $25,<b0=%xmm6
4052psrld $25,%xmm6
4053
4054# qhasm: diag3 ^= a0
4055# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4056# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4057pxor %xmm4,%xmm3
4058
4059# qhasm: diag3 ^= b0
4060# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4061# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4062pxor %xmm6,%xmm3
4063
4064# qhasm: uint32323232 a1 += diag3
4065# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4066# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4067paddd %xmm3,%xmm5
4068
4069# qhasm: a2 = diag3
4070# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4071# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4072movdqa %xmm3,%xmm4
4073
4074# qhasm: b1 = a1
4075# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4076# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4077movdqa %xmm5,%xmm6
4078
4079# qhasm: uint32323232 a1 <<= 9
4080# asm 1: pslld $9,<a1=int6464#6
4081# asm 2: pslld $9,<a1=%xmm5
4082pslld $9,%xmm5
4083
4084# qhasm: uint32323232 b1 >>= 23
4085# asm 1: psrld $23,<b1=int6464#7
4086# asm 2: psrld $23,<b1=%xmm6
4087psrld $23,%xmm6
4088
4089# qhasm: diag2 ^= a1
4090# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4091# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4092pxor %xmm5,%xmm2
4093
4094# qhasm: diag3 <<<= 32
4095# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4096# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4097pshufd $0x93,%xmm3,%xmm3
4098
4099# qhasm: diag2 ^= b1
4100# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4101# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4102pxor %xmm6,%xmm2
4103
4104# qhasm: uint32323232 a2 += diag2
4105# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4106# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4107paddd %xmm2,%xmm4
4108
4109# qhasm: a3 = diag2
4110# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4111# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4112movdqa %xmm2,%xmm5
4113
4114# qhasm: b2 = a2
4115# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4116# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4117movdqa %xmm4,%xmm6
4118
4119# qhasm: uint32323232 a2 <<= 13
4120# asm 1: pslld $13,<a2=int6464#5
4121# asm 2: pslld $13,<a2=%xmm4
4122pslld $13,%xmm4
4123
4124# qhasm: uint32323232 b2 >>= 19
4125# asm 1: psrld $19,<b2=int6464#7
4126# asm 2: psrld $19,<b2=%xmm6
4127psrld $19,%xmm6
4128
4129# qhasm: diag1 ^= a2
4130# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4131# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4132pxor %xmm4,%xmm1
4133
4134# qhasm: diag2 <<<= 64
4135# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4136# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4137pshufd $0x4e,%xmm2,%xmm2
4138
4139# qhasm: diag1 ^= b2
4140# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4141# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4142pxor %xmm6,%xmm1
4143
4144# qhasm: uint32323232 a3 += diag1
4145# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4146# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4147paddd %xmm1,%xmm5
4148
4149# qhasm: a4 = diag3
4150# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4151# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4152movdqa %xmm3,%xmm4
4153
4154# qhasm: b3 = a3
4155# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4156# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4157movdqa %xmm5,%xmm6
4158
4159# qhasm: uint32323232 a3 <<= 18
4160# asm 1: pslld $18,<a3=int6464#6
4161# asm 2: pslld $18,<a3=%xmm5
4162pslld $18,%xmm5
4163
4164# qhasm: uint32323232 b3 >>= 14
4165# asm 1: psrld $14,<b3=int6464#7
4166# asm 2: psrld $14,<b3=%xmm6
4167psrld $14,%xmm6
4168
4169# qhasm: diag0 ^= a3
4170# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4171# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4172pxor %xmm5,%xmm0
4173
4174# qhasm: diag1 <<<= 96
4175# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4176# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4177pshufd $0x39,%xmm1,%xmm1
4178
4179# qhasm: diag0 ^= b3
4180# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4181# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4182pxor %xmm6,%xmm0
4183
4184# qhasm: uint32323232 a4 += diag0
4185# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4186# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4187paddd %xmm0,%xmm4
4188
4189# qhasm: a5 = diag0
4190# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4191# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4192movdqa %xmm0,%xmm5
4193
4194# qhasm: b4 = a4
4195# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4196# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4197movdqa %xmm4,%xmm6
4198
4199# qhasm: uint32323232 a4 <<= 7
4200# asm 1: pslld $7,<a4=int6464#5
4201# asm 2: pslld $7,<a4=%xmm4
4202pslld $7,%xmm4
4203
4204# qhasm: uint32323232 b4 >>= 25
4205# asm 1: psrld $25,<b4=int6464#7
4206# asm 2: psrld $25,<b4=%xmm6
4207psrld $25,%xmm6
4208
4209# qhasm: diag1 ^= a4
4210# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4211# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4212pxor %xmm4,%xmm1
4213
4214# qhasm: diag1 ^= b4
4215# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4216# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4217pxor %xmm6,%xmm1
4218
4219# qhasm: uint32323232 a5 += diag1
4220# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4221# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4222paddd %xmm1,%xmm5
4223
4224# qhasm: a6 = diag1
4225# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4226# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4227movdqa %xmm1,%xmm4
4228
4229# qhasm: b5 = a5
4230# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4231# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4232movdqa %xmm5,%xmm6
4233
4234# qhasm: uint32323232 a5 <<= 9
4235# asm 1: pslld $9,<a5=int6464#6
4236# asm 2: pslld $9,<a5=%xmm5
4237pslld $9,%xmm5
4238
4239# qhasm: uint32323232 b5 >>= 23
4240# asm 1: psrld $23,<b5=int6464#7
4241# asm 2: psrld $23,<b5=%xmm6
4242psrld $23,%xmm6
4243
4244# qhasm: diag2 ^= a5
4245# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4246# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4247pxor %xmm5,%xmm2
4248
4249# qhasm: diag1 <<<= 32
4250# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4251# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4252pshufd $0x93,%xmm1,%xmm1
4253
4254# qhasm: diag2 ^= b5
4255# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4256# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4257pxor %xmm6,%xmm2
4258
4259# qhasm: uint32323232 a6 += diag2
4260# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4261# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4262paddd %xmm2,%xmm4
4263
4264# qhasm: a7 = diag2
4265# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4266# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4267movdqa %xmm2,%xmm5
4268
4269# qhasm: b6 = a6
4270# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4271# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4272movdqa %xmm4,%xmm6
4273
4274# qhasm: uint32323232 a6 <<= 13
4275# asm 1: pslld $13,<a6=int6464#5
4276# asm 2: pslld $13,<a6=%xmm4
4277pslld $13,%xmm4
4278
4279# qhasm: uint32323232 b6 >>= 19
4280# asm 1: psrld $19,<b6=int6464#7
4281# asm 2: psrld $19,<b6=%xmm6
4282psrld $19,%xmm6
4283
4284# qhasm: diag3 ^= a6
4285# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4286# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4287pxor %xmm4,%xmm3
4288
4289# qhasm: diag2 <<<= 64
4290# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4291# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4292pshufd $0x4e,%xmm2,%xmm2
4293
4294# qhasm: diag3 ^= b6
4295# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4296# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4297pxor %xmm6,%xmm3
4298
4299# qhasm: unsigned>? i -= 4
4300# asm 1: sub $4,<i=int64#4
4301# asm 2: sub $4,<i=%rcx
4302sub $4,%rcx
4303
4304# qhasm: uint32323232 a7 += diag3
4305# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4306# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4307paddd %xmm3,%xmm5
4308
4309# qhasm: a0 = diag1
4310# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4311# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4312movdqa %xmm1,%xmm4
4313
4314# qhasm: b7 = a7
4315# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4316# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4317movdqa %xmm5,%xmm6
4318
4319# qhasm: uint32323232 a7 <<= 18
4320# asm 1: pslld $18,<a7=int6464#6
4321# asm 2: pslld $18,<a7=%xmm5
4322pslld $18,%xmm5
4323
4324# qhasm: b0 = 0
4325# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4326# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4327pxor %xmm7,%xmm7
4328
4329# qhasm: uint32323232 b7 >>= 14
4330# asm 1: psrld $14,<b7=int6464#7
4331# asm 2: psrld $14,<b7=%xmm6
4332psrld $14,%xmm6
4333
4334# qhasm: diag0 ^= a7
4335# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4336# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4337pxor %xmm5,%xmm0
4338
4339# qhasm: diag3 <<<= 96
4340# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4341# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4342pshufd $0x39,%xmm3,%xmm3
4343
4344# qhasm: diag0 ^= b7
4345# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4346# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4347pxor %xmm6,%xmm0
4348# comment:fp stack unchanged by jump
4349
4350# qhasm: goto mainloop2 if unsigned>
4351ja ._mainloop2
4352
4353# qhasm: uint32323232 diag0 += x0
4354# asm 1: paddd <x0=stack128#4,<diag0=int6464#1
4355# asm 2: paddd <x0=48(%rsp),<diag0=%xmm0
4356paddd 48(%rsp),%xmm0
4357
4358# qhasm: uint32323232 diag1 += x1
4359# asm 1: paddd <x1=stack128#1,<diag1=int6464#2
4360# asm 2: paddd <x1=0(%rsp),<diag1=%xmm1
4361paddd 0(%rsp),%xmm1
4362
4363# qhasm: uint32323232 diag2 += x2
4364# asm 1: paddd <x2=stack128#2,<diag2=int6464#3
4365# asm 2: paddd <x2=16(%rsp),<diag2=%xmm2
4366paddd 16(%rsp),%xmm2
4367
4368# qhasm: uint32323232 diag3 += x3
4369# asm 1: paddd <x3=stack128#3,<diag3=int6464#4
4370# asm 2: paddd <x3=32(%rsp),<diag3=%xmm3
4371paddd 32(%rsp),%xmm3
4372
4373# qhasm: in0 = diag0
4374# asm 1: movd <diag0=int6464#1,>in0=int64#4
4375# asm 2: movd <diag0=%xmm0,>in0=%rcx
4376movd %xmm0,%rcx
4377
4378# qhasm: in12 = diag1
4379# asm 1: movd <diag1=int6464#2,>in12=int64#5
4380# asm 2: movd <diag1=%xmm1,>in12=%r8
4381movd %xmm1,%r8
4382
4383# qhasm: in8 = diag2
4384# asm 1: movd <diag2=int6464#3,>in8=int64#6
4385# asm 2: movd <diag2=%xmm2,>in8=%r9
4386movd %xmm2,%r9
4387
4388# qhasm: in4 = diag3
4389# asm 1: movd <diag3=int6464#4,>in4=int64#7
4390# asm 2: movd <diag3=%xmm3,>in4=%rax
4391movd %xmm3,%rax
4392
4393# qhasm: diag0 <<<= 96
4394# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4395# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4396pshufd $0x39,%xmm0,%xmm0
4397
4398# qhasm: diag1 <<<= 96
4399# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4400# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4401pshufd $0x39,%xmm1,%xmm1
4402
4403# qhasm: diag2 <<<= 96
4404# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4405# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4406pshufd $0x39,%xmm2,%xmm2
4407
4408# qhasm: diag3 <<<= 96
4409# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4410# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4411pshufd $0x39,%xmm3,%xmm3
4412
4413# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
4414# asm 1: xorl 0(<m=int64#2),<in0=int64#4d
4415# asm 2: xorl 0(<m=%rsi),<in0=%ecx
4416xorl 0(%rsi),%ecx
4417
4418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
4419# asm 1: xorl 48(<m=int64#2),<in12=int64#5d
4420# asm 2: xorl 48(<m=%rsi),<in12=%r8d
4421xorl 48(%rsi),%r8d
4422
4423# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
4424# asm 1: xorl 32(<m=int64#2),<in8=int64#6d
4425# asm 2: xorl 32(<m=%rsi),<in8=%r9d
4426xorl 32(%rsi),%r9d
4427
4428# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
4429# asm 1: xorl 16(<m=int64#2),<in4=int64#7d
4430# asm 2: xorl 16(<m=%rsi),<in4=%eax
4431xorl 16(%rsi),%eax
4432
4433# qhasm: *(uint32 *) (out + 0) = in0
4434# asm 1: movl <in0=int64#4d,0(<out=int64#1)
4435# asm 2: movl <in0=%ecx,0(<out=%rdi)
4436movl %ecx,0(%rdi)
4437
4438# qhasm: *(uint32 *) (out + 48) = in12
4439# asm 1: movl <in12=int64#5d,48(<out=int64#1)
4440# asm 2: movl <in12=%r8d,48(<out=%rdi)
4441movl %r8d,48(%rdi)
4442
4443# qhasm: *(uint32 *) (out + 32) = in8
4444# asm 1: movl <in8=int64#6d,32(<out=int64#1)
4445# asm 2: movl <in8=%r9d,32(<out=%rdi)
4446movl %r9d,32(%rdi)
4447
4448# qhasm: *(uint32 *) (out + 16) = in4
4449# asm 1: movl <in4=int64#7d,16(<out=int64#1)
4450# asm 2: movl <in4=%eax,16(<out=%rdi)
4451movl %eax,16(%rdi)
4452
4453# qhasm: in5 = diag0
4454# asm 1: movd <diag0=int6464#1,>in5=int64#4
4455# asm 2: movd <diag0=%xmm0,>in5=%rcx
4456movd %xmm0,%rcx
4457
4458# qhasm: in1 = diag1
4459# asm 1: movd <diag1=int6464#2,>in1=int64#5
4460# asm 2: movd <diag1=%xmm1,>in1=%r8
4461movd %xmm1,%r8
4462
4463# qhasm: in13 = diag2
4464# asm 1: movd <diag2=int6464#3,>in13=int64#6
4465# asm 2: movd <diag2=%xmm2,>in13=%r9
4466movd %xmm2,%r9
4467
4468# qhasm: in9 = diag3
4469# asm 1: movd <diag3=int6464#4,>in9=int64#7
4470# asm 2: movd <diag3=%xmm3,>in9=%rax
4471movd %xmm3,%rax
4472
4473# qhasm: diag0 <<<= 96
4474# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4475# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4476pshufd $0x39,%xmm0,%xmm0
4477
4478# qhasm: diag1 <<<= 96
4479# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4480# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4481pshufd $0x39,%xmm1,%xmm1
4482
4483# qhasm: diag2 <<<= 96
4484# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4485# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4486pshufd $0x39,%xmm2,%xmm2
4487
4488# qhasm: diag3 <<<= 96
4489# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4490# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4491pshufd $0x39,%xmm3,%xmm3
4492
4493# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
4494# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
4495# asm 2: xorl 20(<m=%rsi),<in5=%ecx
4496xorl 20(%rsi),%ecx
4497
4498# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
4499# asm 1: xorl 4(<m=int64#2),<in1=int64#5d
4500# asm 2: xorl 4(<m=%rsi),<in1=%r8d
4501xorl 4(%rsi),%r8d
4502
4503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
4504# asm 1: xorl 52(<m=int64#2),<in13=int64#6d
4505# asm 2: xorl 52(<m=%rsi),<in13=%r9d
4506xorl 52(%rsi),%r9d
4507
4508# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
4509# asm 1: xorl 36(<m=int64#2),<in9=int64#7d
4510# asm 2: xorl 36(<m=%rsi),<in9=%eax
4511xorl 36(%rsi),%eax
4512
4513# qhasm: *(uint32 *) (out + 20) = in5
4514# asm 1: movl <in5=int64#4d,20(<out=int64#1)
4515# asm 2: movl <in5=%ecx,20(<out=%rdi)
4516movl %ecx,20(%rdi)
4517
4518# qhasm: *(uint32 *) (out + 4) = in1
4519# asm 1: movl <in1=int64#5d,4(<out=int64#1)
4520# asm 2: movl <in1=%r8d,4(<out=%rdi)
4521movl %r8d,4(%rdi)
4522
4523# qhasm: *(uint32 *) (out + 52) = in13
4524# asm 1: movl <in13=int64#6d,52(<out=int64#1)
4525# asm 2: movl <in13=%r9d,52(<out=%rdi)
4526movl %r9d,52(%rdi)
4527
4528# qhasm: *(uint32 *) (out + 36) = in9
4529# asm 1: movl <in9=int64#7d,36(<out=int64#1)
4530# asm 2: movl <in9=%eax,36(<out=%rdi)
4531movl %eax,36(%rdi)
4532
4533# qhasm: in10 = diag0
4534# asm 1: movd <diag0=int6464#1,>in10=int64#4
4535# asm 2: movd <diag0=%xmm0,>in10=%rcx
4536movd %xmm0,%rcx
4537
4538# qhasm: in6 = diag1
4539# asm 1: movd <diag1=int6464#2,>in6=int64#5
4540# asm 2: movd <diag1=%xmm1,>in6=%r8
4541movd %xmm1,%r8
4542
4543# qhasm: in2 = diag2
4544# asm 1: movd <diag2=int6464#3,>in2=int64#6
4545# asm 2: movd <diag2=%xmm2,>in2=%r9
4546movd %xmm2,%r9
4547
4548# qhasm: in14 = diag3
4549# asm 1: movd <diag3=int6464#4,>in14=int64#7
4550# asm 2: movd <diag3=%xmm3,>in14=%rax
4551movd %xmm3,%rax
4552
4553# qhasm: diag0 <<<= 96
4554# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4555# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4556pshufd $0x39,%xmm0,%xmm0
4557
4558# qhasm: diag1 <<<= 96
4559# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4560# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4561pshufd $0x39,%xmm1,%xmm1
4562
4563# qhasm: diag2 <<<= 96
4564# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4565# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4566pshufd $0x39,%xmm2,%xmm2
4567
4568# qhasm: diag3 <<<= 96
4569# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4570# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4571pshufd $0x39,%xmm3,%xmm3
4572
4573# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
4574# asm 1: xorl 40(<m=int64#2),<in10=int64#4d
4575# asm 2: xorl 40(<m=%rsi),<in10=%ecx
4576xorl 40(%rsi),%ecx
4577
4578# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
4579# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
4580# asm 2: xorl 24(<m=%rsi),<in6=%r8d
4581xorl 24(%rsi),%r8d
4582
4583# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
4584# asm 1: xorl 8(<m=int64#2),<in2=int64#6d
4585# asm 2: xorl 8(<m=%rsi),<in2=%r9d
4586xorl 8(%rsi),%r9d
4587
4588# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
4589# asm 1: xorl 56(<m=int64#2),<in14=int64#7d
4590# asm 2: xorl 56(<m=%rsi),<in14=%eax
4591xorl 56(%rsi),%eax
4592
4593# qhasm: *(uint32 *) (out + 40) = in10
4594# asm 1: movl <in10=int64#4d,40(<out=int64#1)
4595# asm 2: movl <in10=%ecx,40(<out=%rdi)
4596movl %ecx,40(%rdi)
4597
4598# qhasm: *(uint32 *) (out + 24) = in6
4599# asm 1: movl <in6=int64#5d,24(<out=int64#1)
4600# asm 2: movl <in6=%r8d,24(<out=%rdi)
4601movl %r8d,24(%rdi)
4602
4603# qhasm: *(uint32 *) (out + 8) = in2
4604# asm 1: movl <in2=int64#6d,8(<out=int64#1)
4605# asm 2: movl <in2=%r9d,8(<out=%rdi)
4606movl %r9d,8(%rdi)
4607
4608# qhasm: *(uint32 *) (out + 56) = in14
4609# asm 1: movl <in14=int64#7d,56(<out=int64#1)
4610# asm 2: movl <in14=%eax,56(<out=%rdi)
4611movl %eax,56(%rdi)
4612
4613# qhasm: in15 = diag0
4614# asm 1: movd <diag0=int6464#1,>in15=int64#4
4615# asm 2: movd <diag0=%xmm0,>in15=%rcx
4616movd %xmm0,%rcx
4617
4618# qhasm: in11 = diag1
4619# asm 1: movd <diag1=int6464#2,>in11=int64#5
4620# asm 2: movd <diag1=%xmm1,>in11=%r8
4621movd %xmm1,%r8
4622
4623# qhasm: in7 = diag2
4624# asm 1: movd <diag2=int6464#3,>in7=int64#6
4625# asm 2: movd <diag2=%xmm2,>in7=%r9
4626movd %xmm2,%r9
4627
4628# qhasm: in3 = diag3
4629# asm 1: movd <diag3=int6464#4,>in3=int64#7
4630# asm 2: movd <diag3=%xmm3,>in3=%rax
4631movd %xmm3,%rax
4632
4633# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
4634# asm 1: xorl 60(<m=int64#2),<in15=int64#4d
4635# asm 2: xorl 60(<m=%rsi),<in15=%ecx
4636xorl 60(%rsi),%ecx
4637
4638# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
4639# asm 1: xorl 44(<m=int64#2),<in11=int64#5d
4640# asm 2: xorl 44(<m=%rsi),<in11=%r8d
4641xorl 44(%rsi),%r8d
4642
4643# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
4644# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
4645# asm 2: xorl 28(<m=%rsi),<in7=%r9d
4646xorl 28(%rsi),%r9d
4647
4648# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
4649# asm 1: xorl 12(<m=int64#2),<in3=int64#7d
4650# asm 2: xorl 12(<m=%rsi),<in3=%eax
4651xorl 12(%rsi),%eax
4652
4653# qhasm: *(uint32 *) (out + 60) = in15
4654# asm 1: movl <in15=int64#4d,60(<out=int64#1)
4655# asm 2: movl <in15=%ecx,60(<out=%rdi)
4656movl %ecx,60(%rdi)
4657
4658# qhasm: *(uint32 *) (out + 44) = in11
4659# asm 1: movl <in11=int64#5d,44(<out=int64#1)
4660# asm 2: movl <in11=%r8d,44(<out=%rdi)
4661movl %r8d,44(%rdi)
4662
4663# qhasm: *(uint32 *) (out + 28) = in7
4664# asm 1: movl <in7=int64#6d,28(<out=int64#1)
4665# asm 2: movl <in7=%r9d,28(<out=%rdi)
4666movl %r9d,28(%rdi)
4667
4668# qhasm: *(uint32 *) (out + 12) = in3
4669# asm 1: movl <in3=int64#7d,12(<out=int64#1)
4670# asm 2: movl <in3=%eax,12(<out=%rdi)
4671movl %eax,12(%rdi)
4672
4673# qhasm: bytes = bytes_backup
4674# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
4675# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
4676movq 408(%rsp),%r9
4677
4678# qhasm: in8 = ((uint32 *)&x2)[0]
4679# asm 1: movl <x2=stack128#2,>in8=int64#4d
4680# asm 2: movl <x2=16(%rsp),>in8=%ecx
4681movl 16(%rsp),%ecx
4682
4683# qhasm: in9 = ((uint32 *)&x3)[1]
4684# asm 1: movl 4+<x3=stack128#3,>in9=int64#5d
4685# asm 2: movl 4+<x3=32(%rsp),>in9=%r8d
4686movl 4+32(%rsp),%r8d
4687
4688# qhasm: in8 += 1
4689# asm 1: add $1,<in8=int64#4
4690# asm 2: add $1,<in8=%rcx
4691add $1,%rcx
4692
4693# qhasm: in9 <<= 32
4694# asm 1: shl $32,<in9=int64#5
4695# asm 2: shl $32,<in9=%r8
4696shl $32,%r8
4697
4698# qhasm: in8 += in9
4699# asm 1: add <in9=int64#5,<in8=int64#4
4700# asm 2: add <in9=%r8,<in8=%rcx
4701add %r8,%rcx
4702
4703# qhasm: in9 = in8
4704# asm 1: mov <in8=int64#4,>in9=int64#5
4705# asm 2: mov <in8=%rcx,>in9=%r8
4706mov %rcx,%r8
4707
4708# qhasm: (uint64) in9 >>= 32
4709# asm 1: shr $32,<in9=int64#5
4710# asm 2: shr $32,<in9=%r8
4711shr $32,%r8
4712
4713# qhasm: ((uint32 *)&x2)[0] = in8
4714# asm 1: movl <in8=int64#4d,>x2=stack128#2
4715# asm 2: movl <in8=%ecx,>x2=16(%rsp)
4716movl %ecx,16(%rsp)
4717
4718# qhasm: ((uint32 *)&x3)[1] = in9
4719# asm 1: movl <in9=int64#5d,4+<x3=stack128#3
4720# asm 2: movl <in9=%r8d,4+<x3=32(%rsp)
4721movl %r8d,4+32(%rsp)
4722
4723# qhasm: unsigned>? unsigned<? bytes - 64
4724# asm 1: cmp $64,<bytes=int64#6
4725# asm 2: cmp $64,<bytes=%r9
4726cmp $64,%r9
4727# comment:fp stack unchanged by jump
4728
4729# qhasm: goto bytesatleast65 if unsigned>
4730ja ._bytesatleast65
4731# comment:fp stack unchanged by jump
4732
4733# qhasm: goto bytesatleast64 if !unsigned<
4734jae ._bytesatleast64
4735
4736# qhasm: m = out
4737# asm 1: mov <out=int64#1,>m=int64#2
4738# asm 2: mov <out=%rdi,>m=%rsi
4739mov %rdi,%rsi
4740
4741# qhasm: out = ctarget
4742# asm 1: mov <ctarget=int64#3,>out=int64#1
4743# asm 2: mov <ctarget=%rdx,>out=%rdi
4744mov %rdx,%rdi
4745
4746# qhasm: i = bytes
4747# asm 1: mov <bytes=int64#6,>i=int64#4
4748# asm 2: mov <bytes=%r9,>i=%rcx
4749mov %r9,%rcx
4750
4751# qhasm: while (i) { *out++ = *m++; --i }
4752rep movsb
4753# comment:fp stack unchanged by fallthrough
4754
4755# qhasm: bytesatleast64:
4756._bytesatleast64:
4757# comment:fp stack unchanged by fallthrough
4758
4759# qhasm: done:
4760._done:
4761
4762# qhasm: r11_caller = r11_stack
4763# asm 1: movq <r11_stack=stack64#1,>r11_caller=int64#9
4764# asm 2: movq <r11_stack=352(%rsp),>r11_caller=%r11
4765movq 352(%rsp),%r11
4766
4767# qhasm: r12_caller = r12_stack
4768# asm 1: movq <r12_stack=stack64#2,>r12_caller=int64#10
4769# asm 2: movq <r12_stack=360(%rsp),>r12_caller=%r12
4770movq 360(%rsp),%r12
4771
4772# qhasm: r13_caller = r13_stack
4773# asm 1: movq <r13_stack=stack64#3,>r13_caller=int64#11
4774# asm 2: movq <r13_stack=368(%rsp),>r13_caller=%r13
4775movq 368(%rsp),%r13
4776
4777# qhasm: r14_caller = r14_stack
4778# asm 1: movq <r14_stack=stack64#4,>r14_caller=int64#12
4779# asm 2: movq <r14_stack=376(%rsp),>r14_caller=%r14
4780movq 376(%rsp),%r14
4781
4782# qhasm: r15_caller = r15_stack
4783# asm 1: movq <r15_stack=stack64#5,>r15_caller=int64#13
4784# asm 2: movq <r15_stack=384(%rsp),>r15_caller=%r15
4785movq 384(%rsp),%r15
4786
4787# qhasm: rbx_caller = rbx_stack
4788# asm 1: movq <rbx_stack=stack64#6,>rbx_caller=int64#14
4789# asm 2: movq <rbx_stack=392(%rsp),>rbx_caller=%rbx
4790movq 392(%rsp),%rbx
4791
4792# qhasm: rbp_caller = rbp_stack
4793# asm 1: movq <rbp_stack=stack64#7,>rbp_caller=int64#15
4794# asm 2: movq <rbp_stack=400(%rsp),>rbp_caller=%rbp
4795movq 400(%rsp),%rbp
4796
4797# qhasm: leave
4798add %r11,%rsp
4799xor %rax,%rax
4800xor %rdx,%rdx
4801ret
4802
4803# qhasm: bytesatleast65:
4804._bytesatleast65:
4805
4806# qhasm: bytes -= 64
4807# asm 1: sub $64,<bytes=int64#6
4808# asm 2: sub $64,<bytes=%r9
4809sub $64,%r9
4810
4811# qhasm: out += 64
4812# asm 1: add $64,<out=int64#1
4813# asm 2: add $64,<out=%rdi
4814add $64,%rdi
4815
4816# qhasm: m += 64
4817# asm 1: add $64,<m=int64#2
4818# asm 2: add $64,<m=%rsi
4819add $64,%rsi
4820# comment:fp stack unchanged by jump
4821
4822# qhasm: goto bytesbetween1and255
4823jmp ._bytesbetween1and255
diff --git a/nacl/crypto_stream/salsa20/checksum b/nacl/crypto_stream/salsa20/checksum
new file mode 100644
index 00000000..78ff05f4
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/checksum
@@ -0,0 +1 @@
44a3966eabcd3a2b13faca2150e38f2b7e6bac187d626618f50a9f875158ae78
diff --git a/nacl/crypto_stream/salsa20/ref/api.h b/nacl/crypto_stream/salsa20/ref/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/ref/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa20/ref/implementors b/nacl/crypto_stream/salsa20/ref/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/ref/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa20/ref/stream.c b/nacl/crypto_stream/salsa20/ref/stream.c
new file mode 100644
index 00000000..2f0262eb
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/ref/stream.c
@@ -0,0 +1,49 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa20.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream(
15 unsigned char *c,unsigned long long clen,
16 const unsigned char *n,
17 const unsigned char *k
18)
19{
20 unsigned char in[16];
21 unsigned char block[64];
22 int i;
23 unsigned int u;
24
25 if (!clen) return 0;
26
27 for (i = 0;i < 8;++i) in[i] = n[i];
28 for (i = 8;i < 16;++i) in[i] = 0;
29
30 while (clen >= 64) {
31 crypto_core_salsa20(c,in,k,sigma);
32
33 u = 1;
34 for (i = 8;i < 16;++i) {
35 u += (unsigned int) in[i];
36 in[i] = u;
37 u >>= 8;
38 }
39
40 clen -= 64;
41 c += 64;
42 }
43
44 if (clen) {
45 crypto_core_salsa20(block,in,k,sigma);
46 for (i = 0;i < clen;++i) c[i] = block[i];
47 }
48 return 0;
49}
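
A minimal standalone sketch of the counter handling in stream.c above: in[8..15] acts as a 64-bit little-endian block counter, and the "u = 1; ... u >>= 8" loop adds one with byte-wise carry propagation. The helper below is illustrative only, under that reading, and is not part of the NaCl sources.

#include <stdio.h>

/* Add 1 to an 8-byte little-endian counter, propagating the carry
   byte by byte -- the same idiom as the loop over in[8..15] above. */
static void increment_le64(unsigned char ctr[8])
{
  unsigned int u = 1;
  int i;
  for (i = 0;i < 8;++i) {
    u += ctr[i];               /* current byte plus incoming carry */
    ctr[i] = (unsigned char) u;
    u >>= 8;                   /* carry into the next byte */
  }
}

int main(void)
{
  unsigned char ctr[8] = {0xff,0xff,0,0,0,0,0,0};
  increment_le64(ctr);         /* 0x00ffff + 1 = 0x010000, i.e. {0x00,0x00,0x01,...} */
  printf("%02x %02x %02x\n",ctr[0],ctr[1],ctr[2]);
  return 0;
}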
diff --git a/nacl/crypto_stream/salsa20/ref/xor.c b/nacl/crypto_stream/salsa20/ref/xor.c
new file mode 100644
index 00000000..11c7e9f0
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/ref/xor.c
@@ -0,0 +1,52 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa20.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream_xor(
15 unsigned char *c,
16 const unsigned char *m,unsigned long long mlen,
17 const unsigned char *n,
18 const unsigned char *k
19)
20{
21 unsigned char in[16];
22 unsigned char block[64];
23 int i;
24 unsigned int u;
25
26 if (!mlen) return 0;
27
28 for (i = 0;i < 8;++i) in[i] = n[i];
29 for (i = 8;i < 16;++i) in[i] = 0;
30
31 while (mlen >= 64) {
32 crypto_core_salsa20(block,in,k,sigma);
33 for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i];
34
35 u = 1;
36 for (i = 8;i < 16;++i) {
37 u += (unsigned int) in[i];
38 in[i] = u;
39 u >>= 8;
40 }
41
42 mlen -= 64;
43 c += 64;
44 m += 64;
45 }
46
47 if (mlen) {
48 crypto_core_salsa20(block,in,k,sigma);
49 for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i];
50 }
51 return 0;
52}
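
A minimal usage sketch, assuming a NaCl build in which crypto_stream.h maps crypto_stream_xor to the salsa20 implementation above (32-byte key, 8-byte nonce, per api.h); encrypt_in_place is an illustrative wrapper, not part of the tree.

#include "crypto_stream.h"

/* XOR a buffer with the salsa20 keystream in place.  Because the ref
   implementation reads m[i] before writing c[i], passing the same pointer
   for c and m is safe, and applying the same key/nonce twice restores
   the original data. */
int encrypt_in_place(unsigned char *buf, unsigned long long len,
                     const unsigned char nonce[8],
                     const unsigned char key[32])
{
  return crypto_stream_xor(buf, buf, len, nonce, key);
}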
diff --git a/nacl/crypto_stream/salsa20/used b/nacl/crypto_stream/salsa20/used
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/used
diff --git a/nacl/crypto_stream/salsa20/x86_xmm5/api.h b/nacl/crypto_stream/salsa20/x86_xmm5/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/x86_xmm5/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa20/x86_xmm5/implementors b/nacl/crypto_stream/salsa20/x86_xmm5/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/x86_xmm5/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa20/x86_xmm5/stream.s b/nacl/crypto_stream/salsa20/x86_xmm5/stream.s
new file mode 100644
index 00000000..9e32ea43
--- /dev/null
+++ b/nacl/crypto_stream/salsa20/x86_xmm5/stream.s
@@ -0,0 +1,5078 @@
1
2# qhasm: int32 a
3
4# qhasm: stack32 arg1
5
6# qhasm: stack32 arg2
7
8# qhasm: stack32 arg3
9
10# qhasm: stack32 arg4
11
12# qhasm: stack32 arg5
13
14# qhasm: stack32 arg6
15
16# qhasm: input arg1
17
18# qhasm: input arg2
19
20# qhasm: input arg3
21
22# qhasm: input arg4
23
24# qhasm: input arg5
25
26# qhasm: input arg6
27
28# qhasm: int32 eax
29
30# qhasm: int32 ebx
31
32# qhasm: int32 esi
33
34# qhasm: int32 edi
35
36# qhasm: int32 ebp
37
38# qhasm: caller eax
39
40# qhasm: caller ebx
41
42# qhasm: caller esi
43
44# qhasm: caller edi
45
46# qhasm: caller ebp
47
48# qhasm: int32 k
49
50# qhasm: int32 kbits
51
52# qhasm: int32 iv
53
54# qhasm: int32 i
55
56# qhasm: stack128 x0
57
58# qhasm: stack128 x1
59
60# qhasm: stack128 x2
61
62# qhasm: stack128 x3
63
64# qhasm: int32 m
65
66# qhasm: stack32 out_stack
67
68# qhasm: int32 out
69
70# qhasm: stack32 bytes_stack
71
72# qhasm: int32 bytes
73
74# qhasm: stack32 eax_stack
75
76# qhasm: stack32 ebx_stack
77
78# qhasm: stack32 esi_stack
79
80# qhasm: stack32 edi_stack
81
82# qhasm: stack32 ebp_stack
83
84# qhasm: int6464 diag0
85
86# qhasm: int6464 diag1
87
88# qhasm: int6464 diag2
89
90# qhasm: int6464 diag3
91
92# qhasm: int6464 a0
93
94# qhasm: int6464 a1
95
96# qhasm: int6464 a2
97
98# qhasm: int6464 a3
99
100# qhasm: int6464 a4
101
102# qhasm: int6464 a5
103
104# qhasm: int6464 a6
105
106# qhasm: int6464 a7
107
108# qhasm: int6464 b0
109
110# qhasm: int6464 b1
111
112# qhasm: int6464 b2
113
114# qhasm: int6464 b3
115
116# qhasm: int6464 b4
117
118# qhasm: int6464 b5
119
120# qhasm: int6464 b6
121
122# qhasm: int6464 b7
123
124# qhasm: int6464 z0
125
126# qhasm: int6464 z1
127
128# qhasm: int6464 z2
129
130# qhasm: int6464 z3
131
132# qhasm: int6464 z4
133
134# qhasm: int6464 z5
135
136# qhasm: int6464 z6
137
138# qhasm: int6464 z7
139
140# qhasm: int6464 z8
141
142# qhasm: int6464 z9
143
144# qhasm: int6464 z10
145
146# qhasm: int6464 z11
147
148# qhasm: int6464 z12
149
150# qhasm: int6464 z13
151
152# qhasm: int6464 z14
153
154# qhasm: int6464 z15
155
156# qhasm: stack128 z0_stack
157
158# qhasm: stack128 z1_stack
159
160# qhasm: stack128 z2_stack
161
162# qhasm: stack128 z3_stack
163
164# qhasm: stack128 z4_stack
165
166# qhasm: stack128 z5_stack
167
168# qhasm: stack128 z6_stack
169
170# qhasm: stack128 z7_stack
171
172# qhasm: stack128 z8_stack
173
174# qhasm: stack128 z9_stack
175
176# qhasm: stack128 z10_stack
177
178# qhasm: stack128 z11_stack
179
180# qhasm: stack128 z12_stack
181
182# qhasm: stack128 z13_stack
183
184# qhasm: stack128 z14_stack
185
186# qhasm: stack128 z15_stack
187
188# qhasm: stack128 orig0
189
190# qhasm: stack128 orig1
191
192# qhasm: stack128 orig2
193
194# qhasm: stack128 orig3
195
196# qhasm: stack128 orig4
197
198# qhasm: stack128 orig5
199
200# qhasm: stack128 orig6
201
202# qhasm: stack128 orig7
203
204# qhasm: stack128 orig8
205
206# qhasm: stack128 orig9
207
208# qhasm: stack128 orig10
209
210# qhasm: stack128 orig11
211
212# qhasm: stack128 orig12
213
214# qhasm: stack128 orig13
215
216# qhasm: stack128 orig14
217
218# qhasm: stack128 orig15
219
220# qhasm: int6464 p
221
222# qhasm: int6464 q
223
224# qhasm: int6464 r
225
226# qhasm: int6464 s
227
228# qhasm: int6464 t
229
230# qhasm: int6464 u
231
232# qhasm: int6464 v
233
234# qhasm: int6464 w
235
236# qhasm: int6464 mp
237
238# qhasm: int6464 mq
239
240# qhasm: int6464 mr
241
242# qhasm: int6464 ms
243
244# qhasm: int6464 mt
245
246# qhasm: int6464 mu
247
248# qhasm: int6464 mv
249
250# qhasm: int6464 mw
251
252# qhasm: int32 in0
253
254# qhasm: int32 in1
255
256# qhasm: int32 in2
257
258# qhasm: int32 in3
259
260# qhasm: int32 in4
261
262# qhasm: int32 in5
263
264# qhasm: int32 in6
265
266# qhasm: int32 in7
267
268# qhasm: int32 in8
269
270# qhasm: int32 in9
271
272# qhasm: int32 in10
273
274# qhasm: int32 in11
275
276# qhasm: int32 in12
277
278# qhasm: int32 in13
279
280# qhasm: int32 in14
281
282# qhasm: int32 in15
283
284# qhasm: stack512 tmp
285
286# qhasm: stack32 ctarget
287
288# qhasm: enter crypto_stream_salsa20_x86_xmm5
289.text
290.p2align 5
291.globl _crypto_stream_salsa20_x86_xmm5
292.globl crypto_stream_salsa20_x86_xmm5
293_crypto_stream_salsa20_x86_xmm5:
294crypto_stream_salsa20_x86_xmm5:
295mov %esp,%eax
296and $31,%eax
297add $704,%eax
298sub %eax,%esp
299
300# qhasm: eax_stack = eax
301# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
302# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
303movl %eax,0(%esp)
304
305# qhasm: ebx_stack = ebx
306# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
307# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
308movl %ebx,4(%esp)
309
310# qhasm: esi_stack = esi
311# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
312# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
313movl %esi,8(%esp)
314
315# qhasm: edi_stack = edi
316# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
317# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
318movl %edi,12(%esp)
319
320# qhasm: ebp_stack = ebp
321# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
322# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
323movl %ebp,16(%esp)
324
325# qhasm: bytes = arg2
326# asm 1: movl <arg2=stack32#-2,>bytes=int32#3
327# asm 2: movl <arg2=8(%esp,%eax),>bytes=%edx
328movl 8(%esp,%eax),%edx
329
330# qhasm: out = arg1
331# asm 1: movl <arg1=stack32#-1,>out=int32#6
332# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
333movl 4(%esp,%eax),%edi
334
335# qhasm: m = out
336# asm 1: mov <out=int32#6,>m=int32#5
337# asm 2: mov <out=%edi,>m=%esi
338mov %edi,%esi
339
340# qhasm: iv = arg4
341# asm 1: movl <arg4=stack32#-4,>iv=int32#4
342# asm 2: movl <arg4=16(%esp,%eax),>iv=%ebx
343movl 16(%esp,%eax),%ebx
344
345# qhasm: k = arg5
346# asm 1: movl <arg5=stack32#-5,>k=int32#7
347# asm 2: movl <arg5=20(%esp,%eax),>k=%ebp
348movl 20(%esp,%eax),%ebp
349
350# qhasm: unsigned>? bytes - 0
351# asm 1: cmp $0,<bytes=int32#3
352# asm 2: cmp $0,<bytes=%edx
353cmp $0,%edx
354# comment:fp stack unchanged by jump
355
356# qhasm: goto done if !unsigned>
357jbe ._done
358
359# qhasm: a = 0
360# asm 1: mov $0,>a=int32#1
361# asm 2: mov $0,>a=%eax
362mov $0,%eax
363
364# qhasm: i = bytes
365# asm 1: mov <bytes=int32#3,>i=int32#2
366# asm 2: mov <bytes=%edx,>i=%ecx
367mov %edx,%ecx
368
369# qhasm: while (i) { *out++ = a; --i }
370rep stosb
371
372# qhasm: out -= bytes
373# asm 1: subl <bytes=int32#3,<out=int32#6
374# asm 2: subl <bytes=%edx,<out=%edi
375subl %edx,%edi
376# comment:fp stack unchanged by jump
377
378# qhasm: goto start
379jmp ._start
380
381# qhasm: enter crypto_stream_salsa20_x86_xmm5_xor
382.text
383.p2align 5
384.globl _crypto_stream_salsa20_x86_xmm5_xor
385.globl crypto_stream_salsa20_x86_xmm5_xor
386_crypto_stream_salsa20_x86_xmm5_xor:
387crypto_stream_salsa20_x86_xmm5_xor:
388mov %esp,%eax
389and $31,%eax
390add $704,%eax
391sub %eax,%esp
392
393# qhasm: eax_stack = eax
394# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
395# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
396movl %eax,0(%esp)
397
398# qhasm: ebx_stack = ebx
399# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
400# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
401movl %ebx,4(%esp)
402
403# qhasm: esi_stack = esi
404# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
405# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
406movl %esi,8(%esp)
407
408# qhasm: edi_stack = edi
409# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
410# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
411movl %edi,12(%esp)
412
413# qhasm: ebp_stack = ebp
414# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
415# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
416movl %ebp,16(%esp)
417
418# qhasm: out = arg1
419# asm 1: movl <arg1=stack32#-1,>out=int32#6
420# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
421movl 4(%esp,%eax),%edi
422
423# qhasm: m = arg2
424# asm 1: movl <arg2=stack32#-2,>m=int32#5
425# asm 2: movl <arg2=8(%esp,%eax),>m=%esi
426movl 8(%esp,%eax),%esi
427
428# qhasm: bytes = arg3
429# asm 1: movl <arg3=stack32#-3,>bytes=int32#3
430# asm 2: movl <arg3=12(%esp,%eax),>bytes=%edx
431movl 12(%esp,%eax),%edx
432
433# qhasm: iv = arg5
434# asm 1: movl <arg5=stack32#-5,>iv=int32#4
435# asm 2: movl <arg5=20(%esp,%eax),>iv=%ebx
436movl 20(%esp,%eax),%ebx
437
438# qhasm: k = arg6
439# asm 1: movl <arg6=stack32#-6,>k=int32#7
440# asm 2: movl <arg6=24(%esp,%eax),>k=%ebp
441movl 24(%esp,%eax),%ebp
442
443# qhasm: unsigned>? bytes - 0
444# asm 1: cmp $0,<bytes=int32#3
445# asm 2: cmp $0,<bytes=%edx
446cmp $0,%edx
447# comment:fp stack unchanged by jump
448
449# qhasm: goto done if !unsigned>
450jbe ._done
451# comment:fp stack unchanged by fallthrough
452
453# qhasm: start:
454._start:
455
456# qhasm: out_stack = out
457# asm 1: movl <out=int32#6,>out_stack=stack32#6
458# asm 2: movl <out=%edi,>out_stack=20(%esp)
459movl %edi,20(%esp)
460
461# qhasm: bytes_stack = bytes
462# asm 1: movl <bytes=int32#3,>bytes_stack=stack32#7
463# asm 2: movl <bytes=%edx,>bytes_stack=24(%esp)
464movl %edx,24(%esp)
465
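# note: x0..x3 below gather the 16 words of the Salsa20 input matrix
# along its diagonals: x0 = words 0,5,10,15 (the constants),
# x1 = 12,1,6,11, x2 = 8,13,2,7, x3 = 4,9,14,3.  Words 8 and 9 are the
# 64-bit block counter.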
466# qhasm: in4 = *(uint32 *) (k + 12)
467# asm 1: movl 12(<k=int32#7),>in4=int32#1
468# asm 2: movl 12(<k=%ebp),>in4=%eax
469movl 12(%ebp),%eax
470
471# qhasm: in12 = *(uint32 *) (k + 20)
472# asm 1: movl 20(<k=int32#7),>in12=int32#2
473# asm 2: movl 20(<k=%ebp),>in12=%ecx
474movl 20(%ebp),%ecx
475
476# qhasm: ((uint32 *)&x3)[0] = in4
477# asm 1: movl <in4=int32#1,>x3=stack128#1
478# asm 2: movl <in4=%eax,>x3=32(%esp)
479movl %eax,32(%esp)
480
481# qhasm: ((uint32 *)&x1)[0] = in12
482# asm 1: movl <in12=int32#2,>x1=stack128#2
483# asm 2: movl <in12=%ecx,>x1=48(%esp)
484movl %ecx,48(%esp)
485
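# note: the immediates loaded below are the Salsa20 "expand 32-byte k"
# constants: 1634760805 = 0x61707865 "expa", 857760878 = 0x3320646e "nd 3",
# 2036477234 = 0x79622d32 "2-by", 1797285236 = 0x6b206574 "te k".
# in8 = in9 = 0 starts the 64-bit block counter at zero.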
486# qhasm: in0 = 1634760805
487# asm 1: mov $1634760805,>in0=int32#1
488# asm 2: mov $1634760805,>in0=%eax
489mov $1634760805,%eax
490
491# qhasm: in8 = 0
492# asm 1: mov $0,>in8=int32#2
493# asm 2: mov $0,>in8=%ecx
494mov $0,%ecx
495
496# qhasm: ((uint32 *)&x0)[0] = in0
497# asm 1: movl <in0=int32#1,>x0=stack128#3
498# asm 2: movl <in0=%eax,>x0=64(%esp)
499movl %eax,64(%esp)
500
501# qhasm: ((uint32 *)&x2)[0] = in8
502# asm 1: movl <in8=int32#2,>x2=stack128#4
503# asm 2: movl <in8=%ecx,>x2=80(%esp)
504movl %ecx,80(%esp)
505
506# qhasm: in6 = *(uint32 *) (iv + 0)
507# asm 1: movl 0(<iv=int32#4),>in6=int32#1
508# asm 2: movl 0(<iv=%ebx),>in6=%eax
509movl 0(%ebx),%eax
510
511# qhasm: in7 = *(uint32 *) (iv + 4)
512# asm 1: movl 4(<iv=int32#4),>in7=int32#2
513# asm 2: movl 4(<iv=%ebx),>in7=%ecx
514movl 4(%ebx),%ecx
515
516# qhasm: ((uint32 *)&x1)[2] = in6
517# asm 1: movl <in6=int32#1,8+<x1=stack128#2
518# asm 2: movl <in6=%eax,8+<x1=48(%esp)
519movl %eax,8+48(%esp)
520
521# qhasm: ((uint32 *)&x2)[3] = in7
522# asm 1: movl <in7=int32#2,12+<x2=stack128#4
523# asm 2: movl <in7=%ecx,12+<x2=80(%esp)
524movl %ecx,12+80(%esp)
525
526# qhasm: in9 = 0
527# asm 1: mov $0,>in9=int32#1
528# asm 2: mov $0,>in9=%eax
529mov $0,%eax
530
531# qhasm: in10 = 2036477234
532# asm 1: mov $2036477234,>in10=int32#2
533# asm 2: mov $2036477234,>in10=%ecx
534mov $2036477234,%ecx
535
536# qhasm: ((uint32 *)&x3)[1] = in9
537# asm 1: movl <in9=int32#1,4+<x3=stack128#1
538# asm 2: movl <in9=%eax,4+<x3=32(%esp)
539movl %eax,4+32(%esp)
540
541# qhasm: ((uint32 *)&x0)[2] = in10
542# asm 1: movl <in10=int32#2,8+<x0=stack128#3
543# asm 2: movl <in10=%ecx,8+<x0=64(%esp)
544movl %ecx,8+64(%esp)
545
546# qhasm: in1 = *(uint32 *) (k + 0)
547# asm 1: movl 0(<k=int32#7),>in1=int32#1
548# asm 2: movl 0(<k=%ebp),>in1=%eax
549movl 0(%ebp),%eax
550
551# qhasm: in2 = *(uint32 *) (k + 4)
552# asm 1: movl 4(<k=int32#7),>in2=int32#2
553# asm 2: movl 4(<k=%ebp),>in2=%ecx
554movl 4(%ebp),%ecx
555
556# qhasm: in3 = *(uint32 *) (k + 8)
557# asm 1: movl 8(<k=int32#7),>in3=int32#3
558# asm 2: movl 8(<k=%ebp),>in3=%edx
559movl 8(%ebp),%edx
560
561# qhasm: in5 = 857760878
562# asm 1: mov $857760878,>in5=int32#4
563# asm 2: mov $857760878,>in5=%ebx
564mov $857760878,%ebx
565
566# qhasm: ((uint32 *)&x1)[1] = in1
567# asm 1: movl <in1=int32#1,4+<x1=stack128#2
568# asm 2: movl <in1=%eax,4+<x1=48(%esp)
569movl %eax,4+48(%esp)
570
571# qhasm: ((uint32 *)&x2)[2] = in2
572# asm 1: movl <in2=int32#2,8+<x2=stack128#4
573# asm 2: movl <in2=%ecx,8+<x2=80(%esp)
574movl %ecx,8+80(%esp)
575
576# qhasm: ((uint32 *)&x3)[3] = in3
577# asm 1: movl <in3=int32#3,12+<x3=stack128#1
578# asm 2: movl <in3=%edx,12+<x3=32(%esp)
579movl %edx,12+32(%esp)
580
581# qhasm: ((uint32 *)&x0)[1] = in5
582# asm 1: movl <in5=int32#4,4+<x0=stack128#3
583# asm 2: movl <in5=%ebx,4+<x0=64(%esp)
584movl %ebx,4+64(%esp)
585
586# qhasm: in11 = *(uint32 *) (k + 16)
587# asm 1: movl 16(<k=int32#7),>in11=int32#1
588# asm 2: movl 16(<k=%ebp),>in11=%eax
589movl 16(%ebp),%eax
590
591# qhasm: in13 = *(uint32 *) (k + 24)
592# asm 1: movl 24(<k=int32#7),>in13=int32#2
593# asm 2: movl 24(<k=%ebp),>in13=%ecx
594movl 24(%ebp),%ecx
595
596# qhasm: in14 = *(uint32 *) (k + 28)
597# asm 1: movl 28(<k=int32#7),>in14=int32#3
598# asm 2: movl 28(<k=%ebp),>in14=%edx
599movl 28(%ebp),%edx
600
601# qhasm: in15 = 1797285236
602# asm 1: mov $1797285236,>in15=int32#4
603# asm 2: mov $1797285236,>in15=%ebx
604mov $1797285236,%ebx
605
606# qhasm: ((uint32 *)&x1)[3] = in11
607# asm 1: movl <in11=int32#1,12+<x1=stack128#2
608# asm 2: movl <in11=%eax,12+<x1=48(%esp)
609movl %eax,12+48(%esp)
610
611# qhasm: ((uint32 *)&x2)[1] = in13
612# asm 1: movl <in13=int32#2,4+<x2=stack128#4
613# asm 2: movl <in13=%ecx,4+<x2=80(%esp)
614movl %ecx,4+80(%esp)
615
616# qhasm: ((uint32 *)&x3)[2] = in14
617# asm 1: movl <in14=int32#3,8+<x3=stack128#1
618# asm 2: movl <in14=%edx,8+<x3=32(%esp)
619movl %edx,8+32(%esp)
620
621# qhasm: ((uint32 *)&x0)[3] = in15
622# asm 1: movl <in15=int32#4,12+<x0=stack128#3
623# asm 2: movl <in15=%ebx,12+<x0=64(%esp)
624movl %ebx,12+64(%esp)
625
626# qhasm: bytes = bytes_stack
627# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
628# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
629movl 24(%esp),%eax
630
631# qhasm: unsigned<? bytes - 256
632# asm 1: cmp $256,<bytes=int32#1
633# asm 2: cmp $256,<bytes=%eax
634cmp $256,%eax
635# comment:fp stack unchanged by jump
636
637# qhasm: goto bytesbetween1and255 if unsigned<
638jb ._bytesbetween1and255
639
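# note: start of the 256-bytes-per-iteration path.  Each state word is
# broadcast into its own xmm register with pshufd (orig0..orig15), so
# four Salsa20 blocks are computed in parallel, one block per 32-bit
# lane.  The per-block counter lanes orig8/orig9 are filled in below.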
640# qhasm: z0 = x0
641# asm 1: movdqa <x0=stack128#3,>z0=int6464#1
642# asm 2: movdqa <x0=64(%esp),>z0=%xmm0
643movdqa 64(%esp),%xmm0
644
645# qhasm: z5 = z0[1,1,1,1]
646# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
647# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
648pshufd $0x55,%xmm0,%xmm1
649
650# qhasm: z10 = z0[2,2,2,2]
651# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
652# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
653pshufd $0xaa,%xmm0,%xmm2
654
655# qhasm: z15 = z0[3,3,3,3]
656# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
657# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
658pshufd $0xff,%xmm0,%xmm3
659
660# qhasm: z0 = z0[0,0,0,0]
661# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
662# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
663pshufd $0x00,%xmm0,%xmm0
664
665# qhasm: orig5 = z5
666# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
667# asm 2: movdqa <z5=%xmm1,>orig5=96(%esp)
668movdqa %xmm1,96(%esp)
669
670# qhasm: orig10 = z10
671# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
672# asm 2: movdqa <z10=%xmm2,>orig10=112(%esp)
673movdqa %xmm2,112(%esp)
674
675# qhasm: orig15 = z15
676# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
677# asm 2: movdqa <z15=%xmm3,>orig15=128(%esp)
678movdqa %xmm3,128(%esp)
679
680# qhasm: orig0 = z0
681# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
682# asm 2: movdqa <z0=%xmm0,>orig0=144(%esp)
683movdqa %xmm0,144(%esp)
684
685# qhasm: z1 = x1
686# asm 1: movdqa <x1=stack128#2,>z1=int6464#1
687# asm 2: movdqa <x1=48(%esp),>z1=%xmm0
688movdqa 48(%esp),%xmm0
689
690# qhasm: z6 = z1[2,2,2,2]
691# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
692# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
693pshufd $0xaa,%xmm0,%xmm1
694
695# qhasm: z11 = z1[3,3,3,3]
696# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
697# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
698pshufd $0xff,%xmm0,%xmm2
699
700# qhasm: z12 = z1[0,0,0,0]
701# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
702# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
703pshufd $0x00,%xmm0,%xmm3
704
705# qhasm: z1 = z1[1,1,1,1]
706# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
707# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
708pshufd $0x55,%xmm0,%xmm0
709
710# qhasm: orig6 = z6
711# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
712# asm 2: movdqa <z6=%xmm1,>orig6=160(%esp)
713movdqa %xmm1,160(%esp)
714
715# qhasm: orig11 = z11
716# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
717# asm 2: movdqa <z11=%xmm2,>orig11=176(%esp)
718movdqa %xmm2,176(%esp)
719
720# qhasm: orig12 = z12
721# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
722# asm 2: movdqa <z12=%xmm3,>orig12=192(%esp)
723movdqa %xmm3,192(%esp)
724
725# qhasm: orig1 = z1
726# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
727# asm 2: movdqa <z1=%xmm0,>orig1=208(%esp)
728movdqa %xmm0,208(%esp)
729
730# qhasm: z2 = x2
731# asm 1: movdqa <x2=stack128#4,>z2=int6464#1
732# asm 2: movdqa <x2=80(%esp),>z2=%xmm0
733movdqa 80(%esp),%xmm0
734
735# qhasm: z7 = z2[3,3,3,3]
736# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
737# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
738pshufd $0xff,%xmm0,%xmm1
739
740# qhasm: z13 = z2[1,1,1,1]
741# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
742# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
743pshufd $0x55,%xmm0,%xmm2
744
745# qhasm: z2 = z2[2,2,2,2]
746# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
747# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
748pshufd $0xaa,%xmm0,%xmm0
749
750# qhasm: orig7 = z7
751# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
752# asm 2: movdqa <z7=%xmm1,>orig7=224(%esp)
753movdqa %xmm1,224(%esp)
754
755# qhasm: orig13 = z13
756# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
757# asm 2: movdqa <z13=%xmm2,>orig13=240(%esp)
758movdqa %xmm2,240(%esp)
759
760# qhasm: orig2 = z2
761# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
762# asm 2: movdqa <z2=%xmm0,>orig2=256(%esp)
763movdqa %xmm0,256(%esp)
764
765# qhasm: z3 = x3
766# asm 1: movdqa <x3=stack128#1,>z3=int6464#1
767# asm 2: movdqa <x3=32(%esp),>z3=%xmm0
768movdqa 32(%esp),%xmm0
769
770# qhasm: z4 = z3[0,0,0,0]
771# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
772# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
773pshufd $0x00,%xmm0,%xmm1
774
775# qhasm: z14 = z3[2,2,2,2]
776# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
777# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
778pshufd $0xaa,%xmm0,%xmm2
779
780# qhasm: z3 = z3[3,3,3,3]
781# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
782# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
783pshufd $0xff,%xmm0,%xmm0
784
785# qhasm: orig4 = z4
786# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
787# asm 2: movdqa <z4=%xmm1,>orig4=272(%esp)
788movdqa %xmm1,272(%esp)
789
790# qhasm: orig14 = z14
791# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
792# asm 2: movdqa <z14=%xmm2,>orig14=288(%esp)
793movdqa %xmm2,288(%esp)
794
795# qhasm: orig3 = z3
796# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
797# asm 2: movdqa <z3=%xmm0,>orig3=304(%esp)
798movdqa %xmm0,304(%esp)
799
800# qhasm: bytesatleast256:
801._bytesatleast256:
802
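# note: words 8 and 9 hold the 64-bit block counter.  The add/adc pairs
# below store counters n, n+1, n+2, n+3 into the four lanes of
# orig8/orig9 for the four parallel blocks, then write n+4 back into
# x2/x3 for the next 256-byte batch.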
803# qhasm: in8 = ((uint32 *)&x2)[0]
804# asm 1: movl <x2=stack128#4,>in8=int32#2
805# asm 2: movl <x2=80(%esp),>in8=%ecx
806movl 80(%esp),%ecx
807
808# qhasm: in9 = ((uint32 *)&x3)[1]
809# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
810# asm 2: movl 4+<x3=32(%esp),>in9=%edx
811movl 4+32(%esp),%edx
812
813# qhasm: ((uint32 *) &orig8)[0] = in8
814# asm 1: movl <in8=int32#2,>orig8=stack128#19
815# asm 2: movl <in8=%ecx,>orig8=320(%esp)
816movl %ecx,320(%esp)
817
818# qhasm: ((uint32 *) &orig9)[0] = in9
819# asm 1: movl <in9=int32#3,>orig9=stack128#20
820# asm 2: movl <in9=%edx,>orig9=336(%esp)
821movl %edx,336(%esp)
822
823# qhasm: carry? in8 += 1
824# asm 1: add $1,<in8=int32#2
825# asm 2: add $1,<in8=%ecx
826add $1,%ecx
827
828# qhasm: in9 += 0 + carry
829# asm 1: adc $0,<in9=int32#3
830# asm 2: adc $0,<in9=%edx
831adc $0,%edx
832
833# qhasm: ((uint32 *) &orig8)[1] = in8
834# asm 1: movl <in8=int32#2,4+<orig8=stack128#19
835# asm 2: movl <in8=%ecx,4+<orig8=320(%esp)
836movl %ecx,4+320(%esp)
837
838# qhasm: ((uint32 *) &orig9)[1] = in9
839# asm 1: movl <in9=int32#3,4+<orig9=stack128#20
840# asm 2: movl <in9=%edx,4+<orig9=336(%esp)
841movl %edx,4+336(%esp)
842
843# qhasm: carry? in8 += 1
844# asm 1: add $1,<in8=int32#2
845# asm 2: add $1,<in8=%ecx
846add $1,%ecx
847
848# qhasm: in9 += 0 + carry
849# asm 1: adc $0,<in9=int32#3
850# asm 2: adc $0,<in9=%edx
851adc $0,%edx
852
853# qhasm: ((uint32 *) &orig8)[2] = in8
854# asm 1: movl <in8=int32#2,8+<orig8=stack128#19
855# asm 2: movl <in8=%ecx,8+<orig8=320(%esp)
856movl %ecx,8+320(%esp)
857
858# qhasm: ((uint32 *) &orig9)[2] = in9
859# asm 1: movl <in9=int32#3,8+<orig9=stack128#20
860# asm 2: movl <in9=%edx,8+<orig9=336(%esp)
861movl %edx,8+336(%esp)
862
863# qhasm: carry? in8 += 1
864# asm 1: add $1,<in8=int32#2
865# asm 2: add $1,<in8=%ecx
866add $1,%ecx
867
868# qhasm: in9 += 0 + carry
869# asm 1: adc $0,<in9=int32#3
870# asm 2: adc $0,<in9=%edx
871adc $0,%edx
872
873# qhasm: ((uint32 *) &orig8)[3] = in8
874# asm 1: movl <in8=int32#2,12+<orig8=stack128#19
875# asm 2: movl <in8=%ecx,12+<orig8=320(%esp)
876movl %ecx,12+320(%esp)
877
878# qhasm: ((uint32 *) &orig9)[3] = in9
879# asm 1: movl <in9=int32#3,12+<orig9=stack128#20
880# asm 2: movl <in9=%edx,12+<orig9=336(%esp)
881movl %edx,12+336(%esp)
882
883# qhasm: carry? in8 += 1
884# asm 1: add $1,<in8=int32#2
885# asm 2: add $1,<in8=%ecx
886add $1,%ecx
887
888# qhasm: in9 += 0 + carry
889# asm 1: adc $0,<in9=int32#3
890# asm 2: adc $0,<in9=%edx
891adc $0,%edx
892
893# qhasm: ((uint32 *)&x2)[0] = in8
894# asm 1: movl <in8=int32#2,>x2=stack128#4
895# asm 2: movl <in8=%ecx,>x2=80(%esp)
896movl %ecx,80(%esp)
897
898# qhasm: ((uint32 *)&x3)[1] = in9
899# asm 1: movl <in9=int32#3,4+<x3=stack128#1
900# asm 2: movl <in9=%edx,4+<x3=32(%esp)
901movl %edx,4+32(%esp)
902
903# qhasm: bytes_stack = bytes
904# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
905# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
906movl %eax,24(%esp)
907
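# note: i = 20 is the Salsa20 round count; each pass of ._mainloop1
# performs one double-round (two rounds) and subtracts 2 from i.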
908# qhasm: i = 20
909# asm 1: mov $20,>i=int32#1
910# asm 2: mov $20,>i=%eax
911mov $20,%eax
912
913# qhasm: z5 = orig5
914# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
915# asm 2: movdqa <orig5=96(%esp),>z5=%xmm0
916movdqa 96(%esp),%xmm0
917
918# qhasm: z10 = orig10
919# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
920# asm 2: movdqa <orig10=112(%esp),>z10=%xmm1
921movdqa 112(%esp),%xmm1
922
923# qhasm: z15 = orig15
924# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
925# asm 2: movdqa <orig15=128(%esp),>z15=%xmm2
926movdqa 128(%esp),%xmm2
927
928# qhasm: z14 = orig14
929# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
930# asm 2: movdqa <orig14=288(%esp),>z14=%xmm3
931movdqa 288(%esp),%xmm3
932
933# qhasm: z3 = orig3
934# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
935# asm 2: movdqa <orig3=304(%esp),>z3=%xmm4
936movdqa 304(%esp),%xmm4
937
938# qhasm: z6 = orig6
939# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
940# asm 2: movdqa <orig6=160(%esp),>z6=%xmm5
941movdqa 160(%esp),%xmm5
942
943# qhasm: z11 = orig11
944# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
945# asm 2: movdqa <orig11=176(%esp),>z11=%xmm6
946movdqa 176(%esp),%xmm6
947
948# qhasm: z1 = orig1
949# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
950# asm 2: movdqa <orig1=208(%esp),>z1=%xmm7
951movdqa 208(%esp),%xmm7
952
953# qhasm: z5_stack = z5
954# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#21
955# asm 2: movdqa <z5=%xmm0,>z5_stack=352(%esp)
956movdqa %xmm0,352(%esp)
957
958# qhasm: z10_stack = z10
959# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#22
960# asm 2: movdqa <z10=%xmm1,>z10_stack=368(%esp)
961movdqa %xmm1,368(%esp)
962
963# qhasm: z15_stack = z15
964# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#23
965# asm 2: movdqa <z15=%xmm2,>z15_stack=384(%esp)
966movdqa %xmm2,384(%esp)
967
968# qhasm: z14_stack = z14
969# asm 1: movdqa <z14=int6464#4,>z14_stack=stack128#24
970# asm 2: movdqa <z14=%xmm3,>z14_stack=400(%esp)
971movdqa %xmm3,400(%esp)
972
973# qhasm: z3_stack = z3
974# asm 1: movdqa <z3=int6464#5,>z3_stack=stack128#25
975# asm 2: movdqa <z3=%xmm4,>z3_stack=416(%esp)
976movdqa %xmm4,416(%esp)
977
978# qhasm: z6_stack = z6
979# asm 1: movdqa <z6=int6464#6,>z6_stack=stack128#26
980# asm 2: movdqa <z6=%xmm5,>z6_stack=432(%esp)
981movdqa %xmm5,432(%esp)
982
983# qhasm: z11_stack = z11
984# asm 1: movdqa <z11=int6464#7,>z11_stack=stack128#27
985# asm 2: movdqa <z11=%xmm6,>z11_stack=448(%esp)
986movdqa %xmm6,448(%esp)
987
988# qhasm: z1_stack = z1
989# asm 1: movdqa <z1=int6464#8,>z1_stack=stack128#28
990# asm 2: movdqa <z1=%xmm7,>z1_stack=464(%esp)
991movdqa %xmm7,464(%esp)
992
993# qhasm: z7 = orig7
994# asm 1: movdqa <orig7=stack128#13,>z7=int6464#5
995# asm 2: movdqa <orig7=224(%esp),>z7=%xmm4
996movdqa 224(%esp),%xmm4
997
998# qhasm: z13 = orig13
999# asm 1: movdqa <orig13=stack128#14,>z13=int6464#6
1000# asm 2: movdqa <orig13=240(%esp),>z13=%xmm5
1001movdqa 240(%esp),%xmm5
1002
1003# qhasm: z2 = orig2
1004# asm 1: movdqa <orig2=stack128#15,>z2=int6464#7
1005# asm 2: movdqa <orig2=256(%esp),>z2=%xmm6
1006movdqa 256(%esp),%xmm6
1007
1008# qhasm: z9 = orig9
1009# asm 1: movdqa <orig9=stack128#20,>z9=int6464#8
1010# asm 2: movdqa <orig9=336(%esp),>z9=%xmm7
1011movdqa 336(%esp),%xmm7
1012
1013# qhasm: p = orig0
1014# asm 1: movdqa <orig0=stack128#8,>p=int6464#1
1015# asm 2: movdqa <orig0=144(%esp),>p=%xmm0
1016movdqa 144(%esp),%xmm0
1017
1018# qhasm: t = orig12
1019# asm 1: movdqa <orig12=stack128#11,>t=int6464#3
1020# asm 2: movdqa <orig12=192(%esp),>t=%xmm2
1021movdqa 192(%esp),%xmm2
1022
1023# qhasm: q = orig4
1024# asm 1: movdqa <orig4=stack128#16,>q=int6464#4
1025# asm 2: movdqa <orig4=272(%esp),>q=%xmm3
1026movdqa 272(%esp),%xmm3
1027
1028# qhasm: r = orig8
1029# asm 1: movdqa <orig8=stack128#19,>r=int6464#2
1030# asm 2: movdqa <orig8=320(%esp),>r=%xmm1
1031movdqa 320(%esp),%xmm1
1032
1033# qhasm: z7_stack = z7
1034# asm 1: movdqa <z7=int6464#5,>z7_stack=stack128#29
1035# asm 2: movdqa <z7=%xmm4,>z7_stack=480(%esp)
1036movdqa %xmm4,480(%esp)
1037
1038# qhasm: z13_stack = z13
1039# asm 1: movdqa <z13=int6464#6,>z13_stack=stack128#30
1040# asm 2: movdqa <z13=%xmm5,>z13_stack=496(%esp)
1041movdqa %xmm5,496(%esp)
1042
1043# qhasm: z2_stack = z2
1044# asm 1: movdqa <z2=int6464#7,>z2_stack=stack128#31
1045# asm 2: movdqa <z2=%xmm6,>z2_stack=512(%esp)
1046movdqa %xmm6,512(%esp)
1047
1048# qhasm: z9_stack = z9
1049# asm 1: movdqa <z9=int6464#8,>z9_stack=stack128#32
1050# asm 2: movdqa <z9=%xmm7,>z9_stack=528(%esp)
1051movdqa %xmm7,528(%esp)
1052
1053# qhasm: z0_stack = p
1054# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1055# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1056movdqa %xmm0,544(%esp)
1057
1058# qhasm: z12_stack = t
1059# asm 1: movdqa <t=int6464#3,>z12_stack=stack128#34
1060# asm 2: movdqa <t=%xmm2,>z12_stack=560(%esp)
1061movdqa %xmm2,560(%esp)
1062
1063# qhasm: z4_stack = q
1064# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#35
1065# asm 2: movdqa <q=%xmm3,>z4_stack=576(%esp)
1066movdqa %xmm3,576(%esp)
1067
1068# qhasm: z8_stack = r
1069# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#36
1070# asm 2: movdqa <r=%xmm1,>z8_stack=592(%esp)
1071movdqa %xmm1,592(%esp)
1072
1073# qhasm: mainloop1:
1074._mainloop1:
1075
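# note: each paddd / psrld / pslld / pxor group below is one
# quarter-round step, e.g. q ^= (t + p) <<< 7.  SSE2 has no packed
# rotate, so each rotation by 7, 9, 13 or 18 is built from a left shift,
# a right shift by 32 minus the amount, and two xors.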
1076# qhasm: assign xmm0 to p
1077
1078# qhasm: assign xmm1 to r
1079
1080# qhasm: assign xmm2 to t
1081
1082# qhasm: assign xmm3 to q
1083
1084# qhasm: s = t
1085# asm 1: movdqa <t=int6464#3,>s=int6464#7
1086# asm 2: movdqa <t=%xmm2,>s=%xmm6
1087movdqa %xmm2,%xmm6
1088
1089# qhasm: uint32323232 t += p
1090# asm 1: paddd <p=int6464#1,<t=int6464#3
1091# asm 2: paddd <p=%xmm0,<t=%xmm2
1092paddd %xmm0,%xmm2
1093
1094# qhasm: u = t
1095# asm 1: movdqa <t=int6464#3,>u=int6464#5
1096# asm 2: movdqa <t=%xmm2,>u=%xmm4
1097movdqa %xmm2,%xmm4
1098
1099# qhasm: uint32323232 t >>= 25
1100# asm 1: psrld $25,<t=int6464#3
1101# asm 2: psrld $25,<t=%xmm2
1102psrld $25,%xmm2
1103
1104# qhasm: q ^= t
1105# asm 1: pxor <t=int6464#3,<q=int6464#4
1106# asm 2: pxor <t=%xmm2,<q=%xmm3
1107pxor %xmm2,%xmm3
1108
1109# qhasm: uint32323232 u <<= 7
1110# asm 1: pslld $7,<u=int6464#5
1111# asm 2: pslld $7,<u=%xmm4
1112pslld $7,%xmm4
1113
1114# qhasm: q ^= u
1115# asm 1: pxor <u=int6464#5,<q=int6464#4
1116# asm 2: pxor <u=%xmm4,<q=%xmm3
1117pxor %xmm4,%xmm3
1118
1119# qhasm: z4_stack = q
1120# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#33
1121# asm 2: movdqa <q=%xmm3,>z4_stack=544(%esp)
1122movdqa %xmm3,544(%esp)
1123
1124# qhasm: t = p
1125# asm 1: movdqa <p=int6464#1,>t=int6464#3
1126# asm 2: movdqa <p=%xmm0,>t=%xmm2
1127movdqa %xmm0,%xmm2
1128
1129# qhasm: uint32323232 t += q
1130# asm 1: paddd <q=int6464#4,<t=int6464#3
1131# asm 2: paddd <q=%xmm3,<t=%xmm2
1132paddd %xmm3,%xmm2
1133
1134# qhasm: u = t
1135# asm 1: movdqa <t=int6464#3,>u=int6464#5
1136# asm 2: movdqa <t=%xmm2,>u=%xmm4
1137movdqa %xmm2,%xmm4
1138
1139# qhasm: uint32323232 t >>= 23
1140# asm 1: psrld $23,<t=int6464#3
1141# asm 2: psrld $23,<t=%xmm2
1142psrld $23,%xmm2
1143
1144# qhasm: r ^= t
1145# asm 1: pxor <t=int6464#3,<r=int6464#2
1146# asm 2: pxor <t=%xmm2,<r=%xmm1
1147pxor %xmm2,%xmm1
1148
1149# qhasm: uint32323232 u <<= 9
1150# asm 1: pslld $9,<u=int6464#5
1151# asm 2: pslld $9,<u=%xmm4
1152pslld $9,%xmm4
1153
1154# qhasm: r ^= u
1155# asm 1: pxor <u=int6464#5,<r=int6464#2
1156# asm 2: pxor <u=%xmm4,<r=%xmm1
1157pxor %xmm4,%xmm1
1158
1159# qhasm: z8_stack = r
1160# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#34
1161# asm 2: movdqa <r=%xmm1,>z8_stack=560(%esp)
1162movdqa %xmm1,560(%esp)
1163
1164# qhasm: uint32323232 q += r
1165# asm 1: paddd <r=int6464#2,<q=int6464#4
1166# asm 2: paddd <r=%xmm1,<q=%xmm3
1167paddd %xmm1,%xmm3
1168
1169# qhasm: u = q
1170# asm 1: movdqa <q=int6464#4,>u=int6464#3
1171# asm 2: movdqa <q=%xmm3,>u=%xmm2
1172movdqa %xmm3,%xmm2
1173
1174# qhasm: uint32323232 q >>= 19
1175# asm 1: psrld $19,<q=int6464#4
1176# asm 2: psrld $19,<q=%xmm3
1177psrld $19,%xmm3
1178
1179# qhasm: s ^= q
1180# asm 1: pxor <q=int6464#4,<s=int6464#7
1181# asm 2: pxor <q=%xmm3,<s=%xmm6
1182pxor %xmm3,%xmm6
1183
1184# qhasm: uint32323232 u <<= 13
1185# asm 1: pslld $13,<u=int6464#3
1186# asm 2: pslld $13,<u=%xmm2
1187pslld $13,%xmm2
1188
1189# qhasm: s ^= u
1190# asm 1: pxor <u=int6464#3,<s=int6464#7
1191# asm 2: pxor <u=%xmm2,<s=%xmm6
1192pxor %xmm2,%xmm6
1193
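# note: the mp/mq/mr/ms/mt/mu registers carry a second, independent
# quarter-round interleaved with the p/q/r/s/t/u one (two columns of the
# state at a time), apparently to keep both SSE dependency chains busy.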
1194# qhasm: mt = z1_stack
1195# asm 1: movdqa <z1_stack=stack128#28,>mt=int6464#3
1196# asm 2: movdqa <z1_stack=464(%esp),>mt=%xmm2
1197movdqa 464(%esp),%xmm2
1198
1199# qhasm: mp = z5_stack
1200# asm 1: movdqa <z5_stack=stack128#21,>mp=int6464#5
1201# asm 2: movdqa <z5_stack=352(%esp),>mp=%xmm4
1202movdqa 352(%esp),%xmm4
1203
1204# qhasm: mq = z9_stack
1205# asm 1: movdqa <z9_stack=stack128#32,>mq=int6464#4
1206# asm 2: movdqa <z9_stack=528(%esp),>mq=%xmm3
1207movdqa 528(%esp),%xmm3
1208
1209# qhasm: mr = z13_stack
1210# asm 1: movdqa <z13_stack=stack128#30,>mr=int6464#6
1211# asm 2: movdqa <z13_stack=496(%esp),>mr=%xmm5
1212movdqa 496(%esp),%xmm5
1213
1214# qhasm: z12_stack = s
1215# asm 1: movdqa <s=int6464#7,>z12_stack=stack128#30
1216# asm 2: movdqa <s=%xmm6,>z12_stack=496(%esp)
1217movdqa %xmm6,496(%esp)
1218
1219# qhasm: uint32323232 r += s
1220# asm 1: paddd <s=int6464#7,<r=int6464#2
1221# asm 2: paddd <s=%xmm6,<r=%xmm1
1222paddd %xmm6,%xmm1
1223
1224# qhasm: u = r
1225# asm 1: movdqa <r=int6464#2,>u=int6464#7
1226# asm 2: movdqa <r=%xmm1,>u=%xmm6
1227movdqa %xmm1,%xmm6
1228
1229# qhasm: uint32323232 r >>= 14
1230# asm 1: psrld $14,<r=int6464#2
1231# asm 2: psrld $14,<r=%xmm1
1232psrld $14,%xmm1
1233
1234# qhasm: p ^= r
1235# asm 1: pxor <r=int6464#2,<p=int6464#1
1236# asm 2: pxor <r=%xmm1,<p=%xmm0
1237pxor %xmm1,%xmm0
1238
1239# qhasm: uint32323232 u <<= 18
1240# asm 1: pslld $18,<u=int6464#7
1241# asm 2: pslld $18,<u=%xmm6
1242pslld $18,%xmm6
1243
1244# qhasm: p ^= u
1245# asm 1: pxor <u=int6464#7,<p=int6464#1
1246# asm 2: pxor <u=%xmm6,<p=%xmm0
1247pxor %xmm6,%xmm0
1248
1249# qhasm: z0_stack = p
1250# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#21
1251# asm 2: movdqa <p=%xmm0,>z0_stack=352(%esp)
1252movdqa %xmm0,352(%esp)
1253
1254# qhasm: assign xmm2 to mt
1255
1256# qhasm: assign xmm3 to mq
1257
1258# qhasm: assign xmm4 to mp
1259
1260# qhasm: assign xmm5 to mr
1261
1262# qhasm: ms = mt
1263# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1264# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1265movdqa %xmm2,%xmm6
1266
1267# qhasm: uint32323232 mt += mp
1268# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1269# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1270paddd %xmm4,%xmm2
1271
1272# qhasm: mu = mt
1273# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1274# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1275movdqa %xmm2,%xmm0
1276
1277# qhasm: uint32323232 mt >>= 25
1278# asm 1: psrld $25,<mt=int6464#3
1279# asm 2: psrld $25,<mt=%xmm2
1280psrld $25,%xmm2
1281
1282# qhasm: mq ^= mt
1283# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1284# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1285pxor %xmm2,%xmm3
1286
1287# qhasm: uint32323232 mu <<= 7
1288# asm 1: pslld $7,<mu=int6464#1
1289# asm 2: pslld $7,<mu=%xmm0
1290pslld $7,%xmm0
1291
1292# qhasm: mq ^= mu
1293# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1294# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1295pxor %xmm0,%xmm3
1296
1297# qhasm: z9_stack = mq
1298# asm 1: movdqa <mq=int6464#4,>z9_stack=stack128#32
1299# asm 2: movdqa <mq=%xmm3,>z9_stack=528(%esp)
1300movdqa %xmm3,528(%esp)
1301
1302# qhasm: mt = mp
1303# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1304# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1305movdqa %xmm4,%xmm0
1306
1307# qhasm: uint32323232 mt += mq
1308# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1309# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1310paddd %xmm3,%xmm0
1311
1312# qhasm: mu = mt
1313# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1314# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1315movdqa %xmm0,%xmm1
1316
1317# qhasm: uint32323232 mt >>= 23
1318# asm 1: psrld $23,<mt=int6464#1
1319# asm 2: psrld $23,<mt=%xmm0
1320psrld $23,%xmm0
1321
1322# qhasm: mr ^= mt
1323# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1324# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1325pxor %xmm0,%xmm5
1326
1327# qhasm: uint32323232 mu <<= 9
1328# asm 1: pslld $9,<mu=int6464#2
1329# asm 2: pslld $9,<mu=%xmm1
1330pslld $9,%xmm1
1331
1332# qhasm: mr ^= mu
1333# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1334# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1335pxor %xmm1,%xmm5
1336
1337# qhasm: z13_stack = mr
1338# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#35
1339# asm 2: movdqa <mr=%xmm5,>z13_stack=576(%esp)
1340movdqa %xmm5,576(%esp)
1341
1342# qhasm: uint32323232 mq += mr
1343# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1344# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1345paddd %xmm5,%xmm3
1346
1347# qhasm: mu = mq
1348# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1349# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1350movdqa %xmm3,%xmm0
1351
1352# qhasm: uint32323232 mq >>= 19
1353# asm 1: psrld $19,<mq=int6464#4
1354# asm 2: psrld $19,<mq=%xmm3
1355psrld $19,%xmm3
1356
1357# qhasm: ms ^= mq
1358# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1359# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1360pxor %xmm3,%xmm6
1361
1362# qhasm: uint32323232 mu <<= 13
1363# asm 1: pslld $13,<mu=int6464#1
1364# asm 2: pslld $13,<mu=%xmm0
1365pslld $13,%xmm0
1366
1367# qhasm: ms ^= mu
1368# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1369# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1370pxor %xmm0,%xmm6
1371
1372# qhasm: t = z6_stack
1373# asm 1: movdqa <z6_stack=stack128#26,>t=int6464#3
1374# asm 2: movdqa <z6_stack=432(%esp),>t=%xmm2
1375movdqa 432(%esp),%xmm2
1376
1377# qhasm: p = z10_stack
1378# asm 1: movdqa <z10_stack=stack128#22,>p=int6464#1
1379# asm 2: movdqa <z10_stack=368(%esp),>p=%xmm0
1380movdqa 368(%esp),%xmm0
1381
1382# qhasm: q = z14_stack
1383# asm 1: movdqa <z14_stack=stack128#24,>q=int6464#4
1384# asm 2: movdqa <z14_stack=400(%esp),>q=%xmm3
1385movdqa 400(%esp),%xmm3
1386
1387# qhasm: r = z2_stack
1388# asm 1: movdqa <z2_stack=stack128#31,>r=int6464#2
1389# asm 2: movdqa <z2_stack=512(%esp),>r=%xmm1
1390movdqa 512(%esp),%xmm1
1391
1392# qhasm: z1_stack = ms
1393# asm 1: movdqa <ms=int6464#7,>z1_stack=stack128#22
1394# asm 2: movdqa <ms=%xmm6,>z1_stack=368(%esp)
1395movdqa %xmm6,368(%esp)
1396
1397# qhasm: uint32323232 mr += ms
1398# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1399# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1400paddd %xmm6,%xmm5
1401
1402# qhasm: mu = mr
1403# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1404# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1405movdqa %xmm5,%xmm6
1406
1407# qhasm: uint32323232 mr >>= 14
1408# asm 1: psrld $14,<mr=int6464#6
1409# asm 2: psrld $14,<mr=%xmm5
1410psrld $14,%xmm5
1411
1412# qhasm: mp ^= mr
1413# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1414# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1415pxor %xmm5,%xmm4
1416
1417# qhasm: uint32323232 mu <<= 18
1418# asm 1: pslld $18,<mu=int6464#7
1419# asm 2: pslld $18,<mu=%xmm6
1420pslld $18,%xmm6
1421
1422# qhasm: mp ^= mu
1423# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1424# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1425pxor %xmm6,%xmm4
1426
1427# qhasm: z5_stack = mp
1428# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#24
1429# asm 2: movdqa <mp=%xmm4,>z5_stack=400(%esp)
1430movdqa %xmm4,400(%esp)
1431
1432# qhasm: assign xmm0 to p
1433
1434# qhasm: assign xmm1 to r
1435
1436# qhasm: assign xmm2 to t
1437
1438# qhasm: assign xmm3 to q
1439
1440# qhasm: s = t
1441# asm 1: movdqa <t=int6464#3,>s=int6464#7
1442# asm 2: movdqa <t=%xmm2,>s=%xmm6
1443movdqa %xmm2,%xmm6
1444
1445# qhasm: uint32323232 t += p
1446# asm 1: paddd <p=int6464#1,<t=int6464#3
1447# asm 2: paddd <p=%xmm0,<t=%xmm2
1448paddd %xmm0,%xmm2
1449
1450# qhasm: u = t
1451# asm 1: movdqa <t=int6464#3,>u=int6464#5
1452# asm 2: movdqa <t=%xmm2,>u=%xmm4
1453movdqa %xmm2,%xmm4
1454
1455# qhasm: uint32323232 t >>= 25
1456# asm 1: psrld $25,<t=int6464#3
1457# asm 2: psrld $25,<t=%xmm2
1458psrld $25,%xmm2
1459
1460# qhasm: q ^= t
1461# asm 1: pxor <t=int6464#3,<q=int6464#4
1462# asm 2: pxor <t=%xmm2,<q=%xmm3
1463pxor %xmm2,%xmm3
1464
1465# qhasm: uint32323232 u <<= 7
1466# asm 1: pslld $7,<u=int6464#5
1467# asm 2: pslld $7,<u=%xmm4
1468pslld $7,%xmm4
1469
1470# qhasm: q ^= u
1471# asm 1: pxor <u=int6464#5,<q=int6464#4
1472# asm 2: pxor <u=%xmm4,<q=%xmm3
1473pxor %xmm4,%xmm3
1474
1475# qhasm: z14_stack = q
1476# asm 1: movdqa <q=int6464#4,>z14_stack=stack128#36
1477# asm 2: movdqa <q=%xmm3,>z14_stack=592(%esp)
1478movdqa %xmm3,592(%esp)
1479
1480# qhasm: t = p
1481# asm 1: movdqa <p=int6464#1,>t=int6464#3
1482# asm 2: movdqa <p=%xmm0,>t=%xmm2
1483movdqa %xmm0,%xmm2
1484
1485# qhasm: uint32323232 t += q
1486# asm 1: paddd <q=int6464#4,<t=int6464#3
1487# asm 2: paddd <q=%xmm3,<t=%xmm2
1488paddd %xmm3,%xmm2
1489
1490# qhasm: u = t
1491# asm 1: movdqa <t=int6464#3,>u=int6464#5
1492# asm 2: movdqa <t=%xmm2,>u=%xmm4
1493movdqa %xmm2,%xmm4
1494
1495# qhasm: uint32323232 t >>= 23
1496# asm 1: psrld $23,<t=int6464#3
1497# asm 2: psrld $23,<t=%xmm2
1498psrld $23,%xmm2
1499
1500# qhasm: r ^= t
1501# asm 1: pxor <t=int6464#3,<r=int6464#2
1502# asm 2: pxor <t=%xmm2,<r=%xmm1
1503pxor %xmm2,%xmm1
1504
1505# qhasm: uint32323232 u <<= 9
1506# asm 1: pslld $9,<u=int6464#5
1507# asm 2: pslld $9,<u=%xmm4
1508pslld $9,%xmm4
1509
1510# qhasm: r ^= u
1511# asm 1: pxor <u=int6464#5,<r=int6464#2
1512# asm 2: pxor <u=%xmm4,<r=%xmm1
1513pxor %xmm4,%xmm1
1514
1515# qhasm: z2_stack = r
1516# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#26
1517# asm 2: movdqa <r=%xmm1,>z2_stack=432(%esp)
1518movdqa %xmm1,432(%esp)
1519
1520# qhasm: uint32323232 q += r
1521# asm 1: paddd <r=int6464#2,<q=int6464#4
1522# asm 2: paddd <r=%xmm1,<q=%xmm3
1523paddd %xmm1,%xmm3
1524
1525# qhasm: u = q
1526# asm 1: movdqa <q=int6464#4,>u=int6464#3
1527# asm 2: movdqa <q=%xmm3,>u=%xmm2
1528movdqa %xmm3,%xmm2
1529
1530# qhasm: uint32323232 q >>= 19
1531# asm 1: psrld $19,<q=int6464#4
1532# asm 2: psrld $19,<q=%xmm3
1533psrld $19,%xmm3
1534
1535# qhasm: s ^= q
1536# asm 1: pxor <q=int6464#4,<s=int6464#7
1537# asm 2: pxor <q=%xmm3,<s=%xmm6
1538pxor %xmm3,%xmm6
1539
1540# qhasm: uint32323232 u <<= 13
1541# asm 1: pslld $13,<u=int6464#3
1542# asm 2: pslld $13,<u=%xmm2
1543pslld $13,%xmm2
1544
1545# qhasm: s ^= u
1546# asm 1: pxor <u=int6464#3,<s=int6464#7
1547# asm 2: pxor <u=%xmm2,<s=%xmm6
1548pxor %xmm2,%xmm6
1549
1550# qhasm: mt = z11_stack
1551# asm 1: movdqa <z11_stack=stack128#27,>mt=int6464#3
1552# asm 2: movdqa <z11_stack=448(%esp),>mt=%xmm2
1553movdqa 448(%esp),%xmm2
1554
1555# qhasm: mp = z15_stack
1556# asm 1: movdqa <z15_stack=stack128#23,>mp=int6464#5
1557# asm 2: movdqa <z15_stack=384(%esp),>mp=%xmm4
1558movdqa 384(%esp),%xmm4
1559
1560# qhasm: mq = z3_stack
1561# asm 1: movdqa <z3_stack=stack128#25,>mq=int6464#4
1562# asm 2: movdqa <z3_stack=416(%esp),>mq=%xmm3
1563movdqa 416(%esp),%xmm3
1564
1565# qhasm: mr = z7_stack
1566# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1567# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1568movdqa 480(%esp),%xmm5
1569
1570# qhasm: z6_stack = s
1571# asm 1: movdqa <s=int6464#7,>z6_stack=stack128#23
1572# asm 2: movdqa <s=%xmm6,>z6_stack=384(%esp)
1573movdqa %xmm6,384(%esp)
1574
1575# qhasm: uint32323232 r += s
1576# asm 1: paddd <s=int6464#7,<r=int6464#2
1577# asm 2: paddd <s=%xmm6,<r=%xmm1
1578paddd %xmm6,%xmm1
1579
1580# qhasm: u = r
1581# asm 1: movdqa <r=int6464#2,>u=int6464#7
1582# asm 2: movdqa <r=%xmm1,>u=%xmm6
1583movdqa %xmm1,%xmm6
1584
1585# qhasm: uint32323232 r >>= 14
1586# asm 1: psrld $14,<r=int6464#2
1587# asm 2: psrld $14,<r=%xmm1
1588psrld $14,%xmm1
1589
1590# qhasm: p ^= r
1591# asm 1: pxor <r=int6464#2,<p=int6464#1
1592# asm 2: pxor <r=%xmm1,<p=%xmm0
1593pxor %xmm1,%xmm0
1594
1595# qhasm: uint32323232 u <<= 18
1596# asm 1: pslld $18,<u=int6464#7
1597# asm 2: pslld $18,<u=%xmm6
1598pslld $18,%xmm6
1599
1600# qhasm: p ^= u
1601# asm 1: pxor <u=int6464#7,<p=int6464#1
1602# asm 2: pxor <u=%xmm6,<p=%xmm0
1603pxor %xmm6,%xmm0
1604
1605# qhasm: z10_stack = p
1606# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#27
1607# asm 2: movdqa <p=%xmm0,>z10_stack=448(%esp)
1608movdqa %xmm0,448(%esp)
1609
1610# qhasm: assign xmm2 to mt
1611
1612# qhasm: assign xmm3 to mq
1613
1614# qhasm: assign xmm4 to mp
1615
1616# qhasm: assign xmm5 to mr
1617
1618# qhasm: ms = mt
1619# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1620# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1621movdqa %xmm2,%xmm6
1622
1623# qhasm: uint32323232 mt += mp
1624# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1625# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1626paddd %xmm4,%xmm2
1627
1628# qhasm: mu = mt
1629# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1630# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1631movdqa %xmm2,%xmm0
1632
1633# qhasm: uint32323232 mt >>= 25
1634# asm 1: psrld $25,<mt=int6464#3
1635# asm 2: psrld $25,<mt=%xmm2
1636psrld $25,%xmm2
1637
1638# qhasm: mq ^= mt
1639# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1640# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1641pxor %xmm2,%xmm3
1642
1643# qhasm: uint32323232 mu <<= 7
1644# asm 1: pslld $7,<mu=int6464#1
1645# asm 2: pslld $7,<mu=%xmm0
1646pslld $7,%xmm0
1647
1648# qhasm: mq ^= mu
1649# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1650# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1651pxor %xmm0,%xmm3
1652
1653# qhasm: z3_stack = mq
1654# asm 1: movdqa <mq=int6464#4,>z3_stack=stack128#25
1655# asm 2: movdqa <mq=%xmm3,>z3_stack=416(%esp)
1656movdqa %xmm3,416(%esp)
1657
1658# qhasm: mt = mp
1659# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1660# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1661movdqa %xmm4,%xmm0
1662
1663# qhasm: uint32323232 mt += mq
1664# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1665# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1666paddd %xmm3,%xmm0
1667
1668# qhasm: mu = mt
1669# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1670# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1671movdqa %xmm0,%xmm1
1672
1673# qhasm: uint32323232 mt >>= 23
1674# asm 1: psrld $23,<mt=int6464#1
1675# asm 2: psrld $23,<mt=%xmm0
1676psrld $23,%xmm0
1677
1678# qhasm: mr ^= mt
1679# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1680# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1681pxor %xmm0,%xmm5
1682
1683# qhasm: uint32323232 mu <<= 9
1684# asm 1: pslld $9,<mu=int6464#2
1685# asm 2: pslld $9,<mu=%xmm1
1686pslld $9,%xmm1
1687
1688# qhasm: mr ^= mu
1689# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1690# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1691pxor %xmm1,%xmm5
1692
1693# qhasm: z7_stack = mr
1694# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
1695# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
1696movdqa %xmm5,480(%esp)
1697
1698# qhasm: uint32323232 mq += mr
1699# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1700# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1701paddd %xmm5,%xmm3
1702
1703# qhasm: mu = mq
1704# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1705# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1706movdqa %xmm3,%xmm0
1707
1708# qhasm: uint32323232 mq >>= 19
1709# asm 1: psrld $19,<mq=int6464#4
1710# asm 2: psrld $19,<mq=%xmm3
1711psrld $19,%xmm3
1712
1713# qhasm: ms ^= mq
1714# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1715# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1716pxor %xmm3,%xmm6
1717
1718# qhasm: uint32323232 mu <<= 13
1719# asm 1: pslld $13,<mu=int6464#1
1720# asm 2: pslld $13,<mu=%xmm0
1721pslld $13,%xmm0
1722
1723# qhasm: ms ^= mu
1724# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1725# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1726pxor %xmm0,%xmm6
1727
1728# qhasm: t = z3_stack
1729# asm 1: movdqa <z3_stack=stack128#25,>t=int6464#3
1730# asm 2: movdqa <z3_stack=416(%esp),>t=%xmm2
1731movdqa 416(%esp),%xmm2
1732
1733# qhasm: p = z0_stack
1734# asm 1: movdqa <z0_stack=stack128#21,>p=int6464#1
1735# asm 2: movdqa <z0_stack=352(%esp),>p=%xmm0
1736movdqa 352(%esp),%xmm0
1737
1738# qhasm: q = z1_stack
1739# asm 1: movdqa <z1_stack=stack128#22,>q=int6464#4
1740# asm 2: movdqa <z1_stack=368(%esp),>q=%xmm3
1741movdqa 368(%esp),%xmm3
1742
1743# qhasm: r = z2_stack
1744# asm 1: movdqa <z2_stack=stack128#26,>r=int6464#2
1745# asm 2: movdqa <z2_stack=432(%esp),>r=%xmm1
1746movdqa 432(%esp),%xmm1
1747
1748# qhasm: z11_stack = ms
1749# asm 1: movdqa <ms=int6464#7,>z11_stack=stack128#21
1750# asm 2: movdqa <ms=%xmm6,>z11_stack=352(%esp)
1751movdqa %xmm6,352(%esp)
1752
1753# qhasm: uint32323232 mr += ms
1754# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1755# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1756paddd %xmm6,%xmm5
1757
1758# qhasm: mu = mr
1759# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1760# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1761movdqa %xmm5,%xmm6
1762
1763# qhasm: uint32323232 mr >>= 14
1764# asm 1: psrld $14,<mr=int6464#6
1765# asm 2: psrld $14,<mr=%xmm5
1766psrld $14,%xmm5
1767
1768# qhasm: mp ^= mr
1769# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1770# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1771pxor %xmm5,%xmm4
1772
1773# qhasm: uint32323232 mu <<= 18
1774# asm 1: pslld $18,<mu=int6464#7
1775# asm 2: pslld $18,<mu=%xmm6
1776pslld $18,%xmm6
1777
1778# qhasm: mp ^= mu
1779# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1780# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1781pxor %xmm6,%xmm4
1782
1783# qhasm: z15_stack = mp
1784# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#22
1785# asm 2: movdqa <mp=%xmm4,>z15_stack=368(%esp)
1786movdqa %xmm4,368(%esp)
1787
1788# qhasm: assign xmm0 to p
1789
1790# qhasm: assign xmm1 to r
1791
1792# qhasm: assign xmm2 to t
1793
1794# qhasm: assign xmm3 to q
1795
1796# qhasm: s = t
1797# asm 1: movdqa <t=int6464#3,>s=int6464#7
1798# asm 2: movdqa <t=%xmm2,>s=%xmm6
1799movdqa %xmm2,%xmm6
1800
1801# qhasm: uint32323232 t += p
1802# asm 1: paddd <p=int6464#1,<t=int6464#3
1803# asm 2: paddd <p=%xmm0,<t=%xmm2
1804paddd %xmm0,%xmm2
1805
1806# qhasm: u = t
1807# asm 1: movdqa <t=int6464#3,>u=int6464#5
1808# asm 2: movdqa <t=%xmm2,>u=%xmm4
1809movdqa %xmm2,%xmm4
1810
1811# qhasm: uint32323232 t >>= 25
1812# asm 1: psrld $25,<t=int6464#3
1813# asm 2: psrld $25,<t=%xmm2
1814psrld $25,%xmm2
1815
1816# qhasm: q ^= t
1817# asm 1: pxor <t=int6464#3,<q=int6464#4
1818# asm 2: pxor <t=%xmm2,<q=%xmm3
1819pxor %xmm2,%xmm3
1820
1821# qhasm: uint32323232 u <<= 7
1822# asm 1: pslld $7,<u=int6464#5
1823# asm 2: pslld $7,<u=%xmm4
1824pslld $7,%xmm4
1825
1826# qhasm: q ^= u
1827# asm 1: pxor <u=int6464#5,<q=int6464#4
1828# asm 2: pxor <u=%xmm4,<q=%xmm3
1829pxor %xmm4,%xmm3
1830
1831# qhasm: z1_stack = q
1832# asm 1: movdqa <q=int6464#4,>z1_stack=stack128#28
1833# asm 2: movdqa <q=%xmm3,>z1_stack=464(%esp)
1834movdqa %xmm3,464(%esp)
1835
1836# qhasm: t = p
1837# asm 1: movdqa <p=int6464#1,>t=int6464#3
1838# asm 2: movdqa <p=%xmm0,>t=%xmm2
1839movdqa %xmm0,%xmm2
1840
1841# qhasm: uint32323232 t += q
1842# asm 1: paddd <q=int6464#4,<t=int6464#3
1843# asm 2: paddd <q=%xmm3,<t=%xmm2
1844paddd %xmm3,%xmm2
1845
1846# qhasm: u = t
1847# asm 1: movdqa <t=int6464#3,>u=int6464#5
1848# asm 2: movdqa <t=%xmm2,>u=%xmm4
1849movdqa %xmm2,%xmm4
1850
1851# qhasm: uint32323232 t >>= 23
1852# asm 1: psrld $23,<t=int6464#3
1853# asm 2: psrld $23,<t=%xmm2
1854psrld $23,%xmm2
1855
1856# qhasm: r ^= t
1857# asm 1: pxor <t=int6464#3,<r=int6464#2
1858# asm 2: pxor <t=%xmm2,<r=%xmm1
1859pxor %xmm2,%xmm1
1860
1861# qhasm: uint32323232 u <<= 9
1862# asm 1: pslld $9,<u=int6464#5
1863# asm 2: pslld $9,<u=%xmm4
1864pslld $9,%xmm4
1865
1866# qhasm: r ^= u
1867# asm 1: pxor <u=int6464#5,<r=int6464#2
1868# asm 2: pxor <u=%xmm4,<r=%xmm1
1869pxor %xmm4,%xmm1
1870
1871# qhasm: z2_stack = r
1872# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#31
1873# asm 2: movdqa <r=%xmm1,>z2_stack=512(%esp)
1874movdqa %xmm1,512(%esp)
1875
1876# qhasm: uint32323232 q += r
1877# asm 1: paddd <r=int6464#2,<q=int6464#4
1878# asm 2: paddd <r=%xmm1,<q=%xmm3
1879paddd %xmm1,%xmm3
1880
1881# qhasm: u = q
1882# asm 1: movdqa <q=int6464#4,>u=int6464#3
1883# asm 2: movdqa <q=%xmm3,>u=%xmm2
1884movdqa %xmm3,%xmm2
1885
1886# qhasm: uint32323232 q >>= 19
1887# asm 1: psrld $19,<q=int6464#4
1888# asm 2: psrld $19,<q=%xmm3
1889psrld $19,%xmm3
1890
1891# qhasm: s ^= q
1892# asm 1: pxor <q=int6464#4,<s=int6464#7
1893# asm 2: pxor <q=%xmm3,<s=%xmm6
1894pxor %xmm3,%xmm6
1895
1896# qhasm: uint32323232 u <<= 13
1897# asm 1: pslld $13,<u=int6464#3
1898# asm 2: pslld $13,<u=%xmm2
1899pslld $13,%xmm2
1900
1901# qhasm: s ^= u
1902# asm 1: pxor <u=int6464#3,<s=int6464#7
1903# asm 2: pxor <u=%xmm2,<s=%xmm6
1904pxor %xmm2,%xmm6
1905
1906# qhasm: mt = z4_stack
1907# asm 1: movdqa <z4_stack=stack128#33,>mt=int6464#3
1908# asm 2: movdqa <z4_stack=544(%esp),>mt=%xmm2
1909movdqa 544(%esp),%xmm2
1910
1911# qhasm: mp = z5_stack
1912# asm 1: movdqa <z5_stack=stack128#24,>mp=int6464#5
1913# asm 2: movdqa <z5_stack=400(%esp),>mp=%xmm4
1914movdqa 400(%esp),%xmm4
1915
1916# qhasm: mq = z6_stack
1917# asm 1: movdqa <z6_stack=stack128#23,>mq=int6464#4
1918# asm 2: movdqa <z6_stack=384(%esp),>mq=%xmm3
1919movdqa 384(%esp),%xmm3
1920
1921# qhasm: mr = z7_stack
1922# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1923# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1924movdqa 480(%esp),%xmm5
1925
1926# qhasm: z3_stack = s
1927# asm 1: movdqa <s=int6464#7,>z3_stack=stack128#25
1928# asm 2: movdqa <s=%xmm6,>z3_stack=416(%esp)
1929movdqa %xmm6,416(%esp)
1930
1931# qhasm: uint32323232 r += s
1932# asm 1: paddd <s=int6464#7,<r=int6464#2
1933# asm 2: paddd <s=%xmm6,<r=%xmm1
1934paddd %xmm6,%xmm1
1935
1936# qhasm: u = r
1937# asm 1: movdqa <r=int6464#2,>u=int6464#7
1938# asm 2: movdqa <r=%xmm1,>u=%xmm6
1939movdqa %xmm1,%xmm6
1940
1941# qhasm: uint32323232 r >>= 14
1942# asm 1: psrld $14,<r=int6464#2
1943# asm 2: psrld $14,<r=%xmm1
1944psrld $14,%xmm1
1945
1946# qhasm: p ^= r
1947# asm 1: pxor <r=int6464#2,<p=int6464#1
1948# asm 2: pxor <r=%xmm1,<p=%xmm0
1949pxor %xmm1,%xmm0
1950
1951# qhasm: uint32323232 u <<= 18
1952# asm 1: pslld $18,<u=int6464#7
1953# asm 2: pslld $18,<u=%xmm6
1954pslld $18,%xmm6
1955
1956# qhasm: p ^= u
1957# asm 1: pxor <u=int6464#7,<p=int6464#1
1958# asm 2: pxor <u=%xmm6,<p=%xmm0
1959pxor %xmm6,%xmm0
1960
1961# qhasm: z0_stack = p
1962# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1963# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1964movdqa %xmm0,544(%esp)
1965
1966# qhasm: assign xmm2 to mt
1967
1968# qhasm: assign xmm3 to mq
1969
1970# qhasm: assign xmm4 to mp
1971
1972# qhasm: assign xmm5 to mr
1973
1974# qhasm: ms = mt
1975# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1976# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1977movdqa %xmm2,%xmm6
1978
1979# qhasm: uint32323232 mt += mp
1980# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1981# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1982paddd %xmm4,%xmm2
1983
1984# qhasm: mu = mt
1985# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1986# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1987movdqa %xmm2,%xmm0
1988
1989# qhasm: uint32323232 mt >>= 25
1990# asm 1: psrld $25,<mt=int6464#3
1991# asm 2: psrld $25,<mt=%xmm2
1992psrld $25,%xmm2
1993
1994# qhasm: mq ^= mt
1995# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1996# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1997pxor %xmm2,%xmm3
1998
1999# qhasm: uint32323232 mu <<= 7
2000# asm 1: pslld $7,<mu=int6464#1
2001# asm 2: pslld $7,<mu=%xmm0
2002pslld $7,%xmm0
2003
2004# qhasm: mq ^= mu
2005# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2006# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2007pxor %xmm0,%xmm3
2008
2009# qhasm: z6_stack = mq
2010# asm 1: movdqa <mq=int6464#4,>z6_stack=stack128#26
2011# asm 2: movdqa <mq=%xmm3,>z6_stack=432(%esp)
2012movdqa %xmm3,432(%esp)
2013
2014# qhasm: mt = mp
2015# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2016# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2017movdqa %xmm4,%xmm0
2018
2019# qhasm: uint32323232 mt += mq
2020# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2021# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2022paddd %xmm3,%xmm0
2023
2024# qhasm: mu = mt
2025# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2026# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2027movdqa %xmm0,%xmm1
2028
2029# qhasm: uint32323232 mt >>= 23
2030# asm 1: psrld $23,<mt=int6464#1
2031# asm 2: psrld $23,<mt=%xmm0
2032psrld $23,%xmm0
2033
2034# qhasm: mr ^= mt
2035# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2036# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2037pxor %xmm0,%xmm5
2038
2039# qhasm: uint32323232 mu <<= 9
2040# asm 1: pslld $9,<mu=int6464#2
2041# asm 2: pslld $9,<mu=%xmm1
2042pslld $9,%xmm1
2043
2044# qhasm: mr ^= mu
2045# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2046# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2047pxor %xmm1,%xmm5
2048
2049# qhasm: z7_stack = mr
2050# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
2051# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
2052movdqa %xmm5,480(%esp)
2053
2054# qhasm: uint32323232 mq += mr
2055# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2056# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2057paddd %xmm5,%xmm3
2058
2059# qhasm: mu = mq
2060# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2061# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2062movdqa %xmm3,%xmm0
2063
2064# qhasm: uint32323232 mq >>= 19
2065# asm 1: psrld $19,<mq=int6464#4
2066# asm 2: psrld $19,<mq=%xmm3
2067psrld $19,%xmm3
2068
2069# qhasm: ms ^= mq
2070# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2071# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2072pxor %xmm3,%xmm6
2073
2074# qhasm: uint32323232 mu <<= 13
2075# asm 1: pslld $13,<mu=int6464#1
2076# asm 2: pslld $13,<mu=%xmm0
2077pslld $13,%xmm0
2078
2079# qhasm: ms ^= mu
2080# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2081# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2082pxor %xmm0,%xmm6
2083
2084# qhasm: t = z9_stack
2085# asm 1: movdqa <z9_stack=stack128#32,>t=int6464#3
2086# asm 2: movdqa <z9_stack=528(%esp),>t=%xmm2
2087movdqa 528(%esp),%xmm2
2088
2089# qhasm: p = z10_stack
2090# asm 1: movdqa <z10_stack=stack128#27,>p=int6464#1
2091# asm 2: movdqa <z10_stack=448(%esp),>p=%xmm0
2092movdqa 448(%esp),%xmm0
2093
2094# qhasm: q = z11_stack
2095# asm 1: movdqa <z11_stack=stack128#21,>q=int6464#4
2096# asm 2: movdqa <z11_stack=352(%esp),>q=%xmm3
2097movdqa 352(%esp),%xmm3
2098
2099# qhasm: r = z8_stack
2100# asm 1: movdqa <z8_stack=stack128#34,>r=int6464#2
2101# asm 2: movdqa <z8_stack=560(%esp),>r=%xmm1
2102movdqa 560(%esp),%xmm1
2103
2104# qhasm: z4_stack = ms
2105# asm 1: movdqa <ms=int6464#7,>z4_stack=stack128#34
2106# asm 2: movdqa <ms=%xmm6,>z4_stack=560(%esp)
2107movdqa %xmm6,560(%esp)
2108
2109# qhasm: uint32323232 mr += ms
2110# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2111# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2112paddd %xmm6,%xmm5
2113
2114# qhasm: mu = mr
2115# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2116# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2117movdqa %xmm5,%xmm6
2118
2119# qhasm: uint32323232 mr >>= 14
2120# asm 1: psrld $14,<mr=int6464#6
2121# asm 2: psrld $14,<mr=%xmm5
2122psrld $14,%xmm5
2123
2124# qhasm: mp ^= mr
2125# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2126# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2127pxor %xmm5,%xmm4
2128
2129# qhasm: uint32323232 mu <<= 18
2130# asm 1: pslld $18,<mu=int6464#7
2131# asm 2: pslld $18,<mu=%xmm6
2132pslld $18,%xmm6
2133
2134# qhasm: mp ^= mu
2135# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2136# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2137pxor %xmm6,%xmm4
2138
2139# qhasm: z5_stack = mp
2140# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#21
2141# asm 2: movdqa <mp=%xmm4,>z5_stack=352(%esp)
2142movdqa %xmm4,352(%esp)
2143
2144# qhasm: assign xmm0 to p
2145
2146# qhasm: assign xmm1 to r
2147
2148# qhasm: assign xmm2 to t
2149
2150# qhasm: assign xmm3 to q
2151
2152# qhasm: s = t
2153# asm 1: movdqa <t=int6464#3,>s=int6464#7
2154# asm 2: movdqa <t=%xmm2,>s=%xmm6
2155movdqa %xmm2,%xmm6
2156
2157# qhasm: uint32323232 t += p
2158# asm 1: paddd <p=int6464#1,<t=int6464#3
2159# asm 2: paddd <p=%xmm0,<t=%xmm2
2160paddd %xmm0,%xmm2
2161
2162# qhasm: u = t
2163# asm 1: movdqa <t=int6464#3,>u=int6464#5
2164# asm 2: movdqa <t=%xmm2,>u=%xmm4
2165movdqa %xmm2,%xmm4
2166
2167# qhasm: uint32323232 t >>= 25
2168# asm 1: psrld $25,<t=int6464#3
2169# asm 2: psrld $25,<t=%xmm2
2170psrld $25,%xmm2
2171
2172# qhasm: q ^= t
2173# asm 1: pxor <t=int6464#3,<q=int6464#4
2174# asm 2: pxor <t=%xmm2,<q=%xmm3
2175pxor %xmm2,%xmm3
2176
2177# qhasm: uint32323232 u <<= 7
2178# asm 1: pslld $7,<u=int6464#5
2179# asm 2: pslld $7,<u=%xmm4
2180pslld $7,%xmm4
2181
2182# qhasm: q ^= u
2183# asm 1: pxor <u=int6464#5,<q=int6464#4
2184# asm 2: pxor <u=%xmm4,<q=%xmm3
2185pxor %xmm4,%xmm3
2186
2187# qhasm: z11_stack = q
2188# asm 1: movdqa <q=int6464#4,>z11_stack=stack128#27
2189# asm 2: movdqa <q=%xmm3,>z11_stack=448(%esp)
2190movdqa %xmm3,448(%esp)
2191
2192# qhasm: t = p
2193# asm 1: movdqa <p=int6464#1,>t=int6464#3
2194# asm 2: movdqa <p=%xmm0,>t=%xmm2
2195movdqa %xmm0,%xmm2
2196
2197# qhasm: uint32323232 t += q
2198# asm 1: paddd <q=int6464#4,<t=int6464#3
2199# asm 2: paddd <q=%xmm3,<t=%xmm2
2200paddd %xmm3,%xmm2
2201
2202# qhasm: u = t
2203# asm 1: movdqa <t=int6464#3,>u=int6464#5
2204# asm 2: movdqa <t=%xmm2,>u=%xmm4
2205movdqa %xmm2,%xmm4
2206
2207# qhasm: uint32323232 t >>= 23
2208# asm 1: psrld $23,<t=int6464#3
2209# asm 2: psrld $23,<t=%xmm2
2210psrld $23,%xmm2
2211
2212# qhasm: r ^= t
2213# asm 1: pxor <t=int6464#3,<r=int6464#2
2214# asm 2: pxor <t=%xmm2,<r=%xmm1
2215pxor %xmm2,%xmm1
2216
2217# qhasm: uint32323232 u <<= 9
2218# asm 1: pslld $9,<u=int6464#5
2219# asm 2: pslld $9,<u=%xmm4
2220pslld $9,%xmm4
2221
2222# qhasm: r ^= u
2223# asm 1: pxor <u=int6464#5,<r=int6464#2
2224# asm 2: pxor <u=%xmm4,<r=%xmm1
2225pxor %xmm4,%xmm1
2226
2227# qhasm: z8_stack = r
2228# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#37
2229# asm 2: movdqa <r=%xmm1,>z8_stack=608(%esp)
2230movdqa %xmm1,608(%esp)
2231
2232# qhasm: uint32323232 q += r
2233# asm 1: paddd <r=int6464#2,<q=int6464#4
2234# asm 2: paddd <r=%xmm1,<q=%xmm3
2235paddd %xmm1,%xmm3
2236
2237# qhasm: u = q
2238# asm 1: movdqa <q=int6464#4,>u=int6464#3
2239# asm 2: movdqa <q=%xmm3,>u=%xmm2
2240movdqa %xmm3,%xmm2
2241
2242# qhasm: uint32323232 q >>= 19
2243# asm 1: psrld $19,<q=int6464#4
2244# asm 2: psrld $19,<q=%xmm3
2245psrld $19,%xmm3
2246
2247# qhasm: s ^= q
2248# asm 1: pxor <q=int6464#4,<s=int6464#7
2249# asm 2: pxor <q=%xmm3,<s=%xmm6
2250pxor %xmm3,%xmm6
2251
2252# qhasm: uint32323232 u <<= 13
2253# asm 1: pslld $13,<u=int6464#3
2254# asm 2: pslld $13,<u=%xmm2
2255pslld $13,%xmm2
2256
2257# qhasm: s ^= u
2258# asm 1: pxor <u=int6464#3,<s=int6464#7
2259# asm 2: pxor <u=%xmm2,<s=%xmm6
2260pxor %xmm2,%xmm6
2261
2262# qhasm: mt = z14_stack
2263# asm 1: movdqa <z14_stack=stack128#36,>mt=int6464#3
2264# asm 2: movdqa <z14_stack=592(%esp),>mt=%xmm2
2265movdqa 592(%esp),%xmm2
2266
2267# qhasm: mp = z15_stack
2268# asm 1: movdqa <z15_stack=stack128#22,>mp=int6464#5
2269# asm 2: movdqa <z15_stack=368(%esp),>mp=%xmm4
2270movdqa 368(%esp),%xmm4
2271
2272# qhasm: mq = z12_stack
2273# asm 1: movdqa <z12_stack=stack128#30,>mq=int6464#4
2274# asm 2: movdqa <z12_stack=496(%esp),>mq=%xmm3
2275movdqa 496(%esp),%xmm3
2276
2277# qhasm: mr = z13_stack
2278# asm 1: movdqa <z13_stack=stack128#35,>mr=int6464#6
2279# asm 2: movdqa <z13_stack=576(%esp),>mr=%xmm5
2280movdqa 576(%esp),%xmm5
2281
2282# qhasm: z9_stack = s
2283# asm 1: movdqa <s=int6464#7,>z9_stack=stack128#32
2284# asm 2: movdqa <s=%xmm6,>z9_stack=528(%esp)
2285movdqa %xmm6,528(%esp)
2286
2287# qhasm: uint32323232 r += s
2288# asm 1: paddd <s=int6464#7,<r=int6464#2
2289# asm 2: paddd <s=%xmm6,<r=%xmm1
2290paddd %xmm6,%xmm1
2291
2292# qhasm: u = r
2293# asm 1: movdqa <r=int6464#2,>u=int6464#7
2294# asm 2: movdqa <r=%xmm1,>u=%xmm6
2295movdqa %xmm1,%xmm6
2296
2297# qhasm: uint32323232 r >>= 14
2298# asm 1: psrld $14,<r=int6464#2
2299# asm 2: psrld $14,<r=%xmm1
2300psrld $14,%xmm1
2301
2302# qhasm: p ^= r
2303# asm 1: pxor <r=int6464#2,<p=int6464#1
2304# asm 2: pxor <r=%xmm1,<p=%xmm0
2305pxor %xmm1,%xmm0
2306
2307# qhasm: uint32323232 u <<= 18
2308# asm 1: pslld $18,<u=int6464#7
2309# asm 2: pslld $18,<u=%xmm6
2310pslld $18,%xmm6
2311
2312# qhasm: p ^= u
2313# asm 1: pxor <u=int6464#7,<p=int6464#1
2314# asm 2: pxor <u=%xmm6,<p=%xmm0
2315pxor %xmm6,%xmm0
2316
2317# qhasm: z10_stack = p
2318# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#22
2319# asm 2: movdqa <p=%xmm0,>z10_stack=368(%esp)
2320movdqa %xmm0,368(%esp)
2321
2322# qhasm: assign xmm2 to mt
2323
2324# qhasm: assign xmm3 to mq
2325
2326# qhasm: assign xmm4 to mp
2327
2328# qhasm: assign xmm5 to mr
2329
2330# qhasm: ms = mt
2331# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
2332# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
2333movdqa %xmm2,%xmm6
2334
2335# qhasm: uint32323232 mt += mp
2336# asm 1: paddd <mp=int6464#5,<mt=int6464#3
2337# asm 2: paddd <mp=%xmm4,<mt=%xmm2
2338paddd %xmm4,%xmm2
2339
2340# qhasm: mu = mt
2341# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
2342# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
2343movdqa %xmm2,%xmm0
2344
2345# qhasm: uint32323232 mt >>= 25
2346# asm 1: psrld $25,<mt=int6464#3
2347# asm 2: psrld $25,<mt=%xmm2
2348psrld $25,%xmm2
2349
2350# qhasm: mq ^= mt
2351# asm 1: pxor <mt=int6464#3,<mq=int6464#4
2352# asm 2: pxor <mt=%xmm2,<mq=%xmm3
2353pxor %xmm2,%xmm3
2354
2355# qhasm: uint32323232 mu <<= 7
2356# asm 1: pslld $7,<mu=int6464#1
2357# asm 2: pslld $7,<mu=%xmm0
2358pslld $7,%xmm0
2359
2360# qhasm: mq ^= mu
2361# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2362# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2363pxor %xmm0,%xmm3
2364
2365# qhasm: z12_stack = mq
2366# asm 1: movdqa <mq=int6464#4,>z12_stack=stack128#35
2367# asm 2: movdqa <mq=%xmm3,>z12_stack=576(%esp)
2368movdqa %xmm3,576(%esp)
2369
2370# qhasm: mt = mp
2371# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2372# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2373movdqa %xmm4,%xmm0
2374
2375# qhasm: uint32323232 mt += mq
2376# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2377# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2378paddd %xmm3,%xmm0
2379
2380# qhasm: mu = mt
2381# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2382# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2383movdqa %xmm0,%xmm1
2384
2385# qhasm: uint32323232 mt >>= 23
2386# asm 1: psrld $23,<mt=int6464#1
2387# asm 2: psrld $23,<mt=%xmm0
2388psrld $23,%xmm0
2389
2390# qhasm: mr ^= mt
2391# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2392# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2393pxor %xmm0,%xmm5
2394
2395# qhasm: uint32323232 mu <<= 9
2396# asm 1: pslld $9,<mu=int6464#2
2397# asm 2: pslld $9,<mu=%xmm1
2398pslld $9,%xmm1
2399
2400# qhasm: mr ^= mu
2401# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2402# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2403pxor %xmm1,%xmm5
2404
2405# qhasm: z13_stack = mr
2406# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#30
2407# asm 2: movdqa <mr=%xmm5,>z13_stack=496(%esp)
2408movdqa %xmm5,496(%esp)
2409
2410# qhasm: uint32323232 mq += mr
2411# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2412# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2413paddd %xmm5,%xmm3
2414
2415# qhasm: mu = mq
2416# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2417# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2418movdqa %xmm3,%xmm0
2419
2420# qhasm: uint32323232 mq >>= 19
2421# asm 1: psrld $19,<mq=int6464#4
2422# asm 2: psrld $19,<mq=%xmm3
2423psrld $19,%xmm3
2424
2425# qhasm: ms ^= mq
2426# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2427# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2428pxor %xmm3,%xmm6
2429
2430# qhasm: uint32323232 mu <<= 13
2431# asm 1: pslld $13,<mu=int6464#1
2432# asm 2: pslld $13,<mu=%xmm0
2433pslld $13,%xmm0
2434
2435# qhasm: ms ^= mu
2436# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2437# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2438pxor %xmm0,%xmm6
2439
2440# qhasm: t = z12_stack
2441# asm 1: movdqa <z12_stack=stack128#35,>t=int6464#3
2442# asm 2: movdqa <z12_stack=576(%esp),>t=%xmm2
2443movdqa 576(%esp),%xmm2
2444
2445# qhasm: p = z0_stack
2446# asm 1: movdqa <z0_stack=stack128#33,>p=int6464#1
2447# asm 2: movdqa <z0_stack=544(%esp),>p=%xmm0
2448movdqa 544(%esp),%xmm0
2449
2450# qhasm: q = z4_stack
2451# asm 1: movdqa <z4_stack=stack128#34,>q=int6464#4
2452# asm 2: movdqa <z4_stack=560(%esp),>q=%xmm3
2453movdqa 560(%esp),%xmm3
2454
2455# qhasm: r = z8_stack
2456# asm 1: movdqa <z8_stack=stack128#37,>r=int6464#2
2457# asm 2: movdqa <z8_stack=608(%esp),>r=%xmm1
2458movdqa 608(%esp),%xmm1
2459
2460# qhasm: z14_stack = ms
2461# asm 1: movdqa <ms=int6464#7,>z14_stack=stack128#24
2462# asm 2: movdqa <ms=%xmm6,>z14_stack=400(%esp)
2463movdqa %xmm6,400(%esp)
2464
2465# qhasm: uint32323232 mr += ms
2466# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2467# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2468paddd %xmm6,%xmm5
2469
2470# qhasm: mu = mr
2471# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2472# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2473movdqa %xmm5,%xmm6
2474
2475# qhasm: uint32323232 mr >>= 14
2476# asm 1: psrld $14,<mr=int6464#6
2477# asm 2: psrld $14,<mr=%xmm5
2478psrld $14,%xmm5
2479
2480# qhasm: mp ^= mr
2481# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2482# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2483pxor %xmm5,%xmm4
2484
2485# qhasm: uint32323232 mu <<= 18
2486# asm 1: pslld $18,<mu=int6464#7
2487# asm 2: pslld $18,<mu=%xmm6
2488pslld $18,%xmm6
2489
2490# qhasm: mp ^= mu
2491# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2492# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2493pxor %xmm6,%xmm4
2494
2495# qhasm: z15_stack = mp
2496# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#23
2497# asm 2: movdqa <mp=%xmm4,>z15_stack=384(%esp)
2498movdqa %xmm4,384(%esp)
2499
2500# qhasm: unsigned>? i -= 2
2501# asm 1: sub $2,<i=int32#1
2502# asm 2: sub $2,<i=%eax
2503sub $2,%eax
2504# comment:fp stack unchanged by jump
2505
2506# qhasm: goto mainloop1 if unsigned>
2507ja ._mainloop1
2508
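# note: mainloop1 ends here once the round counter i reaches zero.  The code
# below adds the original input words (orig0..orig15) back into the state and
# XORs the resulting keystream with 256 bytes of message.  Each register zi
# holds word i of four blocks, one per 32-bit lane, so the movd/pshufd
# extraction maps the four lanes of zi to message offsets n, n+64, n+128 and
# n+192.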
2509# qhasm: out = out_stack
2510# asm 1: movl <out_stack=stack32#6,>out=int32#6
2511# asm 2: movl <out_stack=20(%esp),>out=%edi
2512movl 20(%esp),%edi
2513
2514# qhasm: z0 = z0_stack
2515# asm 1: movdqa <z0_stack=stack128#33,>z0=int6464#1
2516# asm 2: movdqa <z0_stack=544(%esp),>z0=%xmm0
2517movdqa 544(%esp),%xmm0
2518
2519# qhasm: z1 = z1_stack
2520# asm 1: movdqa <z1_stack=stack128#28,>z1=int6464#2
2521# asm 2: movdqa <z1_stack=464(%esp),>z1=%xmm1
2522movdqa 464(%esp),%xmm1
2523
2524# qhasm: z2 = z2_stack
2525# asm 1: movdqa <z2_stack=stack128#31,>z2=int6464#3
2526# asm 2: movdqa <z2_stack=512(%esp),>z2=%xmm2
2527movdqa 512(%esp),%xmm2
2528
2529# qhasm: z3 = z3_stack
2530# asm 1: movdqa <z3_stack=stack128#25,>z3=int6464#4
2531# asm 2: movdqa <z3_stack=416(%esp),>z3=%xmm3
2532movdqa 416(%esp),%xmm3
2533
2534# qhasm: uint32323232 z0 += orig0
2535# asm 1: paddd <orig0=stack128#8,<z0=int6464#1
2536# asm 2: paddd <orig0=144(%esp),<z0=%xmm0
2537paddd 144(%esp),%xmm0
2538
2539# qhasm: uint32323232 z1 += orig1
2540# asm 1: paddd <orig1=stack128#12,<z1=int6464#2
2541# asm 2: paddd <orig1=208(%esp),<z1=%xmm1
2542paddd 208(%esp),%xmm1
2543
2544# qhasm: uint32323232 z2 += orig2
2545# asm 1: paddd <orig2=stack128#15,<z2=int6464#3
2546# asm 2: paddd <orig2=256(%esp),<z2=%xmm2
2547paddd 256(%esp),%xmm2
2548
2549# qhasm: uint32323232 z3 += orig3
2550# asm 1: paddd <orig3=stack128#18,<z3=int6464#4
2551# asm 2: paddd <orig3=304(%esp),<z3=%xmm3
2552paddd 304(%esp),%xmm3
2553
2554# qhasm: in0 = z0
2555# asm 1: movd <z0=int6464#1,>in0=int32#1
2556# asm 2: movd <z0=%xmm0,>in0=%eax
2557movd %xmm0,%eax
2558
2559# qhasm: in1 = z1
2560# asm 1: movd <z1=int6464#2,>in1=int32#2
2561# asm 2: movd <z1=%xmm1,>in1=%ecx
2562movd %xmm1,%ecx
2563
2564# qhasm: in2 = z2
2565# asm 1: movd <z2=int6464#3,>in2=int32#3
2566# asm 2: movd <z2=%xmm2,>in2=%edx
2567movd %xmm2,%edx
2568
2569# qhasm: in3 = z3
2570# asm 1: movd <z3=int6464#4,>in3=int32#4
2571# asm 2: movd <z3=%xmm3,>in3=%ebx
2572movd %xmm3,%ebx
2573
2574# qhasm: z0 <<<= 96
2575# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2576# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2577pshufd $0x39,%xmm0,%xmm0
2578
2579# qhasm: z1 <<<= 96
2580# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2581# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2582pshufd $0x39,%xmm1,%xmm1
2583
2584# qhasm: z2 <<<= 96
2585# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2586# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2587pshufd $0x39,%xmm2,%xmm2
2588
2589# qhasm: z3 <<<= 96
2590# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2591# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2592pshufd $0x39,%xmm3,%xmm3
2593
2594# qhasm: in0 ^= *(uint32 *) (m + 0)
2595# asm 1: xorl 0(<m=int32#5),<in0=int32#1
2596# asm 2: xorl 0(<m=%esi),<in0=%eax
2597xorl 0(%esi),%eax
2598
2599# qhasm: in1 ^= *(uint32 *) (m + 4)
2600# asm 1: xorl 4(<m=int32#5),<in1=int32#2
2601# asm 2: xorl 4(<m=%esi),<in1=%ecx
2602xorl 4(%esi),%ecx
2603
2604# qhasm: in2 ^= *(uint32 *) (m + 8)
2605# asm 1: xorl 8(<m=int32#5),<in2=int32#3
2606# asm 2: xorl 8(<m=%esi),<in2=%edx
2607xorl 8(%esi),%edx
2608
2609# qhasm: in3 ^= *(uint32 *) (m + 12)
2610# asm 1: xorl 12(<m=int32#5),<in3=int32#4
2611# asm 2: xorl 12(<m=%esi),<in3=%ebx
2612xorl 12(%esi),%ebx
2613
2614# qhasm: *(uint32 *) (out + 0) = in0
2615# asm 1: movl <in0=int32#1,0(<out=int32#6)
2616# asm 2: movl <in0=%eax,0(<out=%edi)
2617movl %eax,0(%edi)
2618
2619# qhasm: *(uint32 *) (out + 4) = in1
2620# asm 1: movl <in1=int32#2,4(<out=int32#6)
2621# asm 2: movl <in1=%ecx,4(<out=%edi)
2622movl %ecx,4(%edi)
2623
2624# qhasm: *(uint32 *) (out + 8) = in2
2625# asm 1: movl <in2=int32#3,8(<out=int32#6)
2626# asm 2: movl <in2=%edx,8(<out=%edi)
2627movl %edx,8(%edi)
2628
2629# qhasm: *(uint32 *) (out + 12) = in3
2630# asm 1: movl <in3=int32#4,12(<out=int32#6)
2631# asm 2: movl <in3=%ebx,12(<out=%edi)
2632movl %ebx,12(%edi)
2633
2634# qhasm: in0 = z0
2635# asm 1: movd <z0=int6464#1,>in0=int32#1
2636# asm 2: movd <z0=%xmm0,>in0=%eax
2637movd %xmm0,%eax
2638
2639# qhasm: in1 = z1
2640# asm 1: movd <z1=int6464#2,>in1=int32#2
2641# asm 2: movd <z1=%xmm1,>in1=%ecx
2642movd %xmm1,%ecx
2643
2644# qhasm: in2 = z2
2645# asm 1: movd <z2=int6464#3,>in2=int32#3
2646# asm 2: movd <z2=%xmm2,>in2=%edx
2647movd %xmm2,%edx
2648
2649# qhasm: in3 = z3
2650# asm 1: movd <z3=int6464#4,>in3=int32#4
2651# asm 2: movd <z3=%xmm3,>in3=%ebx
2652movd %xmm3,%ebx
2653
2654# qhasm: z0 <<<= 96
2655# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2656# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2657pshufd $0x39,%xmm0,%xmm0
2658
2659# qhasm: z1 <<<= 96
2660# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2661# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2662pshufd $0x39,%xmm1,%xmm1
2663
2664# qhasm: z2 <<<= 96
2665# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2666# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2667pshufd $0x39,%xmm2,%xmm2
2668
2669# qhasm: z3 <<<= 96
2670# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2671# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2672pshufd $0x39,%xmm3,%xmm3
2673
2674# qhasm: in0 ^= *(uint32 *) (m + 64)
2675# asm 1: xorl 64(<m=int32#5),<in0=int32#1
2676# asm 2: xorl 64(<m=%esi),<in0=%eax
2677xorl 64(%esi),%eax
2678
2679# qhasm: in1 ^= *(uint32 *) (m + 68)
2680# asm 1: xorl 68(<m=int32#5),<in1=int32#2
2681# asm 2: xorl 68(<m=%esi),<in1=%ecx
2682xorl 68(%esi),%ecx
2683
2684# qhasm: in2 ^= *(uint32 *) (m + 72)
2685# asm 1: xorl 72(<m=int32#5),<in2=int32#3
2686# asm 2: xorl 72(<m=%esi),<in2=%edx
2687xorl 72(%esi),%edx
2688
2689# qhasm: in3 ^= *(uint32 *) (m + 76)
2690# asm 1: xorl 76(<m=int32#5),<in3=int32#4
2691# asm 2: xorl 76(<m=%esi),<in3=%ebx
2692xorl 76(%esi),%ebx
2693
2694# qhasm: *(uint32 *) (out + 64) = in0
2695# asm 1: movl <in0=int32#1,64(<out=int32#6)
2696# asm 2: movl <in0=%eax,64(<out=%edi)
2697movl %eax,64(%edi)
2698
2699# qhasm: *(uint32 *) (out + 68) = in1
2700# asm 1: movl <in1=int32#2,68(<out=int32#6)
2701# asm 2: movl <in1=%ecx,68(<out=%edi)
2702movl %ecx,68(%edi)
2703
2704# qhasm: *(uint32 *) (out + 72) = in2
2705# asm 1: movl <in2=int32#3,72(<out=int32#6)
2706# asm 2: movl <in2=%edx,72(<out=%edi)
2707movl %edx,72(%edi)
2708
2709# qhasm: *(uint32 *) (out + 76) = in3
2710# asm 1: movl <in3=int32#4,76(<out=int32#6)
2711# asm 2: movl <in3=%ebx,76(<out=%edi)
2712movl %ebx,76(%edi)
2713
2714# qhasm: in0 = z0
2715# asm 1: movd <z0=int6464#1,>in0=int32#1
2716# asm 2: movd <z0=%xmm0,>in0=%eax
2717movd %xmm0,%eax
2718
2719# qhasm: in1 = z1
2720# asm 1: movd <z1=int6464#2,>in1=int32#2
2721# asm 2: movd <z1=%xmm1,>in1=%ecx
2722movd %xmm1,%ecx
2723
2724# qhasm: in2 = z2
2725# asm 1: movd <z2=int6464#3,>in2=int32#3
2726# asm 2: movd <z2=%xmm2,>in2=%edx
2727movd %xmm2,%edx
2728
2729# qhasm: in3 = z3
2730# asm 1: movd <z3=int6464#4,>in3=int32#4
2731# asm 2: movd <z3=%xmm3,>in3=%ebx
2732movd %xmm3,%ebx
2733
2734# qhasm: z0 <<<= 96
2735# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2736# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2737pshufd $0x39,%xmm0,%xmm0
2738
2739# qhasm: z1 <<<= 96
2740# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2741# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2742pshufd $0x39,%xmm1,%xmm1
2743
2744# qhasm: z2 <<<= 96
2745# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2746# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2747pshufd $0x39,%xmm2,%xmm2
2748
2749# qhasm: z3 <<<= 96
2750# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2751# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2752pshufd $0x39,%xmm3,%xmm3
2753
2754# qhasm: in0 ^= *(uint32 *) (m + 128)
2755# asm 1: xorl 128(<m=int32#5),<in0=int32#1
2756# asm 2: xorl 128(<m=%esi),<in0=%eax
2757xorl 128(%esi),%eax
2758
2759# qhasm: in1 ^= *(uint32 *) (m + 132)
2760# asm 1: xorl 132(<m=int32#5),<in1=int32#2
2761# asm 2: xorl 132(<m=%esi),<in1=%ecx
2762xorl 132(%esi),%ecx
2763
2764# qhasm: in2 ^= *(uint32 *) (m + 136)
2765# asm 1: xorl 136(<m=int32#5),<in2=int32#3
2766# asm 2: xorl 136(<m=%esi),<in2=%edx
2767xorl 136(%esi),%edx
2768
2769# qhasm: in3 ^= *(uint32 *) (m + 140)
2770# asm 1: xorl 140(<m=int32#5),<in3=int32#4
2771# asm 2: xorl 140(<m=%esi),<in3=%ebx
2772xorl 140(%esi),%ebx
2773
2774# qhasm: *(uint32 *) (out + 128) = in0
2775# asm 1: movl <in0=int32#1,128(<out=int32#6)
2776# asm 2: movl <in0=%eax,128(<out=%edi)
2777movl %eax,128(%edi)
2778
2779# qhasm: *(uint32 *) (out + 132) = in1
2780# asm 1: movl <in1=int32#2,132(<out=int32#6)
2781# asm 2: movl <in1=%ecx,132(<out=%edi)
2782movl %ecx,132(%edi)
2783
2784# qhasm: *(uint32 *) (out + 136) = in2
2785# asm 1: movl <in2=int32#3,136(<out=int32#6)
2786# asm 2: movl <in2=%edx,136(<out=%edi)
2787movl %edx,136(%edi)
2788
2789# qhasm: *(uint32 *) (out + 140) = in3
2790# asm 1: movl <in3=int32#4,140(<out=int32#6)
2791# asm 2: movl <in3=%ebx,140(<out=%edi)
2792movl %ebx,140(%edi)
2793
2794# qhasm: in0 = z0
2795# asm 1: movd <z0=int6464#1,>in0=int32#1
2796# asm 2: movd <z0=%xmm0,>in0=%eax
2797movd %xmm0,%eax
2798
2799# qhasm: in1 = z1
2800# asm 1: movd <z1=int6464#2,>in1=int32#2
2801# asm 2: movd <z1=%xmm1,>in1=%ecx
2802movd %xmm1,%ecx
2803
2804# qhasm: in2 = z2
2805# asm 1: movd <z2=int6464#3,>in2=int32#3
2806# asm 2: movd <z2=%xmm2,>in2=%edx
2807movd %xmm2,%edx
2808
2809# qhasm: in3 = z3
2810# asm 1: movd <z3=int6464#4,>in3=int32#4
2811# asm 2: movd <z3=%xmm3,>in3=%ebx
2812movd %xmm3,%ebx
2813
2814# qhasm: in0 ^= *(uint32 *) (m + 192)
2815# asm 1: xorl 192(<m=int32#5),<in0=int32#1
2816# asm 2: xorl 192(<m=%esi),<in0=%eax
2817xorl 192(%esi),%eax
2818
2819# qhasm: in1 ^= *(uint32 *) (m + 196)
2820# asm 1: xorl 196(<m=int32#5),<in1=int32#2
2821# asm 2: xorl 196(<m=%esi),<in1=%ecx
2822xorl 196(%esi),%ecx
2823
2824# qhasm: in2 ^= *(uint32 *) (m + 200)
2825# asm 1: xorl 200(<m=int32#5),<in2=int32#3
2826# asm 2: xorl 200(<m=%esi),<in2=%edx
2827xorl 200(%esi),%edx
2828
2829# qhasm: in3 ^= *(uint32 *) (m + 204)
2830# asm 1: xorl 204(<m=int32#5),<in3=int32#4
2831# asm 2: xorl 204(<m=%esi),<in3=%ebx
2832xorl 204(%esi),%ebx
2833
2834# qhasm: *(uint32 *) (out + 192) = in0
2835# asm 1: movl <in0=int32#1,192(<out=int32#6)
2836# asm 2: movl <in0=%eax,192(<out=%edi)
2837movl %eax,192(%edi)
2838
2839# qhasm: *(uint32 *) (out + 196) = in1
2840# asm 1: movl <in1=int32#2,196(<out=int32#6)
2841# asm 2: movl <in1=%ecx,196(<out=%edi)
2842movl %ecx,196(%edi)
2843
2844# qhasm: *(uint32 *) (out + 200) = in2
2845# asm 1: movl <in2=int32#3,200(<out=int32#6)
2846# asm 2: movl <in2=%edx,200(<out=%edi)
2847movl %edx,200(%edi)
2848
2849# qhasm: *(uint32 *) (out + 204) = in3
2850# asm 1: movl <in3=int32#4,204(<out=int32#6)
2851# asm 2: movl <in3=%ebx,204(<out=%edi)
2852movl %ebx,204(%edi)
2853
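# note: the same extract / xor / store sequence now repeats for z4..z7,
# z8..z11 and z12..z15, covering the remaining words (offsets 16 through 60)
# of each of the four 64-byte blocks.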
2854# qhasm: z4 = z4_stack
2855# asm 1: movdqa <z4_stack=stack128#34,>z4=int6464#1
2856# asm 2: movdqa <z4_stack=560(%esp),>z4=%xmm0
2857movdqa 560(%esp),%xmm0
2858
2859# qhasm: z5 = z5_stack
2860# asm 1: movdqa <z5_stack=stack128#21,>z5=int6464#2
2861# asm 2: movdqa <z5_stack=352(%esp),>z5=%xmm1
2862movdqa 352(%esp),%xmm1
2863
2864# qhasm: z6 = z6_stack
2865# asm 1: movdqa <z6_stack=stack128#26,>z6=int6464#3
2866# asm 2: movdqa <z6_stack=432(%esp),>z6=%xmm2
2867movdqa 432(%esp),%xmm2
2868
2869# qhasm: z7 = z7_stack
2870# asm 1: movdqa <z7_stack=stack128#29,>z7=int6464#4
2871# asm 2: movdqa <z7_stack=480(%esp),>z7=%xmm3
2872movdqa 480(%esp),%xmm3
2873
2874# qhasm: uint32323232 z4 += orig4
2875# asm 1: paddd <orig4=stack128#16,<z4=int6464#1
2876# asm 2: paddd <orig4=272(%esp),<z4=%xmm0
2877paddd 272(%esp),%xmm0
2878
2879# qhasm: uint32323232 z5 += orig5
2880# asm 1: paddd <orig5=stack128#5,<z5=int6464#2
2881# asm 2: paddd <orig5=96(%esp),<z5=%xmm1
2882paddd 96(%esp),%xmm1
2883
2884# qhasm: uint32323232 z6 += orig6
2885# asm 1: paddd <orig6=stack128#9,<z6=int6464#3
2886# asm 2: paddd <orig6=160(%esp),<z6=%xmm2
2887paddd 160(%esp),%xmm2
2888
2889# qhasm: uint32323232 z7 += orig7
2890# asm 1: paddd <orig7=stack128#13,<z7=int6464#4
2891# asm 2: paddd <orig7=224(%esp),<z7=%xmm3
2892paddd 224(%esp),%xmm3
2893
2894# qhasm: in4 = z4
2895# asm 1: movd <z4=int6464#1,>in4=int32#1
2896# asm 2: movd <z4=%xmm0,>in4=%eax
2897movd %xmm0,%eax
2898
2899# qhasm: in5 = z5
2900# asm 1: movd <z5=int6464#2,>in5=int32#2
2901# asm 2: movd <z5=%xmm1,>in5=%ecx
2902movd %xmm1,%ecx
2903
2904# qhasm: in6 = z6
2905# asm 1: movd <z6=int6464#3,>in6=int32#3
2906# asm 2: movd <z6=%xmm2,>in6=%edx
2907movd %xmm2,%edx
2908
2909# qhasm: in7 = z7
2910# asm 1: movd <z7=int6464#4,>in7=int32#4
2911# asm 2: movd <z7=%xmm3,>in7=%ebx
2912movd %xmm3,%ebx
2913
2914# qhasm: z4 <<<= 96
2915# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2916# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2917pshufd $0x39,%xmm0,%xmm0
2918
2919# qhasm: z5 <<<= 96
2920# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
2921# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
2922pshufd $0x39,%xmm1,%xmm1
2923
2924# qhasm: z6 <<<= 96
2925# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
2926# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
2927pshufd $0x39,%xmm2,%xmm2
2928
2929# qhasm: z7 <<<= 96
2930# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
2931# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
2932pshufd $0x39,%xmm3,%xmm3
2933
2934# qhasm: in4 ^= *(uint32 *) (m + 16)
2935# asm 1: xorl 16(<m=int32#5),<in4=int32#1
2936# asm 2: xorl 16(<m=%esi),<in4=%eax
2937xorl 16(%esi),%eax
2938
2939# qhasm: in5 ^= *(uint32 *) (m + 20)
2940# asm 1: xorl 20(<m=int32#5),<in5=int32#2
2941# asm 2: xorl 20(<m=%esi),<in5=%ecx
2942xorl 20(%esi),%ecx
2943
2944# qhasm: in6 ^= *(uint32 *) (m + 24)
2945# asm 1: xorl 24(<m=int32#5),<in6=int32#3
2946# asm 2: xorl 24(<m=%esi),<in6=%edx
2947xorl 24(%esi),%edx
2948
2949# qhasm: in7 ^= *(uint32 *) (m + 28)
2950# asm 1: xorl 28(<m=int32#5),<in7=int32#4
2951# asm 2: xorl 28(<m=%esi),<in7=%ebx
2952xorl 28(%esi),%ebx
2953
2954# qhasm: *(uint32 *) (out + 16) = in4
2955# asm 1: movl <in4=int32#1,16(<out=int32#6)
2956# asm 2: movl <in4=%eax,16(<out=%edi)
2957movl %eax,16(%edi)
2958
2959# qhasm: *(uint32 *) (out + 20) = in5
2960# asm 1: movl <in5=int32#2,20(<out=int32#6)
2961# asm 2: movl <in5=%ecx,20(<out=%edi)
2962movl %ecx,20(%edi)
2963
2964# qhasm: *(uint32 *) (out + 24) = in6
2965# asm 1: movl <in6=int32#3,24(<out=int32#6)
2966# asm 2: movl <in6=%edx,24(<out=%edi)
2967movl %edx,24(%edi)
2968
2969# qhasm: *(uint32 *) (out + 28) = in7
2970# asm 1: movl <in7=int32#4,28(<out=int32#6)
2971# asm 2: movl <in7=%ebx,28(<out=%edi)
2972movl %ebx,28(%edi)
2973
2974# qhasm: in4 = z4
2975# asm 1: movd <z4=int6464#1,>in4=int32#1
2976# asm 2: movd <z4=%xmm0,>in4=%eax
2977movd %xmm0,%eax
2978
2979# qhasm: in5 = z5
2980# asm 1: movd <z5=int6464#2,>in5=int32#2
2981# asm 2: movd <z5=%xmm1,>in5=%ecx
2982movd %xmm1,%ecx
2983
2984# qhasm: in6 = z6
2985# asm 1: movd <z6=int6464#3,>in6=int32#3
2986# asm 2: movd <z6=%xmm2,>in6=%edx
2987movd %xmm2,%edx
2988
2989# qhasm: in7 = z7
2990# asm 1: movd <z7=int6464#4,>in7=int32#4
2991# asm 2: movd <z7=%xmm3,>in7=%ebx
2992movd %xmm3,%ebx
2993
2994# qhasm: z4 <<<= 96
2995# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2996# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2997pshufd $0x39,%xmm0,%xmm0
2998
2999# qhasm: z5 <<<= 96
3000# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3001# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3002pshufd $0x39,%xmm1,%xmm1
3003
3004# qhasm: z6 <<<= 96
3005# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3006# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3007pshufd $0x39,%xmm2,%xmm2
3008
3009# qhasm: z7 <<<= 96
3010# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3011# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3012pshufd $0x39,%xmm3,%xmm3
3013
3014# qhasm: in4 ^= *(uint32 *) (m + 80)
3015# asm 1: xorl 80(<m=int32#5),<in4=int32#1
3016# asm 2: xorl 80(<m=%esi),<in4=%eax
3017xorl 80(%esi),%eax
3018
3019# qhasm: in5 ^= *(uint32 *) (m + 84)
3020# asm 1: xorl 84(<m=int32#5),<in5=int32#2
3021# asm 2: xorl 84(<m=%esi),<in5=%ecx
3022xorl 84(%esi),%ecx
3023
3024# qhasm: in6 ^= *(uint32 *) (m + 88)
3025# asm 1: xorl 88(<m=int32#5),<in6=int32#3
3026# asm 2: xorl 88(<m=%esi),<in6=%edx
3027xorl 88(%esi),%edx
3028
3029# qhasm: in7 ^= *(uint32 *) (m + 92)
3030# asm 1: xorl 92(<m=int32#5),<in7=int32#4
3031# asm 2: xorl 92(<m=%esi),<in7=%ebx
3032xorl 92(%esi),%ebx
3033
3034# qhasm: *(uint32 *) (out + 80) = in4
3035# asm 1: movl <in4=int32#1,80(<out=int32#6)
3036# asm 2: movl <in4=%eax,80(<out=%edi)
3037movl %eax,80(%edi)
3038
3039# qhasm: *(uint32 *) (out + 84) = in5
3040# asm 1: movl <in5=int32#2,84(<out=int32#6)
3041# asm 2: movl <in5=%ecx,84(<out=%edi)
3042movl %ecx,84(%edi)
3043
3044# qhasm: *(uint32 *) (out + 88) = in6
3045# asm 1: movl <in6=int32#3,88(<out=int32#6)
3046# asm 2: movl <in6=%edx,88(<out=%edi)
3047movl %edx,88(%edi)
3048
3049# qhasm: *(uint32 *) (out + 92) = in7
3050# asm 1: movl <in7=int32#4,92(<out=int32#6)
3051# asm 2: movl <in7=%ebx,92(<out=%edi)
3052movl %ebx,92(%edi)
3053
3054# qhasm: in4 = z4
3055# asm 1: movd <z4=int6464#1,>in4=int32#1
3056# asm 2: movd <z4=%xmm0,>in4=%eax
3057movd %xmm0,%eax
3058
3059# qhasm: in5 = z5
3060# asm 1: movd <z5=int6464#2,>in5=int32#2
3061# asm 2: movd <z5=%xmm1,>in5=%ecx
3062movd %xmm1,%ecx
3063
3064# qhasm: in6 = z6
3065# asm 1: movd <z6=int6464#3,>in6=int32#3
3066# asm 2: movd <z6=%xmm2,>in6=%edx
3067movd %xmm2,%edx
3068
3069# qhasm: in7 = z7
3070# asm 1: movd <z7=int6464#4,>in7=int32#4
3071# asm 2: movd <z7=%xmm3,>in7=%ebx
3072movd %xmm3,%ebx
3073
3074# qhasm: z4 <<<= 96
3075# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
3076# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
3077pshufd $0x39,%xmm0,%xmm0
3078
3079# qhasm: z5 <<<= 96
3080# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3081# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3082pshufd $0x39,%xmm1,%xmm1
3083
3084# qhasm: z6 <<<= 96
3085# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3086# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3087pshufd $0x39,%xmm2,%xmm2
3088
3089# qhasm: z7 <<<= 96
3090# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3091# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3092pshufd $0x39,%xmm3,%xmm3
3093
3094# qhasm: in4 ^= *(uint32 *) (m + 144)
3095# asm 1: xorl 144(<m=int32#5),<in4=int32#1
3096# asm 2: xorl 144(<m=%esi),<in4=%eax
3097xorl 144(%esi),%eax
3098
3099# qhasm: in5 ^= *(uint32 *) (m + 148)
3100# asm 1: xorl 148(<m=int32#5),<in5=int32#2
3101# asm 2: xorl 148(<m=%esi),<in5=%ecx
3102xorl 148(%esi),%ecx
3103
3104# qhasm: in6 ^= *(uint32 *) (m + 152)
3105# asm 1: xorl 152(<m=int32#5),<in6=int32#3
3106# asm 2: xorl 152(<m=%esi),<in6=%edx
3107xorl 152(%esi),%edx
3108
3109# qhasm: in7 ^= *(uint32 *) (m + 156)
3110# asm 1: xorl 156(<m=int32#5),<in7=int32#4
3111# asm 2: xorl 156(<m=%esi),<in7=%ebx
3112xorl 156(%esi),%ebx
3113
3114# qhasm: *(uint32 *) (out + 144) = in4
3115# asm 1: movl <in4=int32#1,144(<out=int32#6)
3116# asm 2: movl <in4=%eax,144(<out=%edi)
3117movl %eax,144(%edi)
3118
3119# qhasm: *(uint32 *) (out + 148) = in5
3120# asm 1: movl <in5=int32#2,148(<out=int32#6)
3121# asm 2: movl <in5=%ecx,148(<out=%edi)
3122movl %ecx,148(%edi)
3123
3124# qhasm: *(uint32 *) (out + 152) = in6
3125# asm 1: movl <in6=int32#3,152(<out=int32#6)
3126# asm 2: movl <in6=%edx,152(<out=%edi)
3127movl %edx,152(%edi)
3128
3129# qhasm: *(uint32 *) (out + 156) = in7
3130# asm 1: movl <in7=int32#4,156(<out=int32#6)
3131# asm 2: movl <in7=%ebx,156(<out=%edi)
3132movl %ebx,156(%edi)
3133
3134# qhasm: in4 = z4
3135# asm 1: movd <z4=int6464#1,>in4=int32#1
3136# asm 2: movd <z4=%xmm0,>in4=%eax
3137movd %xmm0,%eax
3138
3139# qhasm: in5 = z5
3140# asm 1: movd <z5=int6464#2,>in5=int32#2
3141# asm 2: movd <z5=%xmm1,>in5=%ecx
3142movd %xmm1,%ecx
3143
3144# qhasm: in6 = z6
3145# asm 1: movd <z6=int6464#3,>in6=int32#3
3146# asm 2: movd <z6=%xmm2,>in6=%edx
3147movd %xmm2,%edx
3148
3149# qhasm: in7 = z7
3150# asm 1: movd <z7=int6464#4,>in7=int32#4
3151# asm 2: movd <z7=%xmm3,>in7=%ebx
3152movd %xmm3,%ebx
3153
3154# qhasm: in4 ^= *(uint32 *) (m + 208)
3155# asm 1: xorl 208(<m=int32#5),<in4=int32#1
3156# asm 2: xorl 208(<m=%esi),<in4=%eax
3157xorl 208(%esi),%eax
3158
3159# qhasm: in5 ^= *(uint32 *) (m + 212)
3160# asm 1: xorl 212(<m=int32#5),<in5=int32#2
3161# asm 2: xorl 212(<m=%esi),<in5=%ecx
3162xorl 212(%esi),%ecx
3163
3164# qhasm: in6 ^= *(uint32 *) (m + 216)
3165# asm 1: xorl 216(<m=int32#5),<in6=int32#3
3166# asm 2: xorl 216(<m=%esi),<in6=%edx
3167xorl 216(%esi),%edx
3168
3169# qhasm: in7 ^= *(uint32 *) (m + 220)
3170# asm 1: xorl 220(<m=int32#5),<in7=int32#4
3171# asm 2: xorl 220(<m=%esi),<in7=%ebx
3172xorl 220(%esi),%ebx
3173
3174# qhasm: *(uint32 *) (out + 208) = in4
3175# asm 1: movl <in4=int32#1,208(<out=int32#6)
3176# asm 2: movl <in4=%eax,208(<out=%edi)
3177movl %eax,208(%edi)
3178
3179# qhasm: *(uint32 *) (out + 212) = in5
3180# asm 1: movl <in5=int32#2,212(<out=int32#6)
3181# asm 2: movl <in5=%ecx,212(<out=%edi)
3182movl %ecx,212(%edi)
3183
3184# qhasm: *(uint32 *) (out + 216) = in6
3185# asm 1: movl <in6=int32#3,216(<out=int32#6)
3186# asm 2: movl <in6=%edx,216(<out=%edi)
3187movl %edx,216(%edi)
3188
3189# qhasm: *(uint32 *) (out + 220) = in7
3190# asm 1: movl <in7=int32#4,220(<out=int32#6)
3191# asm 2: movl <in7=%ebx,220(<out=%edi)
3192movl %ebx,220(%edi)
3193
3194# qhasm: z8 = z8_stack
3195# asm 1: movdqa <z8_stack=stack128#37,>z8=int6464#1
3196# asm 2: movdqa <z8_stack=608(%esp),>z8=%xmm0
3197movdqa 608(%esp),%xmm0
3198
3199# qhasm: z9 = z9_stack
3200# asm 1: movdqa <z9_stack=stack128#32,>z9=int6464#2
3201# asm 2: movdqa <z9_stack=528(%esp),>z9=%xmm1
3202movdqa 528(%esp),%xmm1
3203
3204# qhasm: z10 = z10_stack
3205# asm 1: movdqa <z10_stack=stack128#22,>z10=int6464#3
3206# asm 2: movdqa <z10_stack=368(%esp),>z10=%xmm2
3207movdqa 368(%esp),%xmm2
3208
3209# qhasm: z11 = z11_stack
3210# asm 1: movdqa <z11_stack=stack128#27,>z11=int6464#4
3211# asm 2: movdqa <z11_stack=448(%esp),>z11=%xmm3
3212movdqa 448(%esp),%xmm3
3213
3214# qhasm: uint32323232 z8 += orig8
3215# asm 1: paddd <orig8=stack128#19,<z8=int6464#1
3216# asm 2: paddd <orig8=320(%esp),<z8=%xmm0
3217paddd 320(%esp),%xmm0
3218
3219# qhasm: uint32323232 z9 += orig9
3220# asm 1: paddd <orig9=stack128#20,<z9=int6464#2
3221# asm 2: paddd <orig9=336(%esp),<z9=%xmm1
3222paddd 336(%esp),%xmm1
3223
3224# qhasm: uint32323232 z10 += orig10
3225# asm 1: paddd <orig10=stack128#6,<z10=int6464#3
3226# asm 2: paddd <orig10=112(%esp),<z10=%xmm2
3227paddd 112(%esp),%xmm2
3228
3229# qhasm: uint32323232 z11 += orig11
3230# asm 1: paddd <orig11=stack128#10,<z11=int6464#4
3231# asm 2: paddd <orig11=176(%esp),<z11=%xmm3
3232paddd 176(%esp),%xmm3
3233
3234# qhasm: in8 = z8
3235# asm 1: movd <z8=int6464#1,>in8=int32#1
3236# asm 2: movd <z8=%xmm0,>in8=%eax
3237movd %xmm0,%eax
3238
3239# qhasm: in9 = z9
3240# asm 1: movd <z9=int6464#2,>in9=int32#2
3241# asm 2: movd <z9=%xmm1,>in9=%ecx
3242movd %xmm1,%ecx
3243
3244# qhasm: in10 = z10
3245# asm 1: movd <z10=int6464#3,>in10=int32#3
3246# asm 2: movd <z10=%xmm2,>in10=%edx
3247movd %xmm2,%edx
3248
3249# qhasm: in11 = z11
3250# asm 1: movd <z11=int6464#4,>in11=int32#4
3251# asm 2: movd <z11=%xmm3,>in11=%ebx
3252movd %xmm3,%ebx
3253
3254# qhasm: z8 <<<= 96
3255# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3256# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3257pshufd $0x39,%xmm0,%xmm0
3258
3259# qhasm: z9 <<<= 96
3260# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3261# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3262pshufd $0x39,%xmm1,%xmm1
3263
3264# qhasm: z10 <<<= 96
3265# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3266# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3267pshufd $0x39,%xmm2,%xmm2
3268
3269# qhasm: z11 <<<= 96
3270# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3271# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3272pshufd $0x39,%xmm3,%xmm3
3273
3274# qhasm: in8 ^= *(uint32 *) (m + 32)
3275# asm 1: xorl 32(<m=int32#5),<in8=int32#1
3276# asm 2: xorl 32(<m=%esi),<in8=%eax
3277xorl 32(%esi),%eax
3278
3279# qhasm: in9 ^= *(uint32 *) (m + 36)
3280# asm 1: xorl 36(<m=int32#5),<in9=int32#2
3281# asm 2: xorl 36(<m=%esi),<in9=%ecx
3282xorl 36(%esi),%ecx
3283
3284# qhasm: in10 ^= *(uint32 *) (m + 40)
3285# asm 1: xorl 40(<m=int32#5),<in10=int32#3
3286# asm 2: xorl 40(<m=%esi),<in10=%edx
3287xorl 40(%esi),%edx
3288
3289# qhasm: in11 ^= *(uint32 *) (m + 44)
3290# asm 1: xorl 44(<m=int32#5),<in11=int32#4
3291# asm 2: xorl 44(<m=%esi),<in11=%ebx
3292xorl 44(%esi),%ebx
3293
3294# qhasm: *(uint32 *) (out + 32) = in8
3295# asm 1: movl <in8=int32#1,32(<out=int32#6)
3296# asm 2: movl <in8=%eax,32(<out=%edi)
3297movl %eax,32(%edi)
3298
3299# qhasm: *(uint32 *) (out + 36) = in9
3300# asm 1: movl <in9=int32#2,36(<out=int32#6)
3301# asm 2: movl <in9=%ecx,36(<out=%edi)
3302movl %ecx,36(%edi)
3303
3304# qhasm: *(uint32 *) (out + 40) = in10
3305# asm 1: movl <in10=int32#3,40(<out=int32#6)
3306# asm 2: movl <in10=%edx,40(<out=%edi)
3307movl %edx,40(%edi)
3308
3309# qhasm: *(uint32 *) (out + 44) = in11
3310# asm 1: movl <in11=int32#4,44(<out=int32#6)
3311# asm 2: movl <in11=%ebx,44(<out=%edi)
3312movl %ebx,44(%edi)
3313
3314# qhasm: in8 = z8
3315# asm 1: movd <z8=int6464#1,>in8=int32#1
3316# asm 2: movd <z8=%xmm0,>in8=%eax
3317movd %xmm0,%eax
3318
3319# qhasm: in9 = z9
3320# asm 1: movd <z9=int6464#2,>in9=int32#2
3321# asm 2: movd <z9=%xmm1,>in9=%ecx
3322movd %xmm1,%ecx
3323
3324# qhasm: in10 = z10
3325# asm 1: movd <z10=int6464#3,>in10=int32#3
3326# asm 2: movd <z10=%xmm2,>in10=%edx
3327movd %xmm2,%edx
3328
3329# qhasm: in11 = z11
3330# asm 1: movd <z11=int6464#4,>in11=int32#4
3331# asm 2: movd <z11=%xmm3,>in11=%ebx
3332movd %xmm3,%ebx
3333
3334# qhasm: z8 <<<= 96
3335# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3336# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3337pshufd $0x39,%xmm0,%xmm0
3338
3339# qhasm: z9 <<<= 96
3340# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3341# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3342pshufd $0x39,%xmm1,%xmm1
3343
3344# qhasm: z10 <<<= 96
3345# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3346# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3347pshufd $0x39,%xmm2,%xmm2
3348
3349# qhasm: z11 <<<= 96
3350# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3351# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3352pshufd $0x39,%xmm3,%xmm3
3353
3354# qhasm: in8 ^= *(uint32 *) (m + 96)
3355# asm 1: xorl 96(<m=int32#5),<in8=int32#1
3356# asm 2: xorl 96(<m=%esi),<in8=%eax
3357xorl 96(%esi),%eax
3358
3359# qhasm: in9 ^= *(uint32 *) (m + 100)
3360# asm 1: xorl 100(<m=int32#5),<in9=int32#2
3361# asm 2: xorl 100(<m=%esi),<in9=%ecx
3362xorl 100(%esi),%ecx
3363
3364# qhasm: in10 ^= *(uint32 *) (m + 104)
3365# asm 1: xorl 104(<m=int32#5),<in10=int32#3
3366# asm 2: xorl 104(<m=%esi),<in10=%edx
3367xorl 104(%esi),%edx
3368
3369# qhasm: in11 ^= *(uint32 *) (m + 108)
3370# asm 1: xorl 108(<m=int32#5),<in11=int32#4
3371# asm 2: xorl 108(<m=%esi),<in11=%ebx
3372xorl 108(%esi),%ebx
3373
3374# qhasm: *(uint32 *) (out + 96) = in8
3375# asm 1: movl <in8=int32#1,96(<out=int32#6)
3376# asm 2: movl <in8=%eax,96(<out=%edi)
3377movl %eax,96(%edi)
3378
3379# qhasm: *(uint32 *) (out + 100) = in9
3380# asm 1: movl <in9=int32#2,100(<out=int32#6)
3381# asm 2: movl <in9=%ecx,100(<out=%edi)
3382movl %ecx,100(%edi)
3383
3384# qhasm: *(uint32 *) (out + 104) = in10
3385# asm 1: movl <in10=int32#3,104(<out=int32#6)
3386# asm 2: movl <in10=%edx,104(<out=%edi)
3387movl %edx,104(%edi)
3388
3389# qhasm: *(uint32 *) (out + 108) = in11
3390# asm 1: movl <in11=int32#4,108(<out=int32#6)
3391# asm 2: movl <in11=%ebx,108(<out=%edi)
3392movl %ebx,108(%edi)
3393
3394# qhasm: in8 = z8
3395# asm 1: movd <z8=int6464#1,>in8=int32#1
3396# asm 2: movd <z8=%xmm0,>in8=%eax
3397movd %xmm0,%eax
3398
3399# qhasm: in9 = z9
3400# asm 1: movd <z9=int6464#2,>in9=int32#2
3401# asm 2: movd <z9=%xmm1,>in9=%ecx
3402movd %xmm1,%ecx
3403
3404# qhasm: in10 = z10
3405# asm 1: movd <z10=int6464#3,>in10=int32#3
3406# asm 2: movd <z10=%xmm2,>in10=%edx
3407movd %xmm2,%edx
3408
3409# qhasm: in11 = z11
3410# asm 1: movd <z11=int6464#4,>in11=int32#4
3411# asm 2: movd <z11=%xmm3,>in11=%ebx
3412movd %xmm3,%ebx
3413
3414# qhasm: z8 <<<= 96
3415# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3416# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3417pshufd $0x39,%xmm0,%xmm0
3418
3419# qhasm: z9 <<<= 96
3420# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3421# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3422pshufd $0x39,%xmm1,%xmm1
3423
3424# qhasm: z10 <<<= 96
3425# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3426# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3427pshufd $0x39,%xmm2,%xmm2
3428
3429# qhasm: z11 <<<= 96
3430# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3431# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3432pshufd $0x39,%xmm3,%xmm3
3433
3434# qhasm: in8 ^= *(uint32 *) (m + 160)
3435# asm 1: xorl 160(<m=int32#5),<in8=int32#1
3436# asm 2: xorl 160(<m=%esi),<in8=%eax
3437xorl 160(%esi),%eax
3438
3439# qhasm: in9 ^= *(uint32 *) (m + 164)
3440# asm 1: xorl 164(<m=int32#5),<in9=int32#2
3441# asm 2: xorl 164(<m=%esi),<in9=%ecx
3442xorl 164(%esi),%ecx
3443
3444# qhasm: in10 ^= *(uint32 *) (m + 168)
3445# asm 1: xorl 168(<m=int32#5),<in10=int32#3
3446# asm 2: xorl 168(<m=%esi),<in10=%edx
3447xorl 168(%esi),%edx
3448
3449# qhasm: in11 ^= *(uint32 *) (m + 172)
3450# asm 1: xorl 172(<m=int32#5),<in11=int32#4
3451# asm 2: xorl 172(<m=%esi),<in11=%ebx
3452xorl 172(%esi),%ebx
3453
3454# qhasm: *(uint32 *) (out + 160) = in8
3455# asm 1: movl <in8=int32#1,160(<out=int32#6)
3456# asm 2: movl <in8=%eax,160(<out=%edi)
3457movl %eax,160(%edi)
3458
3459# qhasm: *(uint32 *) (out + 164) = in9
3460# asm 1: movl <in9=int32#2,164(<out=int32#6)
3461# asm 2: movl <in9=%ecx,164(<out=%edi)
3462movl %ecx,164(%edi)
3463
3464# qhasm: *(uint32 *) (out + 168) = in10
3465# asm 1: movl <in10=int32#3,168(<out=int32#6)
3466# asm 2: movl <in10=%edx,168(<out=%edi)
3467movl %edx,168(%edi)
3468
3469# qhasm: *(uint32 *) (out + 172) = in11
3470# asm 1: movl <in11=int32#4,172(<out=int32#6)
3471# asm 2: movl <in11=%ebx,172(<out=%edi)
3472movl %ebx,172(%edi)
3473
3474# qhasm: in8 = z8
3475# asm 1: movd <z8=int6464#1,>in8=int32#1
3476# asm 2: movd <z8=%xmm0,>in8=%eax
3477movd %xmm0,%eax
3478
3479# qhasm: in9 = z9
3480# asm 1: movd <z9=int6464#2,>in9=int32#2
3481# asm 2: movd <z9=%xmm1,>in9=%ecx
3482movd %xmm1,%ecx
3483
3484# qhasm: in10 = z10
3485# asm 1: movd <z10=int6464#3,>in10=int32#3
3486# asm 2: movd <z10=%xmm2,>in10=%edx
3487movd %xmm2,%edx
3488
3489# qhasm: in11 = z11
3490# asm 1: movd <z11=int6464#4,>in11=int32#4
3491# asm 2: movd <z11=%xmm3,>in11=%ebx
3492movd %xmm3,%ebx
3493
3494# qhasm: in8 ^= *(uint32 *) (m + 224)
3495# asm 1: xorl 224(<m=int32#5),<in8=int32#1
3496# asm 2: xorl 224(<m=%esi),<in8=%eax
3497xorl 224(%esi),%eax
3498
3499# qhasm: in9 ^= *(uint32 *) (m + 228)
3500# asm 1: xorl 228(<m=int32#5),<in9=int32#2
3501# asm 2: xorl 228(<m=%esi),<in9=%ecx
3502xorl 228(%esi),%ecx
3503
3504# qhasm: in10 ^= *(uint32 *) (m + 232)
3505# asm 1: xorl 232(<m=int32#5),<in10=int32#3
3506# asm 2: xorl 232(<m=%esi),<in10=%edx
3507xorl 232(%esi),%edx
3508
3509# qhasm: in11 ^= *(uint32 *) (m + 236)
3510# asm 1: xorl 236(<m=int32#5),<in11=int32#4
3511# asm 2: xorl 236(<m=%esi),<in11=%ebx
3512xorl 236(%esi),%ebx
3513
3514# qhasm: *(uint32 *) (out + 224) = in8
3515# asm 1: movl <in8=int32#1,224(<out=int32#6)
3516# asm 2: movl <in8=%eax,224(<out=%edi)
3517movl %eax,224(%edi)
3518
3519# qhasm: *(uint32 *) (out + 228) = in9
3520# asm 1: movl <in9=int32#2,228(<out=int32#6)
3521# asm 2: movl <in9=%ecx,228(<out=%edi)
3522movl %ecx,228(%edi)
3523
3524# qhasm: *(uint32 *) (out + 232) = in10
3525# asm 1: movl <in10=int32#3,232(<out=int32#6)
3526# asm 2: movl <in10=%edx,232(<out=%edi)
3527movl %edx,232(%edi)
3528
3529# qhasm: *(uint32 *) (out + 236) = in11
3530# asm 1: movl <in11=int32#4,236(<out=int32#6)
3531# asm 2: movl <in11=%ebx,236(<out=%edi)
3532movl %ebx,236(%edi)
3533
3534# qhasm: z12 = z12_stack
3535# asm 1: movdqa <z12_stack=stack128#35,>z12=int6464#1
3536# asm 2: movdqa <z12_stack=576(%esp),>z12=%xmm0
3537movdqa 576(%esp),%xmm0
3538
3539# qhasm: z13 = z13_stack
3540# asm 1: movdqa <z13_stack=stack128#30,>z13=int6464#2
3541# asm 2: movdqa <z13_stack=496(%esp),>z13=%xmm1
3542movdqa 496(%esp),%xmm1
3543
3544# qhasm: z14 = z14_stack
3545# asm 1: movdqa <z14_stack=stack128#24,>z14=int6464#3
3546# asm 2: movdqa <z14_stack=400(%esp),>z14=%xmm2
3547movdqa 400(%esp),%xmm2
3548
3549# qhasm: z15 = z15_stack
3550# asm 1: movdqa <z15_stack=stack128#23,>z15=int6464#4
3551# asm 2: movdqa <z15_stack=384(%esp),>z15=%xmm3
3552movdqa 384(%esp),%xmm3
3553
3554# qhasm: uint32323232 z12 += orig12
3555# asm 1: paddd <orig12=stack128#11,<z12=int6464#1
3556# asm 2: paddd <orig12=192(%esp),<z12=%xmm0
3557paddd 192(%esp),%xmm0
3558
3559# qhasm: uint32323232 z13 += orig13
3560# asm 1: paddd <orig13=stack128#14,<z13=int6464#2
3561# asm 2: paddd <orig13=240(%esp),<z13=%xmm1
3562paddd 240(%esp),%xmm1
3563
3564# qhasm: uint32323232 z14 += orig14
3565# asm 1: paddd <orig14=stack128#17,<z14=int6464#3
3566# asm 2: paddd <orig14=288(%esp),<z14=%xmm2
3567paddd 288(%esp),%xmm2
3568
3569# qhasm: uint32323232 z15 += orig15
3570# asm 1: paddd <orig15=stack128#7,<z15=int6464#4
3571# asm 2: paddd <orig15=128(%esp),<z15=%xmm3
3572paddd 128(%esp),%xmm3
3573
3574# qhasm: in12 = z12
3575# asm 1: movd <z12=int6464#1,>in12=int32#1
3576# asm 2: movd <z12=%xmm0,>in12=%eax
3577movd %xmm0,%eax
3578
3579# qhasm: in13 = z13
3580# asm 1: movd <z13=int6464#2,>in13=int32#2
3581# asm 2: movd <z13=%xmm1,>in13=%ecx
3582movd %xmm1,%ecx
3583
3584# qhasm: in14 = z14
3585# asm 1: movd <z14=int6464#3,>in14=int32#3
3586# asm 2: movd <z14=%xmm2,>in14=%edx
3587movd %xmm2,%edx
3588
3589# qhasm: in15 = z15
3590# asm 1: movd <z15=int6464#4,>in15=int32#4
3591# asm 2: movd <z15=%xmm3,>in15=%ebx
3592movd %xmm3,%ebx
3593
3594# qhasm: z12 <<<= 96
3595# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3596# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3597pshufd $0x39,%xmm0,%xmm0
3598
3599# qhasm: z13 <<<= 96
3600# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3601# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3602pshufd $0x39,%xmm1,%xmm1
3603
3604# qhasm: z14 <<<= 96
3605# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3606# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3607pshufd $0x39,%xmm2,%xmm2
3608
3609# qhasm: z15 <<<= 96
3610# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3611# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3612pshufd $0x39,%xmm3,%xmm3
3613
3614# qhasm: in12 ^= *(uint32 *) (m + 48)
3615# asm 1: xorl 48(<m=int32#5),<in12=int32#1
3616# asm 2: xorl 48(<m=%esi),<in12=%eax
3617xorl 48(%esi),%eax
3618
3619# qhasm: in13 ^= *(uint32 *) (m + 52)
3620# asm 1: xorl 52(<m=int32#5),<in13=int32#2
3621# asm 2: xorl 52(<m=%esi),<in13=%ecx
3622xorl 52(%esi),%ecx
3623
3624# qhasm: in14 ^= *(uint32 *) (m + 56)
3625# asm 1: xorl 56(<m=int32#5),<in14=int32#3
3626# asm 2: xorl 56(<m=%esi),<in14=%edx
3627xorl 56(%esi),%edx
3628
3629# qhasm: in15 ^= *(uint32 *) (m + 60)
3630# asm 1: xorl 60(<m=int32#5),<in15=int32#4
3631# asm 2: xorl 60(<m=%esi),<in15=%ebx
3632xorl 60(%esi),%ebx
3633
3634# qhasm: *(uint32 *) (out + 48) = in12
3635# asm 1: movl <in12=int32#1,48(<out=int32#6)
3636# asm 2: movl <in12=%eax,48(<out=%edi)
3637movl %eax,48(%edi)
3638
3639# qhasm: *(uint32 *) (out + 52) = in13
3640# asm 1: movl <in13=int32#2,52(<out=int32#6)
3641# asm 2: movl <in13=%ecx,52(<out=%edi)
3642movl %ecx,52(%edi)
3643
3644# qhasm: *(uint32 *) (out + 56) = in14
3645# asm 1: movl <in14=int32#3,56(<out=int32#6)
3646# asm 2: movl <in14=%edx,56(<out=%edi)
3647movl %edx,56(%edi)
3648
3649# qhasm: *(uint32 *) (out + 60) = in15
3650# asm 1: movl <in15=int32#4,60(<out=int32#6)
3651# asm 2: movl <in15=%ebx,60(<out=%edi)
3652movl %ebx,60(%edi)
3653
3654# qhasm: in12 = z12
3655# asm 1: movd <z12=int6464#1,>in12=int32#1
3656# asm 2: movd <z12=%xmm0,>in12=%eax
3657movd %xmm0,%eax
3658
3659# qhasm: in13 = z13
3660# asm 1: movd <z13=int6464#2,>in13=int32#2
3661# asm 2: movd <z13=%xmm1,>in13=%ecx
3662movd %xmm1,%ecx
3663
3664# qhasm: in14 = z14
3665# asm 1: movd <z14=int6464#3,>in14=int32#3
3666# asm 2: movd <z14=%xmm2,>in14=%edx
3667movd %xmm2,%edx
3668
3669# qhasm: in15 = z15
3670# asm 1: movd <z15=int6464#4,>in15=int32#4
3671# asm 2: movd <z15=%xmm3,>in15=%ebx
3672movd %xmm3,%ebx
3673
3674# qhasm: z12 <<<= 96
3675# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3676# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3677pshufd $0x39,%xmm0,%xmm0
3678
3679# qhasm: z13 <<<= 96
3680# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3681# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3682pshufd $0x39,%xmm1,%xmm1
3683
3684# qhasm: z14 <<<= 96
3685# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3686# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3687pshufd $0x39,%xmm2,%xmm2
3688
3689# qhasm: z15 <<<= 96
3690# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3691# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3692pshufd $0x39,%xmm3,%xmm3
3693
3694# qhasm: in12 ^= *(uint32 *) (m + 112)
3695# asm 1: xorl 112(<m=int32#5),<in12=int32#1
3696# asm 2: xorl 112(<m=%esi),<in12=%eax
3697xorl 112(%esi),%eax
3698
3699# qhasm: in13 ^= *(uint32 *) (m + 116)
3700# asm 1: xorl 116(<m=int32#5),<in13=int32#2
3701# asm 2: xorl 116(<m=%esi),<in13=%ecx
3702xorl 116(%esi),%ecx
3703
3704# qhasm: in14 ^= *(uint32 *) (m + 120)
3705# asm 1: xorl 120(<m=int32#5),<in14=int32#3
3706# asm 2: xorl 120(<m=%esi),<in14=%edx
3707xorl 120(%esi),%edx
3708
3709# qhasm: in15 ^= *(uint32 *) (m + 124)
3710# asm 1: xorl 124(<m=int32#5),<in15=int32#4
3711# asm 2: xorl 124(<m=%esi),<in15=%ebx
3712xorl 124(%esi),%ebx
3713
3714# qhasm: *(uint32 *) (out + 112) = in12
3715# asm 1: movl <in12=int32#1,112(<out=int32#6)
3716# asm 2: movl <in12=%eax,112(<out=%edi)
3717movl %eax,112(%edi)
3718
3719# qhasm: *(uint32 *) (out + 116) = in13
3720# asm 1: movl <in13=int32#2,116(<out=int32#6)
3721# asm 2: movl <in13=%ecx,116(<out=%edi)
3722movl %ecx,116(%edi)
3723
3724# qhasm: *(uint32 *) (out + 120) = in14
3725# asm 1: movl <in14=int32#3,120(<out=int32#6)
3726# asm 2: movl <in14=%edx,120(<out=%edi)
3727movl %edx,120(%edi)
3728
3729# qhasm: *(uint32 *) (out + 124) = in15
3730# asm 1: movl <in15=int32#4,124(<out=int32#6)
3731# asm 2: movl <in15=%ebx,124(<out=%edi)
3732movl %ebx,124(%edi)
3733
3734# qhasm: in12 = z12
3735# asm 1: movd <z12=int6464#1,>in12=int32#1
3736# asm 2: movd <z12=%xmm0,>in12=%eax
3737movd %xmm0,%eax
3738
3739# qhasm: in13 = z13
3740# asm 1: movd <z13=int6464#2,>in13=int32#2
3741# asm 2: movd <z13=%xmm1,>in13=%ecx
3742movd %xmm1,%ecx
3743
3744# qhasm: in14 = z14
3745# asm 1: movd <z14=int6464#3,>in14=int32#3
3746# asm 2: movd <z14=%xmm2,>in14=%edx
3747movd %xmm2,%edx
3748
3749# qhasm: in15 = z15
3750# asm 1: movd <z15=int6464#4,>in15=int32#4
3751# asm 2: movd <z15=%xmm3,>in15=%ebx
3752movd %xmm3,%ebx
3753
3754# qhasm: z12 <<<= 96
3755# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3756# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3757pshufd $0x39,%xmm0,%xmm0
3758
3759# qhasm: z13 <<<= 96
3760# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3761# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3762pshufd $0x39,%xmm1,%xmm1
3763
3764# qhasm: z14 <<<= 96
3765# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3766# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3767pshufd $0x39,%xmm2,%xmm2
3768
3769# qhasm: z15 <<<= 96
3770# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3771# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3772pshufd $0x39,%xmm3,%xmm3
3773
3774# qhasm: in12 ^= *(uint32 *) (m + 176)
3775# asm 1: xorl 176(<m=int32#5),<in12=int32#1
3776# asm 2: xorl 176(<m=%esi),<in12=%eax
3777xorl 176(%esi),%eax
3778
3779# qhasm: in13 ^= *(uint32 *) (m + 180)
3780# asm 1: xorl 180(<m=int32#5),<in13=int32#2
3781# asm 2: xorl 180(<m=%esi),<in13=%ecx
3782xorl 180(%esi),%ecx
3783
3784# qhasm: in14 ^= *(uint32 *) (m + 184)
3785# asm 1: xorl 184(<m=int32#5),<in14=int32#3
3786# asm 2: xorl 184(<m=%esi),<in14=%edx
3787xorl 184(%esi),%edx
3788
3789# qhasm: in15 ^= *(uint32 *) (m + 188)
3790# asm 1: xorl 188(<m=int32#5),<in15=int32#4
3791# asm 2: xorl 188(<m=%esi),<in15=%ebx
3792xorl 188(%esi),%ebx
3793
3794# qhasm: *(uint32 *) (out + 176) = in12
3795# asm 1: movl <in12=int32#1,176(<out=int32#6)
3796# asm 2: movl <in12=%eax,176(<out=%edi)
3797movl %eax,176(%edi)
3798
3799# qhasm: *(uint32 *) (out + 180) = in13
3800# asm 1: movl <in13=int32#2,180(<out=int32#6)
3801# asm 2: movl <in13=%ecx,180(<out=%edi)
3802movl %ecx,180(%edi)
3803
3804# qhasm: *(uint32 *) (out + 184) = in14
3805# asm 1: movl <in14=int32#3,184(<out=int32#6)
3806# asm 2: movl <in14=%edx,184(<out=%edi)
3807movl %edx,184(%edi)
3808
3809# qhasm: *(uint32 *) (out + 188) = in15
3810# asm 1: movl <in15=int32#4,188(<out=int32#6)
3811# asm 2: movl <in15=%ebx,188(<out=%edi)
3812movl %ebx,188(%edi)
3813
3814# qhasm: in12 = z12
3815# asm 1: movd <z12=int6464#1,>in12=int32#1
3816# asm 2: movd <z12=%xmm0,>in12=%eax
3817movd %xmm0,%eax
3818
3819# qhasm: in13 = z13
3820# asm 1: movd <z13=int6464#2,>in13=int32#2
3821# asm 2: movd <z13=%xmm1,>in13=%ecx
3822movd %xmm1,%ecx
3823
3824# qhasm: in14 = z14
3825# asm 1: movd <z14=int6464#3,>in14=int32#3
3826# asm 2: movd <z14=%xmm2,>in14=%edx
3827movd %xmm2,%edx
3828
3829# qhasm: in15 = z15
3830# asm 1: movd <z15=int6464#4,>in15=int32#4
3831# asm 2: movd <z15=%xmm3,>in15=%ebx
3832movd %xmm3,%ebx
3833
3834# qhasm: in12 ^= *(uint32 *) (m + 240)
3835# asm 1: xorl 240(<m=int32#5),<in12=int32#1
3836# asm 2: xorl 240(<m=%esi),<in12=%eax
3837xorl 240(%esi),%eax
3838
3839# qhasm: in13 ^= *(uint32 *) (m + 244)
3840# asm 1: xorl 244(<m=int32#5),<in13=int32#2
3841# asm 2: xorl 244(<m=%esi),<in13=%ecx
3842xorl 244(%esi),%ecx
3843
3844# qhasm: in14 ^= *(uint32 *) (m + 248)
3845# asm 1: xorl 248(<m=int32#5),<in14=int32#3
3846# asm 2: xorl 248(<m=%esi),<in14=%edx
3847xorl 248(%esi),%edx
3848
3849# qhasm: in15 ^= *(uint32 *) (m + 252)
3850# asm 1: xorl 252(<m=int32#5),<in15=int32#4
3851# asm 2: xorl 252(<m=%esi),<in15=%ebx
3852xorl 252(%esi),%ebx
3853
3854# qhasm: *(uint32 *) (out + 240) = in12
3855# asm 1: movl <in12=int32#1,240(<out=int32#6)
3856# asm 2: movl <in12=%eax,240(<out=%edi)
3857movl %eax,240(%edi)
3858
3859# qhasm: *(uint32 *) (out + 244) = in13
3860# asm 1: movl <in13=int32#2,244(<out=int32#6)
3861# asm 2: movl <in13=%ecx,244(<out=%edi)
3862movl %ecx,244(%edi)
3863
3864# qhasm: *(uint32 *) (out + 248) = in14
3865# asm 1: movl <in14=int32#3,248(<out=int32#6)
3866# asm 2: movl <in14=%edx,248(<out=%edi)
3867movl %edx,248(%edi)
3868
3869# qhasm: *(uint32 *) (out + 252) = in15
3870# asm 1: movl <in15=int32#4,252(<out=int32#6)
3871# asm 2: movl <in15=%ebx,252(<out=%edi)
3872movl %ebx,252(%edi)
3873
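# note: 256 output bytes have been written; subtract 256 from the byte count,
# advance the message and output pointers, and rerun the four-block loop
# while at least 256 bytes remain.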
3874# qhasm: bytes = bytes_stack
3875# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
3876# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
3877movl 24(%esp),%eax
3878
3879# qhasm: bytes -= 256
3880# asm 1: sub $256,<bytes=int32#1
3881# asm 2: sub $256,<bytes=%eax
3882sub $256,%eax
3883
3884# qhasm: m += 256
3885# asm 1: add $256,<m=int32#5
3886# asm 2: add $256,<m=%esi
3887add $256,%esi
3888
3889# qhasm: out += 256
3890# asm 1: add $256,<out=int32#6
3891# asm 2: add $256,<out=%edi
3892add $256,%edi
3893
3894# qhasm: out_stack = out
3895# asm 1: movl <out=int32#6,>out_stack=stack32#6
3896# asm 2: movl <out=%edi,>out_stack=20(%esp)
3897movl %edi,20(%esp)
3898
3899# qhasm: unsigned<? bytes - 256
3900# asm 1: cmp $256,<bytes=int32#1
3901# asm 2: cmp $256,<bytes=%eax
3902cmp $256,%eax
3903# comment:fp stack unchanged by jump
3904
3905# qhasm: goto bytesatleast256 if !unsigned<
3906jae ._bytesatleast256
3907
3908# qhasm: unsigned>? bytes - 0
3909# asm 1: cmp $0,<bytes=int32#1
3910# asm 2: cmp $0,<bytes=%eax
3911cmp $0,%eax
3912# comment:fp stack unchanged by jump
3913
3914# qhasm: goto done if !unsigned>
3915jbe ._done
3916# comment:fp stack unchanged by fallthrough
3917
3918# qhasm: bytesbetween1and255:
3919._bytesbetween1and255:
3920
3921# qhasm: unsigned<? bytes - 64
3922# asm 1: cmp $64,<bytes=int32#1
3923# asm 2: cmp $64,<bytes=%eax
3924cmp $64,%eax
3925# comment:fp stack unchanged by jump
3926
3927# qhasm: goto nocopy if !unsigned<
3928jae ._nocopy
3929
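# note: fewer than 64 bytes remain.  The real output pointer is saved as
# ctarget, the leftover message bytes are copied into the on-stack tmp buffer
# (rep movsb), and both m and out are redirected to tmp so the single-block
# code below can always read and write a full 64 bytes.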
3930# qhasm: ctarget = out
3931# asm 1: movl <out=int32#6,>ctarget=stack32#6
3932# asm 2: movl <out=%edi,>ctarget=20(%esp)
3933movl %edi,20(%esp)
3934
3935# qhasm: out = &tmp
3936# asm 1: leal <tmp=stack512#1,>out=int32#6
3937# asm 2: leal <tmp=640(%esp),>out=%edi
3938leal 640(%esp),%edi
3939
3940# qhasm: i = bytes
3941# asm 1: mov <bytes=int32#1,>i=int32#2
3942# asm 2: mov <bytes=%eax,>i=%ecx
3943mov %eax,%ecx
3944
3945# qhasm: while (i) { *out++ = *m++; --i }
3946rep movsb
3947
3948# qhasm: out = &tmp
3949# asm 1: leal <tmp=stack512#1,>out=int32#6
3950# asm 2: leal <tmp=640(%esp),>out=%edi
3951leal 640(%esp),%edi
3952
3953# qhasm: m = &tmp
3954# asm 1: leal <tmp=stack512#1,>m=int32#5
3955# asm 2: leal <tmp=640(%esp),>m=%esi
3956leal 640(%esp),%esi
3957# comment:fp stack unchanged by fallthrough
3958
3959# qhasm: nocopy:
3960._nocopy:
3961
3962# qhasm: bytes_stack = bytes
3963# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
3964# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
3965movl %eax,24(%esp)
3966
3967# qhasm: diag0 = x0
3968# asm 1: movdqa <x0=stack128#3,>diag0=int6464#1
3969# asm 2: movdqa <x0=64(%esp),>diag0=%xmm0
3970movdqa 64(%esp),%xmm0
3971
3972# qhasm: diag1 = x1
3973# asm 1: movdqa <x1=stack128#2,>diag1=int6464#2
3974# asm 2: movdqa <x1=48(%esp),>diag1=%xmm1
3975movdqa 48(%esp),%xmm1
3976
3977# qhasm: diag2 = x2
3978# asm 1: movdqa <x2=stack128#4,>diag2=int6464#3
3979# asm 2: movdqa <x2=80(%esp),>diag2=%xmm2
3980movdqa 80(%esp),%xmm2
3981
3982# qhasm: diag3 = x3
3983# asm 1: movdqa <x3=stack128#1,>diag3=int6464#4
3984# asm 2: movdqa <x3=32(%esp),>diag3=%xmm3
3985movdqa 32(%esp),%xmm3
3986
3987# qhasm: a0 = diag1
3988# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3989# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3990movdqa %xmm1,%xmm4
3991
3992# qhasm: i = 20
3993# asm 1: mov $20,>i=int32#1
3994# asm 2: mov $20,>i=%eax
3995mov $20,%eax
3996
3997# qhasm: mainloop2:
3998._mainloop2:
3999
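# note: mainloop2 runs the Salsa20 core on a single 64-byte block held in
# diag0..diag3.  Each quarter-round rotation by r is implemented as a left
# shift by r plus a right shift by 32-r, both XORed into the target word, and
# the pshufd instructions realign the diagonals between quarter-rounds.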
4000# qhasm: uint32323232 a0 += diag0
4001# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4002# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4003paddd %xmm0,%xmm4
4004
4005# qhasm: a1 = diag0
4006# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4007# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4008movdqa %xmm0,%xmm5
4009
4010# qhasm: b0 = a0
4011# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4012# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4013movdqa %xmm4,%xmm6
4014
4015# qhasm: uint32323232 a0 <<= 7
4016# asm 1: pslld $7,<a0=int6464#5
4017# asm 2: pslld $7,<a0=%xmm4
4018pslld $7,%xmm4
4019
4020# qhasm: uint32323232 b0 >>= 25
4021# asm 1: psrld $25,<b0=int6464#7
4022# asm 2: psrld $25,<b0=%xmm6
4023psrld $25,%xmm6
4024
4025# qhasm: diag3 ^= a0
4026# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4027# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4028pxor %xmm4,%xmm3
4029
4030# qhasm: diag3 ^= b0
4031# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4032# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4033pxor %xmm6,%xmm3
4034
4035# qhasm: uint32323232 a1 += diag3
4036# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4037# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4038paddd %xmm3,%xmm5
4039
4040# qhasm: a2 = diag3
4041# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4042# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4043movdqa %xmm3,%xmm4
4044
4045# qhasm: b1 = a1
4046# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4047# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4048movdqa %xmm5,%xmm6
4049
4050# qhasm: uint32323232 a1 <<= 9
4051# asm 1: pslld $9,<a1=int6464#6
4052# asm 2: pslld $9,<a1=%xmm5
4053pslld $9,%xmm5
4054
4055# qhasm: uint32323232 b1 >>= 23
4056# asm 1: psrld $23,<b1=int6464#7
4057# asm 2: psrld $23,<b1=%xmm6
4058psrld $23,%xmm6
4059
4060# qhasm: diag2 ^= a1
4061# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4062# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4063pxor %xmm5,%xmm2
4064
4065# qhasm: diag3 <<<= 32
4066# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4067# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4068pshufd $0x93,%xmm3,%xmm3
4069
4070# qhasm: diag2 ^= b1
4071# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4072# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4073pxor %xmm6,%xmm2
4074
4075# qhasm: uint32323232 a2 += diag2
4076# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4077# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4078paddd %xmm2,%xmm4
4079
4080# qhasm: a3 = diag2
4081# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4082# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4083movdqa %xmm2,%xmm5
4084
4085# qhasm: b2 = a2
4086# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4087# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4088movdqa %xmm4,%xmm6
4089
4090# qhasm: uint32323232 a2 <<= 13
4091# asm 1: pslld $13,<a2=int6464#5
4092# asm 2: pslld $13,<a2=%xmm4
4093pslld $13,%xmm4
4094
4095# qhasm: uint32323232 b2 >>= 19
4096# asm 1: psrld $19,<b2=int6464#7
4097# asm 2: psrld $19,<b2=%xmm6
4098psrld $19,%xmm6
4099
4100# qhasm: diag1 ^= a2
4101# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4102# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4103pxor %xmm4,%xmm1
4104
4105# qhasm: diag2 <<<= 64
4106# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4107# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4108pshufd $0x4e,%xmm2,%xmm2
4109
4110# qhasm: diag1 ^= b2
4111# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4112# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4113pxor %xmm6,%xmm1
4114
4115# qhasm: uint32323232 a3 += diag1
4116# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4117# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4118paddd %xmm1,%xmm5
4119
4120# qhasm: a4 = diag3
4121# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4122# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4123movdqa %xmm3,%xmm4
4124
4125# qhasm: b3 = a3
4126# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4127# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4128movdqa %xmm5,%xmm6
4129
4130# qhasm: uint32323232 a3 <<= 18
4131# asm 1: pslld $18,<a3=int6464#6
4132# asm 2: pslld $18,<a3=%xmm5
4133pslld $18,%xmm5
4134
4135# qhasm: uint32323232 b3 >>= 14
4136# asm 1: psrld $14,<b3=int6464#7
4137# asm 2: psrld $14,<b3=%xmm6
4138psrld $14,%xmm6
4139
4140# qhasm: diag0 ^= a3
4141# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4142# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4143pxor %xmm5,%xmm0
4144
4145# qhasm: diag1 <<<= 96
4146# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4147# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4148pshufd $0x39,%xmm1,%xmm1
4149
4150# qhasm: diag0 ^= b3
4151# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4152# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4153pxor %xmm6,%xmm0
4154
4155# qhasm: uint32323232 a4 += diag0
4156# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4157# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4158paddd %xmm0,%xmm4
4159
4160# qhasm: a5 = diag0
4161# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4162# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4163movdqa %xmm0,%xmm5
4164
4165# qhasm: b4 = a4
4166# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4167# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4168movdqa %xmm4,%xmm6
4169
4170# qhasm: uint32323232 a4 <<= 7
4171# asm 1: pslld $7,<a4=int6464#5
4172# asm 2: pslld $7,<a4=%xmm4
4173pslld $7,%xmm4
4174
4175# qhasm: uint32323232 b4 >>= 25
4176# asm 1: psrld $25,<b4=int6464#7
4177# asm 2: psrld $25,<b4=%xmm6
4178psrld $25,%xmm6
4179
4180# qhasm: diag1 ^= a4
4181# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4182# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4183pxor %xmm4,%xmm1
4184
4185# qhasm: diag1 ^= b4
4186# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4187# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4188pxor %xmm6,%xmm1
4189
4190# qhasm: uint32323232 a5 += diag1
4191# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4192# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4193paddd %xmm1,%xmm5
4194
4195# qhasm: a6 = diag1
4196# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4197# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4198movdqa %xmm1,%xmm4
4199
4200# qhasm: b5 = a5
4201# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4202# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4203movdqa %xmm5,%xmm6
4204
4205# qhasm: uint32323232 a5 <<= 9
4206# asm 1: pslld $9,<a5=int6464#6
4207# asm 2: pslld $9,<a5=%xmm5
4208pslld $9,%xmm5
4209
4210# qhasm: uint32323232 b5 >>= 23
4211# asm 1: psrld $23,<b5=int6464#7
4212# asm 2: psrld $23,<b5=%xmm6
4213psrld $23,%xmm6
4214
4215# qhasm: diag2 ^= a5
4216# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4217# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4218pxor %xmm5,%xmm2
4219
4220# qhasm: diag1 <<<= 32
4221# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4222# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4223pshufd $0x93,%xmm1,%xmm1
4224
4225# qhasm: diag2 ^= b5
4226# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4227# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4228pxor %xmm6,%xmm2
4229
4230# qhasm: uint32323232 a6 += diag2
4231# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4232# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4233paddd %xmm2,%xmm4
4234
4235# qhasm: a7 = diag2
4236# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4237# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4238movdqa %xmm2,%xmm5
4239
4240# qhasm: b6 = a6
4241# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4242# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4243movdqa %xmm4,%xmm6
4244
4245# qhasm: uint32323232 a6 <<= 13
4246# asm 1: pslld $13,<a6=int6464#5
4247# asm 2: pslld $13,<a6=%xmm4
4248pslld $13,%xmm4
4249
4250# qhasm: uint32323232 b6 >>= 19
4251# asm 1: psrld $19,<b6=int6464#7
4252# asm 2: psrld $19,<b6=%xmm6
4253psrld $19,%xmm6
4254
4255# qhasm: diag3 ^= a6
4256# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4257# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4258pxor %xmm4,%xmm3
4259
4260# qhasm: diag2 <<<= 64
4261# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4262# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4263pshufd $0x4e,%xmm2,%xmm2
4264
4265# qhasm: diag3 ^= b6
4266# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4267# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4268pxor %xmm6,%xmm3
4269
4270# qhasm: uint32323232 a7 += diag3
4271# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4272# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4273paddd %xmm3,%xmm5
4274
4275# qhasm: a0 = diag1
4276# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4277# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4278movdqa %xmm1,%xmm4
4279
4280# qhasm: b7 = a7
4281# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4282# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4283movdqa %xmm5,%xmm6
4284
4285# qhasm: uint32323232 a7 <<= 18
4286# asm 1: pslld $18,<a7=int6464#6
4287# asm 2: pslld $18,<a7=%xmm5
4288pslld $18,%xmm5
4289
4290# qhasm: uint32323232 b7 >>= 14
4291# asm 1: psrld $14,<b7=int6464#7
4292# asm 2: psrld $14,<b7=%xmm6
4293psrld $14,%xmm6
4294
4295# qhasm: diag0 ^= a7
4296# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4297# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4298pxor %xmm5,%xmm0
4299
4300# qhasm: diag3 <<<= 96
4301# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4302# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4303pshufd $0x39,%xmm3,%xmm3
4304
4305# qhasm: diag0 ^= b7
4306# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4307# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4308pxor %xmm6,%xmm0
4309
4310# qhasm: uint32323232 a0 += diag0
4311# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4312# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4313paddd %xmm0,%xmm4
4314
4315# qhasm: a1 = diag0
4316# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4317# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4318movdqa %xmm0,%xmm5
4319
4320# qhasm: b0 = a0
4321# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4322# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4323movdqa %xmm4,%xmm6
4324
4325# qhasm: uint32323232 a0 <<= 7
4326# asm 1: pslld $7,<a0=int6464#5
4327# asm 2: pslld $7,<a0=%xmm4
4328pslld $7,%xmm4
4329
4330# qhasm: uint32323232 b0 >>= 25
4331# asm 1: psrld $25,<b0=int6464#7
4332# asm 2: psrld $25,<b0=%xmm6
4333psrld $25,%xmm6
4334
4335# qhasm: diag3 ^= a0
4336# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4337# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4338pxor %xmm4,%xmm3
4339
4340# qhasm: diag3 ^= b0
4341# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4342# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4343pxor %xmm6,%xmm3
4344
4345# qhasm: uint32323232 a1 += diag3
4346# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4347# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4348paddd %xmm3,%xmm5
4349
4350# qhasm: a2 = diag3
4351# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4352# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4353movdqa %xmm3,%xmm4
4354
4355# qhasm: b1 = a1
4356# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4357# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4358movdqa %xmm5,%xmm6
4359
4360# qhasm: uint32323232 a1 <<= 9
4361# asm 1: pslld $9,<a1=int6464#6
4362# asm 2: pslld $9,<a1=%xmm5
4363pslld $9,%xmm5
4364
4365# qhasm: uint32323232 b1 >>= 23
4366# asm 1: psrld $23,<b1=int6464#7
4367# asm 2: psrld $23,<b1=%xmm6
4368psrld $23,%xmm6
4369
4370# qhasm: diag2 ^= a1
4371# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4372# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4373pxor %xmm5,%xmm2
4374
4375# qhasm: diag3 <<<= 32
4376# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4377# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4378pshufd $0x93,%xmm3,%xmm3
4379
4380# qhasm: diag2 ^= b1
4381# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4382# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4383pxor %xmm6,%xmm2
4384
4385# qhasm: uint32323232 a2 += diag2
4386# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4387# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4388paddd %xmm2,%xmm4
4389
4390# qhasm: a3 = diag2
4391# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4392# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4393movdqa %xmm2,%xmm5
4394
4395# qhasm: b2 = a2
4396# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4397# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4398movdqa %xmm4,%xmm6
4399
4400# qhasm: uint32323232 a2 <<= 13
4401# asm 1: pslld $13,<a2=int6464#5
4402# asm 2: pslld $13,<a2=%xmm4
4403pslld $13,%xmm4
4404
4405# qhasm: uint32323232 b2 >>= 19
4406# asm 1: psrld $19,<b2=int6464#7
4407# asm 2: psrld $19,<b2=%xmm6
4408psrld $19,%xmm6
4409
4410# qhasm: diag1 ^= a2
4411# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4412# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4413pxor %xmm4,%xmm1
4414
4415# qhasm: diag2 <<<= 64
4416# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4417# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4418pshufd $0x4e,%xmm2,%xmm2
4419
4420# qhasm: diag1 ^= b2
4421# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4422# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4423pxor %xmm6,%xmm1
4424
4425# qhasm: uint32323232 a3 += diag1
4426# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4427# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4428paddd %xmm1,%xmm5
4429
4430# qhasm: a4 = diag3
4431# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4432# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4433movdqa %xmm3,%xmm4
4434
4435# qhasm: b3 = a3
4436# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4437# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4438movdqa %xmm5,%xmm6
4439
4440# qhasm: uint32323232 a3 <<= 18
4441# asm 1: pslld $18,<a3=int6464#6
4442# asm 2: pslld $18,<a3=%xmm5
4443pslld $18,%xmm5
4444
4445# qhasm: uint32323232 b3 >>= 14
4446# asm 1: psrld $14,<b3=int6464#7
4447# asm 2: psrld $14,<b3=%xmm6
4448psrld $14,%xmm6
4449
4450# qhasm: diag0 ^= a3
4451# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4452# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4453pxor %xmm5,%xmm0
4454
4455# qhasm: diag1 <<<= 96
4456# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4457# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4458pshufd $0x39,%xmm1,%xmm1
4459
4460# qhasm: diag0 ^= b3
4461# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4462# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4463pxor %xmm6,%xmm0
4464
4465# qhasm: uint32323232 a4 += diag0
4466# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4467# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4468paddd %xmm0,%xmm4
4469
4470# qhasm: a5 = diag0
4471# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4472# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4473movdqa %xmm0,%xmm5
4474
4475# qhasm: b4 = a4
4476# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4477# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4478movdqa %xmm4,%xmm6
4479
4480# qhasm: uint32323232 a4 <<= 7
4481# asm 1: pslld $7,<a4=int6464#5
4482# asm 2: pslld $7,<a4=%xmm4
4483pslld $7,%xmm4
4484
4485# qhasm: uint32323232 b4 >>= 25
4486# asm 1: psrld $25,<b4=int6464#7
4487# asm 2: psrld $25,<b4=%xmm6
4488psrld $25,%xmm6
4489
4490# qhasm: diag1 ^= a4
4491# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4492# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4493pxor %xmm4,%xmm1
4494
4495# qhasm: diag1 ^= b4
4496# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4497# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4498pxor %xmm6,%xmm1
4499
4500# qhasm: uint32323232 a5 += diag1
4501# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4502# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4503paddd %xmm1,%xmm5
4504
4505# qhasm: a6 = diag1
4506# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4507# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4508movdqa %xmm1,%xmm4
4509
4510# qhasm: b5 = a5
4511# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4512# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4513movdqa %xmm5,%xmm6
4514
4515# qhasm: uint32323232 a5 <<= 9
4516# asm 1: pslld $9,<a5=int6464#6
4517# asm 2: pslld $9,<a5=%xmm5
4518pslld $9,%xmm5
4519
4520# qhasm: uint32323232 b5 >>= 23
4521# asm 1: psrld $23,<b5=int6464#7
4522# asm 2: psrld $23,<b5=%xmm6
4523psrld $23,%xmm6
4524
4525# qhasm: diag2 ^= a5
4526# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4527# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4528pxor %xmm5,%xmm2
4529
4530# qhasm: diag1 <<<= 32
4531# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4532# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4533pshufd $0x93,%xmm1,%xmm1
4534
4535# qhasm: diag2 ^= b5
4536# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4537# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4538pxor %xmm6,%xmm2
4539
4540# qhasm: uint32323232 a6 += diag2
4541# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4542# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4543paddd %xmm2,%xmm4
4544
4545# qhasm: a7 = diag2
4546# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4547# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4548movdqa %xmm2,%xmm5
4549
4550# qhasm: b6 = a6
4551# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4552# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4553movdqa %xmm4,%xmm6
4554
4555# qhasm: uint32323232 a6 <<= 13
4556# asm 1: pslld $13,<a6=int6464#5
4557# asm 2: pslld $13,<a6=%xmm4
4558pslld $13,%xmm4
4559
4560# qhasm: uint32323232 b6 >>= 19
4561# asm 1: psrld $19,<b6=int6464#7
4562# asm 2: psrld $19,<b6=%xmm6
4563psrld $19,%xmm6
4564
4565# qhasm: diag3 ^= a6
4566# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4567# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4568pxor %xmm4,%xmm3
4569
4570# qhasm: diag2 <<<= 64
4571# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4572# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4573pshufd $0x4e,%xmm2,%xmm2
4574
4575# qhasm: diag3 ^= b6
4576# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4577# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4578pxor %xmm6,%xmm3
4579
4580# qhasm: unsigned>? i -= 4
4581# asm 1: sub $4,<i=int32#1
4582# asm 2: sub $4,<i=%eax
4583sub $4,%eax
4584
4585# qhasm: uint32323232 a7 += diag3
4586# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4587# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4588paddd %xmm3,%xmm5
4589
4590# qhasm: a0 = diag1
4591# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4592# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4593movdqa %xmm1,%xmm4
4594
4595# qhasm: b7 = a7
4596# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4597# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4598movdqa %xmm5,%xmm6
4599
4600# qhasm: uint32323232 a7 <<= 18
4601# asm 1: pslld $18,<a7=int6464#6
4602# asm 2: pslld $18,<a7=%xmm5
4603pslld $18,%xmm5
4604
4605# qhasm: b0 = 0
4606# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4607# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4608pxor %xmm7,%xmm7
4609
4610# qhasm: uint32323232 b7 >>= 14
4611# asm 1: psrld $14,<b7=int6464#7
4612# asm 2: psrld $14,<b7=%xmm6
4613psrld $14,%xmm6
4614
4615# qhasm: diag0 ^= a7
4616# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4617# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4618pxor %xmm5,%xmm0
4619
4620# qhasm: diag3 <<<= 96
4621# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4622# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4623pshufd $0x39,%xmm3,%xmm3
4624
4625# qhasm: diag0 ^= b7
4626# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4627# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4628pxor %xmm6,%xmm0
4629# comment:fp stack unchanged by jump
4630
4631# qhasm: goto mainloop2 if unsigned>
4632ja ._mainloop2
4633
4634# qhasm: uint32323232 diag0 += x0
4635# asm 1: paddd <x0=stack128#3,<diag0=int6464#1
4636# asm 2: paddd <x0=64(%esp),<diag0=%xmm0
4637paddd 64(%esp),%xmm0
4638
4639# qhasm: uint32323232 diag1 += x1
4640# asm 1: paddd <x1=stack128#2,<diag1=int6464#2
4641# asm 2: paddd <x1=48(%esp),<diag1=%xmm1
4642paddd 48(%esp),%xmm1
4643
4644# qhasm: uint32323232 diag2 += x2
4645# asm 1: paddd <x2=stack128#4,<diag2=int6464#3
4646# asm 2: paddd <x2=80(%esp),<diag2=%xmm2
4647paddd 80(%esp),%xmm2
4648
4649# qhasm: uint32323232 diag3 += x3
4650# asm 1: paddd <x3=stack128#1,<diag3=int6464#4
4651# asm 2: paddd <x3=32(%esp),<diag3=%xmm3
4652paddd 32(%esp),%xmm3
4653
4654# qhasm: in0 = diag0
4655# asm 1: movd <diag0=int6464#1,>in0=int32#1
4656# asm 2: movd <diag0=%xmm0,>in0=%eax
4657movd %xmm0,%eax
4658
4659# qhasm: in12 = diag1
4660# asm 1: movd <diag1=int6464#2,>in12=int32#2
4661# asm 2: movd <diag1=%xmm1,>in12=%ecx
4662movd %xmm1,%ecx
4663
4664# qhasm: in8 = diag2
4665# asm 1: movd <diag2=int6464#3,>in8=int32#3
4666# asm 2: movd <diag2=%xmm2,>in8=%edx
4667movd %xmm2,%edx
4668
4669# qhasm: in4 = diag3
4670# asm 1: movd <diag3=int6464#4,>in4=int32#4
4671# asm 2: movd <diag3=%xmm3,>in4=%ebx
4672movd %xmm3,%ebx
4673
4674# qhasm: diag0 <<<= 96
4675# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4676# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4677pshufd $0x39,%xmm0,%xmm0
4678
4679# qhasm: diag1 <<<= 96
4680# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4681# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4682pshufd $0x39,%xmm1,%xmm1
4683
4684# qhasm: diag2 <<<= 96
4685# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4686# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4687pshufd $0x39,%xmm2,%xmm2
4688
4689# qhasm: diag3 <<<= 96
4690# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4691# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4692pshufd $0x39,%xmm3,%xmm3
4693
4694# qhasm: in0 ^= *(uint32 *) (m + 0)
4695# asm 1: xorl 0(<m=int32#5),<in0=int32#1
4696# asm 2: xorl 0(<m=%esi),<in0=%eax
4697xorl 0(%esi),%eax
4698
4699# qhasm: in12 ^= *(uint32 *) (m + 48)
4700# asm 1: xorl 48(<m=int32#5),<in12=int32#2
4701# asm 2: xorl 48(<m=%esi),<in12=%ecx
4702xorl 48(%esi),%ecx
4703
4704# qhasm: in8 ^= *(uint32 *) (m + 32)
4705# asm 1: xorl 32(<m=int32#5),<in8=int32#3
4706# asm 2: xorl 32(<m=%esi),<in8=%edx
4707xorl 32(%esi),%edx
4708
4709# qhasm: in4 ^= *(uint32 *) (m + 16)
4710# asm 1: xorl 16(<m=int32#5),<in4=int32#4
4711# asm 2: xorl 16(<m=%esi),<in4=%ebx
4712xorl 16(%esi),%ebx
4713
4714# qhasm: *(uint32 *) (out + 0) = in0
4715# asm 1: movl <in0=int32#1,0(<out=int32#6)
4716# asm 2: movl <in0=%eax,0(<out=%edi)
4717movl %eax,0(%edi)
4718
4719# qhasm: *(uint32 *) (out + 48) = in12
4720# asm 1: movl <in12=int32#2,48(<out=int32#6)
4721# asm 2: movl <in12=%ecx,48(<out=%edi)
4722movl %ecx,48(%edi)
4723
4724# qhasm: *(uint32 *) (out + 32) = in8
4725# asm 1: movl <in8=int32#3,32(<out=int32#6)
4726# asm 2: movl <in8=%edx,32(<out=%edi)
4727movl %edx,32(%edi)
4728
4729# qhasm: *(uint32 *) (out + 16) = in4
4730# asm 1: movl <in4=int32#4,16(<out=int32#6)
4731# asm 2: movl <in4=%ebx,16(<out=%edi)
4732movl %ebx,16(%edi)
4733
4734# qhasm: in5 = diag0
4735# asm 1: movd <diag0=int6464#1,>in5=int32#1
4736# asm 2: movd <diag0=%xmm0,>in5=%eax
4737movd %xmm0,%eax
4738
4739# qhasm: in1 = diag1
4740# asm 1: movd <diag1=int6464#2,>in1=int32#2
4741# asm 2: movd <diag1=%xmm1,>in1=%ecx
4742movd %xmm1,%ecx
4743
4744# qhasm: in13 = diag2
4745# asm 1: movd <diag2=int6464#3,>in13=int32#3
4746# asm 2: movd <diag2=%xmm2,>in13=%edx
4747movd %xmm2,%edx
4748
4749# qhasm: in9 = diag3
4750# asm 1: movd <diag3=int6464#4,>in9=int32#4
4751# asm 2: movd <diag3=%xmm3,>in9=%ebx
4752movd %xmm3,%ebx
4753
4754# qhasm: diag0 <<<= 96
4755# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4756# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4757pshufd $0x39,%xmm0,%xmm0
4758
4759# qhasm: diag1 <<<= 96
4760# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4761# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4762pshufd $0x39,%xmm1,%xmm1
4763
4764# qhasm: diag2 <<<= 96
4765# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4766# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4767pshufd $0x39,%xmm2,%xmm2
4768
4769# qhasm: diag3 <<<= 96
4770# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4771# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4772pshufd $0x39,%xmm3,%xmm3
4773
4774# qhasm: in5 ^= *(uint32 *) (m + 20)
4775# asm 1: xorl 20(<m=int32#5),<in5=int32#1
4776# asm 2: xorl 20(<m=%esi),<in5=%eax
4777xorl 20(%esi),%eax
4778
4779# qhasm: in1 ^= *(uint32 *) (m + 4)
4780# asm 1: xorl 4(<m=int32#5),<in1=int32#2
4781# asm 2: xorl 4(<m=%esi),<in1=%ecx
4782xorl 4(%esi),%ecx
4783
4784# qhasm: in13 ^= *(uint32 *) (m + 52)
4785# asm 1: xorl 52(<m=int32#5),<in13=int32#3
4786# asm 2: xorl 52(<m=%esi),<in13=%edx
4787xorl 52(%esi),%edx
4788
4789# qhasm: in9 ^= *(uint32 *) (m + 36)
4790# asm 1: xorl 36(<m=int32#5),<in9=int32#4
4791# asm 2: xorl 36(<m=%esi),<in9=%ebx
4792xorl 36(%esi),%ebx
4793
4794# qhasm: *(uint32 *) (out + 20) = in5
4795# asm 1: movl <in5=int32#1,20(<out=int32#6)
4796# asm 2: movl <in5=%eax,20(<out=%edi)
4797movl %eax,20(%edi)
4798
4799# qhasm: *(uint32 *) (out + 4) = in1
4800# asm 1: movl <in1=int32#2,4(<out=int32#6)
4801# asm 2: movl <in1=%ecx,4(<out=%edi)
4802movl %ecx,4(%edi)
4803
4804# qhasm: *(uint32 *) (out + 52) = in13
4805# asm 1: movl <in13=int32#3,52(<out=int32#6)
4806# asm 2: movl <in13=%edx,52(<out=%edi)
4807movl %edx,52(%edi)
4808
4809# qhasm: *(uint32 *) (out + 36) = in9
4810# asm 1: movl <in9=int32#4,36(<out=int32#6)
4811# asm 2: movl <in9=%ebx,36(<out=%edi)
4812movl %ebx,36(%edi)
4813
4814# qhasm: in10 = diag0
4815# asm 1: movd <diag0=int6464#1,>in10=int32#1
4816# asm 2: movd <diag0=%xmm0,>in10=%eax
4817movd %xmm0,%eax
4818
4819# qhasm: in6 = diag1
4820# asm 1: movd <diag1=int6464#2,>in6=int32#2
4821# asm 2: movd <diag1=%xmm1,>in6=%ecx
4822movd %xmm1,%ecx
4823
4824# qhasm: in2 = diag2
4825# asm 1: movd <diag2=int6464#3,>in2=int32#3
4826# asm 2: movd <diag2=%xmm2,>in2=%edx
4827movd %xmm2,%edx
4828
4829# qhasm: in14 = diag3
4830# asm 1: movd <diag3=int6464#4,>in14=int32#4
4831# asm 2: movd <diag3=%xmm3,>in14=%ebx
4832movd %xmm3,%ebx
4833
4834# qhasm: diag0 <<<= 96
4835# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4836# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4837pshufd $0x39,%xmm0,%xmm0
4838
4839# qhasm: diag1 <<<= 96
4840# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4841# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4842pshufd $0x39,%xmm1,%xmm1
4843
4844# qhasm: diag2 <<<= 96
4845# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4846# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4847pshufd $0x39,%xmm2,%xmm2
4848
4849# qhasm: diag3 <<<= 96
4850# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4851# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4852pshufd $0x39,%xmm3,%xmm3
4853
4854# qhasm: in10 ^= *(uint32 *) (m + 40)
4855# asm 1: xorl 40(<m=int32#5),<in10=int32#1
4856# asm 2: xorl 40(<m=%esi),<in10=%eax
4857xorl 40(%esi),%eax
4858
4859# qhasm: in6 ^= *(uint32 *) (m + 24)
4860# asm 1: xorl 24(<m=int32#5),<in6=int32#2
4861# asm 2: xorl 24(<m=%esi),<in6=%ecx
4862xorl 24(%esi),%ecx
4863
4864# qhasm: in2 ^= *(uint32 *) (m + 8)
4865# asm 1: xorl 8(<m=int32#5),<in2=int32#3
4866# asm 2: xorl 8(<m=%esi),<in2=%edx
4867xorl 8(%esi),%edx
4868
4869# qhasm: in14 ^= *(uint32 *) (m + 56)
4870# asm 1: xorl 56(<m=int32#5),<in14=int32#4
4871# asm 2: xorl 56(<m=%esi),<in14=%ebx
4872xorl 56(%esi),%ebx
4873
4874# qhasm: *(uint32 *) (out + 40) = in10
4875# asm 1: movl <in10=int32#1,40(<out=int32#6)
4876# asm 2: movl <in10=%eax,40(<out=%edi)
4877movl %eax,40(%edi)
4878
4879# qhasm: *(uint32 *) (out + 24) = in6
4880# asm 1: movl <in6=int32#2,24(<out=int32#6)
4881# asm 2: movl <in6=%ecx,24(<out=%edi)
4882movl %ecx,24(%edi)
4883
4884# qhasm: *(uint32 *) (out + 8) = in2
4885# asm 1: movl <in2=int32#3,8(<out=int32#6)
4886# asm 2: movl <in2=%edx,8(<out=%edi)
4887movl %edx,8(%edi)
4888
4889# qhasm: *(uint32 *) (out + 56) = in14
4890# asm 1: movl <in14=int32#4,56(<out=int32#6)
4891# asm 2: movl <in14=%ebx,56(<out=%edi)
4892movl %ebx,56(%edi)
4893
4894# qhasm: in15 = diag0
4895# asm 1: movd <diag0=int6464#1,>in15=int32#1
4896# asm 2: movd <diag0=%xmm0,>in15=%eax
4897movd %xmm0,%eax
4898
4899# qhasm: in11 = diag1
4900# asm 1: movd <diag1=int6464#2,>in11=int32#2
4901# asm 2: movd <diag1=%xmm1,>in11=%ecx
4902movd %xmm1,%ecx
4903
4904# qhasm: in7 = diag2
4905# asm 1: movd <diag2=int6464#3,>in7=int32#3
4906# asm 2: movd <diag2=%xmm2,>in7=%edx
4907movd %xmm2,%edx
4908
4909# qhasm: in3 = diag3
4910# asm 1: movd <diag3=int6464#4,>in3=int32#4
4911# asm 2: movd <diag3=%xmm3,>in3=%ebx
4912movd %xmm3,%ebx
4913
4914# qhasm: in15 ^= *(uint32 *) (m + 60)
4915# asm 1: xorl 60(<m=int32#5),<in15=int32#1
4916# asm 2: xorl 60(<m=%esi),<in15=%eax
4917xorl 60(%esi),%eax
4918
4919# qhasm: in11 ^= *(uint32 *) (m + 44)
4920# asm 1: xorl 44(<m=int32#5),<in11=int32#2
4921# asm 2: xorl 44(<m=%esi),<in11=%ecx
4922xorl 44(%esi),%ecx
4923
4924# qhasm: in7 ^= *(uint32 *) (m + 28)
4925# asm 1: xorl 28(<m=int32#5),<in7=int32#3
4926# asm 2: xorl 28(<m=%esi),<in7=%edx
4927xorl 28(%esi),%edx
4928
4929# qhasm: in3 ^= *(uint32 *) (m + 12)
4930# asm 1: xorl 12(<m=int32#5),<in3=int32#4
4931# asm 2: xorl 12(<m=%esi),<in3=%ebx
4932xorl 12(%esi),%ebx
4933
4934# qhasm: *(uint32 *) (out + 60) = in15
4935# asm 1: movl <in15=int32#1,60(<out=int32#6)
4936# asm 2: movl <in15=%eax,60(<out=%edi)
4937movl %eax,60(%edi)
4938
4939# qhasm: *(uint32 *) (out + 44) = in11
4940# asm 1: movl <in11=int32#2,44(<out=int32#6)
4941# asm 2: movl <in11=%ecx,44(<out=%edi)
4942movl %ecx,44(%edi)
4943
4944# qhasm: *(uint32 *) (out + 28) = in7
4945# asm 1: movl <in7=int32#3,28(<out=int32#6)
4946# asm 2: movl <in7=%edx,28(<out=%edi)
4947movl %edx,28(%edi)
4948
4949# qhasm: *(uint32 *) (out + 12) = in3
4950# asm 1: movl <in3=int32#4,12(<out=int32#6)
4951# asm 2: movl <in3=%ebx,12(<out=%edi)
4952movl %ebx,12(%edi)
4953
4954# qhasm: bytes = bytes_stack
4955# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
4956# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
4957movl 24(%esp),%eax
4958
4959# qhasm: in8 = ((uint32 *)&x2)[0]
4960# asm 1: movl <x2=stack128#4,>in8=int32#2
4961# asm 2: movl <x2=80(%esp),>in8=%ecx
4962movl 80(%esp),%ecx
4963
4964# qhasm: in9 = ((uint32 *)&x3)[1]
4965# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
4966# asm 2: movl 4+<x3=32(%esp),>in9=%edx
4967movl 4+32(%esp),%edx
4968
4969# qhasm: carry? in8 += 1
4970# asm 1: add $1,<in8=int32#2
4971# asm 2: add $1,<in8=%ecx
4972add $1,%ecx
4973
4974# qhasm: in9 += 0 + carry
4975# asm 1: adc $0,<in9=int32#3
4976# asm 2: adc $0,<in9=%edx
4977adc $0,%edx
4978
4979# qhasm: ((uint32 *)&x2)[0] = in8
4980# asm 1: movl <in8=int32#2,>x2=stack128#4
4981# asm 2: movl <in8=%ecx,>x2=80(%esp)
4982movl %ecx,80(%esp)
4983
4984# qhasm: ((uint32 *)&x3)[1] = in9
4985# asm 1: movl <in9=int32#3,4+<x3=stack128#1
4986# asm 2: movl <in9=%edx,4+<x3=32(%esp)
4987movl %edx,4+32(%esp)
4988
4989# qhasm: unsigned>? unsigned<? bytes - 64
4990# asm 1: cmp $64,<bytes=int32#1
4991# asm 2: cmp $64,<bytes=%eax
4992cmp $64,%eax
4993# comment:fp stack unchanged by jump
4994
4995# qhasm: goto bytesatleast65 if unsigned>
4996ja ._bytesatleast65
4997# comment:fp stack unchanged by jump
4998
4999# qhasm: goto bytesatleast64 if !unsigned<
5000jae ._bytesatleast64
5001
5002# qhasm: m = out
5003# asm 1: mov <out=int32#6,>m=int32#5
5004# asm 2: mov <out=%edi,>m=%esi
5005mov %edi,%esi
5006
5007# qhasm: out = ctarget
5008# asm 1: movl <ctarget=stack32#6,>out=int32#6
5009# asm 2: movl <ctarget=20(%esp),>out=%edi
5010movl 20(%esp),%edi
5011
5012# qhasm: i = bytes
5013# asm 1: mov <bytes=int32#1,>i=int32#2
5014# asm 2: mov <bytes=%eax,>i=%ecx
5015mov %eax,%ecx
5016
5017# qhasm: while (i) { *out++ = *m++; --i }
5018rep movsb
5019# comment:fp stack unchanged by fallthrough
5020
5021# qhasm: bytesatleast64:
5022._bytesatleast64:
5023# comment:fp stack unchanged by fallthrough
5024
5025# qhasm: done:
5026._done:
5027
5028# qhasm: eax = eax_stack
5029# asm 1: movl <eax_stack=stack32#1,>eax=int32#1
5030# asm 2: movl <eax_stack=0(%esp),>eax=%eax
5031movl 0(%esp),%eax
5032
5033# qhasm: ebx = ebx_stack
5034# asm 1: movl <ebx_stack=stack32#2,>ebx=int32#4
5035# asm 2: movl <ebx_stack=4(%esp),>ebx=%ebx
5036movl 4(%esp),%ebx
5037
5038# qhasm: esi = esi_stack
5039# asm 1: movl <esi_stack=stack32#3,>esi=int32#5
5040# asm 2: movl <esi_stack=8(%esp),>esi=%esi
5041movl 8(%esp),%esi
5042
5043# qhasm: edi = edi_stack
5044# asm 1: movl <edi_stack=stack32#4,>edi=int32#6
5045# asm 2: movl <edi_stack=12(%esp),>edi=%edi
5046movl 12(%esp),%edi
5047
5048# qhasm: ebp = ebp_stack
5049# asm 1: movl <ebp_stack=stack32#5,>ebp=int32#7
5050# asm 2: movl <ebp_stack=16(%esp),>ebp=%ebp
5051movl 16(%esp),%ebp
5052
5053# qhasm: leave
5054add %eax,%esp
5055xor %eax,%eax
5056ret
5057
5058# qhasm: bytesatleast65:
5059._bytesatleast65:
5060
5061# qhasm: bytes -= 64
5062# asm 1: sub $64,<bytes=int32#1
5063# asm 2: sub $64,<bytes=%eax
5064sub $64,%eax
5065
5066# qhasm: out += 64
5067# asm 1: add $64,<out=int32#6
5068# asm 2: add $64,<out=%edi
5069add $64,%edi
5070
5071# qhasm: m += 64
5072# asm 1: add $64,<m=int32#5
5073# asm 2: add $64,<m=%esi
5074add $64,%esi
5075# comment:fp stack unchanged by jump
5076
5077# qhasm: goto bytesbetween1and255
5078jmp ._bytesbetween1and255
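
Note: the tail of this x86_xmm5 stream.s above finishes one 64-byte Salsa20 block: the saved input words x0..x3 are added back onto the diag registers, the result is XORed with the message 16 words at a time, a short final block is copied back to the caller's buffer with rep movsb, and the 64-bit block counter stored in x2[0]/x3[1] is bumped before jumping back for any remaining bytes. A rough C sketch of that per-block step (illustrative only, not the file's actual code; the names xor_block, block and counter are made up here) would be:

#include <stdint.h>
#include <string.h>

/* Sketch: XOR one computed 64-byte keystream block into the message,
   handle a short final block by copying only the needed bytes back,
   and bump the 64-bit counter (the add $1 / adc $0 pair above). */
static void xor_block(uint8_t *out, const uint8_t *m,
                      const uint8_t block[64],
                      uint64_t *counter, size_t bytes)
{
  uint8_t tmp[64];
  size_t n = bytes < 64 ? bytes : 64;
  size_t i;
  for (i = 0; i < n; i++) tmp[i] = m[i] ^ block[i];
  memcpy(out, tmp, n);   /* partial block: write back only n bytes */
  (*counter)++;          /* carries from word 8 into word 9 */
}
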
diff --git a/nacl/crypto_stream/salsa2012/amd64_xmm6/api.h b/nacl/crypto_stream/salsa2012/amd64_xmm6/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/amd64_xmm6/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
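
Note: api.h only pins the sizes — Salsa20/12 here uses a 32-byte key and an 8-byte nonce. A minimal caller-side sketch of the NaCl crypto_stream interface those constants describe (an illustrative example, not part of this diff; buffer sizes are arbitrary) would be:

#include "crypto_stream_salsa2012.h"

/* k must be crypto_stream_salsa2012_KEYBYTES (32) bytes,
   n must be crypto_stream_salsa2012_NONCEBYTES (8) bytes. */
unsigned char k[32], n[8], m[1024], c[1024];

int example(void)
{
  /* fill c with keystream ... */
  if (crypto_stream_salsa2012(c, sizeof c, n, k) != 0) return -1;
  /* ... or encrypt/decrypt m into c by XORing with the keystream */
  return crypto_stream_salsa2012_xor(c, m, sizeof m, n, k);
}
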
diff --git a/nacl/crypto_stream/salsa2012/amd64_xmm6/implementors b/nacl/crypto_stream/salsa2012/amd64_xmm6/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/amd64_xmm6/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s b/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s
new file mode 100644
index 00000000..0e26dc9f
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s
@@ -0,0 +1,4823 @@
1
2# qhasm: int64 r11_caller
3
4# qhasm: int64 r12_caller
5
6# qhasm: int64 r13_caller
7
8# qhasm: int64 r14_caller
9
10# qhasm: int64 r15_caller
11
12# qhasm: int64 rbx_caller
13
14# qhasm: int64 rbp_caller
15
16# qhasm: caller r11_caller
17
18# qhasm: caller r12_caller
19
20# qhasm: caller r13_caller
21
22# qhasm: caller r14_caller
23
24# qhasm: caller r15_caller
25
26# qhasm: caller rbx_caller
27
28# qhasm: caller rbp_caller
29
30# qhasm: stack64 r11_stack
31
32# qhasm: stack64 r12_stack
33
34# qhasm: stack64 r13_stack
35
36# qhasm: stack64 r14_stack
37
38# qhasm: stack64 r15_stack
39
40# qhasm: stack64 rbx_stack
41
42# qhasm: stack64 rbp_stack
43
44# qhasm: int64 a
45
46# qhasm: int64 arg1
47
48# qhasm: int64 arg2
49
50# qhasm: int64 arg3
51
52# qhasm: int64 arg4
53
54# qhasm: int64 arg5
55
56# qhasm: input arg1
57
58# qhasm: input arg2
59
60# qhasm: input arg3
61
62# qhasm: input arg4
63
64# qhasm: input arg5
65
66# qhasm: int64 k
67
68# qhasm: int64 kbits
69
70# qhasm: int64 iv
71
72# qhasm: int64 i
73
74# qhasm: stack128 x0
75
76# qhasm: stack128 x1
77
78# qhasm: stack128 x2
79
80# qhasm: stack128 x3
81
82# qhasm: int64 m
83
84# qhasm: int64 out
85
86# qhasm: int64 bytes
87
88# qhasm: stack32 eax_stack
89
90# qhasm: stack32 ebx_stack
91
92# qhasm: stack32 esi_stack
93
94# qhasm: stack32 edi_stack
95
96# qhasm: stack32 ebp_stack
97
98# qhasm: int6464 diag0
99
100# qhasm: int6464 diag1
101
102# qhasm: int6464 diag2
103
104# qhasm: int6464 diag3
105
106# qhasm: int6464 a0
107
108# qhasm: int6464 a1
109
110# qhasm: int6464 a2
111
112# qhasm: int6464 a3
113
114# qhasm: int6464 a4
115
116# qhasm: int6464 a5
117
118# qhasm: int6464 a6
119
120# qhasm: int6464 a7
121
122# qhasm: int6464 b0
123
124# qhasm: int6464 b1
125
126# qhasm: int6464 b2
127
128# qhasm: int6464 b3
129
130# qhasm: int6464 b4
131
132# qhasm: int6464 b5
133
134# qhasm: int6464 b6
135
136# qhasm: int6464 b7
137
138# qhasm: int6464 z0
139
140# qhasm: int6464 z1
141
142# qhasm: int6464 z2
143
144# qhasm: int6464 z3
145
146# qhasm: int6464 z4
147
148# qhasm: int6464 z5
149
150# qhasm: int6464 z6
151
152# qhasm: int6464 z7
153
154# qhasm: int6464 z8
155
156# qhasm: int6464 z9
157
158# qhasm: int6464 z10
159
160# qhasm: int6464 z11
161
162# qhasm: int6464 z12
163
164# qhasm: int6464 z13
165
166# qhasm: int6464 z14
167
168# qhasm: int6464 z15
169
170# qhasm: stack128 z0_stack
171
172# qhasm: stack128 z1_stack
173
174# qhasm: stack128 z2_stack
175
176# qhasm: stack128 z3_stack
177
178# qhasm: stack128 z4_stack
179
180# qhasm: stack128 z5_stack
181
182# qhasm: stack128 z6_stack
183
184# qhasm: stack128 z7_stack
185
186# qhasm: stack128 z8_stack
187
188# qhasm: stack128 z9_stack
189
190# qhasm: stack128 z10_stack
191
192# qhasm: stack128 z11_stack
193
194# qhasm: stack128 z12_stack
195
196# qhasm: stack128 z13_stack
197
198# qhasm: stack128 z14_stack
199
200# qhasm: stack128 z15_stack
201
202# qhasm: int6464 y0
203
204# qhasm: int6464 y1
205
206# qhasm: int6464 y2
207
208# qhasm: int6464 y3
209
210# qhasm: int6464 y4
211
212# qhasm: int6464 y5
213
214# qhasm: int6464 y6
215
216# qhasm: int6464 y7
217
218# qhasm: int6464 y8
219
220# qhasm: int6464 y9
221
222# qhasm: int6464 y10
223
224# qhasm: int6464 y11
225
226# qhasm: int6464 y12
227
228# qhasm: int6464 y13
229
230# qhasm: int6464 y14
231
232# qhasm: int6464 y15
233
234# qhasm: int6464 r0
235
236# qhasm: int6464 r1
237
238# qhasm: int6464 r2
239
240# qhasm: int6464 r3
241
242# qhasm: int6464 r4
243
244# qhasm: int6464 r5
245
246# qhasm: int6464 r6
247
248# qhasm: int6464 r7
249
250# qhasm: int6464 r8
251
252# qhasm: int6464 r9
253
254# qhasm: int6464 r10
255
256# qhasm: int6464 r11
257
258# qhasm: int6464 r12
259
260# qhasm: int6464 r13
261
262# qhasm: int6464 r14
263
264# qhasm: int6464 r15
265
266# qhasm: stack128 orig0
267
268# qhasm: stack128 orig1
269
270# qhasm: stack128 orig2
271
272# qhasm: stack128 orig3
273
274# qhasm: stack128 orig4
275
276# qhasm: stack128 orig5
277
278# qhasm: stack128 orig6
279
280# qhasm: stack128 orig7
281
282# qhasm: stack128 orig8
283
284# qhasm: stack128 orig9
285
286# qhasm: stack128 orig10
287
288# qhasm: stack128 orig11
289
290# qhasm: stack128 orig12
291
292# qhasm: stack128 orig13
293
294# qhasm: stack128 orig14
295
296# qhasm: stack128 orig15
297
298# qhasm: int64 in0
299
300# qhasm: int64 in1
301
302# qhasm: int64 in2
303
304# qhasm: int64 in3
305
306# qhasm: int64 in4
307
308# qhasm: int64 in5
309
310# qhasm: int64 in6
311
312# qhasm: int64 in7
313
314# qhasm: int64 in8
315
316# qhasm: int64 in9
317
318# qhasm: int64 in10
319
320# qhasm: int64 in11
321
322# qhasm: int64 in12
323
324# qhasm: int64 in13
325
326# qhasm: int64 in14
327
328# qhasm: int64 in15
329
330# qhasm: stack512 tmp
331
332# qhasm: int64 ctarget
333
334# qhasm: stack64 bytes_backup
335
336# qhasm: enter crypto_stream_salsa2012_amd64_xmm6
337.text
338.p2align 5
339.globl _crypto_stream_salsa2012_amd64_xmm6
340.globl crypto_stream_salsa2012_amd64_xmm6
341_crypto_stream_salsa2012_amd64_xmm6:
342crypto_stream_salsa2012_amd64_xmm6:
343mov %rsp,%r11
344and $31,%r11
345add $480,%r11
346sub %r11,%rsp
347
348# qhasm: r11_stack = r11_caller
349# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
350# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
351movq %r11,352(%rsp)
352
353# qhasm: r12_stack = r12_caller
354# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
355# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
356movq %r12,360(%rsp)
357
358# qhasm: r13_stack = r13_caller
359# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
360# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
361movq %r13,368(%rsp)
362
363# qhasm: r14_stack = r14_caller
364# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
365# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
366movq %r14,376(%rsp)
367
368# qhasm: r15_stack = r15_caller
369# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
370# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
371movq %r15,384(%rsp)
372
373# qhasm: rbx_stack = rbx_caller
374# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
375# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
376movq %rbx,392(%rsp)
377
378# qhasm: rbp_stack = rbp_caller
379# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
380# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
381movq %rbp,400(%rsp)
382
383# qhasm: bytes = arg2
384# asm 1: mov <arg2=int64#2,>bytes=int64#6
385# asm 2: mov <arg2=%rsi,>bytes=%r9
386mov %rsi,%r9
387
388# qhasm: out = arg1
389# asm 1: mov <arg1=int64#1,>out=int64#1
390# asm 2: mov <arg1=%rdi,>out=%rdi
391mov %rdi,%rdi
392
393# qhasm: m = out
394# asm 1: mov <out=int64#1,>m=int64#2
395# asm 2: mov <out=%rdi,>m=%rsi
396mov %rdi,%rsi
397
398# qhasm: iv = arg3
399# asm 1: mov <arg3=int64#3,>iv=int64#3
400# asm 2: mov <arg3=%rdx,>iv=%rdx
401mov %rdx,%rdx
402
403# qhasm: k = arg4
404# asm 1: mov <arg4=int64#4,>k=int64#8
405# asm 2: mov <arg4=%rcx,>k=%r10
406mov %rcx,%r10
407
408# qhasm: unsigned>? bytes - 0
409# asm 1: cmp $0,<bytes=int64#6
410# asm 2: cmp $0,<bytes=%r9
411cmp $0,%r9
412# comment:fp stack unchanged by jump
413
414# qhasm: goto done if !unsigned>
415jbe ._done
416
417# qhasm: a = 0
418# asm 1: mov $0,>a=int64#7
419# asm 2: mov $0,>a=%rax
420mov $0,%rax
421
422# qhasm: i = bytes
423# asm 1: mov <bytes=int64#6,>i=int64#4
424# asm 2: mov <bytes=%r9,>i=%rcx
425mov %r9,%rcx
426
427# qhasm: while (i) { *out++ = a; --i }
428rep stosb
429
430# qhasm: out -= bytes
431# asm 1: sub <bytes=int64#6,<out=int64#1
432# asm 2: sub <bytes=%r9,<out=%rdi
433sub %r9,%rdi
434# comment:fp stack unchanged by jump
435
436# qhasm: goto start
437jmp ._start
438
439# qhasm: enter crypto_stream_salsa2012_amd64_xmm6_xor
440.text
441.p2align 5
442.globl _crypto_stream_salsa2012_amd64_xmm6_xor
443.globl crypto_stream_salsa2012_amd64_xmm6_xor
444_crypto_stream_salsa2012_amd64_xmm6_xor:
445crypto_stream_salsa2012_amd64_xmm6_xor:
446mov %rsp,%r11
447and $31,%r11
448add $480,%r11
449sub %r11,%rsp
450
451# qhasm: r11_stack = r11_caller
452# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
453# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
454movq %r11,352(%rsp)
455
456# qhasm: r12_stack = r12_caller
457# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
458# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
459movq %r12,360(%rsp)
460
461# qhasm: r13_stack = r13_caller
462# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
463# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
464movq %r13,368(%rsp)
465
466# qhasm: r14_stack = r14_caller
467# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
468# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
469movq %r14,376(%rsp)
470
471# qhasm: r15_stack = r15_caller
472# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
473# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
474movq %r15,384(%rsp)
475
476# qhasm: rbx_stack = rbx_caller
477# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
478# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
479movq %rbx,392(%rsp)
480
481# qhasm: rbp_stack = rbp_caller
482# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
483# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
484movq %rbp,400(%rsp)
485
486# qhasm: out = arg1
487# asm 1: mov <arg1=int64#1,>out=int64#1
488# asm 2: mov <arg1=%rdi,>out=%rdi
489mov %rdi,%rdi
490
491# qhasm: m = arg2
492# asm 1: mov <arg2=int64#2,>m=int64#2
493# asm 2: mov <arg2=%rsi,>m=%rsi
494mov %rsi,%rsi
495
496# qhasm: bytes = arg3
497# asm 1: mov <arg3=int64#3,>bytes=int64#6
498# asm 2: mov <arg3=%rdx,>bytes=%r9
499mov %rdx,%r9
500
501# qhasm: iv = arg4
502# asm 1: mov <arg4=int64#4,>iv=int64#3
503# asm 2: mov <arg4=%rcx,>iv=%rdx
504mov %rcx,%rdx
505
506# qhasm: k = arg5
507# asm 1: mov <arg5=int64#5,>k=int64#8
508# asm 2: mov <arg5=%r8,>k=%r10
509mov %r8,%r10
510
511# qhasm: unsigned>? bytes - 0
512# asm 1: cmp $0,<bytes=int64#6
513# asm 2: cmp $0,<bytes=%r9
514cmp $0,%r9
515# comment:fp stack unchanged by jump
516
517# qhasm: goto done if !unsigned>
518jbe ._done
519# comment:fp stack unchanged by fallthrough
520
521# qhasm: start:
522._start:
523
524# qhasm: in12 = *(uint32 *) (k + 20)
525# asm 1: movl 20(<k=int64#8),>in12=int64#4d
526# asm 2: movl 20(<k=%r10),>in12=%ecx
527movl 20(%r10),%ecx
528
529# qhasm: in1 = *(uint32 *) (k + 0)
530# asm 1: movl 0(<k=int64#8),>in1=int64#5d
531# asm 2: movl 0(<k=%r10),>in1=%r8d
532movl 0(%r10),%r8d
533
534# qhasm: in6 = *(uint32 *) (iv + 0)
535# asm 1: movl 0(<iv=int64#3),>in6=int64#7d
536# asm 2: movl 0(<iv=%rdx),>in6=%eax
537movl 0(%rdx),%eax
538
539# qhasm: in11 = *(uint32 *) (k + 16)
540# asm 1: movl 16(<k=int64#8),>in11=int64#9d
541# asm 2: movl 16(<k=%r10),>in11=%r11d
542movl 16(%r10),%r11d
543
544# qhasm: ((uint32 *)&x1)[0] = in12
545# asm 1: movl <in12=int64#4d,>x1=stack128#1
546# asm 2: movl <in12=%ecx,>x1=0(%rsp)
547movl %ecx,0(%rsp)
548
549# qhasm: ((uint32 *)&x1)[1] = in1
550# asm 1: movl <in1=int64#5d,4+<x1=stack128#1
551# asm 2: movl <in1=%r8d,4+<x1=0(%rsp)
552movl %r8d,4+0(%rsp)
553
554# qhasm: ((uint32 *)&x1)[2] = in6
555# asm 1: movl <in6=int64#7d,8+<x1=stack128#1
556# asm 2: movl <in6=%eax,8+<x1=0(%rsp)
557movl %eax,8+0(%rsp)
558
559# qhasm: ((uint32 *)&x1)[3] = in11
560# asm 1: movl <in11=int64#9d,12+<x1=stack128#1
561# asm 2: movl <in11=%r11d,12+<x1=0(%rsp)
562movl %r11d,12+0(%rsp)
563
564# qhasm: in8 = 0
565# asm 1: mov $0,>in8=int64#4
566# asm 2: mov $0,>in8=%rcx
567mov $0,%rcx
568
569# qhasm: in13 = *(uint32 *) (k + 24)
570# asm 1: movl 24(<k=int64#8),>in13=int64#5d
571# asm 2: movl 24(<k=%r10),>in13=%r8d
572movl 24(%r10),%r8d
573
574# qhasm: in2 = *(uint32 *) (k + 4)
575# asm 1: movl 4(<k=int64#8),>in2=int64#7d
576# asm 2: movl 4(<k=%r10),>in2=%eax
577movl 4(%r10),%eax
578
579# qhasm: in7 = *(uint32 *) (iv + 4)
580# asm 1: movl 4(<iv=int64#3),>in7=int64#3d
581# asm 2: movl 4(<iv=%rdx),>in7=%edx
582movl 4(%rdx),%edx
583
584# qhasm: ((uint32 *)&x2)[0] = in8
585# asm 1: movl <in8=int64#4d,>x2=stack128#2
586# asm 2: movl <in8=%ecx,>x2=16(%rsp)
587movl %ecx,16(%rsp)
588
589# qhasm: ((uint32 *)&x2)[1] = in13
590# asm 1: movl <in13=int64#5d,4+<x2=stack128#2
591# asm 2: movl <in13=%r8d,4+<x2=16(%rsp)
592movl %r8d,4+16(%rsp)
593
594# qhasm: ((uint32 *)&x2)[2] = in2
595# asm 1: movl <in2=int64#7d,8+<x2=stack128#2
596# asm 2: movl <in2=%eax,8+<x2=16(%rsp)
597movl %eax,8+16(%rsp)
598
599# qhasm: ((uint32 *)&x2)[3] = in7
600# asm 1: movl <in7=int64#3d,12+<x2=stack128#2
601# asm 2: movl <in7=%edx,12+<x2=16(%rsp)
602movl %edx,12+16(%rsp)
603
604# qhasm: in4 = *(uint32 *) (k + 12)
605# asm 1: movl 12(<k=int64#8),>in4=int64#3d
606# asm 2: movl 12(<k=%r10),>in4=%edx
607movl 12(%r10),%edx
608
609# qhasm: in9 = 0
610# asm 1: mov $0,>in9=int64#4
611# asm 2: mov $0,>in9=%rcx
612mov $0,%rcx
613
614# qhasm: in14 = *(uint32 *) (k + 28)
615# asm 1: movl 28(<k=int64#8),>in14=int64#5d
616# asm 2: movl 28(<k=%r10),>in14=%r8d
617movl 28(%r10),%r8d
618
619# qhasm: in3 = *(uint32 *) (k + 8)
620# asm 1: movl 8(<k=int64#8),>in3=int64#7d
621# asm 2: movl 8(<k=%r10),>in3=%eax
622movl 8(%r10),%eax
623
624# qhasm: ((uint32 *)&x3)[0] = in4
625# asm 1: movl <in4=int64#3d,>x3=stack128#3
626# asm 2: movl <in4=%edx,>x3=32(%rsp)
627movl %edx,32(%rsp)
628
629# qhasm: ((uint32 *)&x3)[1] = in9
630# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
631# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
632movl %ecx,4+32(%rsp)
633
634# qhasm: ((uint32 *)&x3)[2] = in14
635# asm 1: movl <in14=int64#5d,8+<x3=stack128#3
636# asm 2: movl <in14=%r8d,8+<x3=32(%rsp)
637movl %r8d,8+32(%rsp)
638
639# qhasm: ((uint32 *)&x3)[3] = in3
640# asm 1: movl <in3=int64#7d,12+<x3=stack128#3
641# asm 2: movl <in3=%eax,12+<x3=32(%rsp)
642movl %eax,12+32(%rsp)
643
644# qhasm: in0 = 1634760805
645# asm 1: mov $1634760805,>in0=int64#3
646# asm 2: mov $1634760805,>in0=%rdx
647mov $1634760805,%rdx
648
649# qhasm: in5 = 857760878
650# asm 1: mov $857760878,>in5=int64#4
651# asm 2: mov $857760878,>in5=%rcx
652mov $857760878,%rcx
653
654# qhasm: in10 = 2036477234
655# asm 1: mov $2036477234,>in10=int64#5
656# asm 2: mov $2036477234,>in10=%r8
657mov $2036477234,%r8
658
659# qhasm: in15 = 1797285236
660# asm 1: mov $1797285236,>in15=int64#7
661# asm 2: mov $1797285236,>in15=%rax
662mov $1797285236,%rax
663
664# qhasm: ((uint32 *)&x0)[0] = in0
665# asm 1: movl <in0=int64#3d,>x0=stack128#4
666# asm 2: movl <in0=%edx,>x0=48(%rsp)
667movl %edx,48(%rsp)
668
669# qhasm: ((uint32 *)&x0)[1] = in5
670# asm 1: movl <in5=int64#4d,4+<x0=stack128#4
671# asm 2: movl <in5=%ecx,4+<x0=48(%rsp)
672movl %ecx,4+48(%rsp)
673
674# qhasm: ((uint32 *)&x0)[2] = in10
675# asm 1: movl <in10=int64#5d,8+<x0=stack128#4
676# asm 2: movl <in10=%r8d,8+<x0=48(%rsp)
677movl %r8d,8+48(%rsp)
678
679# qhasm: ((uint32 *)&x0)[3] = in15
680# asm 1: movl <in15=int64#7d,12+<x0=stack128#4
681# asm 2: movl <in15=%eax,12+<x0=48(%rsp)
682movl %eax,12+48(%rsp)
683
684# qhasm: unsigned<? bytes - 256
685# asm 1: cmp $256,<bytes=int64#6
686# asm 2: cmp $256,<bytes=%r9
687cmp $256,%r9
688# comment:fp stack unchanged by jump
689
690# qhasm: goto bytesbetween1and255 if unsigned<
691jb ._bytesbetween1and255
692
693# qhasm: z0 = x0
694# asm 1: movdqa <x0=stack128#4,>z0=int6464#1
695# asm 2: movdqa <x0=48(%rsp),>z0=%xmm0
696movdqa 48(%rsp),%xmm0
697
698# qhasm: z5 = z0[1,1,1,1]
699# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
700# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
701pshufd $0x55,%xmm0,%xmm1
702
703# qhasm: z10 = z0[2,2,2,2]
704# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
705# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
706pshufd $0xaa,%xmm0,%xmm2
707
708# qhasm: z15 = z0[3,3,3,3]
709# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
710# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
711pshufd $0xff,%xmm0,%xmm3
712
713# qhasm: z0 = z0[0,0,0,0]
714# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
715# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
716pshufd $0x00,%xmm0,%xmm0
717
718# qhasm: orig5 = z5
719# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
720# asm 2: movdqa <z5=%xmm1,>orig5=64(%rsp)
721movdqa %xmm1,64(%rsp)
722
723# qhasm: orig10 = z10
724# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
725# asm 2: movdqa <z10=%xmm2,>orig10=80(%rsp)
726movdqa %xmm2,80(%rsp)
727
728# qhasm: orig15 = z15
729# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
730# asm 2: movdqa <z15=%xmm3,>orig15=96(%rsp)
731movdqa %xmm3,96(%rsp)
732
733# qhasm: orig0 = z0
734# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
735# asm 2: movdqa <z0=%xmm0,>orig0=112(%rsp)
736movdqa %xmm0,112(%rsp)
737
738# qhasm: z1 = x1
739# asm 1: movdqa <x1=stack128#1,>z1=int6464#1
740# asm 2: movdqa <x1=0(%rsp),>z1=%xmm0
741movdqa 0(%rsp),%xmm0
742
743# qhasm: z6 = z1[2,2,2,2]
744# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
745# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
746pshufd $0xaa,%xmm0,%xmm1
747
748# qhasm: z11 = z1[3,3,3,3]
749# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
750# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
751pshufd $0xff,%xmm0,%xmm2
752
753# qhasm: z12 = z1[0,0,0,0]
754# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
755# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
756pshufd $0x00,%xmm0,%xmm3
757
758# qhasm: z1 = z1[1,1,1,1]
759# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
760# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
761pshufd $0x55,%xmm0,%xmm0
762
763# qhasm: orig6 = z6
764# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
765# asm 2: movdqa <z6=%xmm1,>orig6=128(%rsp)
766movdqa %xmm1,128(%rsp)
767
768# qhasm: orig11 = z11
769# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
770# asm 2: movdqa <z11=%xmm2,>orig11=144(%rsp)
771movdqa %xmm2,144(%rsp)
772
773# qhasm: orig12 = z12
774# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
775# asm 2: movdqa <z12=%xmm3,>orig12=160(%rsp)
776movdqa %xmm3,160(%rsp)
777
778# qhasm: orig1 = z1
779# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
780# asm 2: movdqa <z1=%xmm0,>orig1=176(%rsp)
781movdqa %xmm0,176(%rsp)
782
783# qhasm: z2 = x2
784# asm 1: movdqa <x2=stack128#2,>z2=int6464#1
785# asm 2: movdqa <x2=16(%rsp),>z2=%xmm0
786movdqa 16(%rsp),%xmm0
787
788# qhasm: z7 = z2[3,3,3,3]
789# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
790# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
791pshufd $0xff,%xmm0,%xmm1
792
793# qhasm: z13 = z2[1,1,1,1]
794# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
795# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
796pshufd $0x55,%xmm0,%xmm2
797
798# qhasm: z2 = z2[2,2,2,2]
799# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
800# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
801pshufd $0xaa,%xmm0,%xmm0
802
803# qhasm: orig7 = z7
804# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
805# asm 2: movdqa <z7=%xmm1,>orig7=192(%rsp)
806movdqa %xmm1,192(%rsp)
807
808# qhasm: orig13 = z13
809# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
810# asm 2: movdqa <z13=%xmm2,>orig13=208(%rsp)
811movdqa %xmm2,208(%rsp)
812
813# qhasm: orig2 = z2
814# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
815# asm 2: movdqa <z2=%xmm0,>orig2=224(%rsp)
816movdqa %xmm0,224(%rsp)
817
818# qhasm: z3 = x3
819# asm 1: movdqa <x3=stack128#3,>z3=int6464#1
820# asm 2: movdqa <x3=32(%rsp),>z3=%xmm0
821movdqa 32(%rsp),%xmm0
822
823# qhasm: z4 = z3[0,0,0,0]
824# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
825# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
826pshufd $0x00,%xmm0,%xmm1
827
828# qhasm: z14 = z3[2,2,2,2]
829# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
830# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
831pshufd $0xaa,%xmm0,%xmm2
832
833# qhasm: z3 = z3[3,3,3,3]
834# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
835# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
836pshufd $0xff,%xmm0,%xmm0
837
838# qhasm: orig4 = z4
839# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
840# asm 2: movdqa <z4=%xmm1,>orig4=240(%rsp)
841movdqa %xmm1,240(%rsp)
842
843# qhasm: orig14 = z14
844# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
845# asm 2: movdqa <z14=%xmm2,>orig14=256(%rsp)
846movdqa %xmm2,256(%rsp)
847
848# qhasm: orig3 = z3
849# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
850# asm 2: movdqa <z3=%xmm0,>orig3=272(%rsp)
851movdqa %xmm0,272(%rsp)
852
853# qhasm: bytesatleast256:
854._bytesatleast256:
855
856# qhasm: in8 = ((uint32 *)&x2)[0]
857# asm 1: movl <x2=stack128#2,>in8=int64#3d
858# asm 2: movl <x2=16(%rsp),>in8=%edx
859movl 16(%rsp),%edx
860
861# qhasm: in9 = ((uint32 *)&x3)[1]
862# asm 1: movl 4+<x3=stack128#3,>in9=int64#4d
863# asm 2: movl 4+<x3=32(%rsp),>in9=%ecx
864movl 4+32(%rsp),%ecx
865
866# qhasm: ((uint32 *) &orig8)[0] = in8
867# asm 1: movl <in8=int64#3d,>orig8=stack128#19
868# asm 2: movl <in8=%edx,>orig8=288(%rsp)
869movl %edx,288(%rsp)
870
871# qhasm: ((uint32 *) &orig9)[0] = in9
872# asm 1: movl <in9=int64#4d,>orig9=stack128#20
873# asm 2: movl <in9=%ecx,>orig9=304(%rsp)
874movl %ecx,304(%rsp)
875
876# qhasm: in8 += 1
877# asm 1: add $1,<in8=int64#3
878# asm 2: add $1,<in8=%rdx
879add $1,%rdx
880
881# qhasm: in9 <<= 32
882# asm 1: shl $32,<in9=int64#4
883# asm 2: shl $32,<in9=%rcx
884shl $32,%rcx
885
886# qhasm: in8 += in9
887# asm 1: add <in9=int64#4,<in8=int64#3
888# asm 2: add <in9=%rcx,<in8=%rdx
889add %rcx,%rdx
890
891# qhasm: in9 = in8
892# asm 1: mov <in8=int64#3,>in9=int64#4
893# asm 2: mov <in8=%rdx,>in9=%rcx
894mov %rdx,%rcx
895
896# qhasm: (uint64) in9 >>= 32
897# asm 1: shr $32,<in9=int64#4
898# asm 2: shr $32,<in9=%rcx
899shr $32,%rcx
900
901# qhasm: ((uint32 *) &orig8)[1] = in8
902# asm 1: movl <in8=int64#3d,4+<orig8=stack128#19
903# asm 2: movl <in8=%edx,4+<orig8=288(%rsp)
904movl %edx,4+288(%rsp)
905
906# qhasm: ((uint32 *) &orig9)[1] = in9
907# asm 1: movl <in9=int64#4d,4+<orig9=stack128#20
908# asm 2: movl <in9=%ecx,4+<orig9=304(%rsp)
909movl %ecx,4+304(%rsp)
910
911# qhasm: in8 += 1
912# asm 1: add $1,<in8=int64#3
913# asm 2: add $1,<in8=%rdx
914add $1,%rdx
915
916# qhasm: in9 <<= 32
917# asm 1: shl $32,<in9=int64#4
918# asm 2: shl $32,<in9=%rcx
919shl $32,%rcx
920
921# qhasm: in8 += in9
922# asm 1: add <in9=int64#4,<in8=int64#3
923# asm 2: add <in9=%rcx,<in8=%rdx
924add %rcx,%rdx
925
926# qhasm: in9 = in8
927# asm 1: mov <in8=int64#3,>in9=int64#4
928# asm 2: mov <in8=%rdx,>in9=%rcx
929mov %rdx,%rcx
930
931# qhasm: (uint64) in9 >>= 32
932# asm 1: shr $32,<in9=int64#4
933# asm 2: shr $32,<in9=%rcx
934shr $32,%rcx
935
936# qhasm: ((uint32 *) &orig8)[2] = in8
937# asm 1: movl <in8=int64#3d,8+<orig8=stack128#19
938# asm 2: movl <in8=%edx,8+<orig8=288(%rsp)
939movl %edx,8+288(%rsp)
940
941# qhasm: ((uint32 *) &orig9)[2] = in9
942# asm 1: movl <in9=int64#4d,8+<orig9=stack128#20
943# asm 2: movl <in9=%ecx,8+<orig9=304(%rsp)
944movl %ecx,8+304(%rsp)
945
946# qhasm: in8 += 1
947# asm 1: add $1,<in8=int64#3
948# asm 2: add $1,<in8=%rdx
949add $1,%rdx
950
951# qhasm: in9 <<= 32
952# asm 1: shl $32,<in9=int64#4
953# asm 2: shl $32,<in9=%rcx
954shl $32,%rcx
955
956# qhasm: in8 += in9
957# asm 1: add <in9=int64#4,<in8=int64#3
958# asm 2: add <in9=%rcx,<in8=%rdx
959add %rcx,%rdx
960
961# qhasm: in9 = in8
962# asm 1: mov <in8=int64#3,>in9=int64#4
963# asm 2: mov <in8=%rdx,>in9=%rcx
964mov %rdx,%rcx
965
966# qhasm: (uint64) in9 >>= 32
967# asm 1: shr $32,<in9=int64#4
968# asm 2: shr $32,<in9=%rcx
969shr $32,%rcx
970
971# qhasm: ((uint32 *) &orig8)[3] = in8
972# asm 1: movl <in8=int64#3d,12+<orig8=stack128#19
973# asm 2: movl <in8=%edx,12+<orig8=288(%rsp)
974movl %edx,12+288(%rsp)
975
976# qhasm: ((uint32 *) &orig9)[3] = in9
977# asm 1: movl <in9=int64#4d,12+<orig9=stack128#20
978# asm 2: movl <in9=%ecx,12+<orig9=304(%rsp)
979movl %ecx,12+304(%rsp)
980
981# qhasm: in8 += 1
982# asm 1: add $1,<in8=int64#3
983# asm 2: add $1,<in8=%rdx
984add $1,%rdx
985
986# qhasm: in9 <<= 32
987# asm 1: shl $32,<in9=int64#4
988# asm 2: shl $32,<in9=%rcx
989shl $32,%rcx
990
991# qhasm: in8 += in9
992# asm 1: add <in9=int64#4,<in8=int64#3
993# asm 2: add <in9=%rcx,<in8=%rdx
994add %rcx,%rdx
995
996# qhasm: in9 = in8
997# asm 1: mov <in8=int64#3,>in9=int64#4
998# asm 2: mov <in8=%rdx,>in9=%rcx
999mov %rdx,%rcx
1000
1001# qhasm: (uint64) in9 >>= 32
1002# asm 1: shr $32,<in9=int64#4
1003# asm 2: shr $32,<in9=%rcx
1004shr $32,%rcx
1005
1006# qhasm: ((uint32 *)&x2)[0] = in8
1007# asm 1: movl <in8=int64#3d,>x2=stack128#2
1008# asm 2: movl <in8=%edx,>x2=16(%rsp)
1009movl %edx,16(%rsp)
1010
1011# qhasm: ((uint32 *)&x3)[1] = in9
1012# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
1013# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
1014movl %ecx,4+32(%rsp)
1015
1016# qhasm: bytes_backup = bytes
1017# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
1018# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
1019movq %r9,408(%rsp)
1020
1021# qhasm: i = 12
1022# asm 1: mov $12,>i=int64#3
1023# asm 2: mov $12,>i=%rdx
1024mov $12,%rdx
1025
1026# qhasm: z5 = orig5
1027# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
1028# asm 2: movdqa <orig5=64(%rsp),>z5=%xmm0
1029movdqa 64(%rsp),%xmm0
1030
1031# qhasm: z10 = orig10
1032# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
1033# asm 2: movdqa <orig10=80(%rsp),>z10=%xmm1
1034movdqa 80(%rsp),%xmm1
1035
1036# qhasm: z15 = orig15
1037# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
1038# asm 2: movdqa <orig15=96(%rsp),>z15=%xmm2
1039movdqa 96(%rsp),%xmm2
1040
1041# qhasm: z14 = orig14
1042# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
1043# asm 2: movdqa <orig14=256(%rsp),>z14=%xmm3
1044movdqa 256(%rsp),%xmm3
1045
1046# qhasm: z3 = orig3
1047# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
1048# asm 2: movdqa <orig3=272(%rsp),>z3=%xmm4
1049movdqa 272(%rsp),%xmm4
1050
1051# qhasm: z6 = orig6
1052# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
1053# asm 2: movdqa <orig6=128(%rsp),>z6=%xmm5
1054movdqa 128(%rsp),%xmm5
1055
1056# qhasm: z11 = orig11
1057# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
1058# asm 2: movdqa <orig11=144(%rsp),>z11=%xmm6
1059movdqa 144(%rsp),%xmm6
1060
1061# qhasm: z1 = orig1
1062# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
1063# asm 2: movdqa <orig1=176(%rsp),>z1=%xmm7
1064movdqa 176(%rsp),%xmm7
1065
1066# qhasm: z7 = orig7
1067# asm 1: movdqa <orig7=stack128#13,>z7=int6464#9
1068# asm 2: movdqa <orig7=192(%rsp),>z7=%xmm8
1069movdqa 192(%rsp),%xmm8
1070
1071# qhasm: z13 = orig13
1072# asm 1: movdqa <orig13=stack128#14,>z13=int6464#10
1073# asm 2: movdqa <orig13=208(%rsp),>z13=%xmm9
1074movdqa 208(%rsp),%xmm9
1075
1076# qhasm: z2 = orig2
1077# asm 1: movdqa <orig2=stack128#15,>z2=int6464#11
1078# asm 2: movdqa <orig2=224(%rsp),>z2=%xmm10
1079movdqa 224(%rsp),%xmm10
1080
1081# qhasm: z9 = orig9
1082# asm 1: movdqa <orig9=stack128#20,>z9=int6464#12
1083# asm 2: movdqa <orig9=304(%rsp),>z9=%xmm11
1084movdqa 304(%rsp),%xmm11
1085
1086# qhasm: z0 = orig0
1087# asm 1: movdqa <orig0=stack128#8,>z0=int6464#13
1088# asm 2: movdqa <orig0=112(%rsp),>z0=%xmm12
1089movdqa 112(%rsp),%xmm12
1090
1091# qhasm: z12 = orig12
1092# asm 1: movdqa <orig12=stack128#11,>z12=int6464#14
1093# asm 2: movdqa <orig12=160(%rsp),>z12=%xmm13
1094movdqa 160(%rsp),%xmm13
1095
1096# qhasm: z4 = orig4
1097# asm 1: movdqa <orig4=stack128#16,>z4=int6464#15
1098# asm 2: movdqa <orig4=240(%rsp),>z4=%xmm14
1099movdqa 240(%rsp),%xmm14
1100
1101# qhasm: z8 = orig8
1102# asm 1: movdqa <orig8=stack128#19,>z8=int6464#16
1103# asm 2: movdqa <orig8=288(%rsp),>z8=%xmm15
1104movdqa 288(%rsp),%xmm15
1105
1106# qhasm: mainloop1:
1107._mainloop1:
1108
1109# qhasm: z10_stack = z10
1110# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1111# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1112movdqa %xmm1,320(%rsp)
1113
1114# qhasm: z15_stack = z15
1115# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1116# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1117movdqa %xmm2,336(%rsp)
1118
1119# qhasm: y4 = z12
1120# asm 1: movdqa <z12=int6464#14,>y4=int6464#2
1121# asm 2: movdqa <z12=%xmm13,>y4=%xmm1
1122movdqa %xmm13,%xmm1
1123
1124# qhasm: uint32323232 y4 += z0
1125# asm 1: paddd <z0=int6464#13,<y4=int6464#2
1126# asm 2: paddd <z0=%xmm12,<y4=%xmm1
1127paddd %xmm12,%xmm1
1128
1129# qhasm: r4 = y4
1130# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1131# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1132movdqa %xmm1,%xmm2
1133
1134# qhasm: uint32323232 y4 <<= 7
1135# asm 1: pslld $7,<y4=int6464#2
1136# asm 2: pslld $7,<y4=%xmm1
1137pslld $7,%xmm1
1138
1139# qhasm: z4 ^= y4
1140# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1141# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1142pxor %xmm1,%xmm14
1143
1144# qhasm: uint32323232 r4 >>= 25
1145# asm 1: psrld $25,<r4=int6464#3
1146# asm 2: psrld $25,<r4=%xmm2
1147psrld $25,%xmm2
1148
1149# qhasm: z4 ^= r4
1150# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1151# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1152pxor %xmm2,%xmm14
1153
1154# qhasm: y9 = z1
1155# asm 1: movdqa <z1=int6464#8,>y9=int6464#2
1156# asm 2: movdqa <z1=%xmm7,>y9=%xmm1
1157movdqa %xmm7,%xmm1
1158
1159# qhasm: uint32323232 y9 += z5
1160# asm 1: paddd <z5=int6464#1,<y9=int6464#2
1161# asm 2: paddd <z5=%xmm0,<y9=%xmm1
1162paddd %xmm0,%xmm1
1163
1164# qhasm: r9 = y9
1165# asm 1: movdqa <y9=int6464#2,>r9=int6464#3
1166# asm 2: movdqa <y9=%xmm1,>r9=%xmm2
1167movdqa %xmm1,%xmm2
1168
1169# qhasm: uint32323232 y9 <<= 7
1170# asm 1: pslld $7,<y9=int6464#2
1171# asm 2: pslld $7,<y9=%xmm1
1172pslld $7,%xmm1
1173
1174# qhasm: z9 ^= y9
1175# asm 1: pxor <y9=int6464#2,<z9=int6464#12
1176# asm 2: pxor <y9=%xmm1,<z9=%xmm11
1177pxor %xmm1,%xmm11
1178
1179# qhasm: uint32323232 r9 >>= 25
1180# asm 1: psrld $25,<r9=int6464#3
1181# asm 2: psrld $25,<r9=%xmm2
1182psrld $25,%xmm2
1183
1184# qhasm: z9 ^= r9
1185# asm 1: pxor <r9=int6464#3,<z9=int6464#12
1186# asm 2: pxor <r9=%xmm2,<z9=%xmm11
1187pxor %xmm2,%xmm11
1188
1189# qhasm: y8 = z0
1190# asm 1: movdqa <z0=int6464#13,>y8=int6464#2
1191# asm 2: movdqa <z0=%xmm12,>y8=%xmm1
1192movdqa %xmm12,%xmm1
1193
1194# qhasm: uint32323232 y8 += z4
1195# asm 1: paddd <z4=int6464#15,<y8=int6464#2
1196# asm 2: paddd <z4=%xmm14,<y8=%xmm1
1197paddd %xmm14,%xmm1
1198
1199# qhasm: r8 = y8
1200# asm 1: movdqa <y8=int6464#2,>r8=int6464#3
1201# asm 2: movdqa <y8=%xmm1,>r8=%xmm2
1202movdqa %xmm1,%xmm2
1203
1204# qhasm: uint32323232 y8 <<= 9
1205# asm 1: pslld $9,<y8=int6464#2
1206# asm 2: pslld $9,<y8=%xmm1
1207pslld $9,%xmm1
1208
1209# qhasm: z8 ^= y8
1210# asm 1: pxor <y8=int6464#2,<z8=int6464#16
1211# asm 2: pxor <y8=%xmm1,<z8=%xmm15
1212pxor %xmm1,%xmm15
1213
1214# qhasm: uint32323232 r8 >>= 23
1215# asm 1: psrld $23,<r8=int6464#3
1216# asm 2: psrld $23,<r8=%xmm2
1217psrld $23,%xmm2
1218
1219# qhasm: z8 ^= r8
1220# asm 1: pxor <r8=int6464#3,<z8=int6464#16
1221# asm 2: pxor <r8=%xmm2,<z8=%xmm15
1222pxor %xmm2,%xmm15
1223
1224# qhasm: y13 = z5
1225# asm 1: movdqa <z5=int6464#1,>y13=int6464#2
1226# asm 2: movdqa <z5=%xmm0,>y13=%xmm1
1227movdqa %xmm0,%xmm1
1228
1229# qhasm: uint32323232 y13 += z9
1230# asm 1: paddd <z9=int6464#12,<y13=int6464#2
1231# asm 2: paddd <z9=%xmm11,<y13=%xmm1
1232paddd %xmm11,%xmm1
1233
1234# qhasm: r13 = y13
1235# asm 1: movdqa <y13=int6464#2,>r13=int6464#3
1236# asm 2: movdqa <y13=%xmm1,>r13=%xmm2
1237movdqa %xmm1,%xmm2
1238
1239# qhasm: uint32323232 y13 <<= 9
1240# asm 1: pslld $9,<y13=int6464#2
1241# asm 2: pslld $9,<y13=%xmm1
1242pslld $9,%xmm1
1243
1244# qhasm: z13 ^= y13
1245# asm 1: pxor <y13=int6464#2,<z13=int6464#10
1246# asm 2: pxor <y13=%xmm1,<z13=%xmm9
1247pxor %xmm1,%xmm9
1248
1249# qhasm: uint32323232 r13 >>= 23
1250# asm 1: psrld $23,<r13=int6464#3
1251# asm 2: psrld $23,<r13=%xmm2
1252psrld $23,%xmm2
1253
1254# qhasm: z13 ^= r13
1255# asm 1: pxor <r13=int6464#3,<z13=int6464#10
1256# asm 2: pxor <r13=%xmm2,<z13=%xmm9
1257pxor %xmm2,%xmm9
1258
1259# qhasm: y12 = z4
1260# asm 1: movdqa <z4=int6464#15,>y12=int6464#2
1261# asm 2: movdqa <z4=%xmm14,>y12=%xmm1
1262movdqa %xmm14,%xmm1
1263
1264# qhasm: uint32323232 y12 += z8
1265# asm 1: paddd <z8=int6464#16,<y12=int6464#2
1266# asm 2: paddd <z8=%xmm15,<y12=%xmm1
1267paddd %xmm15,%xmm1
1268
1269# qhasm: r12 = y12
1270# asm 1: movdqa <y12=int6464#2,>r12=int6464#3
1271# asm 2: movdqa <y12=%xmm1,>r12=%xmm2
1272movdqa %xmm1,%xmm2
1273
1274# qhasm: uint32323232 y12 <<= 13
1275# asm 1: pslld $13,<y12=int6464#2
1276# asm 2: pslld $13,<y12=%xmm1
1277pslld $13,%xmm1
1278
1279# qhasm: z12 ^= y12
1280# asm 1: pxor <y12=int6464#2,<z12=int6464#14
1281# asm 2: pxor <y12=%xmm1,<z12=%xmm13
1282pxor %xmm1,%xmm13
1283
1284# qhasm: uint32323232 r12 >>= 19
1285# asm 1: psrld $19,<r12=int6464#3
1286# asm 2: psrld $19,<r12=%xmm2
1287psrld $19,%xmm2
1288
1289# qhasm: z12 ^= r12
1290# asm 1: pxor <r12=int6464#3,<z12=int6464#14
1291# asm 2: pxor <r12=%xmm2,<z12=%xmm13
1292pxor %xmm2,%xmm13
1293
1294# qhasm: y1 = z9
1295# asm 1: movdqa <z9=int6464#12,>y1=int6464#2
1296# asm 2: movdqa <z9=%xmm11,>y1=%xmm1
1297movdqa %xmm11,%xmm1
1298
1299# qhasm: uint32323232 y1 += z13
1300# asm 1: paddd <z13=int6464#10,<y1=int6464#2
1301# asm 2: paddd <z13=%xmm9,<y1=%xmm1
1302paddd %xmm9,%xmm1
1303
1304# qhasm: r1 = y1
1305# asm 1: movdqa <y1=int6464#2,>r1=int6464#3
1306# asm 2: movdqa <y1=%xmm1,>r1=%xmm2
1307movdqa %xmm1,%xmm2
1308
1309# qhasm: uint32323232 y1 <<= 13
1310# asm 1: pslld $13,<y1=int6464#2
1311# asm 2: pslld $13,<y1=%xmm1
1312pslld $13,%xmm1
1313
1314# qhasm: z1 ^= y1
1315# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1316# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1317pxor %xmm1,%xmm7
1318
1319# qhasm: uint32323232 r1 >>= 19
1320# asm 1: psrld $19,<r1=int6464#3
1321# asm 2: psrld $19,<r1=%xmm2
1322psrld $19,%xmm2
1323
1324# qhasm: z1 ^= r1
1325# asm 1: pxor <r1=int6464#3,<z1=int6464#8
1326# asm 2: pxor <r1=%xmm2,<z1=%xmm7
1327pxor %xmm2,%xmm7
1328
1329# qhasm: y0 = z8
1330# asm 1: movdqa <z8=int6464#16,>y0=int6464#2
1331# asm 2: movdqa <z8=%xmm15,>y0=%xmm1
1332movdqa %xmm15,%xmm1
1333
1334# qhasm: uint32323232 y0 += z12
1335# asm 1: paddd <z12=int6464#14,<y0=int6464#2
1336# asm 2: paddd <z12=%xmm13,<y0=%xmm1
1337paddd %xmm13,%xmm1
1338
1339# qhasm: r0 = y0
1340# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1341# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1342movdqa %xmm1,%xmm2
1343
1344# qhasm: uint32323232 y0 <<= 18
1345# asm 1: pslld $18,<y0=int6464#2
1346# asm 2: pslld $18,<y0=%xmm1
1347pslld $18,%xmm1
1348
1349# qhasm: z0 ^= y0
1350# asm 1: pxor <y0=int6464#2,<z0=int6464#13
1351# asm 2: pxor <y0=%xmm1,<z0=%xmm12
1352pxor %xmm1,%xmm12
1353
1354# qhasm: uint32323232 r0 >>= 14
1355# asm 1: psrld $14,<r0=int6464#3
1356# asm 2: psrld $14,<r0=%xmm2
1357psrld $14,%xmm2
1358
1359# qhasm: z0 ^= r0
1360# asm 1: pxor <r0=int6464#3,<z0=int6464#13
1361# asm 2: pxor <r0=%xmm2,<z0=%xmm12
1362pxor %xmm2,%xmm12
1363
1364# qhasm: z10 = z10_stack
1365# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1366# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1367movdqa 320(%rsp),%xmm1
1368
1369# qhasm: z0_stack = z0
1370# asm 1: movdqa <z0=int6464#13,>z0_stack=stack128#21
1371# asm 2: movdqa <z0=%xmm12,>z0_stack=320(%rsp)
1372movdqa %xmm12,320(%rsp)
1373
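# note: the 16 state words plus the y/r temporaries do not all fit in the 16
# xmm registers, so a few words (z0, z5, z10, z15 in this stretch) live in
# stack slots; the movdqa pair above reloads one spilled word and spills the
# word that has just been updated.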
1374# qhasm: y5 = z13
1375# asm 1: movdqa <z13=int6464#10,>y5=int6464#3
1376# asm 2: movdqa <z13=%xmm9,>y5=%xmm2
1377movdqa %xmm9,%xmm2
1378
1379# qhasm: uint32323232 y5 += z1
1380# asm 1: paddd <z1=int6464#8,<y5=int6464#3
1381# asm 2: paddd <z1=%xmm7,<y5=%xmm2
1382paddd %xmm7,%xmm2
1383
1384# qhasm: r5 = y5
1385# asm 1: movdqa <y5=int6464#3,>r5=int6464#13
1386# asm 2: movdqa <y5=%xmm2,>r5=%xmm12
1387movdqa %xmm2,%xmm12
1388
1389# qhasm: uint32323232 y5 <<= 18
1390# asm 1: pslld $18,<y5=int6464#3
1391# asm 2: pslld $18,<y5=%xmm2
1392pslld $18,%xmm2
1393
1394# qhasm: z5 ^= y5
1395# asm 1: pxor <y5=int6464#3,<z5=int6464#1
1396# asm 2: pxor <y5=%xmm2,<z5=%xmm0
1397pxor %xmm2,%xmm0
1398
1399# qhasm: uint32323232 r5 >>= 14
1400# asm 1: psrld $14,<r5=int6464#13
1401# asm 2: psrld $14,<r5=%xmm12
1402psrld $14,%xmm12
1403
1404# qhasm: z5 ^= r5
1405# asm 1: pxor <r5=int6464#13,<z5=int6464#1
1406# asm 2: pxor <r5=%xmm12,<z5=%xmm0
1407pxor %xmm12,%xmm0
1408
1409# qhasm: y14 = z6
1410# asm 1: movdqa <z6=int6464#6,>y14=int6464#3
1411# asm 2: movdqa <z6=%xmm5,>y14=%xmm2
1412movdqa %xmm5,%xmm2
1413
1414# qhasm: uint32323232 y14 += z10
1415# asm 1: paddd <z10=int6464#2,<y14=int6464#3
1416# asm 2: paddd <z10=%xmm1,<y14=%xmm2
1417paddd %xmm1,%xmm2
1418
1419# qhasm: r14 = y14
1420# asm 1: movdqa <y14=int6464#3,>r14=int6464#13
1421# asm 2: movdqa <y14=%xmm2,>r14=%xmm12
1422movdqa %xmm2,%xmm12
1423
1424# qhasm: uint32323232 y14 <<= 7
1425# asm 1: pslld $7,<y14=int6464#3
1426# asm 2: pslld $7,<y14=%xmm2
1427pslld $7,%xmm2
1428
1429# qhasm: z14 ^= y14
1430# asm 1: pxor <y14=int6464#3,<z14=int6464#4
1431# asm 2: pxor <y14=%xmm2,<z14=%xmm3
1432pxor %xmm2,%xmm3
1433
1434# qhasm: uint32323232 r14 >>= 25
1435# asm 1: psrld $25,<r14=int6464#13
1436# asm 2: psrld $25,<r14=%xmm12
1437psrld $25,%xmm12
1438
1439# qhasm: z14 ^= r14
1440# asm 1: pxor <r14=int6464#13,<z14=int6464#4
1441# asm 2: pxor <r14=%xmm12,<z14=%xmm3
1442pxor %xmm12,%xmm3
1443
1444# qhasm: z15 = z15_stack
1445# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
1446# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
1447movdqa 336(%rsp),%xmm2
1448
1449# qhasm: z5_stack = z5
1450# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#22
1451# asm 2: movdqa <z5=%xmm0,>z5_stack=336(%rsp)
1452movdqa %xmm0,336(%rsp)
1453
1454# qhasm: y3 = z11
1455# asm 1: movdqa <z11=int6464#7,>y3=int6464#1
1456# asm 2: movdqa <z11=%xmm6,>y3=%xmm0
1457movdqa %xmm6,%xmm0
1458
1459# qhasm: uint32323232 y3 += z15
1460# asm 1: paddd <z15=int6464#3,<y3=int6464#1
1461# asm 2: paddd <z15=%xmm2,<y3=%xmm0
1462paddd %xmm2,%xmm0
1463
1464# qhasm: r3 = y3
1465# asm 1: movdqa <y3=int6464#1,>r3=int6464#13
1466# asm 2: movdqa <y3=%xmm0,>r3=%xmm12
1467movdqa %xmm0,%xmm12
1468
1469# qhasm: uint32323232 y3 <<= 7
1470# asm 1: pslld $7,<y3=int6464#1
1471# asm 2: pslld $7,<y3=%xmm0
1472pslld $7,%xmm0
1473
1474# qhasm: z3 ^= y3
1475# asm 1: pxor <y3=int6464#1,<z3=int6464#5
1476# asm 2: pxor <y3=%xmm0,<z3=%xmm4
1477pxor %xmm0,%xmm4
1478
1479# qhasm: uint32323232 r3 >>= 25
1480# asm 1: psrld $25,<r3=int6464#13
1481# asm 2: psrld $25,<r3=%xmm12
1482psrld $25,%xmm12
1483
1484# qhasm: z3 ^= r3
1485# asm 1: pxor <r3=int6464#13,<z3=int6464#5
1486# asm 2: pxor <r3=%xmm12,<z3=%xmm4
1487pxor %xmm12,%xmm4
1488
1489# qhasm: y2 = z10
1490# asm 1: movdqa <z10=int6464#2,>y2=int6464#1
1491# asm 2: movdqa <z10=%xmm1,>y2=%xmm0
1492movdqa %xmm1,%xmm0
1493
1494# qhasm: uint32323232 y2 += z14
1495# asm 1: paddd <z14=int6464#4,<y2=int6464#1
1496# asm 2: paddd <z14=%xmm3,<y2=%xmm0
1497paddd %xmm3,%xmm0
1498
1499# qhasm: r2 = y2
1500# asm 1: movdqa <y2=int6464#1,>r2=int6464#13
1501# asm 2: movdqa <y2=%xmm0,>r2=%xmm12
1502movdqa %xmm0,%xmm12
1503
1504# qhasm: uint32323232 y2 <<= 9
1505# asm 1: pslld $9,<y2=int6464#1
1506# asm 2: pslld $9,<y2=%xmm0
1507pslld $9,%xmm0
1508
1509# qhasm: z2 ^= y2
1510# asm 1: pxor <y2=int6464#1,<z2=int6464#11
1511# asm 2: pxor <y2=%xmm0,<z2=%xmm10
1512pxor %xmm0,%xmm10
1513
1514# qhasm: uint32323232 r2 >>= 23
1515# asm 1: psrld $23,<r2=int6464#13
1516# asm 2: psrld $23,<r2=%xmm12
1517psrld $23,%xmm12
1518
1519# qhasm: z2 ^= r2
1520# asm 1: pxor <r2=int6464#13,<z2=int6464#11
1521# asm 2: pxor <r2=%xmm12,<z2=%xmm10
1522pxor %xmm12,%xmm10
1523
1524# qhasm: y7 = z15
1525# asm 1: movdqa <z15=int6464#3,>y7=int6464#1
1526# asm 2: movdqa <z15=%xmm2,>y7=%xmm0
1527movdqa %xmm2,%xmm0
1528
1529# qhasm: uint32323232 y7 += z3
1530# asm 1: paddd <z3=int6464#5,<y7=int6464#1
1531# asm 2: paddd <z3=%xmm4,<y7=%xmm0
1532paddd %xmm4,%xmm0
1533
1534# qhasm: r7 = y7
1535# asm 1: movdqa <y7=int6464#1,>r7=int6464#13
1536# asm 2: movdqa <y7=%xmm0,>r7=%xmm12
1537movdqa %xmm0,%xmm12
1538
1539# qhasm: uint32323232 y7 <<= 9
1540# asm 1: pslld $9,<y7=int6464#1
1541# asm 2: pslld $9,<y7=%xmm0
1542pslld $9,%xmm0
1543
1544# qhasm: z7 ^= y7
1545# asm 1: pxor <y7=int6464#1,<z7=int6464#9
1546# asm 2: pxor <y7=%xmm0,<z7=%xmm8
1547pxor %xmm0,%xmm8
1548
1549# qhasm: uint32323232 r7 >>= 23
1550# asm 1: psrld $23,<r7=int6464#13
1551# asm 2: psrld $23,<r7=%xmm12
1552psrld $23,%xmm12
1553
1554# qhasm: z7 ^= r7
1555# asm 1: pxor <r7=int6464#13,<z7=int6464#9
1556# asm 2: pxor <r7=%xmm12,<z7=%xmm8
1557pxor %xmm12,%xmm8
1558
1559# qhasm: y6 = z14
1560# asm 1: movdqa <z14=int6464#4,>y6=int6464#1
1561# asm 2: movdqa <z14=%xmm3,>y6=%xmm0
1562movdqa %xmm3,%xmm0
1563
1564# qhasm: uint32323232 y6 += z2
1565# asm 1: paddd <z2=int6464#11,<y6=int6464#1
1566# asm 2: paddd <z2=%xmm10,<y6=%xmm0
1567paddd %xmm10,%xmm0
1568
1569# qhasm: r6 = y6
1570# asm 1: movdqa <y6=int6464#1,>r6=int6464#13
1571# asm 2: movdqa <y6=%xmm0,>r6=%xmm12
1572movdqa %xmm0,%xmm12
1573
1574# qhasm: uint32323232 y6 <<= 13
1575# asm 1: pslld $13,<y6=int6464#1
1576# asm 2: pslld $13,<y6=%xmm0
1577pslld $13,%xmm0
1578
1579# qhasm: z6 ^= y6
1580# asm 1: pxor <y6=int6464#1,<z6=int6464#6
1581# asm 2: pxor <y6=%xmm0,<z6=%xmm5
1582pxor %xmm0,%xmm5
1583
1584# qhasm: uint32323232 r6 >>= 19
1585# asm 1: psrld $19,<r6=int6464#13
1586# asm 2: psrld $19,<r6=%xmm12
1587psrld $19,%xmm12
1588
1589# qhasm: z6 ^= r6
1590# asm 1: pxor <r6=int6464#13,<z6=int6464#6
1591# asm 2: pxor <r6=%xmm12,<z6=%xmm5
1592pxor %xmm12,%xmm5
1593
1594# qhasm: y11 = z3
1595# asm 1: movdqa <z3=int6464#5,>y11=int6464#1
1596# asm 2: movdqa <z3=%xmm4,>y11=%xmm0
1597movdqa %xmm4,%xmm0
1598
1599# qhasm: uint32323232 y11 += z7
1600# asm 1: paddd <z7=int6464#9,<y11=int6464#1
1601# asm 2: paddd <z7=%xmm8,<y11=%xmm0
1602paddd %xmm8,%xmm0
1603
1604# qhasm: r11 = y11
1605# asm 1: movdqa <y11=int6464#1,>r11=int6464#13
1606# asm 2: movdqa <y11=%xmm0,>r11=%xmm12
1607movdqa %xmm0,%xmm12
1608
1609# qhasm: uint32323232 y11 <<= 13
1610# asm 1: pslld $13,<y11=int6464#1
1611# asm 2: pslld $13,<y11=%xmm0
1612pslld $13,%xmm0
1613
1614# qhasm: z11 ^= y11
1615# asm 1: pxor <y11=int6464#1,<z11=int6464#7
1616# asm 2: pxor <y11=%xmm0,<z11=%xmm6
1617pxor %xmm0,%xmm6
1618
1619# qhasm: uint32323232 r11 >>= 19
1620# asm 1: psrld $19,<r11=int6464#13
1621# asm 2: psrld $19,<r11=%xmm12
1622psrld $19,%xmm12
1623
1624# qhasm: z11 ^= r11
1625# asm 1: pxor <r11=int6464#13,<z11=int6464#7
1626# asm 2: pxor <r11=%xmm12,<z11=%xmm6
1627pxor %xmm12,%xmm6
1628
1629# qhasm: y10 = z2
1630# asm 1: movdqa <z2=int6464#11,>y10=int6464#1
1631# asm 2: movdqa <z2=%xmm10,>y10=%xmm0
1632movdqa %xmm10,%xmm0
1633
1634# qhasm: uint32323232 y10 += z6
1635# asm 1: paddd <z6=int6464#6,<y10=int6464#1
1636# asm 2: paddd <z6=%xmm5,<y10=%xmm0
1637paddd %xmm5,%xmm0
1638
1639# qhasm: r10 = y10
1640# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
1641# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
1642movdqa %xmm0,%xmm12
1643
1644# qhasm: uint32323232 y10 <<= 18
1645# asm 1: pslld $18,<y10=int6464#1
1646# asm 2: pslld $18,<y10=%xmm0
1647pslld $18,%xmm0
1648
1649# qhasm: z10 ^= y10
1650# asm 1: pxor <y10=int6464#1,<z10=int6464#2
1651# asm 2: pxor <y10=%xmm0,<z10=%xmm1
1652pxor %xmm0,%xmm1
1653
1654# qhasm: uint32323232 r10 >>= 14
1655# asm 1: psrld $14,<r10=int6464#13
1656# asm 2: psrld $14,<r10=%xmm12
1657psrld $14,%xmm12
1658
1659# qhasm: z10 ^= r10
1660# asm 1: pxor <r10=int6464#13,<z10=int6464#2
1661# asm 2: pxor <r10=%xmm12,<z10=%xmm1
1662pxor %xmm12,%xmm1
1663
1664# qhasm: z0 = z0_stack
1665# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#1
1666# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm0
1667movdqa 320(%rsp),%xmm0
1668
1669# qhasm: z10_stack = z10
1670# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1671# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1672movdqa %xmm1,320(%rsp)
1673
1674# qhasm: y1 = z3
1675# asm 1: movdqa <z3=int6464#5,>y1=int6464#2
1676# asm 2: movdqa <z3=%xmm4,>y1=%xmm1
1677movdqa %xmm4,%xmm1
1678
1679# qhasm: uint32323232 y1 += z0
1680# asm 1: paddd <z0=int6464#1,<y1=int6464#2
1681# asm 2: paddd <z0=%xmm0,<y1=%xmm1
1682paddd %xmm0,%xmm1
1683
1684# qhasm: r1 = y1
1685# asm 1: movdqa <y1=int6464#2,>r1=int6464#13
1686# asm 2: movdqa <y1=%xmm1,>r1=%xmm12
1687movdqa %xmm1,%xmm12
1688
1689# qhasm: uint32323232 y1 <<= 7
1690# asm 1: pslld $7,<y1=int6464#2
1691# asm 2: pslld $7,<y1=%xmm1
1692pslld $7,%xmm1
1693
1694# qhasm: z1 ^= y1
1695# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1696# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1697pxor %xmm1,%xmm7
1698
1699# qhasm: uint32323232 r1 >>= 25
1700# asm 1: psrld $25,<r1=int6464#13
1701# asm 2: psrld $25,<r1=%xmm12
1702psrld $25,%xmm12
1703
1704# qhasm: z1 ^= r1
1705# asm 1: pxor <r1=int6464#13,<z1=int6464#8
1706# asm 2: pxor <r1=%xmm12,<z1=%xmm7
1707pxor %xmm12,%xmm7
1708
1709# qhasm: y15 = z7
1710# asm 1: movdqa <z7=int6464#9,>y15=int6464#2
1711# asm 2: movdqa <z7=%xmm8,>y15=%xmm1
1712movdqa %xmm8,%xmm1
1713
1714# qhasm: uint32323232 y15 += z11
1715# asm 1: paddd <z11=int6464#7,<y15=int6464#2
1716# asm 2: paddd <z11=%xmm6,<y15=%xmm1
1717paddd %xmm6,%xmm1
1718
1719# qhasm: r15 = y15
1720# asm 1: movdqa <y15=int6464#2,>r15=int6464#13
1721# asm 2: movdqa <y15=%xmm1,>r15=%xmm12
1722movdqa %xmm1,%xmm12
1723
1724# qhasm: uint32323232 y15 <<= 18
1725# asm 1: pslld $18,<y15=int6464#2
1726# asm 2: pslld $18,<y15=%xmm1
1727pslld $18,%xmm1
1728
1729# qhasm: z15 ^= y15
1730# asm 1: pxor <y15=int6464#2,<z15=int6464#3
1731# asm 2: pxor <y15=%xmm1,<z15=%xmm2
1732pxor %xmm1,%xmm2
1733
1734# qhasm: uint32323232 r15 >>= 14
1735# asm 1: psrld $14,<r15=int6464#13
1736# asm 2: psrld $14,<r15=%xmm12
1737psrld $14,%xmm12
1738
1739# qhasm: z15 ^= r15
1740# asm 1: pxor <r15=int6464#13,<z15=int6464#3
1741# asm 2: pxor <r15=%xmm12,<z15=%xmm2
1742pxor %xmm12,%xmm2
1743
1744# qhasm: z5 = z5_stack
1745# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#13
1746# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm12
1747movdqa 336(%rsp),%xmm12
1748
1749# qhasm: z15_stack = z15
1750# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1751# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1752movdqa %xmm2,336(%rsp)
1753
1754# qhasm: y6 = z4
1755# asm 1: movdqa <z4=int6464#15,>y6=int6464#2
1756# asm 2: movdqa <z4=%xmm14,>y6=%xmm1
1757movdqa %xmm14,%xmm1
1758
1759# qhasm: uint32323232 y6 += z5
1760# asm 1: paddd <z5=int6464#13,<y6=int6464#2
1761# asm 2: paddd <z5=%xmm12,<y6=%xmm1
1762paddd %xmm12,%xmm1
1763
1764# qhasm: r6 = y6
1765# asm 1: movdqa <y6=int6464#2,>r6=int6464#3
1766# asm 2: movdqa <y6=%xmm1,>r6=%xmm2
1767movdqa %xmm1,%xmm2
1768
1769# qhasm: uint32323232 y6 <<= 7
1770# asm 1: pslld $7,<y6=int6464#2
1771# asm 2: pslld $7,<y6=%xmm1
1772pslld $7,%xmm1
1773
1774# qhasm: z6 ^= y6
1775# asm 1: pxor <y6=int6464#2,<z6=int6464#6
1776# asm 2: pxor <y6=%xmm1,<z6=%xmm5
1777pxor %xmm1,%xmm5
1778
1779# qhasm: uint32323232 r6 >>= 25
1780# asm 1: psrld $25,<r6=int6464#3
1781# asm 2: psrld $25,<r6=%xmm2
1782psrld $25,%xmm2
1783
1784# qhasm: z6 ^= r6
1785# asm 1: pxor <r6=int6464#3,<z6=int6464#6
1786# asm 2: pxor <r6=%xmm2,<z6=%xmm5
1787pxor %xmm2,%xmm5
1788
1789# qhasm: y2 = z0
1790# asm 1: movdqa <z0=int6464#1,>y2=int6464#2
1791# asm 2: movdqa <z0=%xmm0,>y2=%xmm1
1792movdqa %xmm0,%xmm1
1793
1794# qhasm: uint32323232 y2 += z1
1795# asm 1: paddd <z1=int6464#8,<y2=int6464#2
1796# asm 2: paddd <z1=%xmm7,<y2=%xmm1
1797paddd %xmm7,%xmm1
1798
1799# qhasm: r2 = y2
1800# asm 1: movdqa <y2=int6464#2,>r2=int6464#3
1801# asm 2: movdqa <y2=%xmm1,>r2=%xmm2
1802movdqa %xmm1,%xmm2
1803
1804# qhasm: uint32323232 y2 <<= 9
1805# asm 1: pslld $9,<y2=int6464#2
1806# asm 2: pslld $9,<y2=%xmm1
1807pslld $9,%xmm1
1808
1809# qhasm: z2 ^= y2
1810# asm 1: pxor <y2=int6464#2,<z2=int6464#11
1811# asm 2: pxor <y2=%xmm1,<z2=%xmm10
1812pxor %xmm1,%xmm10
1813
1814# qhasm: uint32323232 r2 >>= 23
1815# asm 1: psrld $23,<r2=int6464#3
1816# asm 2: psrld $23,<r2=%xmm2
1817psrld $23,%xmm2
1818
1819# qhasm: z2 ^= r2
1820# asm 1: pxor <r2=int6464#3,<z2=int6464#11
1821# asm 2: pxor <r2=%xmm2,<z2=%xmm10
1822pxor %xmm2,%xmm10
1823
1824# qhasm: y7 = z5
1825# asm 1: movdqa <z5=int6464#13,>y7=int6464#2
1826# asm 2: movdqa <z5=%xmm12,>y7=%xmm1
1827movdqa %xmm12,%xmm1
1828
1829# qhasm: uint32323232 y7 += z6
1830# asm 1: paddd <z6=int6464#6,<y7=int6464#2
1831# asm 2: paddd <z6=%xmm5,<y7=%xmm1
1832paddd %xmm5,%xmm1
1833
1834# qhasm: r7 = y7
1835# asm 1: movdqa <y7=int6464#2,>r7=int6464#3
1836# asm 2: movdqa <y7=%xmm1,>r7=%xmm2
1837movdqa %xmm1,%xmm2
1838
1839# qhasm: uint32323232 y7 <<= 9
1840# asm 1: pslld $9,<y7=int6464#2
1841# asm 2: pslld $9,<y7=%xmm1
1842pslld $9,%xmm1
1843
1844# qhasm: z7 ^= y7
1845# asm 1: pxor <y7=int6464#2,<z7=int6464#9
1846# asm 2: pxor <y7=%xmm1,<z7=%xmm8
1847pxor %xmm1,%xmm8
1848
1849# qhasm: uint32323232 r7 >>= 23
1850# asm 1: psrld $23,<r7=int6464#3
1851# asm 2: psrld $23,<r7=%xmm2
1852psrld $23,%xmm2
1853
1854# qhasm: z7 ^= r7
1855# asm 1: pxor <r7=int6464#3,<z7=int6464#9
1856# asm 2: pxor <r7=%xmm2,<z7=%xmm8
1857pxor %xmm2,%xmm8
1858
1859# qhasm: y3 = z1
1860# asm 1: movdqa <z1=int6464#8,>y3=int6464#2
1861# asm 2: movdqa <z1=%xmm7,>y3=%xmm1
1862movdqa %xmm7,%xmm1
1863
1864# qhasm: uint32323232 y3 += z2
1865# asm 1: paddd <z2=int6464#11,<y3=int6464#2
1866# asm 2: paddd <z2=%xmm10,<y3=%xmm1
1867paddd %xmm10,%xmm1
1868
1869# qhasm: r3 = y3
1870# asm 1: movdqa <y3=int6464#2,>r3=int6464#3
1871# asm 2: movdqa <y3=%xmm1,>r3=%xmm2
1872movdqa %xmm1,%xmm2
1873
1874# qhasm: uint32323232 y3 <<= 13
1875# asm 1: pslld $13,<y3=int6464#2
1876# asm 2: pslld $13,<y3=%xmm1
1877pslld $13,%xmm1
1878
1879# qhasm: z3 ^= y3
1880# asm 1: pxor <y3=int6464#2,<z3=int6464#5
1881# asm 2: pxor <y3=%xmm1,<z3=%xmm4
1882pxor %xmm1,%xmm4
1883
1884# qhasm: uint32323232 r3 >>= 19
1885# asm 1: psrld $19,<r3=int6464#3
1886# asm 2: psrld $19,<r3=%xmm2
1887psrld $19,%xmm2
1888
1889# qhasm: z3 ^= r3
1890# asm 1: pxor <r3=int6464#3,<z3=int6464#5
1891# asm 2: pxor <r3=%xmm2,<z3=%xmm4
1892pxor %xmm2,%xmm4
1893
1894# qhasm: y4 = z6
1895# asm 1: movdqa <z6=int6464#6,>y4=int6464#2
1896# asm 2: movdqa <z6=%xmm5,>y4=%xmm1
1897movdqa %xmm5,%xmm1
1898
1899# qhasm: uint32323232 y4 += z7
1900# asm 1: paddd <z7=int6464#9,<y4=int6464#2
1901# asm 2: paddd <z7=%xmm8,<y4=%xmm1
1902paddd %xmm8,%xmm1
1903
1904# qhasm: r4 = y4
1905# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1906# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1907movdqa %xmm1,%xmm2
1908
1909# qhasm: uint32323232 y4 <<= 13
1910# asm 1: pslld $13,<y4=int6464#2
1911# asm 2: pslld $13,<y4=%xmm1
1912pslld $13,%xmm1
1913
1914# qhasm: z4 ^= y4
1915# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1916# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1917pxor %xmm1,%xmm14
1918
1919# qhasm: uint32323232 r4 >>= 19
1920# asm 1: psrld $19,<r4=int6464#3
1921# asm 2: psrld $19,<r4=%xmm2
1922psrld $19,%xmm2
1923
1924# qhasm: z4 ^= r4
1925# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1926# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1927pxor %xmm2,%xmm14
1928
1929# qhasm: y0 = z2
1930# asm 1: movdqa <z2=int6464#11,>y0=int6464#2
1931# asm 2: movdqa <z2=%xmm10,>y0=%xmm1
1932movdqa %xmm10,%xmm1
1933
1934# qhasm: uint32323232 y0 += z3
1935# asm 1: paddd <z3=int6464#5,<y0=int6464#2
1936# asm 2: paddd <z3=%xmm4,<y0=%xmm1
1937paddd %xmm4,%xmm1
1938
1939# qhasm: r0 = y0
1940# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1941# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1942movdqa %xmm1,%xmm2
1943
1944# qhasm: uint32323232 y0 <<= 18
1945# asm 1: pslld $18,<y0=int6464#2
1946# asm 2: pslld $18,<y0=%xmm1
1947pslld $18,%xmm1
1948
1949# qhasm: z0 ^= y0
1950# asm 1: pxor <y0=int6464#2,<z0=int6464#1
1951# asm 2: pxor <y0=%xmm1,<z0=%xmm0
1952pxor %xmm1,%xmm0
1953
1954# qhasm: uint32323232 r0 >>= 14
1955# asm 1: psrld $14,<r0=int6464#3
1956# asm 2: psrld $14,<r0=%xmm2
1957psrld $14,%xmm2
1958
1959# qhasm: z0 ^= r0
1960# asm 1: pxor <r0=int6464#3,<z0=int6464#1
1961# asm 2: pxor <r0=%xmm2,<z0=%xmm0
1962pxor %xmm2,%xmm0
1963
1964# qhasm: z10 = z10_stack
1965# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1966# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1967movdqa 320(%rsp),%xmm1
1968
1969# qhasm: z0_stack = z0
1970# asm 1: movdqa <z0=int6464#1,>z0_stack=stack128#21
1971# asm 2: movdqa <z0=%xmm0,>z0_stack=320(%rsp)
1972movdqa %xmm0,320(%rsp)
1973
1974# qhasm: y5 = z7
1975# asm 1: movdqa <z7=int6464#9,>y5=int6464#1
1976# asm 2: movdqa <z7=%xmm8,>y5=%xmm0
1977movdqa %xmm8,%xmm0
1978
1979# qhasm: uint32323232 y5 += z4
1980# asm 1: paddd <z4=int6464#15,<y5=int6464#1
1981# asm 2: paddd <z4=%xmm14,<y5=%xmm0
1982paddd %xmm14,%xmm0
1983
1984# qhasm: r5 = y5
1985# asm 1: movdqa <y5=int6464#1,>r5=int6464#3
1986# asm 2: movdqa <y5=%xmm0,>r5=%xmm2
1987movdqa %xmm0,%xmm2
1988
1989# qhasm: uint32323232 y5 <<= 18
1990# asm 1: pslld $18,<y5=int6464#1
1991# asm 2: pslld $18,<y5=%xmm0
1992pslld $18,%xmm0
1993
1994# qhasm: z5 ^= y5
1995# asm 1: pxor <y5=int6464#1,<z5=int6464#13
1996# asm 2: pxor <y5=%xmm0,<z5=%xmm12
1997pxor %xmm0,%xmm12
1998
1999# qhasm: uint32323232 r5 >>= 14
2000# asm 1: psrld $14,<r5=int6464#3
2001# asm 2: psrld $14,<r5=%xmm2
2002psrld $14,%xmm2
2003
2004# qhasm: z5 ^= r5
2005# asm 1: pxor <r5=int6464#3,<z5=int6464#13
2006# asm 2: pxor <r5=%xmm2,<z5=%xmm12
2007pxor %xmm2,%xmm12
2008
2009# qhasm: y11 = z9
2010# asm 1: movdqa <z9=int6464#12,>y11=int6464#1
2011# asm 2: movdqa <z9=%xmm11,>y11=%xmm0
2012movdqa %xmm11,%xmm0
2013
2014# qhasm: uint32323232 y11 += z10
2015# asm 1: paddd <z10=int6464#2,<y11=int6464#1
2016# asm 2: paddd <z10=%xmm1,<y11=%xmm0
2017paddd %xmm1,%xmm0
2018
2019# qhasm: r11 = y11
2020# asm 1: movdqa <y11=int6464#1,>r11=int6464#3
2021# asm 2: movdqa <y11=%xmm0,>r11=%xmm2
2022movdqa %xmm0,%xmm2
2023
2024# qhasm: uint32323232 y11 <<= 7
2025# asm 1: pslld $7,<y11=int6464#1
2026# asm 2: pslld $7,<y11=%xmm0
2027pslld $7,%xmm0
2028
2029# qhasm: z11 ^= y11
2030# asm 1: pxor <y11=int6464#1,<z11=int6464#7
2031# asm 2: pxor <y11=%xmm0,<z11=%xmm6
2032pxor %xmm0,%xmm6
2033
2034# qhasm: uint32323232 r11 >>= 25
2035# asm 1: psrld $25,<r11=int6464#3
2036# asm 2: psrld $25,<r11=%xmm2
2037psrld $25,%xmm2
2038
2039# qhasm: z11 ^= r11
2040# asm 1: pxor <r11=int6464#3,<z11=int6464#7
2041# asm 2: pxor <r11=%xmm2,<z11=%xmm6
2042pxor %xmm2,%xmm6
2043
2044# qhasm: z15 = z15_stack
2045# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
2046# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
2047movdqa 336(%rsp),%xmm2
2048
2049# qhasm: z5_stack = z5
2050# asm 1: movdqa <z5=int6464#13,>z5_stack=stack128#22
2051# asm 2: movdqa <z5=%xmm12,>z5_stack=336(%rsp)
2052movdqa %xmm12,336(%rsp)
2053
2054# qhasm: y12 = z14
2055# asm 1: movdqa <z14=int6464#4,>y12=int6464#1
2056# asm 2: movdqa <z14=%xmm3,>y12=%xmm0
2057movdqa %xmm3,%xmm0
2058
2059# qhasm: uint32323232 y12 += z15
2060# asm 1: paddd <z15=int6464#3,<y12=int6464#1
2061# asm 2: paddd <z15=%xmm2,<y12=%xmm0
2062paddd %xmm2,%xmm0
2063
2064# qhasm: r12 = y12
2065# asm 1: movdqa <y12=int6464#1,>r12=int6464#13
2066# asm 2: movdqa <y12=%xmm0,>r12=%xmm12
2067movdqa %xmm0,%xmm12
2068
2069# qhasm: uint32323232 y12 <<= 7
2070# asm 1: pslld $7,<y12=int6464#1
2071# asm 2: pslld $7,<y12=%xmm0
2072pslld $7,%xmm0
2073
2074# qhasm: z12 ^= y12
2075# asm 1: pxor <y12=int6464#1,<z12=int6464#14
2076# asm 2: pxor <y12=%xmm0,<z12=%xmm13
2077pxor %xmm0,%xmm13
2078
2079# qhasm: uint32323232 r12 >>= 25
2080# asm 1: psrld $25,<r12=int6464#13
2081# asm 2: psrld $25,<r12=%xmm12
2082psrld $25,%xmm12
2083
2084# qhasm: z12 ^= r12
2085# asm 1: pxor <r12=int6464#13,<z12=int6464#14
2086# asm 2: pxor <r12=%xmm12,<z12=%xmm13
2087pxor %xmm12,%xmm13
2088
2089# qhasm: y8 = z10
2090# asm 1: movdqa <z10=int6464#2,>y8=int6464#1
2091# asm 2: movdqa <z10=%xmm1,>y8=%xmm0
2092movdqa %xmm1,%xmm0
2093
2094# qhasm: uint32323232 y8 += z11
2095# asm 1: paddd <z11=int6464#7,<y8=int6464#1
2096# asm 2: paddd <z11=%xmm6,<y8=%xmm0
2097paddd %xmm6,%xmm0
2098
2099# qhasm: r8 = y8
2100# asm 1: movdqa <y8=int6464#1,>r8=int6464#13
2101# asm 2: movdqa <y8=%xmm0,>r8=%xmm12
2102movdqa %xmm0,%xmm12
2103
2104# qhasm: uint32323232 y8 <<= 9
2105# asm 1: pslld $9,<y8=int6464#1
2106# asm 2: pslld $9,<y8=%xmm0
2107pslld $9,%xmm0
2108
2109# qhasm: z8 ^= y8
2110# asm 1: pxor <y8=int6464#1,<z8=int6464#16
2111# asm 2: pxor <y8=%xmm0,<z8=%xmm15
2112pxor %xmm0,%xmm15
2113
2114# qhasm: uint32323232 r8 >>= 23
2115# asm 1: psrld $23,<r8=int6464#13
2116# asm 2: psrld $23,<r8=%xmm12
2117psrld $23,%xmm12
2118
2119# qhasm: z8 ^= r8
2120# asm 1: pxor <r8=int6464#13,<z8=int6464#16
2121# asm 2: pxor <r8=%xmm12,<z8=%xmm15
2122pxor %xmm12,%xmm15
2123
2124# qhasm: y13 = z15
2125# asm 1: movdqa <z15=int6464#3,>y13=int6464#1
2126# asm 2: movdqa <z15=%xmm2,>y13=%xmm0
2127movdqa %xmm2,%xmm0
2128
2129# qhasm: uint32323232 y13 += z12
2130# asm 1: paddd <z12=int6464#14,<y13=int6464#1
2131# asm 2: paddd <z12=%xmm13,<y13=%xmm0
2132paddd %xmm13,%xmm0
2133
2134# qhasm: r13 = y13
2135# asm 1: movdqa <y13=int6464#1,>r13=int6464#13
2136# asm 2: movdqa <y13=%xmm0,>r13=%xmm12
2137movdqa %xmm0,%xmm12
2138
2139# qhasm: uint32323232 y13 <<= 9
2140# asm 1: pslld $9,<y13=int6464#1
2141# asm 2: pslld $9,<y13=%xmm0
2142pslld $9,%xmm0
2143
2144# qhasm: z13 ^= y13
2145# asm 1: pxor <y13=int6464#1,<z13=int6464#10
2146# asm 2: pxor <y13=%xmm0,<z13=%xmm9
2147pxor %xmm0,%xmm9
2148
2149# qhasm: uint32323232 r13 >>= 23
2150# asm 1: psrld $23,<r13=int6464#13
2151# asm 2: psrld $23,<r13=%xmm12
2152psrld $23,%xmm12
2153
2154# qhasm: z13 ^= r13
2155# asm 1: pxor <r13=int6464#13,<z13=int6464#10
2156# asm 2: pxor <r13=%xmm12,<z13=%xmm9
2157pxor %xmm12,%xmm9
2158
2159# qhasm: y9 = z11
2160# asm 1: movdqa <z11=int6464#7,>y9=int6464#1
2161# asm 2: movdqa <z11=%xmm6,>y9=%xmm0
2162movdqa %xmm6,%xmm0
2163
2164# qhasm: uint32323232 y9 += z8
2165# asm 1: paddd <z8=int6464#16,<y9=int6464#1
2166# asm 2: paddd <z8=%xmm15,<y9=%xmm0
2167paddd %xmm15,%xmm0
2168
2169# qhasm: r9 = y9
2170# asm 1: movdqa <y9=int6464#1,>r9=int6464#13
2171# asm 2: movdqa <y9=%xmm0,>r9=%xmm12
2172movdqa %xmm0,%xmm12
2173
2174# qhasm: uint32323232 y9 <<= 13
2175# asm 1: pslld $13,<y9=int6464#1
2176# asm 2: pslld $13,<y9=%xmm0
2177pslld $13,%xmm0
2178
2179# qhasm: z9 ^= y9
2180# asm 1: pxor <y9=int6464#1,<z9=int6464#12
2181# asm 2: pxor <y9=%xmm0,<z9=%xmm11
2182pxor %xmm0,%xmm11
2183
2184# qhasm: uint32323232 r9 >>= 19
2185# asm 1: psrld $19,<r9=int6464#13
2186# asm 2: psrld $19,<r9=%xmm12
2187psrld $19,%xmm12
2188
2189# qhasm: z9 ^= r9
2190# asm 1: pxor <r9=int6464#13,<z9=int6464#12
2191# asm 2: pxor <r9=%xmm12,<z9=%xmm11
2192pxor %xmm12,%xmm11
2193
2194# qhasm: y14 = z12
2195# asm 1: movdqa <z12=int6464#14,>y14=int6464#1
2196# asm 2: movdqa <z12=%xmm13,>y14=%xmm0
2197movdqa %xmm13,%xmm0
2198
2199# qhasm: uint32323232 y14 += z13
2200# asm 1: paddd <z13=int6464#10,<y14=int6464#1
2201# asm 2: paddd <z13=%xmm9,<y14=%xmm0
2202paddd %xmm9,%xmm0
2203
2204# qhasm: r14 = y14
2205# asm 1: movdqa <y14=int6464#1,>r14=int6464#13
2206# asm 2: movdqa <y14=%xmm0,>r14=%xmm12
2207movdqa %xmm0,%xmm12
2208
2209# qhasm: uint32323232 y14 <<= 13
2210# asm 1: pslld $13,<y14=int6464#1
2211# asm 2: pslld $13,<y14=%xmm0
2212pslld $13,%xmm0
2213
2214# qhasm: z14 ^= y14
2215# asm 1: pxor <y14=int6464#1,<z14=int6464#4
2216# asm 2: pxor <y14=%xmm0,<z14=%xmm3
2217pxor %xmm0,%xmm3
2218
2219# qhasm: uint32323232 r14 >>= 19
2220# asm 1: psrld $19,<r14=int6464#13
2221# asm 2: psrld $19,<r14=%xmm12
2222psrld $19,%xmm12
2223
2224# qhasm: z14 ^= r14
2225# asm 1: pxor <r14=int6464#13,<z14=int6464#4
2226# asm 2: pxor <r14=%xmm12,<z14=%xmm3
2227pxor %xmm12,%xmm3
2228
2229# qhasm: y10 = z8
2230# asm 1: movdqa <z8=int6464#16,>y10=int6464#1
2231# asm 2: movdqa <z8=%xmm15,>y10=%xmm0
2232movdqa %xmm15,%xmm0
2233
2234# qhasm: uint32323232 y10 += z9
2235# asm 1: paddd <z9=int6464#12,<y10=int6464#1
2236# asm 2: paddd <z9=%xmm11,<y10=%xmm0
2237paddd %xmm11,%xmm0
2238
2239# qhasm: r10 = y10
2240# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
2241# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
2242movdqa %xmm0,%xmm12
2243
2244# qhasm: uint32323232 y10 <<= 18
2245# asm 1: pslld $18,<y10=int6464#1
2246# asm 2: pslld $18,<y10=%xmm0
2247pslld $18,%xmm0
2248
2249# qhasm: z10 ^= y10
2250# asm 1: pxor <y10=int6464#1,<z10=int6464#2
2251# asm 2: pxor <y10=%xmm0,<z10=%xmm1
2252pxor %xmm0,%xmm1
2253
2254# qhasm: uint32323232 r10 >>= 14
2255# asm 1: psrld $14,<r10=int6464#13
2256# asm 2: psrld $14,<r10=%xmm12
2257psrld $14,%xmm12
2258
2259# qhasm: z10 ^= r10
2260# asm 1: pxor <r10=int6464#13,<z10=int6464#2
2261# asm 2: pxor <r10=%xmm12,<z10=%xmm1
2262pxor %xmm12,%xmm1
2263
2264# qhasm: y15 = z13
2265# asm 1: movdqa <z13=int6464#10,>y15=int6464#1
2266# asm 2: movdqa <z13=%xmm9,>y15=%xmm0
2267movdqa %xmm9,%xmm0
2268
2269# qhasm: uint32323232 y15 += z14
2270# asm 1: paddd <z14=int6464#4,<y15=int6464#1
2271# asm 2: paddd <z14=%xmm3,<y15=%xmm0
2272paddd %xmm3,%xmm0
2273
2274# qhasm: r15 = y15
2275# asm 1: movdqa <y15=int6464#1,>r15=int6464#13
2276# asm 2: movdqa <y15=%xmm0,>r15=%xmm12
2277movdqa %xmm0,%xmm12
2278
2279# qhasm: uint32323232 y15 <<= 18
2280# asm 1: pslld $18,<y15=int6464#1
2281# asm 2: pslld $18,<y15=%xmm0
2282pslld $18,%xmm0
2283
2284# qhasm: z15 ^= y15
2285# asm 1: pxor <y15=int6464#1,<z15=int6464#3
2286# asm 2: pxor <y15=%xmm0,<z15=%xmm2
2287pxor %xmm0,%xmm2
2288
2289# qhasm: uint32323232 r15 >>= 14
2290# asm 1: psrld $14,<r15=int6464#13
2291# asm 2: psrld $14,<r15=%xmm12
2292psrld $14,%xmm12
2293
2294# qhasm: z15 ^= r15
2295# asm 1: pxor <r15=int6464#13,<z15=int6464#3
2296# asm 2: pxor <r15=%xmm12,<z15=%xmm2
2297pxor %xmm12,%xmm2
2298
2299# qhasm: z0 = z0_stack
2300# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#13
2301# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm12
2302movdqa 320(%rsp),%xmm12
2303
2304# qhasm: z5 = z5_stack
2305# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#1
2306# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm0
2307movdqa 336(%rsp),%xmm0
2308
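# note: each pass through mainloop1 performs two of the cipher's rounds (one
# double round), so the remaining round count i is decreased by 2 and the
# loop is re-entered while the result is still positive.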
2309# qhasm: unsigned>? i -= 2
2310# asm 1: sub $2,<i=int64#3
2311# asm 2: sub $2,<i=%rdx
2312sub $2,%rdx
2313# comment:fp stack unchanged by jump
2314
2315# qhasm: goto mainloop1 if unsigned>
2316ja ._mainloop1
2317
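# note: all rounds are done. What follows is the Salsa20 feedforward and
# output stage: each group of four state words gets the saved input words
# (orig0..orig15 on the stack) added back with paddd, and the resulting
# keystream words are XORed with the message and stored to out.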
2318# qhasm: uint32323232 z0 += orig0
2319# asm 1: paddd <orig0=stack128#8,<z0=int6464#13
2320# asm 2: paddd <orig0=112(%rsp),<z0=%xmm12
2321paddd 112(%rsp),%xmm12
2322
2323# qhasm: uint32323232 z1 += orig1
2324# asm 1: paddd <orig1=stack128#12,<z1=int6464#8
2325# asm 2: paddd <orig1=176(%rsp),<z1=%xmm7
2326paddd 176(%rsp),%xmm7
2327
2328# qhasm: uint32323232 z2 += orig2
2329# asm 1: paddd <orig2=stack128#15,<z2=int6464#11
2330# asm 2: paddd <orig2=224(%rsp),<z2=%xmm10
2331paddd 224(%rsp),%xmm10
2332
2333# qhasm: uint32323232 z3 += orig3
2334# asm 1: paddd <orig3=stack128#18,<z3=int6464#5
2335# asm 2: paddd <orig3=272(%rsp),<z3=%xmm4
2336paddd 272(%rsp),%xmm4
2337
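# note: four blocks are processed in parallel, one block per 32-bit lane of
# each xmm register. movd extracts lane 0 (the word for the first block) and
# pshufd $0x39 rotates the lanes so the next movd yields the word for the
# next block; the m/out offsets therefore advance by 64 bytes per block.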
2338# qhasm: in0 = z0
2339# asm 1: movd <z0=int6464#13,>in0=int64#3
2340# asm 2: movd <z0=%xmm12,>in0=%rdx
2341movd %xmm12,%rdx
2342
2343# qhasm: in1 = z1
2344# asm 1: movd <z1=int6464#8,>in1=int64#4
2345# asm 2: movd <z1=%xmm7,>in1=%rcx
2346movd %xmm7,%rcx
2347
2348# qhasm: in2 = z2
2349# asm 1: movd <z2=int6464#11,>in2=int64#5
2350# asm 2: movd <z2=%xmm10,>in2=%r8
2351movd %xmm10,%r8
2352
2353# qhasm: in3 = z3
2354# asm 1: movd <z3=int6464#5,>in3=int64#6
2355# asm 2: movd <z3=%xmm4,>in3=%r9
2356movd %xmm4,%r9
2357
2358# qhasm: z0 <<<= 96
2359# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2360# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2361pshufd $0x39,%xmm12,%xmm12
2362
2363# qhasm: z1 <<<= 96
2364# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2365# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2366pshufd $0x39,%xmm7,%xmm7
2367
2368# qhasm: z2 <<<= 96
2369# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2370# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2371pshufd $0x39,%xmm10,%xmm10
2372
2373# qhasm: z3 <<<= 96
2374# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2375# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2376pshufd $0x39,%xmm4,%xmm4
2377
2378# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
2379# asm 1: xorl 0(<m=int64#2),<in0=int64#3d
2380# asm 2: xorl 0(<m=%rsi),<in0=%edx
2381xorl 0(%rsi),%edx
2382
2383# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
2384# asm 1: xorl 4(<m=int64#2),<in1=int64#4d
2385# asm 2: xorl 4(<m=%rsi),<in1=%ecx
2386xorl 4(%rsi),%ecx
2387
2388# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
2389# asm 1: xorl 8(<m=int64#2),<in2=int64#5d
2390# asm 2: xorl 8(<m=%rsi),<in2=%r8d
2391xorl 8(%rsi),%r8d
2392
2393# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
2394# asm 1: xorl 12(<m=int64#2),<in3=int64#6d
2395# asm 2: xorl 12(<m=%rsi),<in3=%r9d
2396xorl 12(%rsi),%r9d
2397
2398# qhasm: *(uint32 *) (out + 0) = in0
2399# asm 1: movl <in0=int64#3d,0(<out=int64#1)
2400# asm 2: movl <in0=%edx,0(<out=%rdi)
2401movl %edx,0(%rdi)
2402
2403# qhasm: *(uint32 *) (out + 4) = in1
2404# asm 1: movl <in1=int64#4d,4(<out=int64#1)
2405# asm 2: movl <in1=%ecx,4(<out=%rdi)
2406movl %ecx,4(%rdi)
2407
2408# qhasm: *(uint32 *) (out + 8) = in2
2409# asm 1: movl <in2=int64#5d,8(<out=int64#1)
2410# asm 2: movl <in2=%r8d,8(<out=%rdi)
2411movl %r8d,8(%rdi)
2412
2413# qhasm: *(uint32 *) (out + 12) = in3
2414# asm 1: movl <in3=int64#6d,12(<out=int64#1)
2415# asm 2: movl <in3=%r9d,12(<out=%rdi)
2416movl %r9d,12(%rdi)
2417
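# note: the same z0..z3 words are emitted again below for the second, third
# and fourth block (message/output offsets 64, 128 and 192) before the code
# moves on to z4..z7.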
2418# qhasm: in0 = z0
2419# asm 1: movd <z0=int6464#13,>in0=int64#3
2420# asm 2: movd <z0=%xmm12,>in0=%rdx
2421movd %xmm12,%rdx
2422
2423# qhasm: in1 = z1
2424# asm 1: movd <z1=int6464#8,>in1=int64#4
2425# asm 2: movd <z1=%xmm7,>in1=%rcx
2426movd %xmm7,%rcx
2427
2428# qhasm: in2 = z2
2429# asm 1: movd <z2=int6464#11,>in2=int64#5
2430# asm 2: movd <z2=%xmm10,>in2=%r8
2431movd %xmm10,%r8
2432
2433# qhasm: in3 = z3
2434# asm 1: movd <z3=int6464#5,>in3=int64#6
2435# asm 2: movd <z3=%xmm4,>in3=%r9
2436movd %xmm4,%r9
2437
2438# qhasm: z0 <<<= 96
2439# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2440# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2441pshufd $0x39,%xmm12,%xmm12
2442
2443# qhasm: z1 <<<= 96
2444# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2445# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2446pshufd $0x39,%xmm7,%xmm7
2447
2448# qhasm: z2 <<<= 96
2449# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2450# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2451pshufd $0x39,%xmm10,%xmm10
2452
2453# qhasm: z3 <<<= 96
2454# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2455# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2456pshufd $0x39,%xmm4,%xmm4
2457
2458# qhasm: (uint32) in0 ^= *(uint32 *) (m + 64)
2459# asm 1: xorl 64(<m=int64#2),<in0=int64#3d
2460# asm 2: xorl 64(<m=%rsi),<in0=%edx
2461xorl 64(%rsi),%edx
2462
2463# qhasm: (uint32) in1 ^= *(uint32 *) (m + 68)
2464# asm 1: xorl 68(<m=int64#2),<in1=int64#4d
2465# asm 2: xorl 68(<m=%rsi),<in1=%ecx
2466xorl 68(%rsi),%ecx
2467
2468# qhasm: (uint32) in2 ^= *(uint32 *) (m + 72)
2469# asm 1: xorl 72(<m=int64#2),<in2=int64#5d
2470# asm 2: xorl 72(<m=%rsi),<in2=%r8d
2471xorl 72(%rsi),%r8d
2472
2473# qhasm: (uint32) in3 ^= *(uint32 *) (m + 76)
2474# asm 1: xorl 76(<m=int64#2),<in3=int64#6d
2475# asm 2: xorl 76(<m=%rsi),<in3=%r9d
2476xorl 76(%rsi),%r9d
2477
2478# qhasm: *(uint32 *) (out + 64) = in0
2479# asm 1: movl <in0=int64#3d,64(<out=int64#1)
2480# asm 2: movl <in0=%edx,64(<out=%rdi)
2481movl %edx,64(%rdi)
2482
2483# qhasm: *(uint32 *) (out + 68) = in1
2484# asm 1: movl <in1=int64#4d,68(<out=int64#1)
2485# asm 2: movl <in1=%ecx,68(<out=%rdi)
2486movl %ecx,68(%rdi)
2487
2488# qhasm: *(uint32 *) (out + 72) = in2
2489# asm 1: movl <in2=int64#5d,72(<out=int64#1)
2490# asm 2: movl <in2=%r8d,72(<out=%rdi)
2491movl %r8d,72(%rdi)
2492
2493# qhasm: *(uint32 *) (out + 76) = in3
2494# asm 1: movl <in3=int64#6d,76(<out=int64#1)
2495# asm 2: movl <in3=%r9d,76(<out=%rdi)
2496movl %r9d,76(%rdi)
2497
2498# qhasm: in0 = z0
2499# asm 1: movd <z0=int6464#13,>in0=int64#3
2500# asm 2: movd <z0=%xmm12,>in0=%rdx
2501movd %xmm12,%rdx
2502
2503# qhasm: in1 = z1
2504# asm 1: movd <z1=int6464#8,>in1=int64#4
2505# asm 2: movd <z1=%xmm7,>in1=%rcx
2506movd %xmm7,%rcx
2507
2508# qhasm: in2 = z2
2509# asm 1: movd <z2=int6464#11,>in2=int64#5
2510# asm 2: movd <z2=%xmm10,>in2=%r8
2511movd %xmm10,%r8
2512
2513# qhasm: in3 = z3
2514# asm 1: movd <z3=int6464#5,>in3=int64#6
2515# asm 2: movd <z3=%xmm4,>in3=%r9
2516movd %xmm4,%r9
2517
2518# qhasm: z0 <<<= 96
2519# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2520# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2521pshufd $0x39,%xmm12,%xmm12
2522
2523# qhasm: z1 <<<= 96
2524# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2525# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2526pshufd $0x39,%xmm7,%xmm7
2527
2528# qhasm: z2 <<<= 96
2529# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2530# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2531pshufd $0x39,%xmm10,%xmm10
2532
2533# qhasm: z3 <<<= 96
2534# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2535# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2536pshufd $0x39,%xmm4,%xmm4
2537
2538# qhasm: (uint32) in0 ^= *(uint32 *) (m + 128)
2539# asm 1: xorl 128(<m=int64#2),<in0=int64#3d
2540# asm 2: xorl 128(<m=%rsi),<in0=%edx
2541xorl 128(%rsi),%edx
2542
2543# qhasm: (uint32) in1 ^= *(uint32 *) (m + 132)
2544# asm 1: xorl 132(<m=int64#2),<in1=int64#4d
2545# asm 2: xorl 132(<m=%rsi),<in1=%ecx
2546xorl 132(%rsi),%ecx
2547
2548# qhasm: (uint32) in2 ^= *(uint32 *) (m + 136)
2549# asm 1: xorl 136(<m=int64#2),<in2=int64#5d
2550# asm 2: xorl 136(<m=%rsi),<in2=%r8d
2551xorl 136(%rsi),%r8d
2552
2553# qhasm: (uint32) in3 ^= *(uint32 *) (m + 140)
2554# asm 1: xorl 140(<m=int64#2),<in3=int64#6d
2555# asm 2: xorl 140(<m=%rsi),<in3=%r9d
2556xorl 140(%rsi),%r9d
2557
2558# qhasm: *(uint32 *) (out + 128) = in0
2559# asm 1: movl <in0=int64#3d,128(<out=int64#1)
2560# asm 2: movl <in0=%edx,128(<out=%rdi)
2561movl %edx,128(%rdi)
2562
2563# qhasm: *(uint32 *) (out + 132) = in1
2564# asm 1: movl <in1=int64#4d,132(<out=int64#1)
2565# asm 2: movl <in1=%ecx,132(<out=%rdi)
2566movl %ecx,132(%rdi)
2567
2568# qhasm: *(uint32 *) (out + 136) = in2
2569# asm 1: movl <in2=int64#5d,136(<out=int64#1)
2570# asm 2: movl <in2=%r8d,136(<out=%rdi)
2571movl %r8d,136(%rdi)
2572
2573# qhasm: *(uint32 *) (out + 140) = in3
2574# asm 1: movl <in3=int64#6d,140(<out=int64#1)
2575# asm 2: movl <in3=%r9d,140(<out=%rdi)
2576movl %r9d,140(%rdi)
2577
2578# qhasm: in0 = z0
2579# asm 1: movd <z0=int6464#13,>in0=int64#3
2580# asm 2: movd <z0=%xmm12,>in0=%rdx
2581movd %xmm12,%rdx
2582
2583# qhasm: in1 = z1
2584# asm 1: movd <z1=int6464#8,>in1=int64#4
2585# asm 2: movd <z1=%xmm7,>in1=%rcx
2586movd %xmm7,%rcx
2587
2588# qhasm: in2 = z2
2589# asm 1: movd <z2=int6464#11,>in2=int64#5
2590# asm 2: movd <z2=%xmm10,>in2=%r8
2591movd %xmm10,%r8
2592
2593# qhasm: in3 = z3
2594# asm 1: movd <z3=int6464#5,>in3=int64#6
2595# asm 2: movd <z3=%xmm4,>in3=%r9
2596movd %xmm4,%r9
2597
2598# qhasm: (uint32) in0 ^= *(uint32 *) (m + 192)
2599# asm 1: xorl 192(<m=int64#2),<in0=int64#3d
2600# asm 2: xorl 192(<m=%rsi),<in0=%edx
2601xorl 192(%rsi),%edx
2602
2603# qhasm: (uint32) in1 ^= *(uint32 *) (m + 196)
2604# asm 1: xorl 196(<m=int64#2),<in1=int64#4d
2605# asm 2: xorl 196(<m=%rsi),<in1=%ecx
2606xorl 196(%rsi),%ecx
2607
2608# qhasm: (uint32) in2 ^= *(uint32 *) (m + 200)
2609# asm 1: xorl 200(<m=int64#2),<in2=int64#5d
2610# asm 2: xorl 200(<m=%rsi),<in2=%r8d
2611xorl 200(%rsi),%r8d
2612
2613# qhasm: (uint32) in3 ^= *(uint32 *) (m + 204)
2614# asm 1: xorl 204(<m=int64#2),<in3=int64#6d
2615# asm 2: xorl 204(<m=%rsi),<in3=%r9d
2616xorl 204(%rsi),%r9d
2617
2618# qhasm: *(uint32 *) (out + 192) = in0
2619# asm 1: movl <in0=int64#3d,192(<out=int64#1)
2620# asm 2: movl <in0=%edx,192(<out=%rdi)
2621movl %edx,192(%rdi)
2622
2623# qhasm: *(uint32 *) (out + 196) = in1
2624# asm 1: movl <in1=int64#4d,196(<out=int64#1)
2625# asm 2: movl <in1=%ecx,196(<out=%rdi)
2626movl %ecx,196(%rdi)
2627
2628# qhasm: *(uint32 *) (out + 200) = in2
2629# asm 1: movl <in2=int64#5d,200(<out=int64#1)
2630# asm 2: movl <in2=%r8d,200(<out=%rdi)
2631movl %r8d,200(%rdi)
2632
2633# qhasm: *(uint32 *) (out + 204) = in3
2634# asm 1: movl <in3=int64#6d,204(<out=int64#1)
2635# asm 2: movl <in3=%r9d,204(<out=%rdi)
2636movl %r9d,204(%rdi)
2637
2638# qhasm: uint32323232 z4 += orig4
2639# asm 1: paddd <orig4=stack128#16,<z4=int6464#15
2640# asm 2: paddd <orig4=240(%rsp),<z4=%xmm14
2641paddd 240(%rsp),%xmm14
2642
2643# qhasm: uint32323232 z5 += orig5
2644# asm 1: paddd <orig5=stack128#5,<z5=int6464#1
2645# asm 2: paddd <orig5=64(%rsp),<z5=%xmm0
2646paddd 64(%rsp),%xmm0
2647
2648# qhasm: uint32323232 z6 += orig6
2649# asm 1: paddd <orig6=stack128#9,<z6=int6464#6
2650# asm 2: paddd <orig6=128(%rsp),<z6=%xmm5
2651paddd 128(%rsp),%xmm5
2652
2653# qhasm: uint32323232 z7 += orig7
2654# asm 1: paddd <orig7=stack128#13,<z7=int6464#9
2655# asm 2: paddd <orig7=192(%rsp),<z7=%xmm8
2656paddd 192(%rsp),%xmm8
2657
2658# qhasm: in4 = z4
2659# asm 1: movd <z4=int6464#15,>in4=int64#3
2660# asm 2: movd <z4=%xmm14,>in4=%rdx
2661movd %xmm14,%rdx
2662
2663# qhasm: in5 = z5
2664# asm 1: movd <z5=int6464#1,>in5=int64#4
2665# asm 2: movd <z5=%xmm0,>in5=%rcx
2666movd %xmm0,%rcx
2667
2668# qhasm: in6 = z6
2669# asm 1: movd <z6=int6464#6,>in6=int64#5
2670# asm 2: movd <z6=%xmm5,>in6=%r8
2671movd %xmm5,%r8
2672
2673# qhasm: in7 = z7
2674# asm 1: movd <z7=int6464#9,>in7=int64#6
2675# asm 2: movd <z7=%xmm8,>in7=%r9
2676movd %xmm8,%r9
2677
2678# qhasm: z4 <<<= 96
2679# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2680# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2681pshufd $0x39,%xmm14,%xmm14
2682
2683# qhasm: z5 <<<= 96
2684# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2685# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2686pshufd $0x39,%xmm0,%xmm0
2687
2688# qhasm: z6 <<<= 96
2689# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2690# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2691pshufd $0x39,%xmm5,%xmm5
2692
2693# qhasm: z7 <<<= 96
2694# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2695# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2696pshufd $0x39,%xmm8,%xmm8
2697
2698# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
2699# asm 1: xorl 16(<m=int64#2),<in4=int64#3d
2700# asm 2: xorl 16(<m=%rsi),<in4=%edx
2701xorl 16(%rsi),%edx
2702
2703# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
2704# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
2705# asm 2: xorl 20(<m=%rsi),<in5=%ecx
2706xorl 20(%rsi),%ecx
2707
2708# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
2709# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
2710# asm 2: xorl 24(<m=%rsi),<in6=%r8d
2711xorl 24(%rsi),%r8d
2712
2713# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
2714# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
2715# asm 2: xorl 28(<m=%rsi),<in7=%r9d
2716xorl 28(%rsi),%r9d
2717
2718# qhasm: *(uint32 *) (out + 16) = in4
2719# asm 1: movl <in4=int64#3d,16(<out=int64#1)
2720# asm 2: movl <in4=%edx,16(<out=%rdi)
2721movl %edx,16(%rdi)
2722
2723# qhasm: *(uint32 *) (out + 20) = in5
2724# asm 1: movl <in5=int64#4d,20(<out=int64#1)
2725# asm 2: movl <in5=%ecx,20(<out=%rdi)
2726movl %ecx,20(%rdi)
2727
2728# qhasm: *(uint32 *) (out + 24) = in6
2729# asm 1: movl <in6=int64#5d,24(<out=int64#1)
2730# asm 2: movl <in6=%r8d,24(<out=%rdi)
2731movl %r8d,24(%rdi)
2732
2733# qhasm: *(uint32 *) (out + 28) = in7
2734# asm 1: movl <in7=int64#6d,28(<out=int64#1)
2735# asm 2: movl <in7=%r9d,28(<out=%rdi)
2736movl %r9d,28(%rdi)
2737
2738# qhasm: in4 = z4
2739# asm 1: movd <z4=int6464#15,>in4=int64#3
2740# asm 2: movd <z4=%xmm14,>in4=%rdx
2741movd %xmm14,%rdx
2742
2743# qhasm: in5 = z5
2744# asm 1: movd <z5=int6464#1,>in5=int64#4
2745# asm 2: movd <z5=%xmm0,>in5=%rcx
2746movd %xmm0,%rcx
2747
2748# qhasm: in6 = z6
2749# asm 1: movd <z6=int6464#6,>in6=int64#5
2750# asm 2: movd <z6=%xmm5,>in6=%r8
2751movd %xmm5,%r8
2752
2753# qhasm: in7 = z7
2754# asm 1: movd <z7=int6464#9,>in7=int64#6
2755# asm 2: movd <z7=%xmm8,>in7=%r9
2756movd %xmm8,%r9
2757
2758# qhasm: z4 <<<= 96
2759# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2760# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2761pshufd $0x39,%xmm14,%xmm14
2762
2763# qhasm: z5 <<<= 96
2764# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2765# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2766pshufd $0x39,%xmm0,%xmm0
2767
2768# qhasm: z6 <<<= 96
2769# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2770# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2771pshufd $0x39,%xmm5,%xmm5
2772
2773# qhasm: z7 <<<= 96
2774# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2775# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2776pshufd $0x39,%xmm8,%xmm8
2777
2778# qhasm: (uint32) in4 ^= *(uint32 *) (m + 80)
2779# asm 1: xorl 80(<m=int64#2),<in4=int64#3d
2780# asm 2: xorl 80(<m=%rsi),<in4=%edx
2781xorl 80(%rsi),%edx
2782
2783# qhasm: (uint32) in5 ^= *(uint32 *) (m + 84)
2784# asm 1: xorl 84(<m=int64#2),<in5=int64#4d
2785# asm 2: xorl 84(<m=%rsi),<in5=%ecx
2786xorl 84(%rsi),%ecx
2787
2788# qhasm: (uint32) in6 ^= *(uint32 *) (m + 88)
2789# asm 1: xorl 88(<m=int64#2),<in6=int64#5d
2790# asm 2: xorl 88(<m=%rsi),<in6=%r8d
2791xorl 88(%rsi),%r8d
2792
2793# qhasm: (uint32) in7 ^= *(uint32 *) (m + 92)
2794# asm 1: xorl 92(<m=int64#2),<in7=int64#6d
2795# asm 2: xorl 92(<m=%rsi),<in7=%r9d
2796xorl 92(%rsi),%r9d
2797
2798# qhasm: *(uint32 *) (out + 80) = in4
2799# asm 1: movl <in4=int64#3d,80(<out=int64#1)
2800# asm 2: movl <in4=%edx,80(<out=%rdi)
2801movl %edx,80(%rdi)
2802
2803# qhasm: *(uint32 *) (out + 84) = in5
2804# asm 1: movl <in5=int64#4d,84(<out=int64#1)
2805# asm 2: movl <in5=%ecx,84(<out=%rdi)
2806movl %ecx,84(%rdi)
2807
2808# qhasm: *(uint32 *) (out + 88) = in6
2809# asm 1: movl <in6=int64#5d,88(<out=int64#1)
2810# asm 2: movl <in6=%r8d,88(<out=%rdi)
2811movl %r8d,88(%rdi)
2812
2813# qhasm: *(uint32 *) (out + 92) = in7
2814# asm 1: movl <in7=int64#6d,92(<out=int64#1)
2815# asm 2: movl <in7=%r9d,92(<out=%rdi)
2816movl %r9d,92(%rdi)
2817
2818# qhasm: in4 = z4
2819# asm 1: movd <z4=int6464#15,>in4=int64#3
2820# asm 2: movd <z4=%xmm14,>in4=%rdx
2821movd %xmm14,%rdx
2822
2823# qhasm: in5 = z5
2824# asm 1: movd <z5=int6464#1,>in5=int64#4
2825# asm 2: movd <z5=%xmm0,>in5=%rcx
2826movd %xmm0,%rcx
2827
2828# qhasm: in6 = z6
2829# asm 1: movd <z6=int6464#6,>in6=int64#5
2830# asm 2: movd <z6=%xmm5,>in6=%r8
2831movd %xmm5,%r8
2832
2833# qhasm: in7 = z7
2834# asm 1: movd <z7=int6464#9,>in7=int64#6
2835# asm 2: movd <z7=%xmm8,>in7=%r9
2836movd %xmm8,%r9
2837
2838# qhasm: z4 <<<= 96
2839# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2840# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2841pshufd $0x39,%xmm14,%xmm14
2842
2843# qhasm: z5 <<<= 96
2844# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2845# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2846pshufd $0x39,%xmm0,%xmm0
2847
2848# qhasm: z6 <<<= 96
2849# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2850# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2851pshufd $0x39,%xmm5,%xmm5
2852
2853# qhasm: z7 <<<= 96
2854# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2855# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2856pshufd $0x39,%xmm8,%xmm8
2857
2858# qhasm: (uint32) in4 ^= *(uint32 *) (m + 144)
2859# asm 1: xorl 144(<m=int64#2),<in4=int64#3d
2860# asm 2: xorl 144(<m=%rsi),<in4=%edx
2861xorl 144(%rsi),%edx
2862
2863# qhasm: (uint32) in5 ^= *(uint32 *) (m + 148)
2864# asm 1: xorl 148(<m=int64#2),<in5=int64#4d
2865# asm 2: xorl 148(<m=%rsi),<in5=%ecx
2866xorl 148(%rsi),%ecx
2867
2868# qhasm: (uint32) in6 ^= *(uint32 *) (m + 152)
2869# asm 1: xorl 152(<m=int64#2),<in6=int64#5d
2870# asm 2: xorl 152(<m=%rsi),<in6=%r8d
2871xorl 152(%rsi),%r8d
2872
2873# qhasm: (uint32) in7 ^= *(uint32 *) (m + 156)
2874# asm 1: xorl 156(<m=int64#2),<in7=int64#6d
2875# asm 2: xorl 156(<m=%rsi),<in7=%r9d
2876xorl 156(%rsi),%r9d
2877
2878# qhasm: *(uint32 *) (out + 144) = in4
2879# asm 1: movl <in4=int64#3d,144(<out=int64#1)
2880# asm 2: movl <in4=%edx,144(<out=%rdi)
2881movl %edx,144(%rdi)
2882
2883# qhasm: *(uint32 *) (out + 148) = in5
2884# asm 1: movl <in5=int64#4d,148(<out=int64#1)
2885# asm 2: movl <in5=%ecx,148(<out=%rdi)
2886movl %ecx,148(%rdi)
2887
2888# qhasm: *(uint32 *) (out + 152) = in6
2889# asm 1: movl <in6=int64#5d,152(<out=int64#1)
2890# asm 2: movl <in6=%r8d,152(<out=%rdi)
2891movl %r8d,152(%rdi)
2892
2893# qhasm: *(uint32 *) (out + 156) = in7
2894# asm 1: movl <in7=int64#6d,156(<out=int64#1)
2895# asm 2: movl <in7=%r9d,156(<out=%rdi)
2896movl %r9d,156(%rdi)
2897
2898# qhasm: in4 = z4
2899# asm 1: movd <z4=int6464#15,>in4=int64#3
2900# asm 2: movd <z4=%xmm14,>in4=%rdx
2901movd %xmm14,%rdx
2902
2903# qhasm: in5 = z5
2904# asm 1: movd <z5=int6464#1,>in5=int64#4
2905# asm 2: movd <z5=%xmm0,>in5=%rcx
2906movd %xmm0,%rcx
2907
2908# qhasm: in6 = z6
2909# asm 1: movd <z6=int6464#6,>in6=int64#5
2910# asm 2: movd <z6=%xmm5,>in6=%r8
2911movd %xmm5,%r8
2912
2913# qhasm: in7 = z7
2914# asm 1: movd <z7=int6464#9,>in7=int64#6
2915# asm 2: movd <z7=%xmm8,>in7=%r9
2916movd %xmm8,%r9
2917
2918# qhasm: (uint32) in4 ^= *(uint32 *) (m + 208)
2919# asm 1: xorl 208(<m=int64#2),<in4=int64#3d
2920# asm 2: xorl 208(<m=%rsi),<in4=%edx
2921xorl 208(%rsi),%edx
2922
2923# qhasm: (uint32) in5 ^= *(uint32 *) (m + 212)
2924# asm 1: xorl 212(<m=int64#2),<in5=int64#4d
2925# asm 2: xorl 212(<m=%rsi),<in5=%ecx
2926xorl 212(%rsi),%ecx
2927
2928# qhasm: (uint32) in6 ^= *(uint32 *) (m + 216)
2929# asm 1: xorl 216(<m=int64#2),<in6=int64#5d
2930# asm 2: xorl 216(<m=%rsi),<in6=%r8d
2931xorl 216(%rsi),%r8d
2932
2933# qhasm: (uint32) in7 ^= *(uint32 *) (m + 220)
2934# asm 1: xorl 220(<m=int64#2),<in7=int64#6d
2935# asm 2: xorl 220(<m=%rsi),<in7=%r9d
2936xorl 220(%rsi),%r9d
2937
2938# qhasm: *(uint32 *) (out + 208) = in4
2939# asm 1: movl <in4=int64#3d,208(<out=int64#1)
2940# asm 2: movl <in4=%edx,208(<out=%rdi)
2941movl %edx,208(%rdi)
2942
2943# qhasm: *(uint32 *) (out + 212) = in5
2944# asm 1: movl <in5=int64#4d,212(<out=int64#1)
2945# asm 2: movl <in5=%ecx,212(<out=%rdi)
2946movl %ecx,212(%rdi)
2947
2948# qhasm: *(uint32 *) (out + 216) = in6
2949# asm 1: movl <in6=int64#5d,216(<out=int64#1)
2950# asm 2: movl <in6=%r8d,216(<out=%rdi)
2951movl %r8d,216(%rdi)
2952
2953# qhasm: *(uint32 *) (out + 220) = in7
2954# asm 1: movl <in7=int64#6d,220(<out=int64#1)
2955# asm 2: movl <in7=%r9d,220(<out=%rdi)
2956movl %r9d,220(%rdi)
2957
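# note: words z8..z11 and then z12..z15 receive the same feedforward, lane
# extraction and store treatment below, completing all 16 words of each of
# the four 64-byte blocks.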
2958# qhasm: uint32323232 z8 += orig8
2959# asm 1: paddd <orig8=stack128#19,<z8=int6464#16
2960# asm 2: paddd <orig8=288(%rsp),<z8=%xmm15
2961paddd 288(%rsp),%xmm15
2962
2963# qhasm: uint32323232 z9 += orig9
2964# asm 1: paddd <orig9=stack128#20,<z9=int6464#12
2965# asm 2: paddd <orig9=304(%rsp),<z9=%xmm11
2966paddd 304(%rsp),%xmm11
2967
2968# qhasm: uint32323232 z10 += orig10
2969# asm 1: paddd <orig10=stack128#6,<z10=int6464#2
2970# asm 2: paddd <orig10=80(%rsp),<z10=%xmm1
2971paddd 80(%rsp),%xmm1
2972
2973# qhasm: uint32323232 z11 += orig11
2974# asm 1: paddd <orig11=stack128#10,<z11=int6464#7
2975# asm 2: paddd <orig11=144(%rsp),<z11=%xmm6
2976paddd 144(%rsp),%xmm6
2977
2978# qhasm: in8 = z8
2979# asm 1: movd <z8=int6464#16,>in8=int64#3
2980# asm 2: movd <z8=%xmm15,>in8=%rdx
2981movd %xmm15,%rdx
2982
2983# qhasm: in9 = z9
2984# asm 1: movd <z9=int6464#12,>in9=int64#4
2985# asm 2: movd <z9=%xmm11,>in9=%rcx
2986movd %xmm11,%rcx
2987
2988# qhasm: in10 = z10
2989# asm 1: movd <z10=int6464#2,>in10=int64#5
2990# asm 2: movd <z10=%xmm1,>in10=%r8
2991movd %xmm1,%r8
2992
2993# qhasm: in11 = z11
2994# asm 1: movd <z11=int6464#7,>in11=int64#6
2995# asm 2: movd <z11=%xmm6,>in11=%r9
2996movd %xmm6,%r9
2997
2998# qhasm: z8 <<<= 96
2999# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3000# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3001pshufd $0x39,%xmm15,%xmm15
3002
3003# qhasm: z9 <<<= 96
3004# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3005# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3006pshufd $0x39,%xmm11,%xmm11
3007
3008# qhasm: z10 <<<= 96
3009# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3010# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3011pshufd $0x39,%xmm1,%xmm1
3012
3013# qhasm: z11 <<<= 96
3014# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3015# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3016pshufd $0x39,%xmm6,%xmm6
3017
3018# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
3019# asm 1: xorl 32(<m=int64#2),<in8=int64#3d
3020# asm 2: xorl 32(<m=%rsi),<in8=%edx
3021xorl 32(%rsi),%edx
3022
3023# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
3024# asm 1: xorl 36(<m=int64#2),<in9=int64#4d
3025# asm 2: xorl 36(<m=%rsi),<in9=%ecx
3026xorl 36(%rsi),%ecx
3027
3028# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
3029# asm 1: xorl 40(<m=int64#2),<in10=int64#5d
3030# asm 2: xorl 40(<m=%rsi),<in10=%r8d
3031xorl 40(%rsi),%r8d
3032
3033# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
3034# asm 1: xorl 44(<m=int64#2),<in11=int64#6d
3035# asm 2: xorl 44(<m=%rsi),<in11=%r9d
3036xorl 44(%rsi),%r9d
3037
3038# qhasm: *(uint32 *) (out + 32) = in8
3039# asm 1: movl <in8=int64#3d,32(<out=int64#1)
3040# asm 2: movl <in8=%edx,32(<out=%rdi)
3041movl %edx,32(%rdi)
3042
3043# qhasm: *(uint32 *) (out + 36) = in9
3044# asm 1: movl <in9=int64#4d,36(<out=int64#1)
3045# asm 2: movl <in9=%ecx,36(<out=%rdi)
3046movl %ecx,36(%rdi)
3047
3048# qhasm: *(uint32 *) (out + 40) = in10
3049# asm 1: movl <in10=int64#5d,40(<out=int64#1)
3050# asm 2: movl <in10=%r8d,40(<out=%rdi)
3051movl %r8d,40(%rdi)
3052
3053# qhasm: *(uint32 *) (out + 44) = in11
3054# asm 1: movl <in11=int64#6d,44(<out=int64#1)
3055# asm 2: movl <in11=%r9d,44(<out=%rdi)
3056movl %r9d,44(%rdi)
3057
3058# qhasm: in8 = z8
3059# asm 1: movd <z8=int6464#16,>in8=int64#3
3060# asm 2: movd <z8=%xmm15,>in8=%rdx
3061movd %xmm15,%rdx
3062
3063# qhasm: in9 = z9
3064# asm 1: movd <z9=int6464#12,>in9=int64#4
3065# asm 2: movd <z9=%xmm11,>in9=%rcx
3066movd %xmm11,%rcx
3067
3068# qhasm: in10 = z10
3069# asm 1: movd <z10=int6464#2,>in10=int64#5
3070# asm 2: movd <z10=%xmm1,>in10=%r8
3071movd %xmm1,%r8
3072
3073# qhasm: in11 = z11
3074# asm 1: movd <z11=int6464#7,>in11=int64#6
3075# asm 2: movd <z11=%xmm6,>in11=%r9
3076movd %xmm6,%r9
3077
3078# qhasm: z8 <<<= 96
3079# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3080# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3081pshufd $0x39,%xmm15,%xmm15
3082
3083# qhasm: z9 <<<= 96
3084# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3085# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3086pshufd $0x39,%xmm11,%xmm11
3087
3088# qhasm: z10 <<<= 96
3089# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3090# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3091pshufd $0x39,%xmm1,%xmm1
3092
3093# qhasm: z11 <<<= 96
3094# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3095# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3096pshufd $0x39,%xmm6,%xmm6
3097
3098# qhasm: (uint32) in8 ^= *(uint32 *) (m + 96)
3099# asm 1: xorl 96(<m=int64#2),<in8=int64#3d
3100# asm 2: xorl 96(<m=%rsi),<in8=%edx
3101xorl 96(%rsi),%edx
3102
3103# qhasm: (uint32) in9 ^= *(uint32 *) (m + 100)
3104# asm 1: xorl 100(<m=int64#2),<in9=int64#4d
3105# asm 2: xorl 100(<m=%rsi),<in9=%ecx
3106xorl 100(%rsi),%ecx
3107
3108# qhasm: (uint32) in10 ^= *(uint32 *) (m + 104)
3109# asm 1: xorl 104(<m=int64#2),<in10=int64#5d
3110# asm 2: xorl 104(<m=%rsi),<in10=%r8d
3111xorl 104(%rsi),%r8d
3112
3113# qhasm: (uint32) in11 ^= *(uint32 *) (m + 108)
3114# asm 1: xorl 108(<m=int64#2),<in11=int64#6d
3115# asm 2: xorl 108(<m=%rsi),<in11=%r9d
3116xorl 108(%rsi),%r9d
3117
3118# qhasm: *(uint32 *) (out + 96) = in8
3119# asm 1: movl <in8=int64#3d,96(<out=int64#1)
3120# asm 2: movl <in8=%edx,96(<out=%rdi)
3121movl %edx,96(%rdi)
3122
3123# qhasm: *(uint32 *) (out + 100) = in9
3124# asm 1: movl <in9=int64#4d,100(<out=int64#1)
3125# asm 2: movl <in9=%ecx,100(<out=%rdi)
3126movl %ecx,100(%rdi)
3127
3128# qhasm: *(uint32 *) (out + 104) = in10
3129# asm 1: movl <in10=int64#5d,104(<out=int64#1)
3130# asm 2: movl <in10=%r8d,104(<out=%rdi)
3131movl %r8d,104(%rdi)
3132
3133# qhasm: *(uint32 *) (out + 108) = in11
3134# asm 1: movl <in11=int64#6d,108(<out=int64#1)
3135# asm 2: movl <in11=%r9d,108(<out=%rdi)
3136movl %r9d,108(%rdi)
3137
3138# qhasm: in8 = z8
3139# asm 1: movd <z8=int6464#16,>in8=int64#3
3140# asm 2: movd <z8=%xmm15,>in8=%rdx
3141movd %xmm15,%rdx
3142
3143# qhasm: in9 = z9
3144# asm 1: movd <z9=int6464#12,>in9=int64#4
3145# asm 2: movd <z9=%xmm11,>in9=%rcx
3146movd %xmm11,%rcx
3147
3148# qhasm: in10 = z10
3149# asm 1: movd <z10=int6464#2,>in10=int64#5
3150# asm 2: movd <z10=%xmm1,>in10=%r8
3151movd %xmm1,%r8
3152
3153# qhasm: in11 = z11
3154# asm 1: movd <z11=int6464#7,>in11=int64#6
3155# asm 2: movd <z11=%xmm6,>in11=%r9
3156movd %xmm6,%r9
3157
3158# qhasm: z8 <<<= 96
3159# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3160# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3161pshufd $0x39,%xmm15,%xmm15
3162
3163# qhasm: z9 <<<= 96
3164# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3165# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3166pshufd $0x39,%xmm11,%xmm11
3167
3168# qhasm: z10 <<<= 96
3169# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3170# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3171pshufd $0x39,%xmm1,%xmm1
3172
3173# qhasm: z11 <<<= 96
3174# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3175# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3176pshufd $0x39,%xmm6,%xmm6
3177
3178# qhasm: (uint32) in8 ^= *(uint32 *) (m + 160)
3179# asm 1: xorl 160(<m=int64#2),<in8=int64#3d
3180# asm 2: xorl 160(<m=%rsi),<in8=%edx
3181xorl 160(%rsi),%edx
3182
3183# qhasm: (uint32) in9 ^= *(uint32 *) (m + 164)
3184# asm 1: xorl 164(<m=int64#2),<in9=int64#4d
3185# asm 2: xorl 164(<m=%rsi),<in9=%ecx
3186xorl 164(%rsi),%ecx
3187
3188# qhasm: (uint32) in10 ^= *(uint32 *) (m + 168)
3189# asm 1: xorl 168(<m=int64#2),<in10=int64#5d
3190# asm 2: xorl 168(<m=%rsi),<in10=%r8d
3191xorl 168(%rsi),%r8d
3192
3193# qhasm: (uint32) in11 ^= *(uint32 *) (m + 172)
3194# asm 1: xorl 172(<m=int64#2),<in11=int64#6d
3195# asm 2: xorl 172(<m=%rsi),<in11=%r9d
3196xorl 172(%rsi),%r9d
3197
3198# qhasm: *(uint32 *) (out + 160) = in8
3199# asm 1: movl <in8=int64#3d,160(<out=int64#1)
3200# asm 2: movl <in8=%edx,160(<out=%rdi)
3201movl %edx,160(%rdi)
3202
3203# qhasm: *(uint32 *) (out + 164) = in9
3204# asm 1: movl <in9=int64#4d,164(<out=int64#1)
3205# asm 2: movl <in9=%ecx,164(<out=%rdi)
3206movl %ecx,164(%rdi)
3207
3208# qhasm: *(uint32 *) (out + 168) = in10
3209# asm 1: movl <in10=int64#5d,168(<out=int64#1)
3210# asm 2: movl <in10=%r8d,168(<out=%rdi)
3211movl %r8d,168(%rdi)
3212
3213# qhasm: *(uint32 *) (out + 172) = in11
3214# asm 1: movl <in11=int64#6d,172(<out=int64#1)
3215# asm 2: movl <in11=%r9d,172(<out=%rdi)
3216movl %r9d,172(%rdi)
3217
3218# qhasm: in8 = z8
3219# asm 1: movd <z8=int6464#16,>in8=int64#3
3220# asm 2: movd <z8=%xmm15,>in8=%rdx
3221movd %xmm15,%rdx
3222
3223# qhasm: in9 = z9
3224# asm 1: movd <z9=int6464#12,>in9=int64#4
3225# asm 2: movd <z9=%xmm11,>in9=%rcx
3226movd %xmm11,%rcx
3227
3228# qhasm: in10 = z10
3229# asm 1: movd <z10=int6464#2,>in10=int64#5
3230# asm 2: movd <z10=%xmm1,>in10=%r8
3231movd %xmm1,%r8
3232
3233# qhasm: in11 = z11
3234# asm 1: movd <z11=int6464#7,>in11=int64#6
3235# asm 2: movd <z11=%xmm6,>in11=%r9
3236movd %xmm6,%r9
3237
3238# qhasm: (uint32) in8 ^= *(uint32 *) (m + 224)
3239# asm 1: xorl 224(<m=int64#2),<in8=int64#3d
3240# asm 2: xorl 224(<m=%rsi),<in8=%edx
3241xorl 224(%rsi),%edx
3242
3243# qhasm: (uint32) in9 ^= *(uint32 *) (m + 228)
3244# asm 1: xorl 228(<m=int64#2),<in9=int64#4d
3245# asm 2: xorl 228(<m=%rsi),<in9=%ecx
3246xorl 228(%rsi),%ecx
3247
3248# qhasm: (uint32) in10 ^= *(uint32 *) (m + 232)
3249# asm 1: xorl 232(<m=int64#2),<in10=int64#5d
3250# asm 2: xorl 232(<m=%rsi),<in10=%r8d
3251xorl 232(%rsi),%r8d
3252
3253# qhasm: (uint32) in11 ^= *(uint32 *) (m + 236)
3254# asm 1: xorl 236(<m=int64#2),<in11=int64#6d
3255# asm 2: xorl 236(<m=%rsi),<in11=%r9d
3256xorl 236(%rsi),%r9d
3257
3258# qhasm: *(uint32 *) (out + 224) = in8
3259# asm 1: movl <in8=int64#3d,224(<out=int64#1)
3260# asm 2: movl <in8=%edx,224(<out=%rdi)
3261movl %edx,224(%rdi)
3262
3263# qhasm: *(uint32 *) (out + 228) = in9
3264# asm 1: movl <in9=int64#4d,228(<out=int64#1)
3265# asm 2: movl <in9=%ecx,228(<out=%rdi)
3266movl %ecx,228(%rdi)
3267
3268# qhasm: *(uint32 *) (out + 232) = in10
3269# asm 1: movl <in10=int64#5d,232(<out=int64#1)
3270# asm 2: movl <in10=%r8d,232(<out=%rdi)
3271movl %r8d,232(%rdi)
3272
3273# qhasm: *(uint32 *) (out + 236) = in11
3274# asm 1: movl <in11=int64#6d,236(<out=int64#1)
3275# asm 2: movl <in11=%r9d,236(<out=%rdi)
3276movl %r9d,236(%rdi)
3277
3278# qhasm: uint32323232 z12 += orig12
3279# asm 1: paddd <orig12=stack128#11,<z12=int6464#14
3280# asm 2: paddd <orig12=160(%rsp),<z12=%xmm13
3281paddd 160(%rsp),%xmm13
3282
3283# qhasm: uint32323232 z13 += orig13
3284# asm 1: paddd <orig13=stack128#14,<z13=int6464#10
3285# asm 2: paddd <orig13=208(%rsp),<z13=%xmm9
3286paddd 208(%rsp),%xmm9
3287
3288# qhasm: uint32323232 z14 += orig14
3289# asm 1: paddd <orig14=stack128#17,<z14=int6464#4
3290# asm 2: paddd <orig14=256(%rsp),<z14=%xmm3
3291paddd 256(%rsp),%xmm3
3292
3293# qhasm: uint32323232 z15 += orig15
3294# asm 1: paddd <orig15=stack128#7,<z15=int6464#3
3295# asm 2: paddd <orig15=96(%rsp),<z15=%xmm2
3296paddd 96(%rsp),%xmm2
3297
3298# qhasm: in12 = z12
3299# asm 1: movd <z12=int6464#14,>in12=int64#3
3300# asm 2: movd <z12=%xmm13,>in12=%rdx
3301movd %xmm13,%rdx
3302
3303# qhasm: in13 = z13
3304# asm 1: movd <z13=int6464#10,>in13=int64#4
3305# asm 2: movd <z13=%xmm9,>in13=%rcx
3306movd %xmm9,%rcx
3307
3308# qhasm: in14 = z14
3309# asm 1: movd <z14=int6464#4,>in14=int64#5
3310# asm 2: movd <z14=%xmm3,>in14=%r8
3311movd %xmm3,%r8
3312
3313# qhasm: in15 = z15
3314# asm 1: movd <z15=int6464#3,>in15=int64#6
3315# asm 2: movd <z15=%xmm2,>in15=%r9
3316movd %xmm2,%r9
3317
3318# qhasm: z12 <<<= 96
3319# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3320# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3321pshufd $0x39,%xmm13,%xmm13
3322
3323# qhasm: z13 <<<= 96
3324# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3325# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3326pshufd $0x39,%xmm9,%xmm9
3327
3328# qhasm: z14 <<<= 96
3329# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3330# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3331pshufd $0x39,%xmm3,%xmm3
3332
3333# qhasm: z15 <<<= 96
3334# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3335# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3336pshufd $0x39,%xmm2,%xmm2
3337
3338# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
3339# asm 1: xorl 48(<m=int64#2),<in12=int64#3d
3340# asm 2: xorl 48(<m=%rsi),<in12=%edx
3341xorl 48(%rsi),%edx
3342
3343# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
3344# asm 1: xorl 52(<m=int64#2),<in13=int64#4d
3345# asm 2: xorl 52(<m=%rsi),<in13=%ecx
3346xorl 52(%rsi),%ecx
3347
3348# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
3349# asm 1: xorl 56(<m=int64#2),<in14=int64#5d
3350# asm 2: xorl 56(<m=%rsi),<in14=%r8d
3351xorl 56(%rsi),%r8d
3352
3353# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
3354# asm 1: xorl 60(<m=int64#2),<in15=int64#6d
3355# asm 2: xorl 60(<m=%rsi),<in15=%r9d
3356xorl 60(%rsi),%r9d
3357
3358# qhasm: *(uint32 *) (out + 48) = in12
3359# asm 1: movl <in12=int64#3d,48(<out=int64#1)
3360# asm 2: movl <in12=%edx,48(<out=%rdi)
3361movl %edx,48(%rdi)
3362
3363# qhasm: *(uint32 *) (out + 52) = in13
3364# asm 1: movl <in13=int64#4d,52(<out=int64#1)
3365# asm 2: movl <in13=%ecx,52(<out=%rdi)
3366movl %ecx,52(%rdi)
3367
3368# qhasm: *(uint32 *) (out + 56) = in14
3369# asm 1: movl <in14=int64#5d,56(<out=int64#1)
3370# asm 2: movl <in14=%r8d,56(<out=%rdi)
3371movl %r8d,56(%rdi)
3372
3373# qhasm: *(uint32 *) (out + 60) = in15
3374# asm 1: movl <in15=int64#6d,60(<out=int64#1)
3375# asm 2: movl <in15=%r9d,60(<out=%rdi)
3376movl %r9d,60(%rdi)
3377
3378# qhasm: in12 = z12
3379# asm 1: movd <z12=int6464#14,>in12=int64#3
3380# asm 2: movd <z12=%xmm13,>in12=%rdx
3381movd %xmm13,%rdx
3382
3383# qhasm: in13 = z13
3384# asm 1: movd <z13=int6464#10,>in13=int64#4
3385# asm 2: movd <z13=%xmm9,>in13=%rcx
3386movd %xmm9,%rcx
3387
3388# qhasm: in14 = z14
3389# asm 1: movd <z14=int6464#4,>in14=int64#5
3390# asm 2: movd <z14=%xmm3,>in14=%r8
3391movd %xmm3,%r8
3392
3393# qhasm: in15 = z15
3394# asm 1: movd <z15=int6464#3,>in15=int64#6
3395# asm 2: movd <z15=%xmm2,>in15=%r9
3396movd %xmm2,%r9
3397
3398# qhasm: z12 <<<= 96
3399# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3400# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3401pshufd $0x39,%xmm13,%xmm13
3402
3403# qhasm: z13 <<<= 96
3404# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3405# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3406pshufd $0x39,%xmm9,%xmm9
3407
3408# qhasm: z14 <<<= 96
3409# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3410# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3411pshufd $0x39,%xmm3,%xmm3
3412
3413# qhasm: z15 <<<= 96
3414# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3415# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3416pshufd $0x39,%xmm2,%xmm2
3417
3418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 112)
3419# asm 1: xorl 112(<m=int64#2),<in12=int64#3d
3420# asm 2: xorl 112(<m=%rsi),<in12=%edx
3421xorl 112(%rsi),%edx
3422
3423# qhasm: (uint32) in13 ^= *(uint32 *) (m + 116)
3424# asm 1: xorl 116(<m=int64#2),<in13=int64#4d
3425# asm 2: xorl 116(<m=%rsi),<in13=%ecx
3426xorl 116(%rsi),%ecx
3427
3428# qhasm: (uint32) in14 ^= *(uint32 *) (m + 120)
3429# asm 1: xorl 120(<m=int64#2),<in14=int64#5d
3430# asm 2: xorl 120(<m=%rsi),<in14=%r8d
3431xorl 120(%rsi),%r8d
3432
3433# qhasm: (uint32) in15 ^= *(uint32 *) (m + 124)
3434# asm 1: xorl 124(<m=int64#2),<in15=int64#6d
3435# asm 2: xorl 124(<m=%rsi),<in15=%r9d
3436xorl 124(%rsi),%r9d
3437
3438# qhasm: *(uint32 *) (out + 112) = in12
3439# asm 1: movl <in12=int64#3d,112(<out=int64#1)
3440# asm 2: movl <in12=%edx,112(<out=%rdi)
3441movl %edx,112(%rdi)
3442
3443# qhasm: *(uint32 *) (out + 116) = in13
3444# asm 1: movl <in13=int64#4d,116(<out=int64#1)
3445# asm 2: movl <in13=%ecx,116(<out=%rdi)
3446movl %ecx,116(%rdi)
3447
3448# qhasm: *(uint32 *) (out + 120) = in14
3449# asm 1: movl <in14=int64#5d,120(<out=int64#1)
3450# asm 2: movl <in14=%r8d,120(<out=%rdi)
3451movl %r8d,120(%rdi)
3452
3453# qhasm: *(uint32 *) (out + 124) = in15
3454# asm 1: movl <in15=int64#6d,124(<out=int64#1)
3455# asm 2: movl <in15=%r9d,124(<out=%rdi)
3456movl %r9d,124(%rdi)
3457
3458# qhasm: in12 = z12
3459# asm 1: movd <z12=int6464#14,>in12=int64#3
3460# asm 2: movd <z12=%xmm13,>in12=%rdx
3461movd %xmm13,%rdx
3462
3463# qhasm: in13 = z13
3464# asm 1: movd <z13=int6464#10,>in13=int64#4
3465# asm 2: movd <z13=%xmm9,>in13=%rcx
3466movd %xmm9,%rcx
3467
3468# qhasm: in14 = z14
3469# asm 1: movd <z14=int6464#4,>in14=int64#5
3470# asm 2: movd <z14=%xmm3,>in14=%r8
3471movd %xmm3,%r8
3472
3473# qhasm: in15 = z15
3474# asm 1: movd <z15=int6464#3,>in15=int64#6
3475# asm 2: movd <z15=%xmm2,>in15=%r9
3476movd %xmm2,%r9
3477
3478# qhasm: z12 <<<= 96
3479# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3480# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3481pshufd $0x39,%xmm13,%xmm13
3482
3483# qhasm: z13 <<<= 96
3484# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3485# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3486pshufd $0x39,%xmm9,%xmm9
3487
3488# qhasm: z14 <<<= 96
3489# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3490# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3491pshufd $0x39,%xmm3,%xmm3
3492
3493# qhasm: z15 <<<= 96
3494# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3495# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3496pshufd $0x39,%xmm2,%xmm2
3497
3498# qhasm: (uint32) in12 ^= *(uint32 *) (m + 176)
3499# asm 1: xorl 176(<m=int64#2),<in12=int64#3d
3500# asm 2: xorl 176(<m=%rsi),<in12=%edx
3501xorl 176(%rsi),%edx
3502
3503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 180)
3504# asm 1: xorl 180(<m=int64#2),<in13=int64#4d
3505# asm 2: xorl 180(<m=%rsi),<in13=%ecx
3506xorl 180(%rsi),%ecx
3507
3508# qhasm: (uint32) in14 ^= *(uint32 *) (m + 184)
3509# asm 1: xorl 184(<m=int64#2),<in14=int64#5d
3510# asm 2: xorl 184(<m=%rsi),<in14=%r8d
3511xorl 184(%rsi),%r8d
3512
3513# qhasm: (uint32) in15 ^= *(uint32 *) (m + 188)
3514# asm 1: xorl 188(<m=int64#2),<in15=int64#6d
3515# asm 2: xorl 188(<m=%rsi),<in15=%r9d
3516xorl 188(%rsi),%r9d
3517
3518# qhasm: *(uint32 *) (out + 176) = in12
3519# asm 1: movl <in12=int64#3d,176(<out=int64#1)
3520# asm 2: movl <in12=%edx,176(<out=%rdi)
3521movl %edx,176(%rdi)
3522
3523# qhasm: *(uint32 *) (out + 180) = in13
3524# asm 1: movl <in13=int64#4d,180(<out=int64#1)
3525# asm 2: movl <in13=%ecx,180(<out=%rdi)
3526movl %ecx,180(%rdi)
3527
3528# qhasm: *(uint32 *) (out + 184) = in14
3529# asm 1: movl <in14=int64#5d,184(<out=int64#1)
3530# asm 2: movl <in14=%r8d,184(<out=%rdi)
3531movl %r8d,184(%rdi)
3532
3533# qhasm: *(uint32 *) (out + 188) = in15
3534# asm 1: movl <in15=int64#6d,188(<out=int64#1)
3535# asm 2: movl <in15=%r9d,188(<out=%rdi)
3536movl %r9d,188(%rdi)
3537
3538# qhasm: in12 = z12
3539# asm 1: movd <z12=int6464#14,>in12=int64#3
3540# asm 2: movd <z12=%xmm13,>in12=%rdx
3541movd %xmm13,%rdx
3542
3543# qhasm: in13 = z13
3544# asm 1: movd <z13=int6464#10,>in13=int64#4
3545# asm 2: movd <z13=%xmm9,>in13=%rcx
3546movd %xmm9,%rcx
3547
3548# qhasm: in14 = z14
3549# asm 1: movd <z14=int6464#4,>in14=int64#5
3550# asm 2: movd <z14=%xmm3,>in14=%r8
3551movd %xmm3,%r8
3552
3553# qhasm: in15 = z15
3554# asm 1: movd <z15=int6464#3,>in15=int64#6
3555# asm 2: movd <z15=%xmm2,>in15=%r9
3556movd %xmm2,%r9
3557
3558# qhasm: (uint32) in12 ^= *(uint32 *) (m + 240)
3559# asm 1: xorl 240(<m=int64#2),<in12=int64#3d
3560# asm 2: xorl 240(<m=%rsi),<in12=%edx
3561xorl 240(%rsi),%edx
3562
3563# qhasm: (uint32) in13 ^= *(uint32 *) (m + 244)
3564# asm 1: xorl 244(<m=int64#2),<in13=int64#4d
3565# asm 2: xorl 244(<m=%rsi),<in13=%ecx
3566xorl 244(%rsi),%ecx
3567
3568# qhasm: (uint32) in14 ^= *(uint32 *) (m + 248)
3569# asm 1: xorl 248(<m=int64#2),<in14=int64#5d
3570# asm 2: xorl 248(<m=%rsi),<in14=%r8d
3571xorl 248(%rsi),%r8d
3572
3573# qhasm: (uint32) in15 ^= *(uint32 *) (m + 252)
3574# asm 1: xorl 252(<m=int64#2),<in15=int64#6d
3575# asm 2: xorl 252(<m=%rsi),<in15=%r9d
3576xorl 252(%rsi),%r9d
3577
3578# qhasm: *(uint32 *) (out + 240) = in12
3579# asm 1: movl <in12=int64#3d,240(<out=int64#1)
3580# asm 2: movl <in12=%edx,240(<out=%rdi)
3581movl %edx,240(%rdi)
3582
3583# qhasm: *(uint32 *) (out + 244) = in13
3584# asm 1: movl <in13=int64#4d,244(<out=int64#1)
3585# asm 2: movl <in13=%ecx,244(<out=%rdi)
3586movl %ecx,244(%rdi)
3587
3588# qhasm: *(uint32 *) (out + 248) = in14
3589# asm 1: movl <in14=int64#5d,248(<out=int64#1)
3590# asm 2: movl <in14=%r8d,248(<out=%rdi)
3591movl %r8d,248(%rdi)
3592
3593# qhasm: *(uint32 *) (out + 252) = in15
3594# asm 1: movl <in15=int64#6d,252(<out=int64#1)
3595# asm 2: movl <in15=%r9d,252(<out=%rdi)
3596movl %r9d,252(%rdi)
3597
3598# qhasm: bytes = bytes_backup
3599# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
3600# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
3601movq 408(%rsp),%r9
3602
3603# qhasm: bytes -= 256
3604# asm 1: sub $256,<bytes=int64#6
3605# asm 2: sub $256,<bytes=%r9
3606sub $256,%r9
3607
3608# qhasm: m += 256
3609# asm 1: add $256,<m=int64#2
3610# asm 2: add $256,<m=%rsi
3611add $256,%rsi
3612
3613# qhasm: out += 256
3614# asm 1: add $256,<out=int64#1
3615# asm 2: add $256,<out=%rdi
3616add $256,%rdi
3617
3618# qhasm: unsigned<? bytes - 256
3619# asm 1: cmp $256,<bytes=int64#6
3620# asm 2: cmp $256,<bytes=%r9
3621cmp $256,%r9
3622# comment:fp stack unchanged by jump
3623
3624# qhasm: goto bytesatleast256 if !unsigned<
3625jae ._bytesatleast256
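# note: pointers were advanced by 256; the four-block path is retaken while at least
# 256 bytes remain, otherwise any remaining 1..255 bytes fall through to the single-block code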
3626
3627# qhasm: unsigned>? bytes - 0
3628# asm 1: cmp $0,<bytes=int64#6
3629# asm 2: cmp $0,<bytes=%r9
3630cmp $0,%r9
3631# comment:fp stack unchanged by jump
3632
3633# qhasm: goto done if !unsigned>
3634jbe ._done
3635# comment:fp stack unchanged by fallthrough
3636
3637# qhasm: bytesbetween1and255:
3638._bytesbetween1and255:
3639
3640# qhasm: unsigned<? bytes - 64
3641# asm 1: cmp $64,<bytes=int64#6
3642# asm 2: cmp $64,<bytes=%r9
3643cmp $64,%r9
3644# comment:fp stack unchanged by jump
3645
3646# qhasm: goto nocopy if !unsigned<
3647jae ._nocopy
3648
3649# qhasm: ctarget = out
3650# asm 1: mov <out=int64#1,>ctarget=int64#3
3651# asm 2: mov <out=%rdi,>ctarget=%rdx
3652mov %rdi,%rdx
3653
3654# qhasm: out = &tmp
3655# asm 1: leaq <tmp=stack512#1,>out=int64#1
3656# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3657leaq 416(%rsp),%rdi
3658
3659# qhasm: i = bytes
3660# asm 1: mov <bytes=int64#6,>i=int64#4
3661# asm 2: mov <bytes=%r9,>i=%rcx
3662mov %r9,%rcx
3663
3664# qhasm: while (i) { *out++ = *m++; --i }
3665rep movsb
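# note: fewer than 64 bytes remain, so they are staged into the 64-byte tmp area on the
# stack; out and m are then both pointed at tmp so a full block can be read and written,
# and only the valid bytes are copied back to the saved pointer (ctarget) afterwards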
3666
3667# qhasm: out = &tmp
3668# asm 1: leaq <tmp=stack512#1,>out=int64#1
3669# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3670leaq 416(%rsp),%rdi
3671
3672# qhasm: m = &tmp
3673# asm 1: leaq <tmp=stack512#1,>m=int64#2
3674# asm 2: leaq <tmp=416(%rsp),>m=%rsi
3675leaq 416(%rsp),%rsi
3676# comment:fp stack unchanged by fallthrough
3677
3678# qhasm: nocopy:
3679._nocopy:
3680
3681# qhasm: bytes_backup = bytes
3682# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
3683# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
3684movq %r9,408(%rsp)
3685
3686# qhasm: diag0 = x0
3687# asm 1: movdqa <x0=stack128#4,>diag0=int6464#1
3688# asm 2: movdqa <x0=48(%rsp),>diag0=%xmm0
3689movdqa 48(%rsp),%xmm0
3690
3691# qhasm: diag1 = x1
3692# asm 1: movdqa <x1=stack128#1,>diag1=int6464#2
3693# asm 2: movdqa <x1=0(%rsp),>diag1=%xmm1
3694movdqa 0(%rsp),%xmm1
3695
3696# qhasm: diag2 = x2
3697# asm 1: movdqa <x2=stack128#2,>diag2=int6464#3
3698# asm 2: movdqa <x2=16(%rsp),>diag2=%xmm2
3699movdqa 16(%rsp),%xmm2
3700
3701# qhasm: diag3 = x3
3702# asm 1: movdqa <x3=stack128#3,>diag3=int6464#4
3703# asm 2: movdqa <x3=32(%rsp),>diag3=%xmm3
3704movdqa 32(%rsp),%xmm3
3705
3706# qhasm: a0 = diag1
3707# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3708# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3709movdqa %xmm1,%xmm4
3710
3711# qhasm: i = 12
3712# asm 1: mov $12,>i=int64#4
3713# asm 2: mov $12,>i=%rcx
3714mov $12,%rcx
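# note: i counts remaining rounds; each pass of ._mainloop2 below performs 4 Salsa20
# rounds and subtracts 4, so starting at 12 yields the 12 rounds of Salsa20/12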
3715
3716# qhasm: mainloop2:
3717._mainloop2:
3718
3719# qhasm: uint32323232 a0 += diag0
3720# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
3721# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
3722paddd %xmm0,%xmm4
3723
3724# qhasm: a1 = diag0
3725# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
3726# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
3727movdqa %xmm0,%xmm5
3728
3729# qhasm: b0 = a0
3730# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
3731# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
3732movdqa %xmm4,%xmm6
3733
3734# qhasm: uint32323232 a0 <<= 7
3735# asm 1: pslld $7,<a0=int6464#5
3736# asm 2: pslld $7,<a0=%xmm4
3737pslld $7,%xmm4
3738
3739# qhasm: uint32323232 b0 >>= 25
3740# asm 1: psrld $25,<b0=int6464#7
3741# asm 2: psrld $25,<b0=%xmm6
3742psrld $25,%xmm6
3743
3744# qhasm: diag3 ^= a0
3745# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
3746# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
3747pxor %xmm4,%xmm3
3748
3749# qhasm: diag3 ^= b0
3750# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
3751# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
3752pxor %xmm6,%xmm3
3753
3754# qhasm: uint32323232 a1 += diag3
3755# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
3756# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
3757paddd %xmm3,%xmm5
3758
3759# qhasm: a2 = diag3
3760# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
3761# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
3762movdqa %xmm3,%xmm4
3763
3764# qhasm: b1 = a1
3765# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
3766# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
3767movdqa %xmm5,%xmm6
3768
3769# qhasm: uint32323232 a1 <<= 9
3770# asm 1: pslld $9,<a1=int6464#6
3771# asm 2: pslld $9,<a1=%xmm5
3772pslld $9,%xmm5
3773
3774# qhasm: uint32323232 b1 >>= 23
3775# asm 1: psrld $23,<b1=int6464#7
3776# asm 2: psrld $23,<b1=%xmm6
3777psrld $23,%xmm6
3778
3779# qhasm: diag2 ^= a1
3780# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
3781# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
3782pxor %xmm5,%xmm2
3783
3784# qhasm: diag3 <<<= 32
3785# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
3786# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
3787pshufd $0x93,%xmm3,%xmm3
3788
3789# qhasm: diag2 ^= b1
3790# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
3791# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
3792pxor %xmm6,%xmm2
3793
3794# qhasm: uint32323232 a2 += diag2
3795# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
3796# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
3797paddd %xmm2,%xmm4
3798
3799# qhasm: a3 = diag2
3800# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
3801# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
3802movdqa %xmm2,%xmm5
3803
3804# qhasm: b2 = a2
3805# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
3806# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
3807movdqa %xmm4,%xmm6
3808
3809# qhasm: uint32323232 a2 <<= 13
3810# asm 1: pslld $13,<a2=int6464#5
3811# asm 2: pslld $13,<a2=%xmm4
3812pslld $13,%xmm4
3813
3814# qhasm: uint32323232 b2 >>= 19
3815# asm 1: psrld $19,<b2=int6464#7
3816# asm 2: psrld $19,<b2=%xmm6
3817psrld $19,%xmm6
3818
3819# qhasm: diag1 ^= a2
3820# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
3821# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
3822pxor %xmm4,%xmm1
3823
3824# qhasm: diag2 <<<= 64
3825# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3826# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3827pshufd $0x4e,%xmm2,%xmm2
3828
3829# qhasm: diag1 ^= b2
3830# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
3831# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
3832pxor %xmm6,%xmm1
3833
3834# qhasm: uint32323232 a3 += diag1
3835# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
3836# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
3837paddd %xmm1,%xmm5
3838
3839# qhasm: a4 = diag3
3840# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
3841# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
3842movdqa %xmm3,%xmm4
3843
3844# qhasm: b3 = a3
3845# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
3846# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
3847movdqa %xmm5,%xmm6
3848
3849# qhasm: uint32323232 a3 <<= 18
3850# asm 1: pslld $18,<a3=int6464#6
3851# asm 2: pslld $18,<a3=%xmm5
3852pslld $18,%xmm5
3853
3854# qhasm: uint32323232 b3 >>= 14
3855# asm 1: psrld $14,<b3=int6464#7
3856# asm 2: psrld $14,<b3=%xmm6
3857psrld $14,%xmm6
3858
3859# qhasm: diag0 ^= a3
3860# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
3861# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
3862pxor %xmm5,%xmm0
3863
3864# qhasm: diag1 <<<= 96
3865# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
3866# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
3867pshufd $0x39,%xmm1,%xmm1
3868
3869# qhasm: diag0 ^= b3
3870# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
3871# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
3872pxor %xmm6,%xmm0
3873
3874# qhasm: uint32323232 a4 += diag0
3875# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
3876# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
3877paddd %xmm0,%xmm4
3878
3879# qhasm: a5 = diag0
3880# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
3881# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
3882movdqa %xmm0,%xmm5
3883
3884# qhasm: b4 = a4
3885# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
3886# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
3887movdqa %xmm4,%xmm6
3888
3889# qhasm: uint32323232 a4 <<= 7
3890# asm 1: pslld $7,<a4=int6464#5
3891# asm 2: pslld $7,<a4=%xmm4
3892pslld $7,%xmm4
3893
3894# qhasm: uint32323232 b4 >>= 25
3895# asm 1: psrld $25,<b4=int6464#7
3896# asm 2: psrld $25,<b4=%xmm6
3897psrld $25,%xmm6
3898
3899# qhasm: diag1 ^= a4
3900# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
3901# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
3902pxor %xmm4,%xmm1
3903
3904# qhasm: diag1 ^= b4
3905# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
3906# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
3907pxor %xmm6,%xmm1
3908
3909# qhasm: uint32323232 a5 += diag1
3910# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
3911# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
3912paddd %xmm1,%xmm5
3913
3914# qhasm: a6 = diag1
3915# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
3916# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
3917movdqa %xmm1,%xmm4
3918
3919# qhasm: b5 = a5
3920# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
3921# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
3922movdqa %xmm5,%xmm6
3923
3924# qhasm: uint32323232 a5 <<= 9
3925# asm 1: pslld $9,<a5=int6464#6
3926# asm 2: pslld $9,<a5=%xmm5
3927pslld $9,%xmm5
3928
3929# qhasm: uint32323232 b5 >>= 23
3930# asm 1: psrld $23,<b5=int6464#7
3931# asm 2: psrld $23,<b5=%xmm6
3932psrld $23,%xmm6
3933
3934# qhasm: diag2 ^= a5
3935# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
3936# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
3937pxor %xmm5,%xmm2
3938
3939# qhasm: diag1 <<<= 32
3940# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
3941# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
3942pshufd $0x93,%xmm1,%xmm1
3943
3944# qhasm: diag2 ^= b5
3945# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
3946# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
3947pxor %xmm6,%xmm2
3948
3949# qhasm: uint32323232 a6 += diag2
3950# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
3951# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
3952paddd %xmm2,%xmm4
3953
3954# qhasm: a7 = diag2
3955# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
3956# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
3957movdqa %xmm2,%xmm5
3958
3959# qhasm: b6 = a6
3960# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
3961# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
3962movdqa %xmm4,%xmm6
3963
3964# qhasm: uint32323232 a6 <<= 13
3965# asm 1: pslld $13,<a6=int6464#5
3966# asm 2: pslld $13,<a6=%xmm4
3967pslld $13,%xmm4
3968
3969# qhasm: uint32323232 b6 >>= 19
3970# asm 1: psrld $19,<b6=int6464#7
3971# asm 2: psrld $19,<b6=%xmm6
3972psrld $19,%xmm6
3973
3974# qhasm: diag3 ^= a6
3975# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
3976# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
3977pxor %xmm4,%xmm3
3978
3979# qhasm: diag2 <<<= 64
3980# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3981# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3982pshufd $0x4e,%xmm2,%xmm2
3983
3984# qhasm: diag3 ^= b6
3985# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
3986# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
3987pxor %xmm6,%xmm3
3988
3989# qhasm: uint32323232 a7 += diag3
3990# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
3991# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
3992paddd %xmm3,%xmm5
3993
3994# qhasm: a0 = diag1
3995# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3996# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3997movdqa %xmm1,%xmm4
3998
3999# qhasm: b7 = a7
4000# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4001# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4002movdqa %xmm5,%xmm6
4003
4004# qhasm: uint32323232 a7 <<= 18
4005# asm 1: pslld $18,<a7=int6464#6
4006# asm 2: pslld $18,<a7=%xmm5
4007pslld $18,%xmm5
4008
4009# qhasm: uint32323232 b7 >>= 14
4010# asm 1: psrld $14,<b7=int6464#7
4011# asm 2: psrld $14,<b7=%xmm6
4012psrld $14,%xmm6
4013
4014# qhasm: diag0 ^= a7
4015# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4016# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4017pxor %xmm5,%xmm0
4018
4019# qhasm: diag3 <<<= 96
4020# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4021# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4022pshufd $0x39,%xmm3,%xmm3
4023
4024# qhasm: diag0 ^= b7
4025# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4026# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4027pxor %xmm6,%xmm0
4028
4029# qhasm: uint32323232 a0 += diag0
4030# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4031# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4032paddd %xmm0,%xmm4
4033
4034# qhasm: a1 = diag0
4035# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4036# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4037movdqa %xmm0,%xmm5
4038
4039# qhasm: b0 = a0
4040# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4041# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4042movdqa %xmm4,%xmm6
4043
4044# qhasm: uint32323232 a0 <<= 7
4045# asm 1: pslld $7,<a0=int6464#5
4046# asm 2: pslld $7,<a0=%xmm4
4047pslld $7,%xmm4
4048
4049# qhasm: uint32323232 b0 >>= 25
4050# asm 1: psrld $25,<b0=int6464#7
4051# asm 2: psrld $25,<b0=%xmm6
4052psrld $25,%xmm6
4053
4054# qhasm: diag3 ^= a0
4055# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4056# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4057pxor %xmm4,%xmm3
4058
4059# qhasm: diag3 ^= b0
4060# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4061# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4062pxor %xmm6,%xmm3
4063
4064# qhasm: uint32323232 a1 += diag3
4065# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4066# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4067paddd %xmm3,%xmm5
4068
4069# qhasm: a2 = diag3
4070# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4071# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4072movdqa %xmm3,%xmm4
4073
4074# qhasm: b1 = a1
4075# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4076# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4077movdqa %xmm5,%xmm6
4078
4079# qhasm: uint32323232 a1 <<= 9
4080# asm 1: pslld $9,<a1=int6464#6
4081# asm 2: pslld $9,<a1=%xmm5
4082pslld $9,%xmm5
4083
4084# qhasm: uint32323232 b1 >>= 23
4085# asm 1: psrld $23,<b1=int6464#7
4086# asm 2: psrld $23,<b1=%xmm6
4087psrld $23,%xmm6
4088
4089# qhasm: diag2 ^= a1
4090# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4091# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4092pxor %xmm5,%xmm2
4093
4094# qhasm: diag3 <<<= 32
4095# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4096# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4097pshufd $0x93,%xmm3,%xmm3
4098
4099# qhasm: diag2 ^= b1
4100# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4101# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4102pxor %xmm6,%xmm2
4103
4104# qhasm: uint32323232 a2 += diag2
4105# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4106# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4107paddd %xmm2,%xmm4
4108
4109# qhasm: a3 = diag2
4110# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4111# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4112movdqa %xmm2,%xmm5
4113
4114# qhasm: b2 = a2
4115# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4116# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4117movdqa %xmm4,%xmm6
4118
4119# qhasm: uint32323232 a2 <<= 13
4120# asm 1: pslld $13,<a2=int6464#5
4121# asm 2: pslld $13,<a2=%xmm4
4122pslld $13,%xmm4
4123
4124# qhasm: uint32323232 b2 >>= 19
4125# asm 1: psrld $19,<b2=int6464#7
4126# asm 2: psrld $19,<b2=%xmm6
4127psrld $19,%xmm6
4128
4129# qhasm: diag1 ^= a2
4130# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4131# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4132pxor %xmm4,%xmm1
4133
4134# qhasm: diag2 <<<= 64
4135# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4136# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4137pshufd $0x4e,%xmm2,%xmm2
4138
4139# qhasm: diag1 ^= b2
4140# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4141# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4142pxor %xmm6,%xmm1
4143
4144# qhasm: uint32323232 a3 += diag1
4145# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4146# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4147paddd %xmm1,%xmm5
4148
4149# qhasm: a4 = diag3
4150# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4151# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4152movdqa %xmm3,%xmm4
4153
4154# qhasm: b3 = a3
4155# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4156# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4157movdqa %xmm5,%xmm6
4158
4159# qhasm: uint32323232 a3 <<= 18
4160# asm 1: pslld $18,<a3=int6464#6
4161# asm 2: pslld $18,<a3=%xmm5
4162pslld $18,%xmm5
4163
4164# qhasm: uint32323232 b3 >>= 14
4165# asm 1: psrld $14,<b3=int6464#7
4166# asm 2: psrld $14,<b3=%xmm6
4167psrld $14,%xmm6
4168
4169# qhasm: diag0 ^= a3
4170# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4171# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4172pxor %xmm5,%xmm0
4173
4174# qhasm: diag1 <<<= 96
4175# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4176# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4177pshufd $0x39,%xmm1,%xmm1
4178
4179# qhasm: diag0 ^= b3
4180# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4181# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4182pxor %xmm6,%xmm0
4183
4184# qhasm: uint32323232 a4 += diag0
4185# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4186# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4187paddd %xmm0,%xmm4
4188
4189# qhasm: a5 = diag0
4190# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4191# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4192movdqa %xmm0,%xmm5
4193
4194# qhasm: b4 = a4
4195# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4196# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4197movdqa %xmm4,%xmm6
4198
4199# qhasm: uint32323232 a4 <<= 7
4200# asm 1: pslld $7,<a4=int6464#5
4201# asm 2: pslld $7,<a4=%xmm4
4202pslld $7,%xmm4
4203
4204# qhasm: uint32323232 b4 >>= 25
4205# asm 1: psrld $25,<b4=int6464#7
4206# asm 2: psrld $25,<b4=%xmm6
4207psrld $25,%xmm6
4208
4209# qhasm: diag1 ^= a4
4210# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4211# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4212pxor %xmm4,%xmm1
4213
4214# qhasm: diag1 ^= b4
4215# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4216# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4217pxor %xmm6,%xmm1
4218
4219# qhasm: uint32323232 a5 += diag1
4220# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4221# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4222paddd %xmm1,%xmm5
4223
4224# qhasm: a6 = diag1
4225# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4226# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4227movdqa %xmm1,%xmm4
4228
4229# qhasm: b5 = a5
4230# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4231# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4232movdqa %xmm5,%xmm6
4233
4234# qhasm: uint32323232 a5 <<= 9
4235# asm 1: pslld $9,<a5=int6464#6
4236# asm 2: pslld $9,<a5=%xmm5
4237pslld $9,%xmm5
4238
4239# qhasm: uint32323232 b5 >>= 23
4240# asm 1: psrld $23,<b5=int6464#7
4241# asm 2: psrld $23,<b5=%xmm6
4242psrld $23,%xmm6
4243
4244# qhasm: diag2 ^= a5
4245# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4246# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4247pxor %xmm5,%xmm2
4248
4249# qhasm: diag1 <<<= 32
4250# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4251# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4252pshufd $0x93,%xmm1,%xmm1
4253
4254# qhasm: diag2 ^= b5
4255# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4256# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4257pxor %xmm6,%xmm2
4258
4259# qhasm: uint32323232 a6 += diag2
4260# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4261# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4262paddd %xmm2,%xmm4
4263
4264# qhasm: a7 = diag2
4265# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4266# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4267movdqa %xmm2,%xmm5
4268
4269# qhasm: b6 = a6
4270# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4271# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4272movdqa %xmm4,%xmm6
4273
4274# qhasm: uint32323232 a6 <<= 13
4275# asm 1: pslld $13,<a6=int6464#5
4276# asm 2: pslld $13,<a6=%xmm4
4277pslld $13,%xmm4
4278
4279# qhasm: uint32323232 b6 >>= 19
4280# asm 1: psrld $19,<b6=int6464#7
4281# asm 2: psrld $19,<b6=%xmm6
4282psrld $19,%xmm6
4283
4284# qhasm: diag3 ^= a6
4285# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4286# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4287pxor %xmm4,%xmm3
4288
4289# qhasm: diag2 <<<= 64
4290# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4291# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4292pshufd $0x4e,%xmm2,%xmm2
4293
4294# qhasm: diag3 ^= b6
4295# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4296# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4297pxor %xmm6,%xmm3
4298
4299# qhasm: unsigned>? i -= 4
4300# asm 1: sub $4,<i=int64#4
4301# asm 2: sub $4,<i=%rcx
4302sub $4,%rcx
4303
4304# qhasm: uint32323232 a7 += diag3
4305# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4306# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4307paddd %xmm3,%xmm5
4308
4309# qhasm: a0 = diag1
4310# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4311# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4312movdqa %xmm1,%xmm4
4313
4314# qhasm: b7 = a7
4315# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4316# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4317movdqa %xmm5,%xmm6
4318
4319# qhasm: uint32323232 a7 <<= 18
4320# asm 1: pslld $18,<a7=int6464#6
4321# asm 2: pslld $18,<a7=%xmm5
4322pslld $18,%xmm5
4323
4324# qhasm: b0 = 0
4325# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4326# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4327pxor %xmm7,%xmm7
4328
4329# qhasm: uint32323232 b7 >>= 14
4330# asm 1: psrld $14,<b7=int6464#7
4331# asm 2: psrld $14,<b7=%xmm6
4332psrld $14,%xmm6
4333
4334# qhasm: diag0 ^= a7
4335# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4336# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4337pxor %xmm5,%xmm0
4338
4339# qhasm: diag3 <<<= 96
4340# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4341# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4342pshufd $0x39,%xmm3,%xmm3
4343
4344# qhasm: diag0 ^= b7
4345# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4346# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4347pxor %xmm6,%xmm0
4348# comment:fp stack unchanged by jump
4349
4350# qhasm: goto mainloop2 if unsigned>
4351ja ._mainloop2
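# note: all 12 rounds are complete; the code below adds the saved input words x0..x3
# back into the working state and XORs the resulting 64-byte block with the message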
4352
4353# qhasm: uint32323232 diag0 += x0
4354# asm 1: paddd <x0=stack128#4,<diag0=int6464#1
4355# asm 2: paddd <x0=48(%rsp),<diag0=%xmm0
4356paddd 48(%rsp),%xmm0
4357
4358# qhasm: uint32323232 diag1 += x1
4359# asm 1: paddd <x1=stack128#1,<diag1=int6464#2
4360# asm 2: paddd <x1=0(%rsp),<diag1=%xmm1
4361paddd 0(%rsp),%xmm1
4362
4363# qhasm: uint32323232 diag2 += x2
4364# asm 1: paddd <x2=stack128#2,<diag2=int6464#3
4365# asm 2: paddd <x2=16(%rsp),<diag2=%xmm2
4366paddd 16(%rsp),%xmm2
4367
4368# qhasm: uint32323232 diag3 += x3
4369# asm 1: paddd <x3=stack128#3,<diag3=int6464#4
4370# asm 2: paddd <x3=32(%rsp),<diag3=%xmm3
4371paddd 32(%rsp),%xmm3
4372
4373# qhasm: in0 = diag0
4374# asm 1: movd <diag0=int6464#1,>in0=int64#4
4375# asm 2: movd <diag0=%xmm0,>in0=%rcx
4376movd %xmm0,%rcx
4377
4378# qhasm: in12 = diag1
4379# asm 1: movd <diag1=int6464#2,>in12=int64#5
4380# asm 2: movd <diag1=%xmm1,>in12=%r8
4381movd %xmm1,%r8
4382
4383# qhasm: in8 = diag2
4384# asm 1: movd <diag2=int6464#3,>in8=int64#6
4385# asm 2: movd <diag2=%xmm2,>in8=%r9
4386movd %xmm2,%r9
4387
4388# qhasm: in4 = diag3
4389# asm 1: movd <diag3=int6464#4,>in4=int64#7
4390# asm 2: movd <diag3=%xmm3,>in4=%rax
4391movd %xmm3,%rax
4392
4393# qhasm: diag0 <<<= 96
4394# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4395# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4396pshufd $0x39,%xmm0,%xmm0
4397
4398# qhasm: diag1 <<<= 96
4399# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4400# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4401pshufd $0x39,%xmm1,%xmm1
4402
4403# qhasm: diag2 <<<= 96
4404# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4405# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4406pshufd $0x39,%xmm2,%xmm2
4407
4408# qhasm: diag3 <<<= 96
4409# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4410# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4411pshufd $0x39,%xmm3,%xmm3
4412
4413# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
4414# asm 1: xorl 0(<m=int64#2),<in0=int64#4d
4415# asm 2: xorl 0(<m=%rsi),<in0=%ecx
4416xorl 0(%rsi),%ecx
4417
4418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
4419# asm 1: xorl 48(<m=int64#2),<in12=int64#5d
4420# asm 2: xorl 48(<m=%rsi),<in12=%r8d
4421xorl 48(%rsi),%r8d
4422
4423# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
4424# asm 1: xorl 32(<m=int64#2),<in8=int64#6d
4425# asm 2: xorl 32(<m=%rsi),<in8=%r9d
4426xorl 32(%rsi),%r9d
4427
4428# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
4429# asm 1: xorl 16(<m=int64#2),<in4=int64#7d
4430# asm 2: xorl 16(<m=%rsi),<in4=%eax
4431xorl 16(%rsi),%eax
4432
4433# qhasm: *(uint32 *) (out + 0) = in0
4434# asm 1: movl <in0=int64#4d,0(<out=int64#1)
4435# asm 2: movl <in0=%ecx,0(<out=%rdi)
4436movl %ecx,0(%rdi)
4437
4438# qhasm: *(uint32 *) (out + 48) = in12
4439# asm 1: movl <in12=int64#5d,48(<out=int64#1)
4440# asm 2: movl <in12=%r8d,48(<out=%rdi)
4441movl %r8d,48(%rdi)
4442
4443# qhasm: *(uint32 *) (out + 32) = in8
4444# asm 1: movl <in8=int64#6d,32(<out=int64#1)
4445# asm 2: movl <in8=%r9d,32(<out=%rdi)
4446movl %r9d,32(%rdi)
4447
4448# qhasm: *(uint32 *) (out + 16) = in4
4449# asm 1: movl <in4=int64#7d,16(<out=int64#1)
4450# asm 2: movl <in4=%eax,16(<out=%rdi)
4451movl %eax,16(%rdi)
4452
4453# qhasm: in5 = diag0
4454# asm 1: movd <diag0=int6464#1,>in5=int64#4
4455# asm 2: movd <diag0=%xmm0,>in5=%rcx
4456movd %xmm0,%rcx
4457
4458# qhasm: in1 = diag1
4459# asm 1: movd <diag1=int6464#2,>in1=int64#5
4460# asm 2: movd <diag1=%xmm1,>in1=%r8
4461movd %xmm1,%r8
4462
4463# qhasm: in13 = diag2
4464# asm 1: movd <diag2=int6464#3,>in13=int64#6
4465# asm 2: movd <diag2=%xmm2,>in13=%r9
4466movd %xmm2,%r9
4467
4468# qhasm: in9 = diag3
4469# asm 1: movd <diag3=int6464#4,>in9=int64#7
4470# asm 2: movd <diag3=%xmm3,>in9=%rax
4471movd %xmm3,%rax
4472
4473# qhasm: diag0 <<<= 96
4474# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4475# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4476pshufd $0x39,%xmm0,%xmm0
4477
4478# qhasm: diag1 <<<= 96
4479# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4480# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4481pshufd $0x39,%xmm1,%xmm1
4482
4483# qhasm: diag2 <<<= 96
4484# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4485# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4486pshufd $0x39,%xmm2,%xmm2
4487
4488# qhasm: diag3 <<<= 96
4489# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4490# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4491pshufd $0x39,%xmm3,%xmm3
4492
4493# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
4494# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
4495# asm 2: xorl 20(<m=%rsi),<in5=%ecx
4496xorl 20(%rsi),%ecx
4497
4498# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
4499# asm 1: xorl 4(<m=int64#2),<in1=int64#5d
4500# asm 2: xorl 4(<m=%rsi),<in1=%r8d
4501xorl 4(%rsi),%r8d
4502
4503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
4504# asm 1: xorl 52(<m=int64#2),<in13=int64#6d
4505# asm 2: xorl 52(<m=%rsi),<in13=%r9d
4506xorl 52(%rsi),%r9d
4507
4508# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
4509# asm 1: xorl 36(<m=int64#2),<in9=int64#7d
4510# asm 2: xorl 36(<m=%rsi),<in9=%eax
4511xorl 36(%rsi),%eax
4512
4513# qhasm: *(uint32 *) (out + 20) = in5
4514# asm 1: movl <in5=int64#4d,20(<out=int64#1)
4515# asm 2: movl <in5=%ecx,20(<out=%rdi)
4516movl %ecx,20(%rdi)
4517
4518# qhasm: *(uint32 *) (out + 4) = in1
4519# asm 1: movl <in1=int64#5d,4(<out=int64#1)
4520# asm 2: movl <in1=%r8d,4(<out=%rdi)
4521movl %r8d,4(%rdi)
4522
4523# qhasm: *(uint32 *) (out + 52) = in13
4524# asm 1: movl <in13=int64#6d,52(<out=int64#1)
4525# asm 2: movl <in13=%r9d,52(<out=%rdi)
4526movl %r9d,52(%rdi)
4527
4528# qhasm: *(uint32 *) (out + 36) = in9
4529# asm 1: movl <in9=int64#7d,36(<out=int64#1)
4530# asm 2: movl <in9=%eax,36(<out=%rdi)
4531movl %eax,36(%rdi)
4532
4533# qhasm: in10 = diag0
4534# asm 1: movd <diag0=int6464#1,>in10=int64#4
4535# asm 2: movd <diag0=%xmm0,>in10=%rcx
4536movd %xmm0,%rcx
4537
4538# qhasm: in6 = diag1
4539# asm 1: movd <diag1=int6464#2,>in6=int64#5
4540# asm 2: movd <diag1=%xmm1,>in6=%r8
4541movd %xmm1,%r8
4542
4543# qhasm: in2 = diag2
4544# asm 1: movd <diag2=int6464#3,>in2=int64#6
4545# asm 2: movd <diag2=%xmm2,>in2=%r9
4546movd %xmm2,%r9
4547
4548# qhasm: in14 = diag3
4549# asm 1: movd <diag3=int6464#4,>in14=int64#7
4550# asm 2: movd <diag3=%xmm3,>in14=%rax
4551movd %xmm3,%rax
4552
4553# qhasm: diag0 <<<= 96
4554# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4555# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4556pshufd $0x39,%xmm0,%xmm0
4557
4558# qhasm: diag1 <<<= 96
4559# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4560# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4561pshufd $0x39,%xmm1,%xmm1
4562
4563# qhasm: diag2 <<<= 96
4564# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4565# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4566pshufd $0x39,%xmm2,%xmm2
4567
4568# qhasm: diag3 <<<= 96
4569# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4570# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4571pshufd $0x39,%xmm3,%xmm3
4572
4573# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
4574# asm 1: xorl 40(<m=int64#2),<in10=int64#4d
4575# asm 2: xorl 40(<m=%rsi),<in10=%ecx
4576xorl 40(%rsi),%ecx
4577
4578# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
4579# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
4580# asm 2: xorl 24(<m=%rsi),<in6=%r8d
4581xorl 24(%rsi),%r8d
4582
4583# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
4584# asm 1: xorl 8(<m=int64#2),<in2=int64#6d
4585# asm 2: xorl 8(<m=%rsi),<in2=%r9d
4586xorl 8(%rsi),%r9d
4587
4588# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
4589# asm 1: xorl 56(<m=int64#2),<in14=int64#7d
4590# asm 2: xorl 56(<m=%rsi),<in14=%eax
4591xorl 56(%rsi),%eax
4592
4593# qhasm: *(uint32 *) (out + 40) = in10
4594# asm 1: movl <in10=int64#4d,40(<out=int64#1)
4595# asm 2: movl <in10=%ecx,40(<out=%rdi)
4596movl %ecx,40(%rdi)
4597
4598# qhasm: *(uint32 *) (out + 24) = in6
4599# asm 1: movl <in6=int64#5d,24(<out=int64#1)
4600# asm 2: movl <in6=%r8d,24(<out=%rdi)
4601movl %r8d,24(%rdi)
4602
4603# qhasm: *(uint32 *) (out + 8) = in2
4604# asm 1: movl <in2=int64#6d,8(<out=int64#1)
4605# asm 2: movl <in2=%r9d,8(<out=%rdi)
4606movl %r9d,8(%rdi)
4607
4608# qhasm: *(uint32 *) (out + 56) = in14
4609# asm 1: movl <in14=int64#7d,56(<out=int64#1)
4610# asm 2: movl <in14=%eax,56(<out=%rdi)
4611movl %eax,56(%rdi)
4612
4613# qhasm: in15 = diag0
4614# asm 1: movd <diag0=int6464#1,>in15=int64#4
4615# asm 2: movd <diag0=%xmm0,>in15=%rcx
4616movd %xmm0,%rcx
4617
4618# qhasm: in11 = diag1
4619# asm 1: movd <diag1=int6464#2,>in11=int64#5
4620# asm 2: movd <diag1=%xmm1,>in11=%r8
4621movd %xmm1,%r8
4622
4623# qhasm: in7 = diag2
4624# asm 1: movd <diag2=int6464#3,>in7=int64#6
4625# asm 2: movd <diag2=%xmm2,>in7=%r9
4626movd %xmm2,%r9
4627
4628# qhasm: in3 = diag3
4629# asm 1: movd <diag3=int6464#4,>in3=int64#7
4630# asm 2: movd <diag3=%xmm3,>in3=%rax
4631movd %xmm3,%rax
4632
4633# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
4634# asm 1: xorl 60(<m=int64#2),<in15=int64#4d
4635# asm 2: xorl 60(<m=%rsi),<in15=%ecx
4636xorl 60(%rsi),%ecx
4637
4638# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
4639# asm 1: xorl 44(<m=int64#2),<in11=int64#5d
4640# asm 2: xorl 44(<m=%rsi),<in11=%r8d
4641xorl 44(%rsi),%r8d
4642
4643# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
4644# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
4645# asm 2: xorl 28(<m=%rsi),<in7=%r9d
4646xorl 28(%rsi),%r9d
4647
4648# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
4649# asm 1: xorl 12(<m=int64#2),<in3=int64#7d
4650# asm 2: xorl 12(<m=%rsi),<in3=%eax
4651xorl 12(%rsi),%eax
4652
4653# qhasm: *(uint32 *) (out + 60) = in15
4654# asm 1: movl <in15=int64#4d,60(<out=int64#1)
4655# asm 2: movl <in15=%ecx,60(<out=%rdi)
4656movl %ecx,60(%rdi)
4657
4658# qhasm: *(uint32 *) (out + 44) = in11
4659# asm 1: movl <in11=int64#5d,44(<out=int64#1)
4660# asm 2: movl <in11=%r8d,44(<out=%rdi)
4661movl %r8d,44(%rdi)
4662
4663# qhasm: *(uint32 *) (out + 28) = in7
4664# asm 1: movl <in7=int64#6d,28(<out=int64#1)
4665# asm 2: movl <in7=%r9d,28(<out=%rdi)
4666movl %r9d,28(%rdi)
4667
4668# qhasm: *(uint32 *) (out + 12) = in3
4669# asm 1: movl <in3=int64#7d,12(<out=int64#1)
4670# asm 2: movl <in3=%eax,12(<out=%rdi)
4671movl %eax,12(%rdi)
4672
4673# qhasm: bytes = bytes_backup
4674# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
4675# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
4676movq 408(%rsp),%r9
4677
4678# qhasm: in8 = ((uint32 *)&x2)[0]
4679# asm 1: movl <x2=stack128#2,>in8=int64#4d
4680# asm 2: movl <x2=16(%rsp),>in8=%ecx
4681movl 16(%rsp),%ecx
4682
4683# qhasm: in9 = ((uint32 *)&x3)[1]
4684# asm 1: movl 4+<x3=stack128#3,>in9=int64#5d
4685# asm 2: movl 4+<x3=32(%rsp),>in9=%r8d
4686movl 4+32(%rsp),%r8d
4687
4688# qhasm: in8 += 1
4689# asm 1: add $1,<in8=int64#4
4690# asm 2: add $1,<in8=%rcx
4691add $1,%rcx
4692
4693# qhasm: in9 <<= 32
4694# asm 1: shl $32,<in9=int64#5
4695# asm 2: shl $32,<in9=%r8
4696shl $32,%r8
4697
4698# qhasm: in8 += in9
4699# asm 1: add <in9=int64#5,<in8=int64#4
4700# asm 2: add <in9=%r8,<in8=%rcx
4701add %r8,%rcx
4702
4703# qhasm: in9 = in8
4704# asm 1: mov <in8=int64#4,>in9=int64#5
4705# asm 2: mov <in8=%rcx,>in9=%r8
4706mov %rcx,%r8
4707
4708# qhasm: (uint64) in9 >>= 32
4709# asm 1: shr $32,<in9=int64#5
4710# asm 2: shr $32,<in9=%r8
4711shr $32,%r8
4712
4713# qhasm: ((uint32 *)&x2)[0] = in8
4714# asm 1: movl <in8=int64#4d,>x2=stack128#2
4715# asm 2: movl <in8=%ecx,>x2=16(%rsp)
4716movl %ecx,16(%rsp)
4717
4718# qhasm: ((uint32 *)&x3)[1] = in9
4719# asm 1: movl <in9=int64#5d,4+<x3=stack128#3
4720# asm 2: movl <in9=%r8d,4+<x3=32(%rsp)
4721movl %r8d,4+32(%rsp)
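# note: the 64-bit block counter is kept split across x2[0] (low 32 bits) and x3[1]
# (high 32 bits); the sequence above reassembles it in a 64-bit register, adds 1,
# and stores the halves back for the next block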
4722
4723# qhasm: unsigned>? unsigned<? bytes - 64
4724# asm 1: cmp $64,<bytes=int64#6
4725# asm 2: cmp $64,<bytes=%r9
4726cmp $64,%r9
4727# comment:fp stack unchanged by jump
4728
4729# qhasm: goto bytesatleast65 if unsigned>
4730ja ._bytesatleast65
4731# comment:fp stack unchanged by jump
4732
4733# qhasm: goto bytesatleast64 if !unsigned<
4734jae ._bytesatleast64
4735
4736# qhasm: m = out
4737# asm 1: mov <out=int64#1,>m=int64#2
4738# asm 2: mov <out=%rdi,>m=%rsi
4739mov %rdi,%rsi
4740
4741# qhasm: out = ctarget
4742# asm 1: mov <ctarget=int64#3,>out=int64#1
4743# asm 2: mov <ctarget=%rdx,>out=%rdi
4744mov %rdx,%rdi
4745
4746# qhasm: i = bytes
4747# asm 1: mov <bytes=int64#6,>i=int64#4
4748# asm 2: mov <bytes=%r9,>i=%rcx
4749mov %r9,%rcx
4750
4751# qhasm: while (i) { *out++ = *m++; --i }
4752rep movsb
4753# comment:fp stack unchanged by fallthrough
4754
4755# qhasm: bytesatleast64:
4756._bytesatleast64:
4757# comment:fp stack unchanged by fallthrough
4758
4759# qhasm: done:
4760._done:
4761
4762# qhasm: r11_caller = r11_stack
4763# asm 1: movq <r11_stack=stack64#1,>r11_caller=int64#9
4764# asm 2: movq <r11_stack=352(%rsp),>r11_caller=%r11
4765movq 352(%rsp),%r11
4766
4767# qhasm: r12_caller = r12_stack
4768# asm 1: movq <r12_stack=stack64#2,>r12_caller=int64#10
4769# asm 2: movq <r12_stack=360(%rsp),>r12_caller=%r12
4770movq 360(%rsp),%r12
4771
4772# qhasm: r13_caller = r13_stack
4773# asm 1: movq <r13_stack=stack64#3,>r13_caller=int64#11
4774# asm 2: movq <r13_stack=368(%rsp),>r13_caller=%r13
4775movq 368(%rsp),%r13
4776
4777# qhasm: r14_caller = r14_stack
4778# asm 1: movq <r14_stack=stack64#4,>r14_caller=int64#12
4779# asm 2: movq <r14_stack=376(%rsp),>r14_caller=%r14
4780movq 376(%rsp),%r14
4781
4782# qhasm: r15_caller = r15_stack
4783# asm 1: movq <r15_stack=stack64#5,>r15_caller=int64#13
4784# asm 2: movq <r15_stack=384(%rsp),>r15_caller=%r15
4785movq 384(%rsp),%r15
4786
4787# qhasm: rbx_caller = rbx_stack
4788# asm 1: movq <rbx_stack=stack64#6,>rbx_caller=int64#14
4789# asm 2: movq <rbx_stack=392(%rsp),>rbx_caller=%rbx
4790movq 392(%rsp),%rbx
4791
4792# qhasm: rbp_caller = rbp_stack
4793# asm 1: movq <rbp_stack=stack64#7,>rbp_caller=int64#15
4794# asm 2: movq <rbp_stack=400(%rsp),>rbp_caller=%rbp
4795movq 400(%rsp),%rbp
4796
4797# qhasm: leave
4798add %r11,%rsp
4799xor %rax,%rax
4800xor %rdx,%rdx
4801ret
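# note: epilogue: the callee-saved registers were reloaded above, the stack adjustment
# saved at entry is added back to rsp, and the function returns 0 with rax/rdx cleared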
4802
4803# qhasm: bytesatleast65:
4804._bytesatleast65:
4805
4806# qhasm: bytes -= 64
4807# asm 1: sub $64,<bytes=int64#6
4808# asm 2: sub $64,<bytes=%r9
4809sub $64,%r9
4810
4811# qhasm: out += 64
4812# asm 1: add $64,<out=int64#1
4813# asm 2: add $64,<out=%rdi
4814add $64,%rdi
4815
4816# qhasm: m += 64
4817# asm 1: add $64,<m=int64#2
4818# asm 2: add $64,<m=%rsi
4819add $64,%rsi
4820# comment:fp stack unchanged by jump
4821
4822# qhasm: goto bytesbetween1and255
4823jmp ._bytesbetween1and255
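For readers tracing the control flow above: when fewer than 64 bytes remain, the ._bytesbetween1and255 path stages them through the 64-byte tmp area so one whole keystream block can be generated, then copies only the valid bytes back to the saved output pointer (ctarget). A minimal C restatement of that idea follows; the names tail_block and one_block are hypothetical and do not exist in this tree.

#include <string.h>

/* Sketch of the partial-block strategy used by the assembly above: route the
 * last <64 bytes through a 64-byte stack buffer so one full block can be
 * produced with full-width loads/stores, then copy the valid bytes back. */
static void tail_block(unsigned char *out, const unsigned char *m,
                       unsigned long long bytes,
                       void (*one_block)(unsigned char *, const unsigned char *))
{
  unsigned char tmp[64] = {0};
  unsigned char *ctarget = out;          /* real destination, as in the asm */

  memcpy(tmp, m, (size_t) bytes);        /* first rep movsb                 */
  one_block(tmp, tmp);                   /* xor one 64-byte keystream block */
  memcpy(ctarget, tmp, (size_t) bytes);  /* second rep movsb                */
}

The bytes == 64 case skips both copies (jae ._bytesatleast64), and larger remainders loop through ._bytesatleast65 one 64-byte block at a time.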
diff --git a/nacl/crypto_stream/salsa2012/checksum b/nacl/crypto_stream/salsa2012/checksum
new file mode 100644
index 00000000..f801d9e3
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/checksum
@@ -0,0 +1 @@
ecc758f200061c3cc770b25797da73583548d4f90f69a967fbbe1a6d94d1705c
diff --git a/nacl/crypto_stream/salsa2012/ref/api.h b/nacl/crypto_stream/salsa2012/ref/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/ref/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa2012/ref/implementors b/nacl/crypto_stream/salsa2012/ref/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/ref/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa2012/ref/stream.c b/nacl/crypto_stream/salsa2012/ref/stream.c
new file mode 100644
index 00000000..86053337
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/ref/stream.c
@@ -0,0 +1,49 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa2012.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream(
15 unsigned char *c,unsigned long long clen,
16 const unsigned char *n,
17 const unsigned char *k
18)
19{
20 unsigned char in[16];
21 unsigned char block[64];
22 int i;
23 unsigned int u;
24
25 if (!clen) return 0;
26
27 for (i = 0;i < 8;++i) in[i] = n[i];
28 for (i = 8;i < 16;++i) in[i] = 0;
29
30 while (clen >= 64) {
31 crypto_core_salsa2012(c,in,k,sigma);
32
33 u = 1;
34 for (i = 8;i < 16;++i) {
35 u += (unsigned int) in[i];
36 in[i] = u;
37 u >>= 8;
38 }
39
40 clen -= 64;
41 c += 64;
42 }
43
44 if (clen) {
45 crypto_core_salsa2012(block,in,k,sigma);
46 for (i = 0;i < clen;++i) c[i] = block[i];
47 }
48 return 0;
49}
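The loop that bumps in[8..15] after each block is a byte-wise add-with-carry over a little-endian block counter. A sketch of the same update against an explicit 64-bit counter is shown below, using a hypothetical helper name; the reference code deliberately stays byte-wise, so it needs neither uint64_t nor any endianness assumption.

/* Hypothetical equivalent of the in[8..15] update above: interpret the last
 * 8 bytes of the block input as a little-endian 64-bit counter, increment
 * it, and write it back. */
static void bump_block_counter(unsigned char in[16])
{
  unsigned long long ctr = 0;
  int i;
  for (i = 0; i < 8; ++i) ctr |= (unsigned long long) in[8 + i] << (8 * i);
  ctr += 1;
  for (i = 0; i < 8; ++i) in[8 + i] = (unsigned char) (ctr >> (8 * i));
}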
diff --git a/nacl/crypto_stream/salsa2012/ref/xor.c b/nacl/crypto_stream/salsa2012/ref/xor.c
new file mode 100644
index 00000000..90206426
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/ref/xor.c
@@ -0,0 +1,52 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa2012.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream_xor(
15 unsigned char *c,
16 const unsigned char *m,unsigned long long mlen,
17 const unsigned char *n,
18 const unsigned char *k
19)
20{
21 unsigned char in[16];
22 unsigned char block[64];
23 int i;
24 unsigned int u;
25
26 if (!mlen) return 0;
27
28 for (i = 0;i < 8;++i) in[i] = n[i];
29 for (i = 8;i < 16;++i) in[i] = 0;
30
31 while (mlen >= 64) {
32 crypto_core_salsa2012(block,in,k,sigma);
33 for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i];
34
35 u = 1;
36 for (i = 8;i < 16;++i) {
37 u += (unsigned int) in[i];
38 in[i] = u;
39 u >>= 8;
40 }
41
42 mlen -= 64;
43 c += 64;
44 m += 64;
45 }
46
47 if (mlen) {
48 crypto_core_salsa2012(block,in,k,sigma);
49 for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i];
50 }
51 return 0;
52}
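A minimal caller-side sketch of this primitive, assuming the NaCl-generated crypto_stream.h already included above, which maps crypto_stream_xor onto this salsa2012 implementation (32-byte key and 8-byte nonce per api.h). Since encryption is a plain XOR with the keystream, the same call inverts itself.

#include <string.h>
#include "crypto_stream.h"

int example(void)
{
  unsigned char k[32] = {0};                     /* CRYPTO_KEYBYTES   */
  unsigned char n[8]  = {0};                     /* CRYPTO_NONCEBYTES */
  unsigned char msg[12] = "salsa20/12.";
  unsigned char ct[12], back[12];

  crypto_stream_xor(ct, msg, sizeof msg, n, k);  /* encrypt */
  crypto_stream_xor(back, ct, sizeof ct, n, k);  /* decrypt: same keystream */
  return memcmp(back, msg, sizeof msg);          /* 0 on success */
}

As with any stream cipher, a (key, nonce) pair must never be reused for two different messages.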
diff --git a/nacl/crypto_stream/salsa2012/used b/nacl/crypto_stream/salsa2012/used
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/used
diff --git a/nacl/crypto_stream/salsa2012/x86_xmm5/api.h b/nacl/crypto_stream/salsa2012/x86_xmm5/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/x86_xmm5/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa2012/x86_xmm5/implementors b/nacl/crypto_stream/salsa2012/x86_xmm5/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/x86_xmm5/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa2012/x86_xmm5/stream.s b/nacl/crypto_stream/salsa2012/x86_xmm5/stream.s
new file mode 100644
index 00000000..c511b0d3
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/x86_xmm5/stream.s
@@ -0,0 +1,5078 @@
1
2# qhasm: int32 a
3
4# qhasm: stack32 arg1
5
6# qhasm: stack32 arg2
7
8# qhasm: stack32 arg3
9
10# qhasm: stack32 arg4
11
12# qhasm: stack32 arg5
13
14# qhasm: stack32 arg6
15
16# qhasm: input arg1
17
18# qhasm: input arg2
19
20# qhasm: input arg3
21
22# qhasm: input arg4
23
24# qhasm: input arg5
25
26# qhasm: input arg6
27
28# qhasm: int32 eax
29
30# qhasm: int32 ebx
31
32# qhasm: int32 esi
33
34# qhasm: int32 edi
35
36# qhasm: int32 ebp
37
38# qhasm: caller eax
39
40# qhasm: caller ebx
41
42# qhasm: caller esi
43
44# qhasm: caller edi
45
46# qhasm: caller ebp
47
48# qhasm: int32 k
49
50# qhasm: int32 kbits
51
52# qhasm: int32 iv
53
54# qhasm: int32 i
55
56# qhasm: stack128 x0
57
58# qhasm: stack128 x1
59
60# qhasm: stack128 x2
61
62# qhasm: stack128 x3
63
64# qhasm: int32 m
65
66# qhasm: stack32 out_stack
67
68# qhasm: int32 out
69
70# qhasm: stack32 bytes_stack
71
72# qhasm: int32 bytes
73
74# qhasm: stack32 eax_stack
75
76# qhasm: stack32 ebx_stack
77
78# qhasm: stack32 esi_stack
79
80# qhasm: stack32 edi_stack
81
82# qhasm: stack32 ebp_stack
83
84# qhasm: int6464 diag0
85
86# qhasm: int6464 diag1
87
88# qhasm: int6464 diag2
89
90# qhasm: int6464 diag3
91
92# qhasm: int6464 a0
93
94# qhasm: int6464 a1
95
96# qhasm: int6464 a2
97
98# qhasm: int6464 a3
99
100# qhasm: int6464 a4
101
102# qhasm: int6464 a5
103
104# qhasm: int6464 a6
105
106# qhasm: int6464 a7
107
108# qhasm: int6464 b0
109
110# qhasm: int6464 b1
111
112# qhasm: int6464 b2
113
114# qhasm: int6464 b3
115
116# qhasm: int6464 b4
117
118# qhasm: int6464 b5
119
120# qhasm: int6464 b6
121
122# qhasm: int6464 b7
123
124# qhasm: int6464 z0
125
126# qhasm: int6464 z1
127
128# qhasm: int6464 z2
129
130# qhasm: int6464 z3
131
132# qhasm: int6464 z4
133
134# qhasm: int6464 z5
135
136# qhasm: int6464 z6
137
138# qhasm: int6464 z7
139
140# qhasm: int6464 z8
141
142# qhasm: int6464 z9
143
144# qhasm: int6464 z10
145
146# qhasm: int6464 z11
147
148# qhasm: int6464 z12
149
150# qhasm: int6464 z13
151
152# qhasm: int6464 z14
153
154# qhasm: int6464 z15
155
156# qhasm: stack128 z0_stack
157
158# qhasm: stack128 z1_stack
159
160# qhasm: stack128 z2_stack
161
162# qhasm: stack128 z3_stack
163
164# qhasm: stack128 z4_stack
165
166# qhasm: stack128 z5_stack
167
168# qhasm: stack128 z6_stack
169
170# qhasm: stack128 z7_stack
171
172# qhasm: stack128 z8_stack
173
174# qhasm: stack128 z9_stack
175
176# qhasm: stack128 z10_stack
177
178# qhasm: stack128 z11_stack
179
180# qhasm: stack128 z12_stack
181
182# qhasm: stack128 z13_stack
183
184# qhasm: stack128 z14_stack
185
186# qhasm: stack128 z15_stack
187
188# qhasm: stack128 orig0
189
190# qhasm: stack128 orig1
191
192# qhasm: stack128 orig2
193
194# qhasm: stack128 orig3
195
196# qhasm: stack128 orig4
197
198# qhasm: stack128 orig5
199
200# qhasm: stack128 orig6
201
202# qhasm: stack128 orig7
203
204# qhasm: stack128 orig8
205
206# qhasm: stack128 orig9
207
208# qhasm: stack128 orig10
209
210# qhasm: stack128 orig11
211
212# qhasm: stack128 orig12
213
214# qhasm: stack128 orig13
215
216# qhasm: stack128 orig14
217
218# qhasm: stack128 orig15
219
220# qhasm: int6464 p
221
222# qhasm: int6464 q
223
224# qhasm: int6464 r
225
226# qhasm: int6464 s
227
228# qhasm: int6464 t
229
230# qhasm: int6464 u
231
232# qhasm: int6464 v
233
234# qhasm: int6464 w
235
236# qhasm: int6464 mp
237
238# qhasm: int6464 mq
239
240# qhasm: int6464 mr
241
242# qhasm: int6464 ms
243
244# qhasm: int6464 mt
245
246# qhasm: int6464 mu
247
248# qhasm: int6464 mv
249
250# qhasm: int6464 mw
251
252# qhasm: int32 in0
253
254# qhasm: int32 in1
255
256# qhasm: int32 in2
257
258# qhasm: int32 in3
259
260# qhasm: int32 in4
261
262# qhasm: int32 in5
263
264# qhasm: int32 in6
265
266# qhasm: int32 in7
267
268# qhasm: int32 in8
269
270# qhasm: int32 in9
271
272# qhasm: int32 in10
273
274# qhasm: int32 in11
275
276# qhasm: int32 in12
277
278# qhasm: int32 in13
279
280# qhasm: int32 in14
281
282# qhasm: int32 in15
283
284# qhasm: stack512 tmp
285
286# qhasm: stack32 ctarget
287
288# qhasm: enter crypto_stream_salsa2012_x86_xmm5
289.text
290.p2align 5
291.globl _crypto_stream_salsa2012_x86_xmm5
292.globl crypto_stream_salsa2012_x86_xmm5
293_crypto_stream_salsa2012_x86_xmm5:
294crypto_stream_salsa2012_x86_xmm5:
295mov %esp,%eax
296and $31,%eax
297add $704,%eax
298sub %eax,%esp
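# note: the prologue computes eax = (esp & 31) + 704 and subtracts it, which 32-byte-aligns
# the stack and reserves room for the stack128/stack512 spills; eax is kept so the caller's
# arguments remain reachable at 4(%esp,%eax), 8(%esp,%eax), ...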
299
300# qhasm: eax_stack = eax
301# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
302# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
303movl %eax,0(%esp)
304
305# qhasm: ebx_stack = ebx
306# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
307# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
308movl %ebx,4(%esp)
309
310# qhasm: esi_stack = esi
311# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
312# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
313movl %esi,8(%esp)
314
315# qhasm: edi_stack = edi
316# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
317# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
318movl %edi,12(%esp)
319
320# qhasm: ebp_stack = ebp
321# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
322# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
323movl %ebp,16(%esp)
324
325# qhasm: bytes = arg2
326# asm 1: movl <arg2=stack32#-2,>bytes=int32#3
327# asm 2: movl <arg2=8(%esp,%eax),>bytes=%edx
328movl 8(%esp,%eax),%edx
329
330# qhasm: out = arg1
331# asm 1: movl <arg1=stack32#-1,>out=int32#6
332# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
333movl 4(%esp,%eax),%edi
334
335# qhasm: m = out
336# asm 1: mov <out=int32#6,>m=int32#5
337# asm 2: mov <out=%edi,>m=%esi
338mov %edi,%esi
339
340# qhasm: iv = arg4
341# asm 1: movl <arg4=stack32#-4,>iv=int32#4
342# asm 2: movl <arg4=16(%esp,%eax),>iv=%ebx
343movl 16(%esp,%eax),%ebx
344
345# qhasm: k = arg5
346# asm 1: movl <arg5=stack32#-5,>k=int32#7
347# asm 2: movl <arg5=20(%esp,%eax),>k=%ebp
348movl 20(%esp,%eax),%ebp
349
350# qhasm: unsigned>? bytes - 0
351# asm 1: cmp $0,<bytes=int32#3
352# asm 2: cmp $0,<bytes=%edx
353cmp $0,%edx
354# comment:fp stack unchanged by jump
355
356# qhasm: goto done if !unsigned>
357jbe ._done
358
359# qhasm: a = 0
360# asm 1: mov $0,>a=int32#1
361# asm 2: mov $0,>a=%eax
362mov $0,%eax
363
364# qhasm: i = bytes
365# asm 1: mov <bytes=int32#3,>i=int32#2
366# asm 2: mov <bytes=%edx,>i=%ecx
367mov %edx,%ecx
368
369# qhasm: while (i) { *out++ = a; --i }
370rep stosb
371
372# qhasm: out -= bytes
373# asm 1: subl <bytes=int32#3,<out=int32#6
374# asm 2: subl <bytes=%edx,<out=%edi
375subl %edx,%edi
376# comment:fp stack unchanged by jump
377
378# qhasm: goto start
379jmp ._start
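# note: this entry point (keystream only, no message) zeroes the output buffer with
# rep stosb, rewinds out, and jumps to the shared ._start code, which then XORs the
# keystream into those zeros because m was aliased to out above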
380
381# qhasm: enter crypto_stream_salsa2012_x86_xmm5_xor
382.text
383.p2align 5
384.globl _crypto_stream_salsa2012_x86_xmm5_xor
385.globl crypto_stream_salsa2012_x86_xmm5_xor
386_crypto_stream_salsa2012_x86_xmm5_xor:
387crypto_stream_salsa2012_x86_xmm5_xor:
388mov %esp,%eax
389and $31,%eax
390add $704,%eax
391sub %eax,%esp
392
393# qhasm: eax_stack = eax
394# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
395# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
396movl %eax,0(%esp)
397
398# qhasm: ebx_stack = ebx
399# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
400# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
401movl %ebx,4(%esp)
402
403# qhasm: esi_stack = esi
404# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
405# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
406movl %esi,8(%esp)
407
408# qhasm: edi_stack = edi
409# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
410# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
411movl %edi,12(%esp)
412
413# qhasm: ebp_stack = ebp
414# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
415# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
416movl %ebp,16(%esp)
417
418# qhasm: out = arg1
419# asm 1: movl <arg1=stack32#-1,>out=int32#6
420# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
421movl 4(%esp,%eax),%edi
422
423# qhasm: m = arg2
424# asm 1: movl <arg2=stack32#-2,>m=int32#5
425# asm 2: movl <arg2=8(%esp,%eax),>m=%esi
426movl 8(%esp,%eax),%esi
427
428# qhasm: bytes = arg3
429# asm 1: movl <arg3=stack32#-3,>bytes=int32#3
430# asm 2: movl <arg3=12(%esp,%eax),>bytes=%edx
431movl 12(%esp,%eax),%edx
432
433# qhasm: iv = arg5
434# asm 1: movl <arg5=stack32#-5,>iv=int32#4
435# asm 2: movl <arg5=20(%esp,%eax),>iv=%ebx
436movl 20(%esp,%eax),%ebx
437
438# qhasm: k = arg6
439# asm 1: movl <arg6=stack32#-6,>k=int32#7
440# asm 2: movl <arg6=24(%esp,%eax),>k=%ebp
441movl 24(%esp,%eax),%ebp
442
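# note: as above, arg4 (the high word of the 64-bit length) is skipped;
# iv and k come from arg5 and arg6.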
443# qhasm: unsigned>? bytes - 0
444# asm 1: cmp $0,<bytes=int32#3
445# asm 2: cmp $0,<bytes=%edx
446cmp $0,%edx
447# comment:fp stack unchanged by jump
448
449# qhasm: goto done if !unsigned>
450jbe ._done
451# comment:fp stack unchanged by fallthrough
452
453# qhasm: start:
454._start:
455
456# qhasm: out_stack = out
457# asm 1: movl <out=int32#6,>out_stack=stack32#6
458# asm 2: movl <out=%edi,>out_stack=20(%esp)
459movl %edi,20(%esp)
460
461# qhasm: bytes_stack = bytes
462# asm 1: movl <bytes=int32#3,>bytes_stack=stack32#7
463# asm 2: movl <bytes=%edx,>bytes_stack=24(%esp)
464movl %edx,24(%esp)
465
466# qhasm: in4 = *(uint32 *) (k + 12)
467# asm 1: movl 12(<k=int32#7),>in4=int32#1
468# asm 2: movl 12(<k=%ebp),>in4=%eax
469movl 12(%ebp),%eax
470
471# qhasm: in12 = *(uint32 *) (k + 20)
472# asm 1: movl 20(<k=int32#7),>in12=int32#2
473# asm 2: movl 20(<k=%ebp),>in12=%ecx
474movl 20(%ebp),%ecx
475
476# qhasm: ((uint32 *)&x3)[0] = in4
477# asm 1: movl <in4=int32#1,>x3=stack128#1
478# asm 2: movl <in4=%eax,>x3=32(%esp)
479movl %eax,32(%esp)
480
481# qhasm: ((uint32 *)&x1)[0] = in12
482# asm 1: movl <in12=int32#2,>x1=stack128#2
483# asm 2: movl <in12=%ecx,>x1=48(%esp)
484movl %ecx,48(%esp)
485
486# qhasm: in0 = 1634760805
487# asm 1: mov $1634760805,>in0=int32#1
488# asm 2: mov $1634760805,>in0=%eax
489mov $1634760805,%eax
490
491# qhasm: in8 = 0
492# asm 1: mov $0,>in8=int32#2
493# asm 2: mov $0,>in8=%ecx
494mov $0,%ecx
495
496# qhasm: ((uint32 *)&x0)[0] = in0
497# asm 1: movl <in0=int32#1,>x0=stack128#3
498# asm 2: movl <in0=%eax,>x0=64(%esp)
499movl %eax,64(%esp)
500
501# qhasm: ((uint32 *)&x2)[0] = in8
502# asm 1: movl <in8=int32#2,>x2=stack128#4
503# asm 2: movl <in8=%ecx,>x2=80(%esp)
504movl %ecx,80(%esp)
505
506# qhasm: in6 = *(uint32 *) (iv + 0)
507# asm 1: movl 0(<iv=int32#4),>in6=int32#1
508# asm 2: movl 0(<iv=%ebx),>in6=%eax
509movl 0(%ebx),%eax
510
511# qhasm: in7 = *(uint32 *) (iv + 4)
512# asm 1: movl 4(<iv=int32#4),>in7=int32#2
513# asm 2: movl 4(<iv=%ebx),>in7=%ecx
514movl 4(%ebx),%ecx
515
516# qhasm: ((uint32 *)&x1)[2] = in6
517# asm 1: movl <in6=int32#1,8+<x1=stack128#2
518# asm 2: movl <in6=%eax,8+<x1=48(%esp)
519movl %eax,8+48(%esp)
520
521# qhasm: ((uint32 *)&x2)[3] = in7
522# asm 1: movl <in7=int32#2,12+<x2=stack128#4
523# asm 2: movl <in7=%ecx,12+<x2=80(%esp)
524movl %ecx,12+80(%esp)
525
526# qhasm: in9 = 0
527# asm 1: mov $0,>in9=int32#1
528# asm 2: mov $0,>in9=%eax
529mov $0,%eax
530
531# qhasm: in10 = 2036477234
532# asm 1: mov $2036477234,>in10=int32#2
533# asm 2: mov $2036477234,>in10=%ecx
534mov $2036477234,%ecx
535
536# qhasm: ((uint32 *)&x3)[1] = in9
537# asm 1: movl <in9=int32#1,4+<x3=stack128#1
538# asm 2: movl <in9=%eax,4+<x3=32(%esp)
539movl %eax,4+32(%esp)
540
541# qhasm: ((uint32 *)&x0)[2] = in10
542# asm 1: movl <in10=int32#2,8+<x0=stack128#3
543# asm 2: movl <in10=%ecx,8+<x0=64(%esp)
544movl %ecx,8+64(%esp)
545
546# qhasm: in1 = *(uint32 *) (k + 0)
547# asm 1: movl 0(<k=int32#7),>in1=int32#1
548# asm 2: movl 0(<k=%ebp),>in1=%eax
549movl 0(%ebp),%eax
550
551# qhasm: in2 = *(uint32 *) (k + 4)
552# asm 1: movl 4(<k=int32#7),>in2=int32#2
553# asm 2: movl 4(<k=%ebp),>in2=%ecx
554movl 4(%ebp),%ecx
555
556# qhasm: in3 = *(uint32 *) (k + 8)
557# asm 1: movl 8(<k=int32#7),>in3=int32#3
558# asm 2: movl 8(<k=%ebp),>in3=%edx
559movl 8(%ebp),%edx
560
561# qhasm: in5 = 857760878
562# asm 1: mov $857760878,>in5=int32#4
563# asm 2: mov $857760878,>in5=%ebx
564mov $857760878,%ebx
565
566# qhasm: ((uint32 *)&x1)[1] = in1
567# asm 1: movl <in1=int32#1,4+<x1=stack128#2
568# asm 2: movl <in1=%eax,4+<x1=48(%esp)
569movl %eax,4+48(%esp)
570
571# qhasm: ((uint32 *)&x2)[2] = in2
572# asm 1: movl <in2=int32#2,8+<x2=stack128#4
573# asm 2: movl <in2=%ecx,8+<x2=80(%esp)
574movl %ecx,8+80(%esp)
575
576# qhasm: ((uint32 *)&x3)[3] = in3
577# asm 1: movl <in3=int32#3,12+<x3=stack128#1
578# asm 2: movl <in3=%edx,12+<x3=32(%esp)
579movl %edx,12+32(%esp)
580
581# qhasm: ((uint32 *)&x0)[1] = in5
582# asm 1: movl <in5=int32#4,4+<x0=stack128#3
583# asm 2: movl <in5=%ebx,4+<x0=64(%esp)
584movl %ebx,4+64(%esp)
585
586# qhasm: in11 = *(uint32 *) (k + 16)
587# asm 1: movl 16(<k=int32#7),>in11=int32#1
588# asm 2: movl 16(<k=%ebp),>in11=%eax
589movl 16(%ebp),%eax
590
591# qhasm: in13 = *(uint32 *) (k + 24)
592# asm 1: movl 24(<k=int32#7),>in13=int32#2
593# asm 2: movl 24(<k=%ebp),>in13=%ecx
594movl 24(%ebp),%ecx
595
596# qhasm: in14 = *(uint32 *) (k + 28)
597# asm 1: movl 28(<k=int32#7),>in14=int32#3
598# asm 2: movl 28(<k=%ebp),>in14=%edx
599movl 28(%ebp),%edx
600
601# qhasm: in15 = 1797285236
602# asm 1: mov $1797285236,>in15=int32#4
603# asm 2: mov $1797285236,>in15=%ebx
604mov $1797285236,%ebx
605
606# qhasm: ((uint32 *)&x1)[3] = in11
607# asm 1: movl <in11=int32#1,12+<x1=stack128#2
608# asm 2: movl <in11=%eax,12+<x1=48(%esp)
609movl %eax,12+48(%esp)
610
611# qhasm: ((uint32 *)&x2)[1] = in13
612# asm 1: movl <in13=int32#2,4+<x2=stack128#4
613# asm 2: movl <in13=%ecx,4+<x2=80(%esp)
614movl %ecx,4+80(%esp)
615
616# qhasm: ((uint32 *)&x3)[2] = in14
617# asm 1: movl <in14=int32#3,8+<x3=stack128#1
618# asm 2: movl <in14=%edx,8+<x3=32(%esp)
619movl %edx,8+32(%esp)
620
621# qhasm: ((uint32 *)&x0)[3] = in15
622# asm 1: movl <in15=int32#4,12+<x0=stack128#3
623# asm 2: movl <in15=%ebx,12+<x0=64(%esp)
624movl %ebx,12+64(%esp)
625
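# note: in0/in5/in10/in15 are 0x61707865, 0x3320646e, 0x79622d32,
# 0x6b206574 -- the "expand 32-byte k" diagonal constants of Salsa20.
# Together with the key words (in1..in4, in11..in14), the block counter
# (in8, in9, initially 0) and the 8-byte IV (in6, in7), they form the
# 16-word Salsa20 input state, scattered across the stack128 slots
# x0..x3 in the layout this kernel expects.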
626# qhasm: bytes = bytes_stack
627# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
628# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
629movl 24(%esp),%eax
630
631# qhasm: unsigned<? bytes - 256
632# asm 1: cmp $256,<bytes=int32#1
633# asm 2: cmp $256,<bytes=%eax
634cmp $256,%eax
635# comment:fp stack unchanged by jump
636
637# qhasm: goto bytesbetween1and255 if unsigned<
638jb ._bytesbetween1and255
639
640# qhasm: z0 = x0
641# asm 1: movdqa <x0=stack128#3,>z0=int6464#1
642# asm 2: movdqa <x0=64(%esp),>z0=%xmm0
643movdqa 64(%esp),%xmm0
644
645# qhasm: z5 = z0[1,1,1,1]
646# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
647# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
648pshufd $0x55,%xmm0,%xmm1
649
650# qhasm: z10 = z0[2,2,2,2]
651# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
652# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
653pshufd $0xaa,%xmm0,%xmm2
654
655# qhasm: z15 = z0[3,3,3,3]
656# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
657# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
658pshufd $0xff,%xmm0,%xmm3
659
660# qhasm: z0 = z0[0,0,0,0]
661# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
662# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
663pshufd $0x00,%xmm0,%xmm0
664
665# qhasm: orig5 = z5
666# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
667# asm 2: movdqa <z5=%xmm1,>orig5=96(%esp)
668movdqa %xmm1,96(%esp)
669
670# qhasm: orig10 = z10
671# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
672# asm 2: movdqa <z10=%xmm2,>orig10=112(%esp)
673movdqa %xmm2,112(%esp)
674
675# qhasm: orig15 = z15
676# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
677# asm 2: movdqa <z15=%xmm3,>orig15=128(%esp)
678movdqa %xmm3,128(%esp)
679
680# qhasm: orig0 = z0
681# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
682# asm 2: movdqa <z0=%xmm0,>orig0=144(%esp)
683movdqa %xmm0,144(%esp)
684
685# qhasm: z1 = x1
686# asm 1: movdqa <x1=stack128#2,>z1=int6464#1
687# asm 2: movdqa <x1=48(%esp),>z1=%xmm0
688movdqa 48(%esp),%xmm0
689
690# qhasm: z6 = z1[2,2,2,2]
691# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
692# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
693pshufd $0xaa,%xmm0,%xmm1
694
695# qhasm: z11 = z1[3,3,3,3]
696# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
697# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
698pshufd $0xff,%xmm0,%xmm2
699
700# qhasm: z12 = z1[0,0,0,0]
701# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
702# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
703pshufd $0x00,%xmm0,%xmm3
704
705# qhasm: z1 = z1[1,1,1,1]
706# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
707# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
708pshufd $0x55,%xmm0,%xmm0
709
710# qhasm: orig6 = z6
711# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
712# asm 2: movdqa <z6=%xmm1,>orig6=160(%esp)
713movdqa %xmm1,160(%esp)
714
715# qhasm: orig11 = z11
716# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
717# asm 2: movdqa <z11=%xmm2,>orig11=176(%esp)
718movdqa %xmm2,176(%esp)
719
720# qhasm: orig12 = z12
721# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
722# asm 2: movdqa <z12=%xmm3,>orig12=192(%esp)
723movdqa %xmm3,192(%esp)
724
725# qhasm: orig1 = z1
726# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
727# asm 2: movdqa <z1=%xmm0,>orig1=208(%esp)
728movdqa %xmm0,208(%esp)
729
730# qhasm: z2 = x2
731# asm 1: movdqa <x2=stack128#4,>z2=int6464#1
732# asm 2: movdqa <x2=80(%esp),>z2=%xmm0
733movdqa 80(%esp),%xmm0
734
735# qhasm: z7 = z2[3,3,3,3]
736# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
737# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
738pshufd $0xff,%xmm0,%xmm1
739
740# qhasm: z13 = z2[1,1,1,1]
741# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
742# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
743pshufd $0x55,%xmm0,%xmm2
744
745# qhasm: z2 = z2[2,2,2,2]
746# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
747# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
748pshufd $0xaa,%xmm0,%xmm0
749
750# qhasm: orig7 = z7
751# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
752# asm 2: movdqa <z7=%xmm1,>orig7=224(%esp)
753movdqa %xmm1,224(%esp)
754
755# qhasm: orig13 = z13
756# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
757# asm 2: movdqa <z13=%xmm2,>orig13=240(%esp)
758movdqa %xmm2,240(%esp)
759
760# qhasm: orig2 = z2
761# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
762# asm 2: movdqa <z2=%xmm0,>orig2=256(%esp)
763movdqa %xmm0,256(%esp)
764
765# qhasm: z3 = x3
766# asm 1: movdqa <x3=stack128#1,>z3=int6464#1
767# asm 2: movdqa <x3=32(%esp),>z3=%xmm0
768movdqa 32(%esp),%xmm0
769
770# qhasm: z4 = z3[0,0,0,0]
771# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
772# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
773pshufd $0x00,%xmm0,%xmm1
774
775# qhasm: z14 = z3[2,2,2,2]
776# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
777# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
778pshufd $0xaa,%xmm0,%xmm2
779
780# qhasm: z3 = z3[3,3,3,3]
781# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
782# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
783pshufd $0xff,%xmm0,%xmm0
784
785# qhasm: orig4 = z4
786# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
787# asm 2: movdqa <z4=%xmm1,>orig4=272(%esp)
788movdqa %xmm1,272(%esp)
789
790# qhasm: orig14 = z14
791# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
792# asm 2: movdqa <z14=%xmm2,>orig14=288(%esp)
793movdqa %xmm2,288(%esp)
794
795# qhasm: orig3 = z3
796# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
797# asm 2: movdqa <z3=%xmm0,>orig3=304(%esp)
798movdqa %xmm0,304(%esp)
799
800# qhasm: bytesatleast256:
801._bytesatleast256:
802
803# qhasm: in8 = ((uint32 *)&x2)[0]
804# asm 1: movl <x2=stack128#4,>in8=int32#2
805# asm 2: movl <x2=80(%esp),>in8=%ecx
806movl 80(%esp),%ecx
807
808# qhasm: in9 = ((uint32 *)&x3)[1]
809# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
810# asm 2: movl 4+<x3=32(%esp),>in9=%edx
811movl 4+32(%esp),%edx
812
813# qhasm: ((uint32 *) &orig8)[0] = in8
814# asm 1: movl <in8=int32#2,>orig8=stack128#19
815# asm 2: movl <in8=%ecx,>orig8=320(%esp)
816movl %ecx,320(%esp)
817
818# qhasm: ((uint32 *) &orig9)[0] = in9
819# asm 1: movl <in9=int32#3,>orig9=stack128#20
820# asm 2: movl <in9=%edx,>orig9=336(%esp)
821movl %edx,336(%esp)
822
823# qhasm: carry? in8 += 1
824# asm 1: add $1,<in8=int32#2
825# asm 2: add $1,<in8=%ecx
826add $1,%ecx
827
828# qhasm: in9 += 0 + carry
829# asm 1: adc $0,<in9=int32#3
830# asm 2: adc $0,<in9=%edx
831adc $0,%edx
832
833# qhasm: ((uint32 *) &orig8)[1] = in8
834# asm 1: movl <in8=int32#2,4+<orig8=stack128#19
835# asm 2: movl <in8=%ecx,4+<orig8=320(%esp)
836movl %ecx,4+320(%esp)
837
838# qhasm: ((uint32 *) &orig9)[1] = in9
839# asm 1: movl <in9=int32#3,4+<orig9=stack128#20
840# asm 2: movl <in9=%edx,4+<orig9=336(%esp)
841movl %edx,4+336(%esp)
842
843# qhasm: carry? in8 += 1
844# asm 1: add $1,<in8=int32#2
845# asm 2: add $1,<in8=%ecx
846add $1,%ecx
847
848# qhasm: in9 += 0 + carry
849# asm 1: adc $0,<in9=int32#3
850# asm 2: adc $0,<in9=%edx
851adc $0,%edx
852
853# qhasm: ((uint32 *) &orig8)[2] = in8
854# asm 1: movl <in8=int32#2,8+<orig8=stack128#19
855# asm 2: movl <in8=%ecx,8+<orig8=320(%esp)
856movl %ecx,8+320(%esp)
857
858# qhasm: ((uint32 *) &orig9)[2] = in9
859# asm 1: movl <in9=int32#3,8+<orig9=stack128#20
860# asm 2: movl <in9=%edx,8+<orig9=336(%esp)
861movl %edx,8+336(%esp)
862
863# qhasm: carry? in8 += 1
864# asm 1: add $1,<in8=int32#2
865# asm 2: add $1,<in8=%ecx
866add $1,%ecx
867
868# qhasm: in9 += 0 + carry
869# asm 1: adc $0,<in9=int32#3
870# asm 2: adc $0,<in9=%edx
871adc $0,%edx
872
873# qhasm: ((uint32 *) &orig8)[3] = in8
874# asm 1: movl <in8=int32#2,12+<orig8=stack128#19
875# asm 2: movl <in8=%ecx,12+<orig8=320(%esp)
876movl %ecx,12+320(%esp)
877
878# qhasm: ((uint32 *) &orig9)[3] = in9
879# asm 1: movl <in9=int32#3,12+<orig9=stack128#20
880# asm 2: movl <in9=%edx,12+<orig9=336(%esp)
881movl %edx,12+336(%esp)
882
883# qhasm: carry? in8 += 1
884# asm 1: add $1,<in8=int32#2
885# asm 2: add $1,<in8=%ecx
886add $1,%ecx
887
888# qhasm: in9 += 0 + carry
889# asm 1: adc $0,<in9=int32#3
890# asm 2: adc $0,<in9=%edx
891adc $0,%edx
892
893# qhasm: ((uint32 *)&x2)[0] = in8
894# asm 1: movl <in8=int32#2,>x2=stack128#4
895# asm 2: movl <in8=%ecx,>x2=80(%esp)
896movl %ecx,80(%esp)
897
898# qhasm: ((uint32 *)&x3)[1] = in9
899# asm 1: movl <in9=int32#3,4+<x3=stack128#1
900# asm 2: movl <in9=%edx,4+<x3=32(%esp)
901movl %edx,4+32(%esp)
902
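# note: orig8/orig9 now hold the low/high halves of the 64-bit block
# counter for four consecutive 64-byte blocks (n, n+1, n+2, n+3), built
# with add/adc so the carry propagates into the high word; x2/x3 keep
# the counter advanced by 4 for the next 256-byte chunk.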
903# qhasm: bytes_stack = bytes
904# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
905# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
906movl %eax,24(%esp)
907
908# qhasm: i = 12
909# asm 1: mov $12,>i=int32#1
910# asm 2: mov $12,>i=%eax
911mov $12,%eax
912
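# note: i counts the remaining Salsa20/12 rounds.  Each pass through
# ._mainloop1 performs two rounds (a column round followed by a row
# round) on all four blocks in parallel, one 32-bit lane per block in
# each xmm register, so the loop body runs 6 times.  With only eight
# xmm registers available, the sixteen state vectors z0..z15 are
# spilled to stack128 slots and reloaded as needed.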
913# qhasm: z5 = orig5
914# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
915# asm 2: movdqa <orig5=96(%esp),>z5=%xmm0
916movdqa 96(%esp),%xmm0
917
918# qhasm: z10 = orig10
919# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
920# asm 2: movdqa <orig10=112(%esp),>z10=%xmm1
921movdqa 112(%esp),%xmm1
922
923# qhasm: z15 = orig15
924# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
925# asm 2: movdqa <orig15=128(%esp),>z15=%xmm2
926movdqa 128(%esp),%xmm2
927
928# qhasm: z14 = orig14
929# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
930# asm 2: movdqa <orig14=288(%esp),>z14=%xmm3
931movdqa 288(%esp),%xmm3
932
933# qhasm: z3 = orig3
934# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
935# asm 2: movdqa <orig3=304(%esp),>z3=%xmm4
936movdqa 304(%esp),%xmm4
937
938# qhasm: z6 = orig6
939# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
940# asm 2: movdqa <orig6=160(%esp),>z6=%xmm5
941movdqa 160(%esp),%xmm5
942
943# qhasm: z11 = orig11
944# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
945# asm 2: movdqa <orig11=176(%esp),>z11=%xmm6
946movdqa 176(%esp),%xmm6
947
948# qhasm: z1 = orig1
949# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
950# asm 2: movdqa <orig1=208(%esp),>z1=%xmm7
951movdqa 208(%esp),%xmm7
952
953# qhasm: z5_stack = z5
954# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#21
955# asm 2: movdqa <z5=%xmm0,>z5_stack=352(%esp)
956movdqa %xmm0,352(%esp)
957
958# qhasm: z10_stack = z10
959# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#22
960# asm 2: movdqa <z10=%xmm1,>z10_stack=368(%esp)
961movdqa %xmm1,368(%esp)
962
963# qhasm: z15_stack = z15
964# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#23
965# asm 2: movdqa <z15=%xmm2,>z15_stack=384(%esp)
966movdqa %xmm2,384(%esp)
967
968# qhasm: z14_stack = z14
969# asm 1: movdqa <z14=int6464#4,>z14_stack=stack128#24
970# asm 2: movdqa <z14=%xmm3,>z14_stack=400(%esp)
971movdqa %xmm3,400(%esp)
972
973# qhasm: z3_stack = z3
974# asm 1: movdqa <z3=int6464#5,>z3_stack=stack128#25
975# asm 2: movdqa <z3=%xmm4,>z3_stack=416(%esp)
976movdqa %xmm4,416(%esp)
977
978# qhasm: z6_stack = z6
979# asm 1: movdqa <z6=int6464#6,>z6_stack=stack128#26
980# asm 2: movdqa <z6=%xmm5,>z6_stack=432(%esp)
981movdqa %xmm5,432(%esp)
982
983# qhasm: z11_stack = z11
984# asm 1: movdqa <z11=int6464#7,>z11_stack=stack128#27
985# asm 2: movdqa <z11=%xmm6,>z11_stack=448(%esp)
986movdqa %xmm6,448(%esp)
987
988# qhasm: z1_stack = z1
989# asm 1: movdqa <z1=int6464#8,>z1_stack=stack128#28
990# asm 2: movdqa <z1=%xmm7,>z1_stack=464(%esp)
991movdqa %xmm7,464(%esp)
992
993# qhasm: z7 = orig7
994# asm 1: movdqa <orig7=stack128#13,>z7=int6464#5
995# asm 2: movdqa <orig7=224(%esp),>z7=%xmm4
996movdqa 224(%esp),%xmm4
997
998# qhasm: z13 = orig13
999# asm 1: movdqa <orig13=stack128#14,>z13=int6464#6
1000# asm 2: movdqa <orig13=240(%esp),>z13=%xmm5
1001movdqa 240(%esp),%xmm5
1002
1003# qhasm: z2 = orig2
1004# asm 1: movdqa <orig2=stack128#15,>z2=int6464#7
1005# asm 2: movdqa <orig2=256(%esp),>z2=%xmm6
1006movdqa 256(%esp),%xmm6
1007
1008# qhasm: z9 = orig9
1009# asm 1: movdqa <orig9=stack128#20,>z9=int6464#8
1010# asm 2: movdqa <orig9=336(%esp),>z9=%xmm7
1011movdqa 336(%esp),%xmm7
1012
1013# qhasm: p = orig0
1014# asm 1: movdqa <orig0=stack128#8,>p=int6464#1
1015# asm 2: movdqa <orig0=144(%esp),>p=%xmm0
1016movdqa 144(%esp),%xmm0
1017
1018# qhasm: t = orig12
1019# asm 1: movdqa <orig12=stack128#11,>t=int6464#3
1020# asm 2: movdqa <orig12=192(%esp),>t=%xmm2
1021movdqa 192(%esp),%xmm2
1022
1023# qhasm: q = orig4
1024# asm 1: movdqa <orig4=stack128#16,>q=int6464#4
1025# asm 2: movdqa <orig4=272(%esp),>q=%xmm3
1026movdqa 272(%esp),%xmm3
1027
1028# qhasm: r = orig8
1029# asm 1: movdqa <orig8=stack128#19,>r=int6464#2
1030# asm 2: movdqa <orig8=320(%esp),>r=%xmm1
1031movdqa 320(%esp),%xmm1
1032
1033# qhasm: z7_stack = z7
1034# asm 1: movdqa <z7=int6464#5,>z7_stack=stack128#29
1035# asm 2: movdqa <z7=%xmm4,>z7_stack=480(%esp)
1036movdqa %xmm4,480(%esp)
1037
1038# qhasm: z13_stack = z13
1039# asm 1: movdqa <z13=int6464#6,>z13_stack=stack128#30
1040# asm 2: movdqa <z13=%xmm5,>z13_stack=496(%esp)
1041movdqa %xmm5,496(%esp)
1042
1043# qhasm: z2_stack = z2
1044# asm 1: movdqa <z2=int6464#7,>z2_stack=stack128#31
1045# asm 2: movdqa <z2=%xmm6,>z2_stack=512(%esp)
1046movdqa %xmm6,512(%esp)
1047
1048# qhasm: z9_stack = z9
1049# asm 1: movdqa <z9=int6464#8,>z9_stack=stack128#32
1050# asm 2: movdqa <z9=%xmm7,>z9_stack=528(%esp)
1051movdqa %xmm7,528(%esp)
1052
1053# qhasm: z0_stack = p
1054# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1055# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1056movdqa %xmm0,544(%esp)
1057
1058# qhasm: z12_stack = t
1059# asm 1: movdqa <t=int6464#3,>z12_stack=stack128#34
1060# asm 2: movdqa <t=%xmm2,>z12_stack=560(%esp)
1061movdqa %xmm2,560(%esp)
1062
1063# qhasm: z4_stack = q
1064# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#35
1065# asm 2: movdqa <q=%xmm3,>z4_stack=576(%esp)
1066movdqa %xmm3,576(%esp)
1067
1068# qhasm: z8_stack = r
1069# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#36
1070# asm 2: movdqa <r=%xmm1,>z8_stack=592(%esp)
1071movdqa %xmm1,592(%esp)
1072
1073# qhasm: mainloop1:
1074._mainloop1:
1075
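# note: each quarter-round step below computes x ^= (a + b) <<< r.
# SSE2 has no vector rotate, so the rotation is emulated with a copy
# and a pair of shifts: psrld by (32 - r) on one copy, pslld by r on
# the other, combined with two pxors.  The rotation amounts 7, 9, 13,
# 18 are the Salsa20 quarter-round constants.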
1076# qhasm: assign xmm0 to p
1077
1078# qhasm: assign xmm1 to r
1079
1080# qhasm: assign xmm2 to t
1081
1082# qhasm: assign xmm3 to q
1083
1084# qhasm: s = t
1085# asm 1: movdqa <t=int6464#3,>s=int6464#7
1086# asm 2: movdqa <t=%xmm2,>s=%xmm6
1087movdqa %xmm2,%xmm6
1088
1089# qhasm: uint32323232 t += p
1090# asm 1: paddd <p=int6464#1,<t=int6464#3
1091# asm 2: paddd <p=%xmm0,<t=%xmm2
1092paddd %xmm0,%xmm2
1093
1094# qhasm: u = t
1095# asm 1: movdqa <t=int6464#3,>u=int6464#5
1096# asm 2: movdqa <t=%xmm2,>u=%xmm4
1097movdqa %xmm2,%xmm4
1098
1099# qhasm: uint32323232 t >>= 25
1100# asm 1: psrld $25,<t=int6464#3
1101# asm 2: psrld $25,<t=%xmm2
1102psrld $25,%xmm2
1103
1104# qhasm: q ^= t
1105# asm 1: pxor <t=int6464#3,<q=int6464#4
1106# asm 2: pxor <t=%xmm2,<q=%xmm3
1107pxor %xmm2,%xmm3
1108
1109# qhasm: uint32323232 u <<= 7
1110# asm 1: pslld $7,<u=int6464#5
1111# asm 2: pslld $7,<u=%xmm4
1112pslld $7,%xmm4
1113
1114# qhasm: q ^= u
1115# asm 1: pxor <u=int6464#5,<q=int6464#4
1116# asm 2: pxor <u=%xmm4,<q=%xmm3
1117pxor %xmm4,%xmm3
1118
1119# qhasm: z4_stack = q
1120# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#33
1121# asm 2: movdqa <q=%xmm3,>z4_stack=544(%esp)
1122movdqa %xmm3,544(%esp)
1123
1124# qhasm: t = p
1125# asm 1: movdqa <p=int6464#1,>t=int6464#3
1126# asm 2: movdqa <p=%xmm0,>t=%xmm2
1127movdqa %xmm0,%xmm2
1128
1129# qhasm: uint32323232 t += q
1130# asm 1: paddd <q=int6464#4,<t=int6464#3
1131# asm 2: paddd <q=%xmm3,<t=%xmm2
1132paddd %xmm3,%xmm2
1133
1134# qhasm: u = t
1135# asm 1: movdqa <t=int6464#3,>u=int6464#5
1136# asm 2: movdqa <t=%xmm2,>u=%xmm4
1137movdqa %xmm2,%xmm4
1138
1139# qhasm: uint32323232 t >>= 23
1140# asm 1: psrld $23,<t=int6464#3
1141# asm 2: psrld $23,<t=%xmm2
1142psrld $23,%xmm2
1143
1144# qhasm: r ^= t
1145# asm 1: pxor <t=int6464#3,<r=int6464#2
1146# asm 2: pxor <t=%xmm2,<r=%xmm1
1147pxor %xmm2,%xmm1
1148
1149# qhasm: uint32323232 u <<= 9
1150# asm 1: pslld $9,<u=int6464#5
1151# asm 2: pslld $9,<u=%xmm4
1152pslld $9,%xmm4
1153
1154# qhasm: r ^= u
1155# asm 1: pxor <u=int6464#5,<r=int6464#2
1156# asm 2: pxor <u=%xmm4,<r=%xmm1
1157pxor %xmm4,%xmm1
1158
1159# qhasm: z8_stack = r
1160# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#34
1161# asm 2: movdqa <r=%xmm1,>z8_stack=560(%esp)
1162movdqa %xmm1,560(%esp)
1163
1164# qhasm: uint32323232 q += r
1165# asm 1: paddd <r=int6464#2,<q=int6464#4
1166# asm 2: paddd <r=%xmm1,<q=%xmm3
1167paddd %xmm1,%xmm3
1168
1169# qhasm: u = q
1170# asm 1: movdqa <q=int6464#4,>u=int6464#3
1171# asm 2: movdqa <q=%xmm3,>u=%xmm2
1172movdqa %xmm3,%xmm2
1173
1174# qhasm: uint32323232 q >>= 19
1175# asm 1: psrld $19,<q=int6464#4
1176# asm 2: psrld $19,<q=%xmm3
1177psrld $19,%xmm3
1178
1179# qhasm: s ^= q
1180# asm 1: pxor <q=int6464#4,<s=int6464#7
1181# asm 2: pxor <q=%xmm3,<s=%xmm6
1182pxor %xmm3,%xmm6
1183
1184# qhasm: uint32323232 u <<= 13
1185# asm 1: pslld $13,<u=int6464#3
1186# asm 2: pslld $13,<u=%xmm2
1187pslld $13,%xmm2
1188
1189# qhasm: s ^= u
1190# asm 1: pxor <u=int6464#3,<s=int6464#7
1191# asm 2: pxor <u=%xmm2,<s=%xmm6
1192pxor %xmm2,%xmm6
1193
1194# qhasm: mt = z1_stack
1195# asm 1: movdqa <z1_stack=stack128#28,>mt=int6464#3
1196# asm 2: movdqa <z1_stack=464(%esp),>mt=%xmm2
1197movdqa 464(%esp),%xmm2
1198
1199# qhasm: mp = z5_stack
1200# asm 1: movdqa <z5_stack=stack128#21,>mp=int6464#5
1201# asm 2: movdqa <z5_stack=352(%esp),>mp=%xmm4
1202movdqa 352(%esp),%xmm4
1203
1204# qhasm: mq = z9_stack
1205# asm 1: movdqa <z9_stack=stack128#32,>mq=int6464#4
1206# asm 2: movdqa <z9_stack=528(%esp),>mq=%xmm3
1207movdqa 528(%esp),%xmm3
1208
1209# qhasm: mr = z13_stack
1210# asm 1: movdqa <z13_stack=stack128#30,>mr=int6464#6
1211# asm 2: movdqa <z13_stack=496(%esp),>mr=%xmm5
1212movdqa 496(%esp),%xmm5
1213
1214# qhasm: z12_stack = s
1215# asm 1: movdqa <s=int6464#7,>z12_stack=stack128#30
1216# asm 2: movdqa <s=%xmm6,>z12_stack=496(%esp)
1217movdqa %xmm6,496(%esp)
1218
1219# qhasm: uint32323232 r += s
1220# asm 1: paddd <s=int6464#7,<r=int6464#2
1221# asm 2: paddd <s=%xmm6,<r=%xmm1
1222paddd %xmm6,%xmm1
1223
1224# qhasm: u = r
1225# asm 1: movdqa <r=int6464#2,>u=int6464#7
1226# asm 2: movdqa <r=%xmm1,>u=%xmm6
1227movdqa %xmm1,%xmm6
1228
1229# qhasm: uint32323232 r >>= 14
1230# asm 1: psrld $14,<r=int6464#2
1231# asm 2: psrld $14,<r=%xmm1
1232psrld $14,%xmm1
1233
1234# qhasm: p ^= r
1235# asm 1: pxor <r=int6464#2,<p=int6464#1
1236# asm 2: pxor <r=%xmm1,<p=%xmm0
1237pxor %xmm1,%xmm0
1238
1239# qhasm: uint32323232 u <<= 18
1240# asm 1: pslld $18,<u=int6464#7
1241# asm 2: pslld $18,<u=%xmm6
1242pslld $18,%xmm6
1243
1244# qhasm: p ^= u
1245# asm 1: pxor <u=int6464#7,<p=int6464#1
1246# asm 2: pxor <u=%xmm6,<p=%xmm0
1247pxor %xmm6,%xmm0
1248
1249# qhasm: z0_stack = p
1250# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#21
1251# asm 2: movdqa <p=%xmm0,>z0_stack=352(%esp)
1252movdqa %xmm0,352(%esp)
1253
1254# qhasm: assign xmm2 to mt
1255
1256# qhasm: assign xmm3 to mq
1257
1258# qhasm: assign xmm4 to mp
1259
1260# qhasm: assign xmm5 to mr
1261
1262# qhasm: ms = mt
1263# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1264# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1265movdqa %xmm2,%xmm6
1266
1267# qhasm: uint32323232 mt += mp
1268# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1269# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1270paddd %xmm4,%xmm2
1271
1272# qhasm: mu = mt
1273# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1274# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1275movdqa %xmm2,%xmm0
1276
1277# qhasm: uint32323232 mt >>= 25
1278# asm 1: psrld $25,<mt=int6464#3
1279# asm 2: psrld $25,<mt=%xmm2
1280psrld $25,%xmm2
1281
1282# qhasm: mq ^= mt
1283# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1284# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1285pxor %xmm2,%xmm3
1286
1287# qhasm: uint32323232 mu <<= 7
1288# asm 1: pslld $7,<mu=int6464#1
1289# asm 2: pslld $7,<mu=%xmm0
1290pslld $7,%xmm0
1291
1292# qhasm: mq ^= mu
1293# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1294# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1295pxor %xmm0,%xmm3
1296
1297# qhasm: z9_stack = mq
1298# asm 1: movdqa <mq=int6464#4,>z9_stack=stack128#32
1299# asm 2: movdqa <mq=%xmm3,>z9_stack=528(%esp)
1300movdqa %xmm3,528(%esp)
1301
1302# qhasm: mt = mp
1303# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1304# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1305movdqa %xmm4,%xmm0
1306
1307# qhasm: uint32323232 mt += mq
1308# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1309# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1310paddd %xmm3,%xmm0
1311
1312# qhasm: mu = mt
1313# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1314# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1315movdqa %xmm0,%xmm1
1316
1317# qhasm: uint32323232 mt >>= 23
1318# asm 1: psrld $23,<mt=int6464#1
1319# asm 2: psrld $23,<mt=%xmm0
1320psrld $23,%xmm0
1321
1322# qhasm: mr ^= mt
1323# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1324# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1325pxor %xmm0,%xmm5
1326
1327# qhasm: uint32323232 mu <<= 9
1328# asm 1: pslld $9,<mu=int6464#2
1329# asm 2: pslld $9,<mu=%xmm1
1330pslld $9,%xmm1
1331
1332# qhasm: mr ^= mu
1333# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1334# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1335pxor %xmm1,%xmm5
1336
1337# qhasm: z13_stack = mr
1338# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#35
1339# asm 2: movdqa <mr=%xmm5,>z13_stack=576(%esp)
1340movdqa %xmm5,576(%esp)
1341
1342# qhasm: uint32323232 mq += mr
1343# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1344# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1345paddd %xmm5,%xmm3
1346
1347# qhasm: mu = mq
1348# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1349# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1350movdqa %xmm3,%xmm0
1351
1352# qhasm: uint32323232 mq >>= 19
1353# asm 1: psrld $19,<mq=int6464#4
1354# asm 2: psrld $19,<mq=%xmm3
1355psrld $19,%xmm3
1356
1357# qhasm: ms ^= mq
1358# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1359# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1360pxor %xmm3,%xmm6
1361
1362# qhasm: uint32323232 mu <<= 13
1363# asm 1: pslld $13,<mu=int6464#1
1364# asm 2: pslld $13,<mu=%xmm0
1365pslld $13,%xmm0
1366
1367# qhasm: ms ^= mu
1368# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1369# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1370pxor %xmm0,%xmm6
1371
1372# qhasm: t = z6_stack
1373# asm 1: movdqa <z6_stack=stack128#26,>t=int6464#3
1374# asm 2: movdqa <z6_stack=432(%esp),>t=%xmm2
1375movdqa 432(%esp),%xmm2
1376
1377# qhasm: p = z10_stack
1378# asm 1: movdqa <z10_stack=stack128#22,>p=int6464#1
1379# asm 2: movdqa <z10_stack=368(%esp),>p=%xmm0
1380movdqa 368(%esp),%xmm0
1381
1382# qhasm: q = z14_stack
1383# asm 1: movdqa <z14_stack=stack128#24,>q=int6464#4
1384# asm 2: movdqa <z14_stack=400(%esp),>q=%xmm3
1385movdqa 400(%esp),%xmm3
1386
1387# qhasm: r = z2_stack
1388# asm 1: movdqa <z2_stack=stack128#31,>r=int6464#2
1389# asm 2: movdqa <z2_stack=512(%esp),>r=%xmm1
1390movdqa 512(%esp),%xmm1
1391
1392# qhasm: z1_stack = ms
1393# asm 1: movdqa <ms=int6464#7,>z1_stack=stack128#22
1394# asm 2: movdqa <ms=%xmm6,>z1_stack=368(%esp)
1395movdqa %xmm6,368(%esp)
1396
1397# qhasm: uint32323232 mr += ms
1398# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1399# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1400paddd %xmm6,%xmm5
1401
1402# qhasm: mu = mr
1403# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1404# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1405movdqa %xmm5,%xmm6
1406
1407# qhasm: uint32323232 mr >>= 14
1408# asm 1: psrld $14,<mr=int6464#6
1409# asm 2: psrld $14,<mr=%xmm5
1410psrld $14,%xmm5
1411
1412# qhasm: mp ^= mr
1413# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1414# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1415pxor %xmm5,%xmm4
1416
1417# qhasm: uint32323232 mu <<= 18
1418# asm 1: pslld $18,<mu=int6464#7
1419# asm 2: pslld $18,<mu=%xmm6
1420pslld $18,%xmm6
1421
1422# qhasm: mp ^= mu
1423# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1424# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1425pxor %xmm6,%xmm4
1426
1427# qhasm: z5_stack = mp
1428# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#24
1429# asm 2: movdqa <mp=%xmm4,>z5_stack=400(%esp)
1430movdqa %xmm4,400(%esp)
1431
1432# qhasm: assign xmm0 to p
1433
1434# qhasm: assign xmm1 to r
1435
1436# qhasm: assign xmm2 to t
1437
1438# qhasm: assign xmm3 to q
1439
1440# qhasm: s = t
1441# asm 1: movdqa <t=int6464#3,>s=int6464#7
1442# asm 2: movdqa <t=%xmm2,>s=%xmm6
1443movdqa %xmm2,%xmm6
1444
1445# qhasm: uint32323232 t += p
1446# asm 1: paddd <p=int6464#1,<t=int6464#3
1447# asm 2: paddd <p=%xmm0,<t=%xmm2
1448paddd %xmm0,%xmm2
1449
1450# qhasm: u = t
1451# asm 1: movdqa <t=int6464#3,>u=int6464#5
1452# asm 2: movdqa <t=%xmm2,>u=%xmm4
1453movdqa %xmm2,%xmm4
1454
1455# qhasm: uint32323232 t >>= 25
1456# asm 1: psrld $25,<t=int6464#3
1457# asm 2: psrld $25,<t=%xmm2
1458psrld $25,%xmm2
1459
1460# qhasm: q ^= t
1461# asm 1: pxor <t=int6464#3,<q=int6464#4
1462# asm 2: pxor <t=%xmm2,<q=%xmm3
1463pxor %xmm2,%xmm3
1464
1465# qhasm: uint32323232 u <<= 7
1466# asm 1: pslld $7,<u=int6464#5
1467# asm 2: pslld $7,<u=%xmm4
1468pslld $7,%xmm4
1469
1470# qhasm: q ^= u
1471# asm 1: pxor <u=int6464#5,<q=int6464#4
1472# asm 2: pxor <u=%xmm4,<q=%xmm3
1473pxor %xmm4,%xmm3
1474
1475# qhasm: z14_stack = q
1476# asm 1: movdqa <q=int6464#4,>z14_stack=stack128#36
1477# asm 2: movdqa <q=%xmm3,>z14_stack=592(%esp)
1478movdqa %xmm3,592(%esp)
1479
1480# qhasm: t = p
1481# asm 1: movdqa <p=int6464#1,>t=int6464#3
1482# asm 2: movdqa <p=%xmm0,>t=%xmm2
1483movdqa %xmm0,%xmm2
1484
1485# qhasm: uint32323232 t += q
1486# asm 1: paddd <q=int6464#4,<t=int6464#3
1487# asm 2: paddd <q=%xmm3,<t=%xmm2
1488paddd %xmm3,%xmm2
1489
1490# qhasm: u = t
1491# asm 1: movdqa <t=int6464#3,>u=int6464#5
1492# asm 2: movdqa <t=%xmm2,>u=%xmm4
1493movdqa %xmm2,%xmm4
1494
1495# qhasm: uint32323232 t >>= 23
1496# asm 1: psrld $23,<t=int6464#3
1497# asm 2: psrld $23,<t=%xmm2
1498psrld $23,%xmm2
1499
1500# qhasm: r ^= t
1501# asm 1: pxor <t=int6464#3,<r=int6464#2
1502# asm 2: pxor <t=%xmm2,<r=%xmm1
1503pxor %xmm2,%xmm1
1504
1505# qhasm: uint32323232 u <<= 9
1506# asm 1: pslld $9,<u=int6464#5
1507# asm 2: pslld $9,<u=%xmm4
1508pslld $9,%xmm4
1509
1510# qhasm: r ^= u
1511# asm 1: pxor <u=int6464#5,<r=int6464#2
1512# asm 2: pxor <u=%xmm4,<r=%xmm1
1513pxor %xmm4,%xmm1
1514
1515# qhasm: z2_stack = r
1516# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#26
1517# asm 2: movdqa <r=%xmm1,>z2_stack=432(%esp)
1518movdqa %xmm1,432(%esp)
1519
1520# qhasm: uint32323232 q += r
1521# asm 1: paddd <r=int6464#2,<q=int6464#4
1522# asm 2: paddd <r=%xmm1,<q=%xmm3
1523paddd %xmm1,%xmm3
1524
1525# qhasm: u = q
1526# asm 1: movdqa <q=int6464#4,>u=int6464#3
1527# asm 2: movdqa <q=%xmm3,>u=%xmm2
1528movdqa %xmm3,%xmm2
1529
1530# qhasm: uint32323232 q >>= 19
1531# asm 1: psrld $19,<q=int6464#4
1532# asm 2: psrld $19,<q=%xmm3
1533psrld $19,%xmm3
1534
1535# qhasm: s ^= q
1536# asm 1: pxor <q=int6464#4,<s=int6464#7
1537# asm 2: pxor <q=%xmm3,<s=%xmm6
1538pxor %xmm3,%xmm6
1539
1540# qhasm: uint32323232 u <<= 13
1541# asm 1: pslld $13,<u=int6464#3
1542# asm 2: pslld $13,<u=%xmm2
1543pslld $13,%xmm2
1544
1545# qhasm: s ^= u
1546# asm 1: pxor <u=int6464#3,<s=int6464#7
1547# asm 2: pxor <u=%xmm2,<s=%xmm6
1548pxor %xmm2,%xmm6
1549
1550# qhasm: mt = z11_stack
1551# asm 1: movdqa <z11_stack=stack128#27,>mt=int6464#3
1552# asm 2: movdqa <z11_stack=448(%esp),>mt=%xmm2
1553movdqa 448(%esp),%xmm2
1554
1555# qhasm: mp = z15_stack
1556# asm 1: movdqa <z15_stack=stack128#23,>mp=int6464#5
1557# asm 2: movdqa <z15_stack=384(%esp),>mp=%xmm4
1558movdqa 384(%esp),%xmm4
1559
1560# qhasm: mq = z3_stack
1561# asm 1: movdqa <z3_stack=stack128#25,>mq=int6464#4
1562# asm 2: movdqa <z3_stack=416(%esp),>mq=%xmm3
1563movdqa 416(%esp),%xmm3
1564
1565# qhasm: mr = z7_stack
1566# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1567# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1568movdqa 480(%esp),%xmm5
1569
1570# qhasm: z6_stack = s
1571# asm 1: movdqa <s=int6464#7,>z6_stack=stack128#23
1572# asm 2: movdqa <s=%xmm6,>z6_stack=384(%esp)
1573movdqa %xmm6,384(%esp)
1574
1575# qhasm: uint32323232 r += s
1576# asm 1: paddd <s=int6464#7,<r=int6464#2
1577# asm 2: paddd <s=%xmm6,<r=%xmm1
1578paddd %xmm6,%xmm1
1579
1580# qhasm: u = r
1581# asm 1: movdqa <r=int6464#2,>u=int6464#7
1582# asm 2: movdqa <r=%xmm1,>u=%xmm6
1583movdqa %xmm1,%xmm6
1584
1585# qhasm: uint32323232 r >>= 14
1586# asm 1: psrld $14,<r=int6464#2
1587# asm 2: psrld $14,<r=%xmm1
1588psrld $14,%xmm1
1589
1590# qhasm: p ^= r
1591# asm 1: pxor <r=int6464#2,<p=int6464#1
1592# asm 2: pxor <r=%xmm1,<p=%xmm0
1593pxor %xmm1,%xmm0
1594
1595# qhasm: uint32323232 u <<= 18
1596# asm 1: pslld $18,<u=int6464#7
1597# asm 2: pslld $18,<u=%xmm6
1598pslld $18,%xmm6
1599
1600# qhasm: p ^= u
1601# asm 1: pxor <u=int6464#7,<p=int6464#1
1602# asm 2: pxor <u=%xmm6,<p=%xmm0
1603pxor %xmm6,%xmm0
1604
1605# qhasm: z10_stack = p
1606# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#27
1607# asm 2: movdqa <p=%xmm0,>z10_stack=448(%esp)
1608movdqa %xmm0,448(%esp)
1609
1610# qhasm: assign xmm2 to mt
1611
1612# qhasm: assign xmm3 to mq
1613
1614# qhasm: assign xmm4 to mp
1615
1616# qhasm: assign xmm5 to mr
1617
1618# qhasm: ms = mt
1619# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1620# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1621movdqa %xmm2,%xmm6
1622
1623# qhasm: uint32323232 mt += mp
1624# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1625# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1626paddd %xmm4,%xmm2
1627
1628# qhasm: mu = mt
1629# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1630# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1631movdqa %xmm2,%xmm0
1632
1633# qhasm: uint32323232 mt >>= 25
1634# asm 1: psrld $25,<mt=int6464#3
1635# asm 2: psrld $25,<mt=%xmm2
1636psrld $25,%xmm2
1637
1638# qhasm: mq ^= mt
1639# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1640# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1641pxor %xmm2,%xmm3
1642
1643# qhasm: uint32323232 mu <<= 7
1644# asm 1: pslld $7,<mu=int6464#1
1645# asm 2: pslld $7,<mu=%xmm0
1646pslld $7,%xmm0
1647
1648# qhasm: mq ^= mu
1649# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1650# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1651pxor %xmm0,%xmm3
1652
1653# qhasm: z3_stack = mq
1654# asm 1: movdqa <mq=int6464#4,>z3_stack=stack128#25
1655# asm 2: movdqa <mq=%xmm3,>z3_stack=416(%esp)
1656movdqa %xmm3,416(%esp)
1657
1658# qhasm: mt = mp
1659# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1660# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1661movdqa %xmm4,%xmm0
1662
1663# qhasm: uint32323232 mt += mq
1664# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1665# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1666paddd %xmm3,%xmm0
1667
1668# qhasm: mu = mt
1669# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1670# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1671movdqa %xmm0,%xmm1
1672
1673# qhasm: uint32323232 mt >>= 23
1674# asm 1: psrld $23,<mt=int6464#1
1675# asm 2: psrld $23,<mt=%xmm0
1676psrld $23,%xmm0
1677
1678# qhasm: mr ^= mt
1679# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1680# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1681pxor %xmm0,%xmm5
1682
1683# qhasm: uint32323232 mu <<= 9
1684# asm 1: pslld $9,<mu=int6464#2
1685# asm 2: pslld $9,<mu=%xmm1
1686pslld $9,%xmm1
1687
1688# qhasm: mr ^= mu
1689# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1690# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1691pxor %xmm1,%xmm5
1692
1693# qhasm: z7_stack = mr
1694# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
1695# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
1696movdqa %xmm5,480(%esp)
1697
1698# qhasm: uint32323232 mq += mr
1699# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1700# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1701paddd %xmm5,%xmm3
1702
1703# qhasm: mu = mq
1704# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1705# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1706movdqa %xmm3,%xmm0
1707
1708# qhasm: uint32323232 mq >>= 19
1709# asm 1: psrld $19,<mq=int6464#4
1710# asm 2: psrld $19,<mq=%xmm3
1711psrld $19,%xmm3
1712
1713# qhasm: ms ^= mq
1714# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1715# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1716pxor %xmm3,%xmm6
1717
1718# qhasm: uint32323232 mu <<= 13
1719# asm 1: pslld $13,<mu=int6464#1
1720# asm 2: pslld $13,<mu=%xmm0
1721pslld $13,%xmm0
1722
1723# qhasm: ms ^= mu
1724# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1725# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1726pxor %xmm0,%xmm6
1727
1728# qhasm: t = z3_stack
1729# asm 1: movdqa <z3_stack=stack128#25,>t=int6464#3
1730# asm 2: movdqa <z3_stack=416(%esp),>t=%xmm2
1731movdqa 416(%esp),%xmm2
1732
1733# qhasm: p = z0_stack
1734# asm 1: movdqa <z0_stack=stack128#21,>p=int6464#1
1735# asm 2: movdqa <z0_stack=352(%esp),>p=%xmm0
1736movdqa 352(%esp),%xmm0
1737
1738# qhasm: q = z1_stack
1739# asm 1: movdqa <z1_stack=stack128#22,>q=int6464#4
1740# asm 2: movdqa <z1_stack=368(%esp),>q=%xmm3
1741movdqa 368(%esp),%xmm3
1742
1743# qhasm: r = z2_stack
1744# asm 1: movdqa <z2_stack=stack128#26,>r=int6464#2
1745# asm 2: movdqa <z2_stack=432(%esp),>r=%xmm1
1746movdqa 432(%esp),%xmm1
1747
1748# qhasm: z11_stack = ms
1749# asm 1: movdqa <ms=int6464#7,>z11_stack=stack128#21
1750# asm 2: movdqa <ms=%xmm6,>z11_stack=352(%esp)
1751movdqa %xmm6,352(%esp)
1752
1753# qhasm: uint32323232 mr += ms
1754# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1755# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1756paddd %xmm6,%xmm5
1757
1758# qhasm: mu = mr
1759# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1760# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1761movdqa %xmm5,%xmm6
1762
1763# qhasm: uint32323232 mr >>= 14
1764# asm 1: psrld $14,<mr=int6464#6
1765# asm 2: psrld $14,<mr=%xmm5
1766psrld $14,%xmm5
1767
1768# qhasm: mp ^= mr
1769# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1770# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1771pxor %xmm5,%xmm4
1772
1773# qhasm: uint32323232 mu <<= 18
1774# asm 1: pslld $18,<mu=int6464#7
1775# asm 2: pslld $18,<mu=%xmm6
1776pslld $18,%xmm6
1777
1778# qhasm: mp ^= mu
1779# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1780# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1781pxor %xmm6,%xmm4
1782
1783# qhasm: z15_stack = mp
1784# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#22
1785# asm 2: movdqa <mp=%xmm4,>z15_stack=368(%esp)
1786movdqa %xmm4,368(%esp)
1787
1788# qhasm: assign xmm0 to p
1789
1790# qhasm: assign xmm1 to r
1791
1792# qhasm: assign xmm2 to t
1793
1794# qhasm: assign xmm3 to q
1795
1796# qhasm: s = t
1797# asm 1: movdqa <t=int6464#3,>s=int6464#7
1798# asm 2: movdqa <t=%xmm2,>s=%xmm6
1799movdqa %xmm2,%xmm6
1800
1801# qhasm: uint32323232 t += p
1802# asm 1: paddd <p=int6464#1,<t=int6464#3
1803# asm 2: paddd <p=%xmm0,<t=%xmm2
1804paddd %xmm0,%xmm2
1805
1806# qhasm: u = t
1807# asm 1: movdqa <t=int6464#3,>u=int6464#5
1808# asm 2: movdqa <t=%xmm2,>u=%xmm4
1809movdqa %xmm2,%xmm4
1810
1811# qhasm: uint32323232 t >>= 25
1812# asm 1: psrld $25,<t=int6464#3
1813# asm 2: psrld $25,<t=%xmm2
1814psrld $25,%xmm2
1815
1816# qhasm: q ^= t
1817# asm 1: pxor <t=int6464#3,<q=int6464#4
1818# asm 2: pxor <t=%xmm2,<q=%xmm3
1819pxor %xmm2,%xmm3
1820
1821# qhasm: uint32323232 u <<= 7
1822# asm 1: pslld $7,<u=int6464#5
1823# asm 2: pslld $7,<u=%xmm4
1824pslld $7,%xmm4
1825
1826# qhasm: q ^= u
1827# asm 1: pxor <u=int6464#5,<q=int6464#4
1828# asm 2: pxor <u=%xmm4,<q=%xmm3
1829pxor %xmm4,%xmm3
1830
1831# qhasm: z1_stack = q
1832# asm 1: movdqa <q=int6464#4,>z1_stack=stack128#28
1833# asm 2: movdqa <q=%xmm3,>z1_stack=464(%esp)
1834movdqa %xmm3,464(%esp)
1835
1836# qhasm: t = p
1837# asm 1: movdqa <p=int6464#1,>t=int6464#3
1838# asm 2: movdqa <p=%xmm0,>t=%xmm2
1839movdqa %xmm0,%xmm2
1840
1841# qhasm: uint32323232 t += q
1842# asm 1: paddd <q=int6464#4,<t=int6464#3
1843# asm 2: paddd <q=%xmm3,<t=%xmm2
1844paddd %xmm3,%xmm2
1845
1846# qhasm: u = t
1847# asm 1: movdqa <t=int6464#3,>u=int6464#5
1848# asm 2: movdqa <t=%xmm2,>u=%xmm4
1849movdqa %xmm2,%xmm4
1850
1851# qhasm: uint32323232 t >>= 23
1852# asm 1: psrld $23,<t=int6464#3
1853# asm 2: psrld $23,<t=%xmm2
1854psrld $23,%xmm2
1855
1856# qhasm: r ^= t
1857# asm 1: pxor <t=int6464#3,<r=int6464#2
1858# asm 2: pxor <t=%xmm2,<r=%xmm1
1859pxor %xmm2,%xmm1
1860
1861# qhasm: uint32323232 u <<= 9
1862# asm 1: pslld $9,<u=int6464#5
1863# asm 2: pslld $9,<u=%xmm4
1864pslld $9,%xmm4
1865
1866# qhasm: r ^= u
1867# asm 1: pxor <u=int6464#5,<r=int6464#2
1868# asm 2: pxor <u=%xmm4,<r=%xmm1
1869pxor %xmm4,%xmm1
1870
1871# qhasm: z2_stack = r
1872# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#31
1873# asm 2: movdqa <r=%xmm1,>z2_stack=512(%esp)
1874movdqa %xmm1,512(%esp)
1875
1876# qhasm: uint32323232 q += r
1877# asm 1: paddd <r=int6464#2,<q=int6464#4
1878# asm 2: paddd <r=%xmm1,<q=%xmm3
1879paddd %xmm1,%xmm3
1880
1881# qhasm: u = q
1882# asm 1: movdqa <q=int6464#4,>u=int6464#3
1883# asm 2: movdqa <q=%xmm3,>u=%xmm2
1884movdqa %xmm3,%xmm2
1885
1886# qhasm: uint32323232 q >>= 19
1887# asm 1: psrld $19,<q=int6464#4
1888# asm 2: psrld $19,<q=%xmm3
1889psrld $19,%xmm3
1890
1891# qhasm: s ^= q
1892# asm 1: pxor <q=int6464#4,<s=int6464#7
1893# asm 2: pxor <q=%xmm3,<s=%xmm6
1894pxor %xmm3,%xmm6
1895
1896# qhasm: uint32323232 u <<= 13
1897# asm 1: pslld $13,<u=int6464#3
1898# asm 2: pslld $13,<u=%xmm2
1899pslld $13,%xmm2
1900
1901# qhasm: s ^= u
1902# asm 1: pxor <u=int6464#3,<s=int6464#7
1903# asm 2: pxor <u=%xmm2,<s=%xmm6
1904pxor %xmm2,%xmm6
1905
1906# qhasm: mt = z4_stack
1907# asm 1: movdqa <z4_stack=stack128#33,>mt=int6464#3
1908# asm 2: movdqa <z4_stack=544(%esp),>mt=%xmm2
1909movdqa 544(%esp),%xmm2
1910
1911# qhasm: mp = z5_stack
1912# asm 1: movdqa <z5_stack=stack128#24,>mp=int6464#5
1913# asm 2: movdqa <z5_stack=400(%esp),>mp=%xmm4
1914movdqa 400(%esp),%xmm4
1915
1916# qhasm: mq = z6_stack
1917# asm 1: movdqa <z6_stack=stack128#23,>mq=int6464#4
1918# asm 2: movdqa <z6_stack=384(%esp),>mq=%xmm3
1919movdqa 384(%esp),%xmm3
1920
1921# qhasm: mr = z7_stack
1922# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1923# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1924movdqa 480(%esp),%xmm5
1925
1926# qhasm: z3_stack = s
1927# asm 1: movdqa <s=int6464#7,>z3_stack=stack128#25
1928# asm 2: movdqa <s=%xmm6,>z3_stack=416(%esp)
1929movdqa %xmm6,416(%esp)
1930
1931# qhasm: uint32323232 r += s
1932# asm 1: paddd <s=int6464#7,<r=int6464#2
1933# asm 2: paddd <s=%xmm6,<r=%xmm1
1934paddd %xmm6,%xmm1
1935
1936# qhasm: u = r
1937# asm 1: movdqa <r=int6464#2,>u=int6464#7
1938# asm 2: movdqa <r=%xmm1,>u=%xmm6
1939movdqa %xmm1,%xmm6
1940
1941# qhasm: uint32323232 r >>= 14
1942# asm 1: psrld $14,<r=int6464#2
1943# asm 2: psrld $14,<r=%xmm1
1944psrld $14,%xmm1
1945
1946# qhasm: p ^= r
1947# asm 1: pxor <r=int6464#2,<p=int6464#1
1948# asm 2: pxor <r=%xmm1,<p=%xmm0
1949pxor %xmm1,%xmm0
1950
1951# qhasm: uint32323232 u <<= 18
1952# asm 1: pslld $18,<u=int6464#7
1953# asm 2: pslld $18,<u=%xmm6
1954pslld $18,%xmm6
1955
1956# qhasm: p ^= u
1957# asm 1: pxor <u=int6464#7,<p=int6464#1
1958# asm 2: pxor <u=%xmm6,<p=%xmm0
1959pxor %xmm6,%xmm0
1960
1961# qhasm: z0_stack = p
1962# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1963# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1964movdqa %xmm0,544(%esp)
1965
1966# qhasm: assign xmm2 to mt
1967
1968# qhasm: assign xmm3 to mq
1969
1970# qhasm: assign xmm4 to mp
1971
1972# qhasm: assign xmm5 to mr
1973
1974# qhasm: ms = mt
1975# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1976# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1977movdqa %xmm2,%xmm6
1978
1979# qhasm: uint32323232 mt += mp
1980# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1981# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1982paddd %xmm4,%xmm2
1983
1984# qhasm: mu = mt
1985# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1986# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1987movdqa %xmm2,%xmm0
1988
1989# qhasm: uint32323232 mt >>= 25
1990# asm 1: psrld $25,<mt=int6464#3
1991# asm 2: psrld $25,<mt=%xmm2
1992psrld $25,%xmm2
1993
1994# qhasm: mq ^= mt
1995# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1996# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1997pxor %xmm2,%xmm3
1998
1999# qhasm: uint32323232 mu <<= 7
2000# asm 1: pslld $7,<mu=int6464#1
2001# asm 2: pslld $7,<mu=%xmm0
2002pslld $7,%xmm0
2003
2004# qhasm: mq ^= mu
2005# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2006# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2007pxor %xmm0,%xmm3
2008
2009# qhasm: z6_stack = mq
2010# asm 1: movdqa <mq=int6464#4,>z6_stack=stack128#26
2011# asm 2: movdqa <mq=%xmm3,>z6_stack=432(%esp)
2012movdqa %xmm3,432(%esp)
2013
2014# qhasm: mt = mp
2015# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2016# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2017movdqa %xmm4,%xmm0
2018
2019# qhasm: uint32323232 mt += mq
2020# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2021# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2022paddd %xmm3,%xmm0
2023
2024# qhasm: mu = mt
2025# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2026# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2027movdqa %xmm0,%xmm1
2028
2029# qhasm: uint32323232 mt >>= 23
2030# asm 1: psrld $23,<mt=int6464#1
2031# asm 2: psrld $23,<mt=%xmm0
2032psrld $23,%xmm0
2033
2034# qhasm: mr ^= mt
2035# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2036# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2037pxor %xmm0,%xmm5
2038
2039# qhasm: uint32323232 mu <<= 9
2040# asm 1: pslld $9,<mu=int6464#2
2041# asm 2: pslld $9,<mu=%xmm1
2042pslld $9,%xmm1
2043
2044# qhasm: mr ^= mu
2045# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2046# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2047pxor %xmm1,%xmm5
2048
2049# qhasm: z7_stack = mr
2050# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
2051# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
2052movdqa %xmm5,480(%esp)
2053
2054# qhasm: uint32323232 mq += mr
2055# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2056# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2057paddd %xmm5,%xmm3
2058
2059# qhasm: mu = mq
2060# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2061# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2062movdqa %xmm3,%xmm0
2063
2064# qhasm: uint32323232 mq >>= 19
2065# asm 1: psrld $19,<mq=int6464#4
2066# asm 2: psrld $19,<mq=%xmm3
2067psrld $19,%xmm3
2068
2069# qhasm: ms ^= mq
2070# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2071# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2072pxor %xmm3,%xmm6
2073
2074# qhasm: uint32323232 mu <<= 13
2075# asm 1: pslld $13,<mu=int6464#1
2076# asm 2: pslld $13,<mu=%xmm0
2077pslld $13,%xmm0
2078
2079# qhasm: ms ^= mu
2080# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2081# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2082pxor %xmm0,%xmm6
2083
2084# qhasm: t = z9_stack
2085# asm 1: movdqa <z9_stack=stack128#32,>t=int6464#3
2086# asm 2: movdqa <z9_stack=528(%esp),>t=%xmm2
2087movdqa 528(%esp),%xmm2
2088
2089# qhasm: p = z10_stack
2090# asm 1: movdqa <z10_stack=stack128#27,>p=int6464#1
2091# asm 2: movdqa <z10_stack=448(%esp),>p=%xmm0
2092movdqa 448(%esp),%xmm0
2093
2094# qhasm: q = z11_stack
2095# asm 1: movdqa <z11_stack=stack128#21,>q=int6464#4
2096# asm 2: movdqa <z11_stack=352(%esp),>q=%xmm3
2097movdqa 352(%esp),%xmm3
2098
2099# qhasm: r = z8_stack
2100# asm 1: movdqa <z8_stack=stack128#34,>r=int6464#2
2101# asm 2: movdqa <z8_stack=560(%esp),>r=%xmm1
2102movdqa 560(%esp),%xmm1
2103
2104# qhasm: z4_stack = ms
2105# asm 1: movdqa <ms=int6464#7,>z4_stack=stack128#34
2106# asm 2: movdqa <ms=%xmm6,>z4_stack=560(%esp)
2107movdqa %xmm6,560(%esp)
2108
2109# qhasm: uint32323232 mr += ms
2110# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2111# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2112paddd %xmm6,%xmm5
2113
2114# qhasm: mu = mr
2115# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2116# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2117movdqa %xmm5,%xmm6
2118
2119# qhasm: uint32323232 mr >>= 14
2120# asm 1: psrld $14,<mr=int6464#6
2121# asm 2: psrld $14,<mr=%xmm5
2122psrld $14,%xmm5
2123
2124# qhasm: mp ^= mr
2125# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2126# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2127pxor %xmm5,%xmm4
2128
2129# qhasm: uint32323232 mu <<= 18
2130# asm 1: pslld $18,<mu=int6464#7
2131# asm 2: pslld $18,<mu=%xmm6
2132pslld $18,%xmm6
2133
2134# qhasm: mp ^= mu
2135# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2136# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2137pxor %xmm6,%xmm4
2138
2139# qhasm: z5_stack = mp
2140# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#21
2141# asm 2: movdqa <mp=%xmm4,>z5_stack=352(%esp)
2142movdqa %xmm4,352(%esp)
2143
2144# qhasm: assign xmm0 to p
2145
2146# qhasm: assign xmm1 to r
2147
2148# qhasm: assign xmm2 to t
2149
2150# qhasm: assign xmm3 to q
2151
2152# qhasm: s = t
2153# asm 1: movdqa <t=int6464#3,>s=int6464#7
2154# asm 2: movdqa <t=%xmm2,>s=%xmm6
2155movdqa %xmm2,%xmm6
2156
2157# qhasm: uint32323232 t += p
2158# asm 1: paddd <p=int6464#1,<t=int6464#3
2159# asm 2: paddd <p=%xmm0,<t=%xmm2
2160paddd %xmm0,%xmm2
2161
2162# qhasm: u = t
2163# asm 1: movdqa <t=int6464#3,>u=int6464#5
2164# asm 2: movdqa <t=%xmm2,>u=%xmm4
2165movdqa %xmm2,%xmm4
2166
2167# qhasm: uint32323232 t >>= 25
2168# asm 1: psrld $25,<t=int6464#3
2169# asm 2: psrld $25,<t=%xmm2
2170psrld $25,%xmm2
2171
2172# qhasm: q ^= t
2173# asm 1: pxor <t=int6464#3,<q=int6464#4
2174# asm 2: pxor <t=%xmm2,<q=%xmm3
2175pxor %xmm2,%xmm3
2176
2177# qhasm: uint32323232 u <<= 7
2178# asm 1: pslld $7,<u=int6464#5
2179# asm 2: pslld $7,<u=%xmm4
2180pslld $7,%xmm4
2181
2182# qhasm: q ^= u
2183# asm 1: pxor <u=int6464#5,<q=int6464#4
2184# asm 2: pxor <u=%xmm4,<q=%xmm3
2185pxor %xmm4,%xmm3
2186
2187# qhasm: z11_stack = q
2188# asm 1: movdqa <q=int6464#4,>z11_stack=stack128#27
2189# asm 2: movdqa <q=%xmm3,>z11_stack=448(%esp)
2190movdqa %xmm3,448(%esp)
2191
2192# qhasm: t = p
2193# asm 1: movdqa <p=int6464#1,>t=int6464#3
2194# asm 2: movdqa <p=%xmm0,>t=%xmm2
2195movdqa %xmm0,%xmm2
2196
2197# qhasm: uint32323232 t += q
2198# asm 1: paddd <q=int6464#4,<t=int6464#3
2199# asm 2: paddd <q=%xmm3,<t=%xmm2
2200paddd %xmm3,%xmm2
2201
2202# qhasm: u = t
2203# asm 1: movdqa <t=int6464#3,>u=int6464#5
2204# asm 2: movdqa <t=%xmm2,>u=%xmm4
2205movdqa %xmm2,%xmm4
2206
2207# qhasm: uint32323232 t >>= 23
2208# asm 1: psrld $23,<t=int6464#3
2209# asm 2: psrld $23,<t=%xmm2
2210psrld $23,%xmm2
2211
2212# qhasm: r ^= t
2213# asm 1: pxor <t=int6464#3,<r=int6464#2
2214# asm 2: pxor <t=%xmm2,<r=%xmm1
2215pxor %xmm2,%xmm1
2216
2217# qhasm: uint32323232 u <<= 9
2218# asm 1: pslld $9,<u=int6464#5
2219# asm 2: pslld $9,<u=%xmm4
2220pslld $9,%xmm4
2221
2222# qhasm: r ^= u
2223# asm 1: pxor <u=int6464#5,<r=int6464#2
2224# asm 2: pxor <u=%xmm4,<r=%xmm1
2225pxor %xmm4,%xmm1
2226
2227# qhasm: z8_stack = r
2228# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#37
2229# asm 2: movdqa <r=%xmm1,>z8_stack=608(%esp)
2230movdqa %xmm1,608(%esp)
2231
2232# qhasm: uint32323232 q += r
2233# asm 1: paddd <r=int6464#2,<q=int6464#4
2234# asm 2: paddd <r=%xmm1,<q=%xmm3
2235paddd %xmm1,%xmm3
2236
2237# qhasm: u = q
2238# asm 1: movdqa <q=int6464#4,>u=int6464#3
2239# asm 2: movdqa <q=%xmm3,>u=%xmm2
2240movdqa %xmm3,%xmm2
2241
2242# qhasm: uint32323232 q >>= 19
2243# asm 1: psrld $19,<q=int6464#4
2244# asm 2: psrld $19,<q=%xmm3
2245psrld $19,%xmm3
2246
2247# qhasm: s ^= q
2248# asm 1: pxor <q=int6464#4,<s=int6464#7
2249# asm 2: pxor <q=%xmm3,<s=%xmm6
2250pxor %xmm3,%xmm6
2251
2252# qhasm: uint32323232 u <<= 13
2253# asm 1: pslld $13,<u=int6464#3
2254# asm 2: pslld $13,<u=%xmm2
2255pslld $13,%xmm2
2256
2257# qhasm: s ^= u
2258# asm 1: pxor <u=int6464#3,<s=int6464#7
2259# asm 2: pxor <u=%xmm2,<s=%xmm6
2260pxor %xmm2,%xmm6
2261
2262# qhasm: mt = z14_stack
2263# asm 1: movdqa <z14_stack=stack128#36,>mt=int6464#3
2264# asm 2: movdqa <z14_stack=592(%esp),>mt=%xmm2
2265movdqa 592(%esp),%xmm2
2266
2267# qhasm: mp = z15_stack
2268# asm 1: movdqa <z15_stack=stack128#22,>mp=int6464#5
2269# asm 2: movdqa <z15_stack=368(%esp),>mp=%xmm4
2270movdqa 368(%esp),%xmm4
2271
2272# qhasm: mq = z12_stack
2273# asm 1: movdqa <z12_stack=stack128#30,>mq=int6464#4
2274# asm 2: movdqa <z12_stack=496(%esp),>mq=%xmm3
2275movdqa 496(%esp),%xmm3
2276
2277# qhasm: mr = z13_stack
2278# asm 1: movdqa <z13_stack=stack128#35,>mr=int6464#6
2279# asm 2: movdqa <z13_stack=576(%esp),>mr=%xmm5
2280movdqa 576(%esp),%xmm5
2281
2282# qhasm: z9_stack = s
2283# asm 1: movdqa <s=int6464#7,>z9_stack=stack128#32
2284# asm 2: movdqa <s=%xmm6,>z9_stack=528(%esp)
2285movdqa %xmm6,528(%esp)
2286
2287# qhasm: uint32323232 r += s
2288# asm 1: paddd <s=int6464#7,<r=int6464#2
2289# asm 2: paddd <s=%xmm6,<r=%xmm1
2290paddd %xmm6,%xmm1
2291
2292# qhasm: u = r
2293# asm 1: movdqa <r=int6464#2,>u=int6464#7
2294# asm 2: movdqa <r=%xmm1,>u=%xmm6
2295movdqa %xmm1,%xmm6
2296
2297# qhasm: uint32323232 r >>= 14
2298# asm 1: psrld $14,<r=int6464#2
2299# asm 2: psrld $14,<r=%xmm1
2300psrld $14,%xmm1
2301
2302# qhasm: p ^= r
2303# asm 1: pxor <r=int6464#2,<p=int6464#1
2304# asm 2: pxor <r=%xmm1,<p=%xmm0
2305pxor %xmm1,%xmm0
2306
2307# qhasm: uint32323232 u <<= 18
2308# asm 1: pslld $18,<u=int6464#7
2309# asm 2: pslld $18,<u=%xmm6
2310pslld $18,%xmm6
2311
2312# qhasm: p ^= u
2313# asm 1: pxor <u=int6464#7,<p=int6464#1
2314# asm 2: pxor <u=%xmm6,<p=%xmm0
2315pxor %xmm6,%xmm0
2316
2317# qhasm: z10_stack = p
2318# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#22
2319# asm 2: movdqa <p=%xmm0,>z10_stack=368(%esp)
2320movdqa %xmm0,368(%esp)
2321
2322# qhasm: assign xmm2 to mt
2323
2324# qhasm: assign xmm3 to mq
2325
2326# qhasm: assign xmm4 to mp
2327
2328# qhasm: assign xmm5 to mr
2329
2330# qhasm: ms = mt
2331# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
2332# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
2333movdqa %xmm2,%xmm6
2334
2335# qhasm: uint32323232 mt += mp
2336# asm 1: paddd <mp=int6464#5,<mt=int6464#3
2337# asm 2: paddd <mp=%xmm4,<mt=%xmm2
2338paddd %xmm4,%xmm2
2339
2340# qhasm: mu = mt
2341# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
2342# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
2343movdqa %xmm2,%xmm0
2344
2345# qhasm: uint32323232 mt >>= 25
2346# asm 1: psrld $25,<mt=int6464#3
2347# asm 2: psrld $25,<mt=%xmm2
2348psrld $25,%xmm2
2349
2350# qhasm: mq ^= mt
2351# asm 1: pxor <mt=int6464#3,<mq=int6464#4
2352# asm 2: pxor <mt=%xmm2,<mq=%xmm3
2353pxor %xmm2,%xmm3
2354
2355# qhasm: uint32323232 mu <<= 7
2356# asm 1: pslld $7,<mu=int6464#1
2357# asm 2: pslld $7,<mu=%xmm0
2358pslld $7,%xmm0
2359
2360# qhasm: mq ^= mu
2361# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2362# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2363pxor %xmm0,%xmm3
2364
2365# qhasm: z12_stack = mq
2366# asm 1: movdqa <mq=int6464#4,>z12_stack=stack128#35
2367# asm 2: movdqa <mq=%xmm3,>z12_stack=576(%esp)
2368movdqa %xmm3,576(%esp)
2369
2370# qhasm: mt = mp
2371# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2372# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2373movdqa %xmm4,%xmm0
2374
2375# qhasm: uint32323232 mt += mq
2376# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2377# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2378paddd %xmm3,%xmm0
2379
2380# qhasm: mu = mt
2381# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2382# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2383movdqa %xmm0,%xmm1
2384
2385# qhasm: uint32323232 mt >>= 23
2386# asm 1: psrld $23,<mt=int6464#1
2387# asm 2: psrld $23,<mt=%xmm0
2388psrld $23,%xmm0
2389
2390# qhasm: mr ^= mt
2391# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2392# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2393pxor %xmm0,%xmm5
2394
2395# qhasm: uint32323232 mu <<= 9
2396# asm 1: pslld $9,<mu=int6464#2
2397# asm 2: pslld $9,<mu=%xmm1
2398pslld $9,%xmm1
2399
2400# qhasm: mr ^= mu
2401# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2402# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2403pxor %xmm1,%xmm5
2404
2405# qhasm: z13_stack = mr
2406# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#30
2407# asm 2: movdqa <mr=%xmm5,>z13_stack=496(%esp)
2408movdqa %xmm5,496(%esp)
2409
2410# qhasm: uint32323232 mq += mr
2411# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2412# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2413paddd %xmm5,%xmm3
2414
2415# qhasm: mu = mq
2416# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2417# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2418movdqa %xmm3,%xmm0
2419
2420# qhasm: uint32323232 mq >>= 19
2421# asm 1: psrld $19,<mq=int6464#4
2422# asm 2: psrld $19,<mq=%xmm3
2423psrld $19,%xmm3
2424
2425# qhasm: ms ^= mq
2426# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2427# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2428pxor %xmm3,%xmm6
2429
2430# qhasm: uint32323232 mu <<= 13
2431# asm 1: pslld $13,<mu=int6464#1
2432# asm 2: pslld $13,<mu=%xmm0
2433pslld $13,%xmm0
2434
2435# qhasm: ms ^= mu
2436# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2437# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2438pxor %xmm0,%xmm6
2439
2440# qhasm: t = z12_stack
2441# asm 1: movdqa <z12_stack=stack128#35,>t=int6464#3
2442# asm 2: movdqa <z12_stack=576(%esp),>t=%xmm2
2443movdqa 576(%esp),%xmm2
2444
2445# qhasm: p = z0_stack
2446# asm 1: movdqa <z0_stack=stack128#33,>p=int6464#1
2447# asm 2: movdqa <z0_stack=544(%esp),>p=%xmm0
2448movdqa 544(%esp),%xmm0
2449
2450# qhasm: q = z4_stack
2451# asm 1: movdqa <z4_stack=stack128#34,>q=int6464#4
2452# asm 2: movdqa <z4_stack=560(%esp),>q=%xmm3
2453movdqa 560(%esp),%xmm3
2454
2455# qhasm: r = z8_stack
2456# asm 1: movdqa <z8_stack=stack128#37,>r=int6464#2
2457# asm 2: movdqa <z8_stack=608(%esp),>r=%xmm1
2458movdqa 608(%esp),%xmm1
2459
2460# qhasm: z14_stack = ms
2461# asm 1: movdqa <ms=int6464#7,>z14_stack=stack128#24
2462# asm 2: movdqa <ms=%xmm6,>z14_stack=400(%esp)
2463movdqa %xmm6,400(%esp)
2464
2465# qhasm: uint32323232 mr += ms
2466# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2467# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2468paddd %xmm6,%xmm5
2469
2470# qhasm: mu = mr
2471# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2472# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2473movdqa %xmm5,%xmm6
2474
2475# qhasm: uint32323232 mr >>= 14
2476# asm 1: psrld $14,<mr=int6464#6
2477# asm 2: psrld $14,<mr=%xmm5
2478psrld $14,%xmm5
2479
2480# qhasm: mp ^= mr
2481# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2482# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2483pxor %xmm5,%xmm4
2484
2485# qhasm: uint32323232 mu <<= 18
2486# asm 1: pslld $18,<mu=int6464#7
2487# asm 2: pslld $18,<mu=%xmm6
2488pslld $18,%xmm6
2489
2490# qhasm: mp ^= mu
2491# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2492# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2493pxor %xmm6,%xmm4
2494
2495# qhasm: z15_stack = mp
2496# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#23
2497# asm 2: movdqa <mp=%xmm4,>z15_stack=384(%esp)
2498movdqa %xmm4,384(%esp)
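# (z15 is the last updated state word spilled back to its stack slot before the loop-control check below)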
2499
2500# qhasm: unsigned>? i -= 2
2501# asm 1: sub $2,<i=int32#1
2502# asm 2: sub $2,<i=%eax
2503sub $2,%eax
2504# comment:fp stack unchanged by jump
2505
2506# qhasm: goto mainloop1 if unsigned>
2507ja ._mainloop1
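# (end of ._mainloop1: i drops by 2 per pass, apparently one column/row double-round per iteration,
#  applied to four 64-byte blocks in parallel; the code below unpacks and outputs the results)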
2508
2509# qhasm: out = out_stack
2510# asm 1: movl <out_stack=stack32#6,>out=int32#6
2511# asm 2: movl <out_stack=20(%esp),>out=%edi
2512movl 20(%esp),%edi
2513
2514# qhasm: z0 = z0_stack
2515# asm 1: movdqa <z0_stack=stack128#33,>z0=int6464#1
2516# asm 2: movdqa <z0_stack=544(%esp),>z0=%xmm0
2517movdqa 544(%esp),%xmm0
2518
2519# qhasm: z1 = z1_stack
2520# asm 1: movdqa <z1_stack=stack128#28,>z1=int6464#2
2521# asm 2: movdqa <z1_stack=464(%esp),>z1=%xmm1
2522movdqa 464(%esp),%xmm1
2523
2524# qhasm: z2 = z2_stack
2525# asm 1: movdqa <z2_stack=stack128#31,>z2=int6464#3
2526# asm 2: movdqa <z2_stack=512(%esp),>z2=%xmm2
2527movdqa 512(%esp),%xmm2
2528
2529# qhasm: z3 = z3_stack
2530# asm 1: movdqa <z3_stack=stack128#25,>z3=int6464#4
2531# asm 2: movdqa <z3_stack=416(%esp),>z3=%xmm3
2532movdqa 416(%esp),%xmm3
2533
2534# qhasm: uint32323232 z0 += orig0
2535# asm 1: paddd <orig0=stack128#8,<z0=int6464#1
2536# asm 2: paddd <orig0=144(%esp),<z0=%xmm0
2537paddd 144(%esp),%xmm0
2538
2539# qhasm: uint32323232 z1 += orig1
2540# asm 1: paddd <orig1=stack128#12,<z1=int6464#2
2541# asm 2: paddd <orig1=208(%esp),<z1=%xmm1
2542paddd 208(%esp),%xmm1
2543
2544# qhasm: uint32323232 z2 += orig2
2545# asm 1: paddd <orig2=stack128#15,<z2=int6464#3
2546# asm 2: paddd <orig2=256(%esp),<z2=%xmm2
2547paddd 256(%esp),%xmm2
2548
2549# qhasm: uint32323232 z3 += orig3
2550# asm 1: paddd <orig3=stack128#18,<z3=int6464#4
2551# asm 2: paddd <orig3=304(%esp),<z3=%xmm3
2552paddd 304(%esp),%xmm3
2553
2554# qhasm: in0 = z0
2555# asm 1: movd <z0=int6464#1,>in0=int32#1
2556# asm 2: movd <z0=%xmm0,>in0=%eax
2557movd %xmm0,%eax
2558
2559# qhasm: in1 = z1
2560# asm 1: movd <z1=int6464#2,>in1=int32#2
2561# asm 2: movd <z1=%xmm1,>in1=%ecx
2562movd %xmm1,%ecx
2563
2564# qhasm: in2 = z2
2565# asm 1: movd <z2=int6464#3,>in2=int32#3
2566# asm 2: movd <z2=%xmm2,>in2=%edx
2567movd %xmm2,%edx
2568
2569# qhasm: in3 = z3
2570# asm 1: movd <z3=int6464#4,>in3=int32#4
2571# asm 2: movd <z3=%xmm3,>in3=%ebx
2572movd %xmm3,%ebx
2573
2574# qhasm: z0 <<<= 96
2575# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2576# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2577pshufd $0x39,%xmm0,%xmm0
2578
2579# qhasm: z1 <<<= 96
2580# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2581# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2582pshufd $0x39,%xmm1,%xmm1
2583
2584# qhasm: z2 <<<= 96
2585# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2586# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2587pshufd $0x39,%xmm2,%xmm2
2588
2589# qhasm: z3 <<<= 96
2590# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2591# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2592pshufd $0x39,%xmm3,%xmm3
2593
2594# qhasm: in0 ^= *(uint32 *) (m + 0)
2595# asm 1: xorl 0(<m=int32#5),<in0=int32#1
2596# asm 2: xorl 0(<m=%esi),<in0=%eax
2597xorl 0(%esi),%eax
2598
2599# qhasm: in1 ^= *(uint32 *) (m + 4)
2600# asm 1: xorl 4(<m=int32#5),<in1=int32#2
2601# asm 2: xorl 4(<m=%esi),<in1=%ecx
2602xorl 4(%esi),%ecx
2603
2604# qhasm: in2 ^= *(uint32 *) (m + 8)
2605# asm 1: xorl 8(<m=int32#5),<in2=int32#3
2606# asm 2: xorl 8(<m=%esi),<in2=%edx
2607xorl 8(%esi),%edx
2608
2609# qhasm: in3 ^= *(uint32 *) (m + 12)
2610# asm 1: xorl 12(<m=int32#5),<in3=int32#4
2611# asm 2: xorl 12(<m=%esi),<in3=%ebx
2612xorl 12(%esi),%ebx
2613
2614# qhasm: *(uint32 *) (out + 0) = in0
2615# asm 1: movl <in0=int32#1,0(<out=int32#6)
2616# asm 2: movl <in0=%eax,0(<out=%edi)
2617movl %eax,0(%edi)
2618
2619# qhasm: *(uint32 *) (out + 4) = in1
2620# asm 1: movl <in1=int32#2,4(<out=int32#6)
2621# asm 2: movl <in1=%ecx,4(<out=%edi)
2622movl %ecx,4(%edi)
2623
2624# qhasm: *(uint32 *) (out + 8) = in2
2625# asm 1: movl <in2=int32#3,8(<out=int32#6)
2626# asm 2: movl <in2=%edx,8(<out=%edi)
2627movl %edx,8(%edi)
2628
2629# qhasm: *(uint32 *) (out + 12) = in3
2630# asm 1: movl <in3=int32#4,12(<out=int32#6)
2631# asm 2: movl <in3=%ebx,12(<out=%edi)
2632movl %ebx,12(%edi)
2633
2634# qhasm: in0 = z0
2635# asm 1: movd <z0=int6464#1,>in0=int32#1
2636# asm 2: movd <z0=%xmm0,>in0=%eax
2637movd %xmm0,%eax
2638
2639# qhasm: in1 = z1
2640# asm 1: movd <z1=int6464#2,>in1=int32#2
2641# asm 2: movd <z1=%xmm1,>in1=%ecx
2642movd %xmm1,%ecx
2643
2644# qhasm: in2 = z2
2645# asm 1: movd <z2=int6464#3,>in2=int32#3
2646# asm 2: movd <z2=%xmm2,>in2=%edx
2647movd %xmm2,%edx
2648
2649# qhasm: in3 = z3
2650# asm 1: movd <z3=int6464#4,>in3=int32#4
2651# asm 2: movd <z3=%xmm3,>in3=%ebx
2652movd %xmm3,%ebx
2653
2654# qhasm: z0 <<<= 96
2655# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2656# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2657pshufd $0x39,%xmm0,%xmm0
2658
2659# qhasm: z1 <<<= 96
2660# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2661# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2662pshufd $0x39,%xmm1,%xmm1
2663
2664# qhasm: z2 <<<= 96
2665# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2666# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2667pshufd $0x39,%xmm2,%xmm2
2668
2669# qhasm: z3 <<<= 96
2670# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2671# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2672pshufd $0x39,%xmm3,%xmm3
2673
2674# qhasm: in0 ^= *(uint32 *) (m + 64)
2675# asm 1: xorl 64(<m=int32#5),<in0=int32#1
2676# asm 2: xorl 64(<m=%esi),<in0=%eax
2677xorl 64(%esi),%eax
2678
2679# qhasm: in1 ^= *(uint32 *) (m + 68)
2680# asm 1: xorl 68(<m=int32#5),<in1=int32#2
2681# asm 2: xorl 68(<m=%esi),<in1=%ecx
2682xorl 68(%esi),%ecx
2683
2684# qhasm: in2 ^= *(uint32 *) (m + 72)
2685# asm 1: xorl 72(<m=int32#5),<in2=int32#3
2686# asm 2: xorl 72(<m=%esi),<in2=%edx
2687xorl 72(%esi),%edx
2688
2689# qhasm: in3 ^= *(uint32 *) (m + 76)
2690# asm 1: xorl 76(<m=int32#5),<in3=int32#4
2691# asm 2: xorl 76(<m=%esi),<in3=%ebx
2692xorl 76(%esi),%ebx
2693
2694# qhasm: *(uint32 *) (out + 64) = in0
2695# asm 1: movl <in0=int32#1,64(<out=int32#6)
2696# asm 2: movl <in0=%eax,64(<out=%edi)
2697movl %eax,64(%edi)
2698
2699# qhasm: *(uint32 *) (out + 68) = in1
2700# asm 1: movl <in1=int32#2,68(<out=int32#6)
2701# asm 2: movl <in1=%ecx,68(<out=%edi)
2702movl %ecx,68(%edi)
2703
2704# qhasm: *(uint32 *) (out + 72) = in2
2705# asm 1: movl <in2=int32#3,72(<out=int32#6)
2706# asm 2: movl <in2=%edx,72(<out=%edi)
2707movl %edx,72(%edi)
2708
2709# qhasm: *(uint32 *) (out + 76) = in3
2710# asm 1: movl <in3=int32#4,76(<out=int32#6)
2711# asm 2: movl <in3=%ebx,76(<out=%edi)
2712movl %ebx,76(%edi)
2713
2714# qhasm: in0 = z0
2715# asm 1: movd <z0=int6464#1,>in0=int32#1
2716# asm 2: movd <z0=%xmm0,>in0=%eax
2717movd %xmm0,%eax
2718
2719# qhasm: in1 = z1
2720# asm 1: movd <z1=int6464#2,>in1=int32#2
2721# asm 2: movd <z1=%xmm1,>in1=%ecx
2722movd %xmm1,%ecx
2723
2724# qhasm: in2 = z2
2725# asm 1: movd <z2=int6464#3,>in2=int32#3
2726# asm 2: movd <z2=%xmm2,>in2=%edx
2727movd %xmm2,%edx
2728
2729# qhasm: in3 = z3
2730# asm 1: movd <z3=int6464#4,>in3=int32#4
2731# asm 2: movd <z3=%xmm3,>in3=%ebx
2732movd %xmm3,%ebx
2733
2734# qhasm: z0 <<<= 96
2735# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2736# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2737pshufd $0x39,%xmm0,%xmm0
2738
2739# qhasm: z1 <<<= 96
2740# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2741# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2742pshufd $0x39,%xmm1,%xmm1
2743
2744# qhasm: z2 <<<= 96
2745# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2746# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2747pshufd $0x39,%xmm2,%xmm2
2748
2749# qhasm: z3 <<<= 96
2750# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2751# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2752pshufd $0x39,%xmm3,%xmm3
2753
2754# qhasm: in0 ^= *(uint32 *) (m + 128)
2755# asm 1: xorl 128(<m=int32#5),<in0=int32#1
2756# asm 2: xorl 128(<m=%esi),<in0=%eax
2757xorl 128(%esi),%eax
2758
2759# qhasm: in1 ^= *(uint32 *) (m + 132)
2760# asm 1: xorl 132(<m=int32#5),<in1=int32#2
2761# asm 2: xorl 132(<m=%esi),<in1=%ecx
2762xorl 132(%esi),%ecx
2763
2764# qhasm: in2 ^= *(uint32 *) (m + 136)
2765# asm 1: xorl 136(<m=int32#5),<in2=int32#3
2766# asm 2: xorl 136(<m=%esi),<in2=%edx
2767xorl 136(%esi),%edx
2768
2769# qhasm: in3 ^= *(uint32 *) (m + 140)
2770# asm 1: xorl 140(<m=int32#5),<in3=int32#4
2771# asm 2: xorl 140(<m=%esi),<in3=%ebx
2772xorl 140(%esi),%ebx
2773
2774# qhasm: *(uint32 *) (out + 128) = in0
2775# asm 1: movl <in0=int32#1,128(<out=int32#6)
2776# asm 2: movl <in0=%eax,128(<out=%edi)
2777movl %eax,128(%edi)
2778
2779# qhasm: *(uint32 *) (out + 132) = in1
2780# asm 1: movl <in1=int32#2,132(<out=int32#6)
2781# asm 2: movl <in1=%ecx,132(<out=%edi)
2782movl %ecx,132(%edi)
2783
2784# qhasm: *(uint32 *) (out + 136) = in2
2785# asm 1: movl <in2=int32#3,136(<out=int32#6)
2786# asm 2: movl <in2=%edx,136(<out=%edi)
2787movl %edx,136(%edi)
2788
2789# qhasm: *(uint32 *) (out + 140) = in3
2790# asm 1: movl <in3=int32#4,140(<out=int32#6)
2791# asm 2: movl <in3=%ebx,140(<out=%edi)
2792movl %ebx,140(%edi)
2793
2794# qhasm: in0 = z0
2795# asm 1: movd <z0=int6464#1,>in0=int32#1
2796# asm 2: movd <z0=%xmm0,>in0=%eax
2797movd %xmm0,%eax
2798
2799# qhasm: in1 = z1
2800# asm 1: movd <z1=int6464#2,>in1=int32#2
2801# asm 2: movd <z1=%xmm1,>in1=%ecx
2802movd %xmm1,%ecx
2803
2804# qhasm: in2 = z2
2805# asm 1: movd <z2=int6464#3,>in2=int32#3
2806# asm 2: movd <z2=%xmm2,>in2=%edx
2807movd %xmm2,%edx
2808
2809# qhasm: in3 = z3
2810# asm 1: movd <z3=int6464#4,>in3=int32#4
2811# asm 2: movd <z3=%xmm3,>in3=%ebx
2812movd %xmm3,%ebx
2813
2814# qhasm: in0 ^= *(uint32 *) (m + 192)
2815# asm 1: xorl 192(<m=int32#5),<in0=int32#1
2816# asm 2: xorl 192(<m=%esi),<in0=%eax
2817xorl 192(%esi),%eax
2818
2819# qhasm: in1 ^= *(uint32 *) (m + 196)
2820# asm 1: xorl 196(<m=int32#5),<in1=int32#2
2821# asm 2: xorl 196(<m=%esi),<in1=%ecx
2822xorl 196(%esi),%ecx
2823
2824# qhasm: in2 ^= *(uint32 *) (m + 200)
2825# asm 1: xorl 200(<m=int32#5),<in2=int32#3
2826# asm 2: xorl 200(<m=%esi),<in2=%edx
2827xorl 200(%esi),%edx
2828
2829# qhasm: in3 ^= *(uint32 *) (m + 204)
2830# asm 1: xorl 204(<m=int32#5),<in3=int32#4
2831# asm 2: xorl 204(<m=%esi),<in3=%ebx
2832xorl 204(%esi),%ebx
2833
2834# qhasm: *(uint32 *) (out + 192) = in0
2835# asm 1: movl <in0=int32#1,192(<out=int32#6)
2836# asm 2: movl <in0=%eax,192(<out=%edi)
2837movl %eax,192(%edi)
2838
2839# qhasm: *(uint32 *) (out + 196) = in1
2840# asm 1: movl <in1=int32#2,196(<out=int32#6)
2841# asm 2: movl <in1=%ecx,196(<out=%edi)
2842movl %ecx,196(%edi)
2843
2844# qhasm: *(uint32 *) (out + 200) = in2
2845# asm 1: movl <in2=int32#3,200(<out=int32#6)
2846# asm 2: movl <in2=%edx,200(<out=%edi)
2847movl %edx,200(%edi)
2848
2849# qhasm: *(uint32 *) (out + 204) = in3
2850# asm 1: movl <in3=int32#4,204(<out=int32#6)
2851# asm 2: movl <in3=%ebx,204(<out=%edi)
2852movl %ebx,204(%edi)
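# (state words 0..3 are done: each xmm register held the same word from the four parallel blocks,
#  so its four 32-bit lanes are xored with the message and stored at out+0, out+64, out+128 and out+192;
#  pshufd $0x39 rotates the next block's lane into position for the following movd)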
2853
2854# qhasm: z4 = z4_stack
2855# asm 1: movdqa <z4_stack=stack128#34,>z4=int6464#1
2856# asm 2: movdqa <z4_stack=560(%esp),>z4=%xmm0
2857movdqa 560(%esp),%xmm0
2858
2859# qhasm: z5 = z5_stack
2860# asm 1: movdqa <z5_stack=stack128#21,>z5=int6464#2
2861# asm 2: movdqa <z5_stack=352(%esp),>z5=%xmm1
2862movdqa 352(%esp),%xmm1
2863
2864# qhasm: z6 = z6_stack
2865# asm 1: movdqa <z6_stack=stack128#26,>z6=int6464#3
2866# asm 2: movdqa <z6_stack=432(%esp),>z6=%xmm2
2867movdqa 432(%esp),%xmm2
2868
2869# qhasm: z7 = z7_stack
2870# asm 1: movdqa <z7_stack=stack128#29,>z7=int6464#4
2871# asm 2: movdqa <z7_stack=480(%esp),>z7=%xmm3
2872movdqa 480(%esp),%xmm3
2873
2874# qhasm: uint32323232 z4 += orig4
2875# asm 1: paddd <orig4=stack128#16,<z4=int6464#1
2876# asm 2: paddd <orig4=272(%esp),<z4=%xmm0
2877paddd 272(%esp),%xmm0
2878
2879# qhasm: uint32323232 z5 += orig5
2880# asm 1: paddd <orig5=stack128#5,<z5=int6464#2
2881# asm 2: paddd <orig5=96(%esp),<z5=%xmm1
2882paddd 96(%esp),%xmm1
2883
2884# qhasm: uint32323232 z6 += orig6
2885# asm 1: paddd <orig6=stack128#9,<z6=int6464#3
2886# asm 2: paddd <orig6=160(%esp),<z6=%xmm2
2887paddd 160(%esp),%xmm2
2888
2889# qhasm: uint32323232 z7 += orig7
2890# asm 1: paddd <orig7=stack128#13,<z7=int6464#4
2891# asm 2: paddd <orig7=224(%esp),<z7=%xmm3
2892paddd 224(%esp),%xmm3
2893
2894# qhasm: in4 = z4
2895# asm 1: movd <z4=int6464#1,>in4=int32#1
2896# asm 2: movd <z4=%xmm0,>in4=%eax
2897movd %xmm0,%eax
2898
2899# qhasm: in5 = z5
2900# asm 1: movd <z5=int6464#2,>in5=int32#2
2901# asm 2: movd <z5=%xmm1,>in5=%ecx
2902movd %xmm1,%ecx
2903
2904# qhasm: in6 = z6
2905# asm 1: movd <z6=int6464#3,>in6=int32#3
2906# asm 2: movd <z6=%xmm2,>in6=%edx
2907movd %xmm2,%edx
2908
2909# qhasm: in7 = z7
2910# asm 1: movd <z7=int6464#4,>in7=int32#4
2911# asm 2: movd <z7=%xmm3,>in7=%ebx
2912movd %xmm3,%ebx
2913
2914# qhasm: z4 <<<= 96
2915# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2916# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2917pshufd $0x39,%xmm0,%xmm0
2918
2919# qhasm: z5 <<<= 96
2920# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
2921# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
2922pshufd $0x39,%xmm1,%xmm1
2923
2924# qhasm: z6 <<<= 96
2925# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
2926# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
2927pshufd $0x39,%xmm2,%xmm2
2928
2929# qhasm: z7 <<<= 96
2930# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
2931# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
2932pshufd $0x39,%xmm3,%xmm3
2933
2934# qhasm: in4 ^= *(uint32 *) (m + 16)
2935# asm 1: xorl 16(<m=int32#5),<in4=int32#1
2936# asm 2: xorl 16(<m=%esi),<in4=%eax
2937xorl 16(%esi),%eax
2938
2939# qhasm: in5 ^= *(uint32 *) (m + 20)
2940# asm 1: xorl 20(<m=int32#5),<in5=int32#2
2941# asm 2: xorl 20(<m=%esi),<in5=%ecx
2942xorl 20(%esi),%ecx
2943
2944# qhasm: in6 ^= *(uint32 *) (m + 24)
2945# asm 1: xorl 24(<m=int32#5),<in6=int32#3
2946# asm 2: xorl 24(<m=%esi),<in6=%edx
2947xorl 24(%esi),%edx
2948
2949# qhasm: in7 ^= *(uint32 *) (m + 28)
2950# asm 1: xorl 28(<m=int32#5),<in7=int32#4
2951# asm 2: xorl 28(<m=%esi),<in7=%ebx
2952xorl 28(%esi),%ebx
2953
2954# qhasm: *(uint32 *) (out + 16) = in4
2955# asm 1: movl <in4=int32#1,16(<out=int32#6)
2956# asm 2: movl <in4=%eax,16(<out=%edi)
2957movl %eax,16(%edi)
2958
2959# qhasm: *(uint32 *) (out + 20) = in5
2960# asm 1: movl <in5=int32#2,20(<out=int32#6)
2961# asm 2: movl <in5=%ecx,20(<out=%edi)
2962movl %ecx,20(%edi)
2963
2964# qhasm: *(uint32 *) (out + 24) = in6
2965# asm 1: movl <in6=int32#3,24(<out=int32#6)
2966# asm 2: movl <in6=%edx,24(<out=%edi)
2967movl %edx,24(%edi)
2968
2969# qhasm: *(uint32 *) (out + 28) = in7
2970# asm 1: movl <in7=int32#4,28(<out=int32#6)
2971# asm 2: movl <in7=%ebx,28(<out=%edi)
2972movl %ebx,28(%edi)
2973
2974# qhasm: in4 = z4
2975# asm 1: movd <z4=int6464#1,>in4=int32#1
2976# asm 2: movd <z4=%xmm0,>in4=%eax
2977movd %xmm0,%eax
2978
2979# qhasm: in5 = z5
2980# asm 1: movd <z5=int6464#2,>in5=int32#2
2981# asm 2: movd <z5=%xmm1,>in5=%ecx
2982movd %xmm1,%ecx
2983
2984# qhasm: in6 = z6
2985# asm 1: movd <z6=int6464#3,>in6=int32#3
2986# asm 2: movd <z6=%xmm2,>in6=%edx
2987movd %xmm2,%edx
2988
2989# qhasm: in7 = z7
2990# asm 1: movd <z7=int6464#4,>in7=int32#4
2991# asm 2: movd <z7=%xmm3,>in7=%ebx
2992movd %xmm3,%ebx
2993
2994# qhasm: z4 <<<= 96
2995# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2996# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2997pshufd $0x39,%xmm0,%xmm0
2998
2999# qhasm: z5 <<<= 96
3000# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3001# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3002pshufd $0x39,%xmm1,%xmm1
3003
3004# qhasm: z6 <<<= 96
3005# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3006# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3007pshufd $0x39,%xmm2,%xmm2
3008
3009# qhasm: z7 <<<= 96
3010# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3011# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3012pshufd $0x39,%xmm3,%xmm3
3013
3014# qhasm: in4 ^= *(uint32 *) (m + 80)
3015# asm 1: xorl 80(<m=int32#5),<in4=int32#1
3016# asm 2: xorl 80(<m=%esi),<in4=%eax
3017xorl 80(%esi),%eax
3018
3019# qhasm: in5 ^= *(uint32 *) (m + 84)
3020# asm 1: xorl 84(<m=int32#5),<in5=int32#2
3021# asm 2: xorl 84(<m=%esi),<in5=%ecx
3022xorl 84(%esi),%ecx
3023
3024# qhasm: in6 ^= *(uint32 *) (m + 88)
3025# asm 1: xorl 88(<m=int32#5),<in6=int32#3
3026# asm 2: xorl 88(<m=%esi),<in6=%edx
3027xorl 88(%esi),%edx
3028
3029# qhasm: in7 ^= *(uint32 *) (m + 92)
3030# asm 1: xorl 92(<m=int32#5),<in7=int32#4
3031# asm 2: xorl 92(<m=%esi),<in7=%ebx
3032xorl 92(%esi),%ebx
3033
3034# qhasm: *(uint32 *) (out + 80) = in4
3035# asm 1: movl <in4=int32#1,80(<out=int32#6)
3036# asm 2: movl <in4=%eax,80(<out=%edi)
3037movl %eax,80(%edi)
3038
3039# qhasm: *(uint32 *) (out + 84) = in5
3040# asm 1: movl <in5=int32#2,84(<out=int32#6)
3041# asm 2: movl <in5=%ecx,84(<out=%edi)
3042movl %ecx,84(%edi)
3043
3044# qhasm: *(uint32 *) (out + 88) = in6
3045# asm 1: movl <in6=int32#3,88(<out=int32#6)
3046# asm 2: movl <in6=%edx,88(<out=%edi)
3047movl %edx,88(%edi)
3048
3049# qhasm: *(uint32 *) (out + 92) = in7
3050# asm 1: movl <in7=int32#4,92(<out=int32#6)
3051# asm 2: movl <in7=%ebx,92(<out=%edi)
3052movl %ebx,92(%edi)
3053
3054# qhasm: in4 = z4
3055# asm 1: movd <z4=int6464#1,>in4=int32#1
3056# asm 2: movd <z4=%xmm0,>in4=%eax
3057movd %xmm0,%eax
3058
3059# qhasm: in5 = z5
3060# asm 1: movd <z5=int6464#2,>in5=int32#2
3061# asm 2: movd <z5=%xmm1,>in5=%ecx
3062movd %xmm1,%ecx
3063
3064# qhasm: in6 = z6
3065# asm 1: movd <z6=int6464#3,>in6=int32#3
3066# asm 2: movd <z6=%xmm2,>in6=%edx
3067movd %xmm2,%edx
3068
3069# qhasm: in7 = z7
3070# asm 1: movd <z7=int6464#4,>in7=int32#4
3071# asm 2: movd <z7=%xmm3,>in7=%ebx
3072movd %xmm3,%ebx
3073
3074# qhasm: z4 <<<= 96
3075# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
3076# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
3077pshufd $0x39,%xmm0,%xmm0
3078
3079# qhasm: z5 <<<= 96
3080# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3081# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3082pshufd $0x39,%xmm1,%xmm1
3083
3084# qhasm: z6 <<<= 96
3085# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3086# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3087pshufd $0x39,%xmm2,%xmm2
3088
3089# qhasm: z7 <<<= 96
3090# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3091# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3092pshufd $0x39,%xmm3,%xmm3
3093
3094# qhasm: in4 ^= *(uint32 *) (m + 144)
3095# asm 1: xorl 144(<m=int32#5),<in4=int32#1
3096# asm 2: xorl 144(<m=%esi),<in4=%eax
3097xorl 144(%esi),%eax
3098
3099# qhasm: in5 ^= *(uint32 *) (m + 148)
3100# asm 1: xorl 148(<m=int32#5),<in5=int32#2
3101# asm 2: xorl 148(<m=%esi),<in5=%ecx
3102xorl 148(%esi),%ecx
3103
3104# qhasm: in6 ^= *(uint32 *) (m + 152)
3105# asm 1: xorl 152(<m=int32#5),<in6=int32#3
3106# asm 2: xorl 152(<m=%esi),<in6=%edx
3107xorl 152(%esi),%edx
3108
3109# qhasm: in7 ^= *(uint32 *) (m + 156)
3110# asm 1: xorl 156(<m=int32#5),<in7=int32#4
3111# asm 2: xorl 156(<m=%esi),<in7=%ebx
3112xorl 156(%esi),%ebx
3113
3114# qhasm: *(uint32 *) (out + 144) = in4
3115# asm 1: movl <in4=int32#1,144(<out=int32#6)
3116# asm 2: movl <in4=%eax,144(<out=%edi)
3117movl %eax,144(%edi)
3118
3119# qhasm: *(uint32 *) (out + 148) = in5
3120# asm 1: movl <in5=int32#2,148(<out=int32#6)
3121# asm 2: movl <in5=%ecx,148(<out=%edi)
3122movl %ecx,148(%edi)
3123
3124# qhasm: *(uint32 *) (out + 152) = in6
3125# asm 1: movl <in6=int32#3,152(<out=int32#6)
3126# asm 2: movl <in6=%edx,152(<out=%edi)
3127movl %edx,152(%edi)
3128
3129# qhasm: *(uint32 *) (out + 156) = in7
3130# asm 1: movl <in7=int32#4,156(<out=int32#6)
3131# asm 2: movl <in7=%ebx,156(<out=%edi)
3132movl %ebx,156(%edi)
3133
3134# qhasm: in4 = z4
3135# asm 1: movd <z4=int6464#1,>in4=int32#1
3136# asm 2: movd <z4=%xmm0,>in4=%eax
3137movd %xmm0,%eax
3138
3139# qhasm: in5 = z5
3140# asm 1: movd <z5=int6464#2,>in5=int32#2
3141# asm 2: movd <z5=%xmm1,>in5=%ecx
3142movd %xmm1,%ecx
3143
3144# qhasm: in6 = z6
3145# asm 1: movd <z6=int6464#3,>in6=int32#3
3146# asm 2: movd <z6=%xmm2,>in6=%edx
3147movd %xmm2,%edx
3148
3149# qhasm: in7 = z7
3150# asm 1: movd <z7=int6464#4,>in7=int32#4
3151# asm 2: movd <z7=%xmm3,>in7=%ebx
3152movd %xmm3,%ebx
3153
3154# qhasm: in4 ^= *(uint32 *) (m + 208)
3155# asm 1: xorl 208(<m=int32#5),<in4=int32#1
3156# asm 2: xorl 208(<m=%esi),<in4=%eax
3157xorl 208(%esi),%eax
3158
3159# qhasm: in5 ^= *(uint32 *) (m + 212)
3160# asm 1: xorl 212(<m=int32#5),<in5=int32#2
3161# asm 2: xorl 212(<m=%esi),<in5=%ecx
3162xorl 212(%esi),%ecx
3163
3164# qhasm: in6 ^= *(uint32 *) (m + 216)
3165# asm 1: xorl 216(<m=int32#5),<in6=int32#3
3166# asm 2: xorl 216(<m=%esi),<in6=%edx
3167xorl 216(%esi),%edx
3168
3169# qhasm: in7 ^= *(uint32 *) (m + 220)
3170# asm 1: xorl 220(<m=int32#5),<in7=int32#4
3171# asm 2: xorl 220(<m=%esi),<in7=%ebx
3172xorl 220(%esi),%ebx
3173
3174# qhasm: *(uint32 *) (out + 208) = in4
3175# asm 1: movl <in4=int32#1,208(<out=int32#6)
3176# asm 2: movl <in4=%eax,208(<out=%edi)
3177movl %eax,208(%edi)
3178
3179# qhasm: *(uint32 *) (out + 212) = in5
3180# asm 1: movl <in5=int32#2,212(<out=int32#6)
3181# asm 2: movl <in5=%ecx,212(<out=%edi)
3182movl %ecx,212(%edi)
3183
3184# qhasm: *(uint32 *) (out + 216) = in6
3185# asm 1: movl <in6=int32#3,216(<out=int32#6)
3186# asm 2: movl <in6=%edx,216(<out=%edi)
3187movl %edx,216(%edi)
3188
3189# qhasm: *(uint32 *) (out + 220) = in7
3190# asm 1: movl <in7=int32#4,220(<out=int32#6)
3191# asm 2: movl <in7=%ebx,220(<out=%edi)
3192movl %ebx,220(%edi)
3193
3194# qhasm: z8 = z8_stack
3195# asm 1: movdqa <z8_stack=stack128#37,>z8=int6464#1
3196# asm 2: movdqa <z8_stack=608(%esp),>z8=%xmm0
3197movdqa 608(%esp),%xmm0
3198
3199# qhasm: z9 = z9_stack
3200# asm 1: movdqa <z9_stack=stack128#32,>z9=int6464#2
3201# asm 2: movdqa <z9_stack=528(%esp),>z9=%xmm1
3202movdqa 528(%esp),%xmm1
3203
3204# qhasm: z10 = z10_stack
3205# asm 1: movdqa <z10_stack=stack128#22,>z10=int6464#3
3206# asm 2: movdqa <z10_stack=368(%esp),>z10=%xmm2
3207movdqa 368(%esp),%xmm2
3208
3209# qhasm: z11 = z11_stack
3210# asm 1: movdqa <z11_stack=stack128#27,>z11=int6464#4
3211# asm 2: movdqa <z11_stack=448(%esp),>z11=%xmm3
3212movdqa 448(%esp),%xmm3
3213
3214# qhasm: uint32323232 z8 += orig8
3215# asm 1: paddd <orig8=stack128#19,<z8=int6464#1
3216# asm 2: paddd <orig8=320(%esp),<z8=%xmm0
3217paddd 320(%esp),%xmm0
3218
3219# qhasm: uint32323232 z9 += orig9
3220# asm 1: paddd <orig9=stack128#20,<z9=int6464#2
3221# asm 2: paddd <orig9=336(%esp),<z9=%xmm1
3222paddd 336(%esp),%xmm1
3223
3224# qhasm: uint32323232 z10 += orig10
3225# asm 1: paddd <orig10=stack128#6,<z10=int6464#3
3226# asm 2: paddd <orig10=112(%esp),<z10=%xmm2
3227paddd 112(%esp),%xmm2
3228
3229# qhasm: uint32323232 z11 += orig11
3230# asm 1: paddd <orig11=stack128#10,<z11=int6464#4
3231# asm 2: paddd <orig11=176(%esp),<z11=%xmm3
3232paddd 176(%esp),%xmm3
3233
3234# qhasm: in8 = z8
3235# asm 1: movd <z8=int6464#1,>in8=int32#1
3236# asm 2: movd <z8=%xmm0,>in8=%eax
3237movd %xmm0,%eax
3238
3239# qhasm: in9 = z9
3240# asm 1: movd <z9=int6464#2,>in9=int32#2
3241# asm 2: movd <z9=%xmm1,>in9=%ecx
3242movd %xmm1,%ecx
3243
3244# qhasm: in10 = z10
3245# asm 1: movd <z10=int6464#3,>in10=int32#3
3246# asm 2: movd <z10=%xmm2,>in10=%edx
3247movd %xmm2,%edx
3248
3249# qhasm: in11 = z11
3250# asm 1: movd <z11=int6464#4,>in11=int32#4
3251# asm 2: movd <z11=%xmm3,>in11=%ebx
3252movd %xmm3,%ebx
3253
3254# qhasm: z8 <<<= 96
3255# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3256# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3257pshufd $0x39,%xmm0,%xmm0
3258
3259# qhasm: z9 <<<= 96
3260# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3261# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3262pshufd $0x39,%xmm1,%xmm1
3263
3264# qhasm: z10 <<<= 96
3265# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3266# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3267pshufd $0x39,%xmm2,%xmm2
3268
3269# qhasm: z11 <<<= 96
3270# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3271# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3272pshufd $0x39,%xmm3,%xmm3
3273
3274# qhasm: in8 ^= *(uint32 *) (m + 32)
3275# asm 1: xorl 32(<m=int32#5),<in8=int32#1
3276# asm 2: xorl 32(<m=%esi),<in8=%eax
3277xorl 32(%esi),%eax
3278
3279# qhasm: in9 ^= *(uint32 *) (m + 36)
3280# asm 1: xorl 36(<m=int32#5),<in9=int32#2
3281# asm 2: xorl 36(<m=%esi),<in9=%ecx
3282xorl 36(%esi),%ecx
3283
3284# qhasm: in10 ^= *(uint32 *) (m + 40)
3285# asm 1: xorl 40(<m=int32#5),<in10=int32#3
3286# asm 2: xorl 40(<m=%esi),<in10=%edx
3287xorl 40(%esi),%edx
3288
3289# qhasm: in11 ^= *(uint32 *) (m + 44)
3290# asm 1: xorl 44(<m=int32#5),<in11=int32#4
3291# asm 2: xorl 44(<m=%esi),<in11=%ebx
3292xorl 44(%esi),%ebx
3293
3294# qhasm: *(uint32 *) (out + 32) = in8
3295# asm 1: movl <in8=int32#1,32(<out=int32#6)
3296# asm 2: movl <in8=%eax,32(<out=%edi)
3297movl %eax,32(%edi)
3298
3299# qhasm: *(uint32 *) (out + 36) = in9
3300# asm 1: movl <in9=int32#2,36(<out=int32#6)
3301# asm 2: movl <in9=%ecx,36(<out=%edi)
3302movl %ecx,36(%edi)
3303
3304# qhasm: *(uint32 *) (out + 40) = in10
3305# asm 1: movl <in10=int32#3,40(<out=int32#6)
3306# asm 2: movl <in10=%edx,40(<out=%edi)
3307movl %edx,40(%edi)
3308
3309# qhasm: *(uint32 *) (out + 44) = in11
3310# asm 1: movl <in11=int32#4,44(<out=int32#6)
3311# asm 2: movl <in11=%ebx,44(<out=%edi)
3312movl %ebx,44(%edi)
3313
3314# qhasm: in8 = z8
3315# asm 1: movd <z8=int6464#1,>in8=int32#1
3316# asm 2: movd <z8=%xmm0,>in8=%eax
3317movd %xmm0,%eax
3318
3319# qhasm: in9 = z9
3320# asm 1: movd <z9=int6464#2,>in9=int32#2
3321# asm 2: movd <z9=%xmm1,>in9=%ecx
3322movd %xmm1,%ecx
3323
3324# qhasm: in10 = z10
3325# asm 1: movd <z10=int6464#3,>in10=int32#3
3326# asm 2: movd <z10=%xmm2,>in10=%edx
3327movd %xmm2,%edx
3328
3329# qhasm: in11 = z11
3330# asm 1: movd <z11=int6464#4,>in11=int32#4
3331# asm 2: movd <z11=%xmm3,>in11=%ebx
3332movd %xmm3,%ebx
3333
3334# qhasm: z8 <<<= 96
3335# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3336# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3337pshufd $0x39,%xmm0,%xmm0
3338
3339# qhasm: z9 <<<= 96
3340# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3341# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3342pshufd $0x39,%xmm1,%xmm1
3343
3344# qhasm: z10 <<<= 96
3345# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3346# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3347pshufd $0x39,%xmm2,%xmm2
3348
3349# qhasm: z11 <<<= 96
3350# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3351# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3352pshufd $0x39,%xmm3,%xmm3
3353
3354# qhasm: in8 ^= *(uint32 *) (m + 96)
3355# asm 1: xorl 96(<m=int32#5),<in8=int32#1
3356# asm 2: xorl 96(<m=%esi),<in8=%eax
3357xorl 96(%esi),%eax
3358
3359# qhasm: in9 ^= *(uint32 *) (m + 100)
3360# asm 1: xorl 100(<m=int32#5),<in9=int32#2
3361# asm 2: xorl 100(<m=%esi),<in9=%ecx
3362xorl 100(%esi),%ecx
3363
3364# qhasm: in10 ^= *(uint32 *) (m + 104)
3365# asm 1: xorl 104(<m=int32#5),<in10=int32#3
3366# asm 2: xorl 104(<m=%esi),<in10=%edx
3367xorl 104(%esi),%edx
3368
3369# qhasm: in11 ^= *(uint32 *) (m + 108)
3370# asm 1: xorl 108(<m=int32#5),<in11=int32#4
3371# asm 2: xorl 108(<m=%esi),<in11=%ebx
3372xorl 108(%esi),%ebx
3373
3374# qhasm: *(uint32 *) (out + 96) = in8
3375# asm 1: movl <in8=int32#1,96(<out=int32#6)
3376# asm 2: movl <in8=%eax,96(<out=%edi)
3377movl %eax,96(%edi)
3378
3379# qhasm: *(uint32 *) (out + 100) = in9
3380# asm 1: movl <in9=int32#2,100(<out=int32#6)
3381# asm 2: movl <in9=%ecx,100(<out=%edi)
3382movl %ecx,100(%edi)
3383
3384# qhasm: *(uint32 *) (out + 104) = in10
3385# asm 1: movl <in10=int32#3,104(<out=int32#6)
3386# asm 2: movl <in10=%edx,104(<out=%edi)
3387movl %edx,104(%edi)
3388
3389# qhasm: *(uint32 *) (out + 108) = in11
3390# asm 1: movl <in11=int32#4,108(<out=int32#6)
3391# asm 2: movl <in11=%ebx,108(<out=%edi)
3392movl %ebx,108(%edi)
3393
3394# qhasm: in8 = z8
3395# asm 1: movd <z8=int6464#1,>in8=int32#1
3396# asm 2: movd <z8=%xmm0,>in8=%eax
3397movd %xmm0,%eax
3398
3399# qhasm: in9 = z9
3400# asm 1: movd <z9=int6464#2,>in9=int32#2
3401# asm 2: movd <z9=%xmm1,>in9=%ecx
3402movd %xmm1,%ecx
3403
3404# qhasm: in10 = z10
3405# asm 1: movd <z10=int6464#3,>in10=int32#3
3406# asm 2: movd <z10=%xmm2,>in10=%edx
3407movd %xmm2,%edx
3408
3409# qhasm: in11 = z11
3410# asm 1: movd <z11=int6464#4,>in11=int32#4
3411# asm 2: movd <z11=%xmm3,>in11=%ebx
3412movd %xmm3,%ebx
3413
3414# qhasm: z8 <<<= 96
3415# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3416# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3417pshufd $0x39,%xmm0,%xmm0
3418
3419# qhasm: z9 <<<= 96
3420# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3421# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3422pshufd $0x39,%xmm1,%xmm1
3423
3424# qhasm: z10 <<<= 96
3425# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3426# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3427pshufd $0x39,%xmm2,%xmm2
3428
3429# qhasm: z11 <<<= 96
3430# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3431# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3432pshufd $0x39,%xmm3,%xmm3
3433
3434# qhasm: in8 ^= *(uint32 *) (m + 160)
3435# asm 1: xorl 160(<m=int32#5),<in8=int32#1
3436# asm 2: xorl 160(<m=%esi),<in8=%eax
3437xorl 160(%esi),%eax
3438
3439# qhasm: in9 ^= *(uint32 *) (m + 164)
3440# asm 1: xorl 164(<m=int32#5),<in9=int32#2
3441# asm 2: xorl 164(<m=%esi),<in9=%ecx
3442xorl 164(%esi),%ecx
3443
3444# qhasm: in10 ^= *(uint32 *) (m + 168)
3445# asm 1: xorl 168(<m=int32#5),<in10=int32#3
3446# asm 2: xorl 168(<m=%esi),<in10=%edx
3447xorl 168(%esi),%edx
3448
3449# qhasm: in11 ^= *(uint32 *) (m + 172)
3450# asm 1: xorl 172(<m=int32#5),<in11=int32#4
3451# asm 2: xorl 172(<m=%esi),<in11=%ebx
3452xorl 172(%esi),%ebx
3453
3454# qhasm: *(uint32 *) (out + 160) = in8
3455# asm 1: movl <in8=int32#1,160(<out=int32#6)
3456# asm 2: movl <in8=%eax,160(<out=%edi)
3457movl %eax,160(%edi)
3458
3459# qhasm: *(uint32 *) (out + 164) = in9
3460# asm 1: movl <in9=int32#2,164(<out=int32#6)
3461# asm 2: movl <in9=%ecx,164(<out=%edi)
3462movl %ecx,164(%edi)
3463
3464# qhasm: *(uint32 *) (out + 168) = in10
3465# asm 1: movl <in10=int32#3,168(<out=int32#6)
3466# asm 2: movl <in10=%edx,168(<out=%edi)
3467movl %edx,168(%edi)
3468
3469# qhasm: *(uint32 *) (out + 172) = in11
3470# asm 1: movl <in11=int32#4,172(<out=int32#6)
3471# asm 2: movl <in11=%ebx,172(<out=%edi)
3472movl %ebx,172(%edi)
3473
3474# qhasm: in8 = z8
3475# asm 1: movd <z8=int6464#1,>in8=int32#1
3476# asm 2: movd <z8=%xmm0,>in8=%eax
3477movd %xmm0,%eax
3478
3479# qhasm: in9 = z9
3480# asm 1: movd <z9=int6464#2,>in9=int32#2
3481# asm 2: movd <z9=%xmm1,>in9=%ecx
3482movd %xmm1,%ecx
3483
3484# qhasm: in10 = z10
3485# asm 1: movd <z10=int6464#3,>in10=int32#3
3486# asm 2: movd <z10=%xmm2,>in10=%edx
3487movd %xmm2,%edx
3488
3489# qhasm: in11 = z11
3490# asm 1: movd <z11=int6464#4,>in11=int32#4
3491# asm 2: movd <z11=%xmm3,>in11=%ebx
3492movd %xmm3,%ebx
3493
3494# qhasm: in8 ^= *(uint32 *) (m + 224)
3495# asm 1: xorl 224(<m=int32#5),<in8=int32#1
3496# asm 2: xorl 224(<m=%esi),<in8=%eax
3497xorl 224(%esi),%eax
3498
3499# qhasm: in9 ^= *(uint32 *) (m + 228)
3500# asm 1: xorl 228(<m=int32#5),<in9=int32#2
3501# asm 2: xorl 228(<m=%esi),<in9=%ecx
3502xorl 228(%esi),%ecx
3503
3504# qhasm: in10 ^= *(uint32 *) (m + 232)
3505# asm 1: xorl 232(<m=int32#5),<in10=int32#3
3506# asm 2: xorl 232(<m=%esi),<in10=%edx
3507xorl 232(%esi),%edx
3508
3509# qhasm: in11 ^= *(uint32 *) (m + 236)
3510# asm 1: xorl 236(<m=int32#5),<in11=int32#4
3511# asm 2: xorl 236(<m=%esi),<in11=%ebx
3512xorl 236(%esi),%ebx
3513
3514# qhasm: *(uint32 *) (out + 224) = in8
3515# asm 1: movl <in8=int32#1,224(<out=int32#6)
3516# asm 2: movl <in8=%eax,224(<out=%edi)
3517movl %eax,224(%edi)
3518
3519# qhasm: *(uint32 *) (out + 228) = in9
3520# asm 1: movl <in9=int32#2,228(<out=int32#6)
3521# asm 2: movl <in9=%ecx,228(<out=%edi)
3522movl %ecx,228(%edi)
3523
3524# qhasm: *(uint32 *) (out + 232) = in10
3525# asm 1: movl <in10=int32#3,232(<out=int32#6)
3526# asm 2: movl <in10=%edx,232(<out=%edi)
3527movl %edx,232(%edi)
3528
3529# qhasm: *(uint32 *) (out + 236) = in11
3530# asm 1: movl <in11=int32#4,236(<out=int32#6)
3531# asm 2: movl <in11=%ebx,236(<out=%edi)
3532movl %ebx,236(%edi)
3533
3534# qhasm: z12 = z12_stack
3535# asm 1: movdqa <z12_stack=stack128#35,>z12=int6464#1
3536# asm 2: movdqa <z12_stack=576(%esp),>z12=%xmm0
3537movdqa 576(%esp),%xmm0
3538
3539# qhasm: z13 = z13_stack
3540# asm 1: movdqa <z13_stack=stack128#30,>z13=int6464#2
3541# asm 2: movdqa <z13_stack=496(%esp),>z13=%xmm1
3542movdqa 496(%esp),%xmm1
3543
3544# qhasm: z14 = z14_stack
3545# asm 1: movdqa <z14_stack=stack128#24,>z14=int6464#3
3546# asm 2: movdqa <z14_stack=400(%esp),>z14=%xmm2
3547movdqa 400(%esp),%xmm2
3548
3549# qhasm: z15 = z15_stack
3550# asm 1: movdqa <z15_stack=stack128#23,>z15=int6464#4
3551# asm 2: movdqa <z15_stack=384(%esp),>z15=%xmm3
3552movdqa 384(%esp),%xmm3
3553
3554# qhasm: uint32323232 z12 += orig12
3555# asm 1: paddd <orig12=stack128#11,<z12=int6464#1
3556# asm 2: paddd <orig12=192(%esp),<z12=%xmm0
3557paddd 192(%esp),%xmm0
3558
3559# qhasm: uint32323232 z13 += orig13
3560# asm 1: paddd <orig13=stack128#14,<z13=int6464#2
3561# asm 2: paddd <orig13=240(%esp),<z13=%xmm1
3562paddd 240(%esp),%xmm1
3563
3564# qhasm: uint32323232 z14 += orig14
3565# asm 1: paddd <orig14=stack128#17,<z14=int6464#3
3566# asm 2: paddd <orig14=288(%esp),<z14=%xmm2
3567paddd 288(%esp),%xmm2
3568
3569# qhasm: uint32323232 z15 += orig15
3570# asm 1: paddd <orig15=stack128#7,<z15=int6464#4
3571# asm 2: paddd <orig15=128(%esp),<z15=%xmm3
3572paddd 128(%esp),%xmm3
3573
3574# qhasm: in12 = z12
3575# asm 1: movd <z12=int6464#1,>in12=int32#1
3576# asm 2: movd <z12=%xmm0,>in12=%eax
3577movd %xmm0,%eax
3578
3579# qhasm: in13 = z13
3580# asm 1: movd <z13=int6464#2,>in13=int32#2
3581# asm 2: movd <z13=%xmm1,>in13=%ecx
3582movd %xmm1,%ecx
3583
3584# qhasm: in14 = z14
3585# asm 1: movd <z14=int6464#3,>in14=int32#3
3586# asm 2: movd <z14=%xmm2,>in14=%edx
3587movd %xmm2,%edx
3588
3589# qhasm: in15 = z15
3590# asm 1: movd <z15=int6464#4,>in15=int32#4
3591# asm 2: movd <z15=%xmm3,>in15=%ebx
3592movd %xmm3,%ebx
3593
3594# qhasm: z12 <<<= 96
3595# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3596# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3597pshufd $0x39,%xmm0,%xmm0
3598
3599# qhasm: z13 <<<= 96
3600# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3601# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3602pshufd $0x39,%xmm1,%xmm1
3603
3604# qhasm: z14 <<<= 96
3605# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3606# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3607pshufd $0x39,%xmm2,%xmm2
3608
3609# qhasm: z15 <<<= 96
3610# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3611# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3612pshufd $0x39,%xmm3,%xmm3
3613
3614# qhasm: in12 ^= *(uint32 *) (m + 48)
3615# asm 1: xorl 48(<m=int32#5),<in12=int32#1
3616# asm 2: xorl 48(<m=%esi),<in12=%eax
3617xorl 48(%esi),%eax
3618
3619# qhasm: in13 ^= *(uint32 *) (m + 52)
3620# asm 1: xorl 52(<m=int32#5),<in13=int32#2
3621# asm 2: xorl 52(<m=%esi),<in13=%ecx
3622xorl 52(%esi),%ecx
3623
3624# qhasm: in14 ^= *(uint32 *) (m + 56)
3625# asm 1: xorl 56(<m=int32#5),<in14=int32#3
3626# asm 2: xorl 56(<m=%esi),<in14=%edx
3627xorl 56(%esi),%edx
3628
3629# qhasm: in15 ^= *(uint32 *) (m + 60)
3630# asm 1: xorl 60(<m=int32#5),<in15=int32#4
3631# asm 2: xorl 60(<m=%esi),<in15=%ebx
3632xorl 60(%esi),%ebx
3633
3634# qhasm: *(uint32 *) (out + 48) = in12
3635# asm 1: movl <in12=int32#1,48(<out=int32#6)
3636# asm 2: movl <in12=%eax,48(<out=%edi)
3637movl %eax,48(%edi)
3638
3639# qhasm: *(uint32 *) (out + 52) = in13
3640# asm 1: movl <in13=int32#2,52(<out=int32#6)
3641# asm 2: movl <in13=%ecx,52(<out=%edi)
3642movl %ecx,52(%edi)
3643
3644# qhasm: *(uint32 *) (out + 56) = in14
3645# asm 1: movl <in14=int32#3,56(<out=int32#6)
3646# asm 2: movl <in14=%edx,56(<out=%edi)
3647movl %edx,56(%edi)
3648
3649# qhasm: *(uint32 *) (out + 60) = in15
3650# asm 1: movl <in15=int32#4,60(<out=int32#6)
3651# asm 2: movl <in15=%ebx,60(<out=%edi)
3652movl %ebx,60(%edi)
3653
3654# qhasm: in12 = z12
3655# asm 1: movd <z12=int6464#1,>in12=int32#1
3656# asm 2: movd <z12=%xmm0,>in12=%eax
3657movd %xmm0,%eax
3658
3659# qhasm: in13 = z13
3660# asm 1: movd <z13=int6464#2,>in13=int32#2
3661# asm 2: movd <z13=%xmm1,>in13=%ecx
3662movd %xmm1,%ecx
3663
3664# qhasm: in14 = z14
3665# asm 1: movd <z14=int6464#3,>in14=int32#3
3666# asm 2: movd <z14=%xmm2,>in14=%edx
3667movd %xmm2,%edx
3668
3669# qhasm: in15 = z15
3670# asm 1: movd <z15=int6464#4,>in15=int32#4
3671# asm 2: movd <z15=%xmm3,>in15=%ebx
3672movd %xmm3,%ebx
3673
3674# qhasm: z12 <<<= 96
3675# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3676# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3677pshufd $0x39,%xmm0,%xmm0
3678
3679# qhasm: z13 <<<= 96
3680# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3681# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3682pshufd $0x39,%xmm1,%xmm1
3683
3684# qhasm: z14 <<<= 96
3685# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3686# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3687pshufd $0x39,%xmm2,%xmm2
3688
3689# qhasm: z15 <<<= 96
3690# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3691# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3692pshufd $0x39,%xmm3,%xmm3
3693
3694# qhasm: in12 ^= *(uint32 *) (m + 112)
3695# asm 1: xorl 112(<m=int32#5),<in12=int32#1
3696# asm 2: xorl 112(<m=%esi),<in12=%eax
3697xorl 112(%esi),%eax
3698
3699# qhasm: in13 ^= *(uint32 *) (m + 116)
3700# asm 1: xorl 116(<m=int32#5),<in13=int32#2
3701# asm 2: xorl 116(<m=%esi),<in13=%ecx
3702xorl 116(%esi),%ecx
3703
3704# qhasm: in14 ^= *(uint32 *) (m + 120)
3705# asm 1: xorl 120(<m=int32#5),<in14=int32#3
3706# asm 2: xorl 120(<m=%esi),<in14=%edx
3707xorl 120(%esi),%edx
3708
3709# qhasm: in15 ^= *(uint32 *) (m + 124)
3710# asm 1: xorl 124(<m=int32#5),<in15=int32#4
3711# asm 2: xorl 124(<m=%esi),<in15=%ebx
3712xorl 124(%esi),%ebx
3713
3714# qhasm: *(uint32 *) (out + 112) = in12
3715# asm 1: movl <in12=int32#1,112(<out=int32#6)
3716# asm 2: movl <in12=%eax,112(<out=%edi)
3717movl %eax,112(%edi)
3718
3719# qhasm: *(uint32 *) (out + 116) = in13
3720# asm 1: movl <in13=int32#2,116(<out=int32#6)
3721# asm 2: movl <in13=%ecx,116(<out=%edi)
3722movl %ecx,116(%edi)
3723
3724# qhasm: *(uint32 *) (out + 120) = in14
3725# asm 1: movl <in14=int32#3,120(<out=int32#6)
3726# asm 2: movl <in14=%edx,120(<out=%edi)
3727movl %edx,120(%edi)
3728
3729# qhasm: *(uint32 *) (out + 124) = in15
3730# asm 1: movl <in15=int32#4,124(<out=int32#6)
3731# asm 2: movl <in15=%ebx,124(<out=%edi)
3732movl %ebx,124(%edi)
3733
3734# qhasm: in12 = z12
3735# asm 1: movd <z12=int6464#1,>in12=int32#1
3736# asm 2: movd <z12=%xmm0,>in12=%eax
3737movd %xmm0,%eax
3738
3739# qhasm: in13 = z13
3740# asm 1: movd <z13=int6464#2,>in13=int32#2
3741# asm 2: movd <z13=%xmm1,>in13=%ecx
3742movd %xmm1,%ecx
3743
3744# qhasm: in14 = z14
3745# asm 1: movd <z14=int6464#3,>in14=int32#3
3746# asm 2: movd <z14=%xmm2,>in14=%edx
3747movd %xmm2,%edx
3748
3749# qhasm: in15 = z15
3750# asm 1: movd <z15=int6464#4,>in15=int32#4
3751# asm 2: movd <z15=%xmm3,>in15=%ebx
3752movd %xmm3,%ebx
3753
3754# qhasm: z12 <<<= 96
3755# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3756# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3757pshufd $0x39,%xmm0,%xmm0
3758
3759# qhasm: z13 <<<= 96
3760# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3761# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3762pshufd $0x39,%xmm1,%xmm1
3763
3764# qhasm: z14 <<<= 96
3765# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3766# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3767pshufd $0x39,%xmm2,%xmm2
3768
3769# qhasm: z15 <<<= 96
3770# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3771# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3772pshufd $0x39,%xmm3,%xmm3
3773
3774# qhasm: in12 ^= *(uint32 *) (m + 176)
3775# asm 1: xorl 176(<m=int32#5),<in12=int32#1
3776# asm 2: xorl 176(<m=%esi),<in12=%eax
3777xorl 176(%esi),%eax
3778
3779# qhasm: in13 ^= *(uint32 *) (m + 180)
3780# asm 1: xorl 180(<m=int32#5),<in13=int32#2
3781# asm 2: xorl 180(<m=%esi),<in13=%ecx
3782xorl 180(%esi),%ecx
3783
3784# qhasm: in14 ^= *(uint32 *) (m + 184)
3785# asm 1: xorl 184(<m=int32#5),<in14=int32#3
3786# asm 2: xorl 184(<m=%esi),<in14=%edx
3787xorl 184(%esi),%edx
3788
3789# qhasm: in15 ^= *(uint32 *) (m + 188)
3790# asm 1: xorl 188(<m=int32#5),<in15=int32#4
3791# asm 2: xorl 188(<m=%esi),<in15=%ebx
3792xorl 188(%esi),%ebx
3793
3794# qhasm: *(uint32 *) (out + 176) = in12
3795# asm 1: movl <in12=int32#1,176(<out=int32#6)
3796# asm 2: movl <in12=%eax,176(<out=%edi)
3797movl %eax,176(%edi)
3798
3799# qhasm: *(uint32 *) (out + 180) = in13
3800# asm 1: movl <in13=int32#2,180(<out=int32#6)
3801# asm 2: movl <in13=%ecx,180(<out=%edi)
3802movl %ecx,180(%edi)
3803
3804# qhasm: *(uint32 *) (out + 184) = in14
3805# asm 1: movl <in14=int32#3,184(<out=int32#6)
3806# asm 2: movl <in14=%edx,184(<out=%edi)
3807movl %edx,184(%edi)
3808
3809# qhasm: *(uint32 *) (out + 188) = in15
3810# asm 1: movl <in15=int32#4,188(<out=int32#6)
3811# asm 2: movl <in15=%ebx,188(<out=%edi)
3812movl %ebx,188(%edi)
3813
3814# qhasm: in12 = z12
3815# asm 1: movd <z12=int6464#1,>in12=int32#1
3816# asm 2: movd <z12=%xmm0,>in12=%eax
3817movd %xmm0,%eax
3818
3819# qhasm: in13 = z13
3820# asm 1: movd <z13=int6464#2,>in13=int32#2
3821# asm 2: movd <z13=%xmm1,>in13=%ecx
3822movd %xmm1,%ecx
3823
3824# qhasm: in14 = z14
3825# asm 1: movd <z14=int6464#3,>in14=int32#3
3826# asm 2: movd <z14=%xmm2,>in14=%edx
3827movd %xmm2,%edx
3828
3829# qhasm: in15 = z15
3830# asm 1: movd <z15=int6464#4,>in15=int32#4
3831# asm 2: movd <z15=%xmm3,>in15=%ebx
3832movd %xmm3,%ebx
3833
3834# qhasm: in12 ^= *(uint32 *) (m + 240)
3835# asm 1: xorl 240(<m=int32#5),<in12=int32#1
3836# asm 2: xorl 240(<m=%esi),<in12=%eax
3837xorl 240(%esi),%eax
3838
3839# qhasm: in13 ^= *(uint32 *) (m + 244)
3840# asm 1: xorl 244(<m=int32#5),<in13=int32#2
3841# asm 2: xorl 244(<m=%esi),<in13=%ecx
3842xorl 244(%esi),%ecx
3843
3844# qhasm: in14 ^= *(uint32 *) (m + 248)
3845# asm 1: xorl 248(<m=int32#5),<in14=int32#3
3846# asm 2: xorl 248(<m=%esi),<in14=%edx
3847xorl 248(%esi),%edx
3848
3849# qhasm: in15 ^= *(uint32 *) (m + 252)
3850# asm 1: xorl 252(<m=int32#5),<in15=int32#4
3851# asm 2: xorl 252(<m=%esi),<in15=%ebx
3852xorl 252(%esi),%ebx
3853
3854# qhasm: *(uint32 *) (out + 240) = in12
3855# asm 1: movl <in12=int32#1,240(<out=int32#6)
3856# asm 2: movl <in12=%eax,240(<out=%edi)
3857movl %eax,240(%edi)
3858
3859# qhasm: *(uint32 *) (out + 244) = in13
3860# asm 1: movl <in13=int32#2,244(<out=int32#6)
3861# asm 2: movl <in13=%ecx,244(<out=%edi)
3862movl %ecx,244(%edi)
3863
3864# qhasm: *(uint32 *) (out + 248) = in14
3865# asm 1: movl <in14=int32#3,248(<out=int32#6)
3866# asm 2: movl <in14=%edx,248(<out=%edi)
3867movl %edx,248(%edi)
3868
3869# qhasm: *(uint32 *) (out + 252) = in15
3870# asm 1: movl <in15=int32#4,252(<out=int32#6)
3871# asm 2: movl <in15=%ebx,252(<out=%edi)
3872movl %ebx,252(%edi)
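# (all sixteen state words of the four blocks have now been added to their orig* values,
#  xored with 256 message bytes and written to out)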
3873
3874# qhasm: bytes = bytes_stack
3875# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
3876# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
3877movl 24(%esp),%eax
3878
3879# qhasm: bytes -= 256
3880# asm 1: sub $256,<bytes=int32#1
3881# asm 2: sub $256,<bytes=%eax
3882sub $256,%eax
3883
3884# qhasm: m += 256
3885# asm 1: add $256,<m=int32#5
3886# asm 2: add $256,<m=%esi
3887add $256,%esi
3888
3889# qhasm: out += 256
3890# asm 1: add $256,<out=int32#6
3891# asm 2: add $256,<out=%edi
3892add $256,%edi
3893
3894# qhasm: out_stack = out
3895# asm 1: movl <out=int32#6,>out_stack=stack32#6
3896# asm 2: movl <out=%edi,>out_stack=20(%esp)
3897movl %edi,20(%esp)
3898
3899# qhasm: unsigned<? bytes - 256
3900# asm 1: cmp $256,<bytes=int32#1
3901# asm 2: cmp $256,<bytes=%eax
3902cmp $256,%eax
3903# comment:fp stack unchanged by jump
3904
3905# qhasm: goto bytesatleast256 if !unsigned<
3906jae ._bytesatleast256
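# (m and out have advanced by 256; while at least 256 bytes remain, the four-block path is taken again)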
3907
3908# qhasm: unsigned>? bytes - 0
3909# asm 1: cmp $0,<bytes=int32#1
3910# asm 2: cmp $0,<bytes=%eax
3911cmp $0,%eax
3912# comment:fp stack unchanged by jump
3913
3914# qhasm: goto done if !unsigned>
3915jbe ._done
3916# comment:fp stack unchanged by fallthrough
3917
3918# qhasm: bytesbetween1and255:
3919._bytesbetween1and255:
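# (fewer than 256 bytes remain; a nonzero tail of 1..255 bytes is handled one 64-byte block at a time below)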
3920
3921# qhasm: unsigned<? bytes - 64
3922# asm 1: cmp $64,<bytes=int32#1
3923# asm 2: cmp $64,<bytes=%eax
3924cmp $64,%eax
3925# comment:fp stack unchanged by jump
3926
3927# qhasm: goto nocopy if !unsigned<
3928jae ._nocopy
3929
3930# qhasm: ctarget = out
3931# asm 1: movl <out=int32#6,>ctarget=stack32#6
3932# asm 2: movl <out=%edi,>ctarget=20(%esp)
3933movl %edi,20(%esp)
3934
3935# qhasm: out = &tmp
3936# asm 1: leal <tmp=stack512#1,>out=int32#6
3937# asm 2: leal <tmp=640(%esp),>out=%edi
3938leal 640(%esp),%edi
3939
3940# qhasm: i = bytes
3941# asm 1: mov <bytes=int32#1,>i=int32#2
3942# asm 2: mov <bytes=%eax,>i=%ecx
3943mov %eax,%ecx
3944
3945# qhasm: while (i) { *out++ = *m++; --i }
3946rep movsb
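# (a tail shorter than 64 bytes is first copied into the tmp stack buffer so the block code below can use
#  full 64-byte loads and stores; the real destination was saved in ctarget above, presumably so the
#  finished bytes can be copied back out afterwards)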
3947
3948# qhasm: out = &tmp
3949# asm 1: leal <tmp=stack512#1,>out=int32#6
3950# asm 2: leal <tmp=640(%esp),>out=%edi
3951leal 640(%esp),%edi
3952
3953# qhasm: m = &tmp
3954# asm 1: leal <tmp=stack512#1,>m=int32#5
3955# asm 2: leal <tmp=640(%esp),>m=%esi
3956leal 640(%esp),%esi
3957# comment:fp stack unchanged by fallthrough
3958
3959# qhasm: nocopy:
3960._nocopy:
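# (from here on m and out address either the caller's buffers or tmp, and a single 64-byte block is produced)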
3961
3962# qhasm: bytes_stack = bytes
3963# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
3964# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
3965movl %eax,24(%esp)
3966
3967# qhasm: diag0 = x0
3968# asm 1: movdqa <x0=stack128#3,>diag0=int6464#1
3969# asm 2: movdqa <x0=64(%esp),>diag0=%xmm0
3970movdqa 64(%esp),%xmm0
3971
3972# qhasm: diag1 = x1
3973# asm 1: movdqa <x1=stack128#2,>diag1=int6464#2
3974# asm 2: movdqa <x1=48(%esp),>diag1=%xmm1
3975movdqa 48(%esp),%xmm1
3976
3977# qhasm: diag2 = x2
3978# asm 1: movdqa <x2=stack128#4,>diag2=int6464#3
3979# asm 2: movdqa <x2=80(%esp),>diag2=%xmm2
3980movdqa 80(%esp),%xmm2
3981
3982# qhasm: diag3 = x3
3983# asm 1: movdqa <x3=stack128#1,>diag3=int6464#4
3984# asm 2: movdqa <x3=32(%esp),>diag3=%xmm3
3985movdqa 32(%esp),%xmm3
3986
3987# qhasm: a0 = diag1
3988# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3989# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3990movdqa %xmm1,%xmm4
3991
3992# qhasm: i = 12
3993# asm 1: mov $12,>i=int32#1
3994# asm 2: mov $12,>i=%eax
3995mov $12,%eax
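# (the tail block uses the diagonal layout: diag0..diag3 hold the 16-word state in four xmm registers,
#  and i is initialised here as the round counter for ._mainloop2 below)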
3996
3997# qhasm: mainloop2:
3998._mainloop2:
3999
4000# qhasm: uint32323232 a0 += diag0
4001# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4002# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4003paddd %xmm0,%xmm4
4004
4005# qhasm: a1 = diag0
4006# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4007# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4008movdqa %xmm0,%xmm5
4009
4010# qhasm: b0 = a0
4011# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4012# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4013movdqa %xmm4,%xmm6
4014
4015# qhasm: uint32323232 a0 <<= 7
4016# asm 1: pslld $7,<a0=int6464#5
4017# asm 2: pslld $7,<a0=%xmm4
4018pslld $7,%xmm4
4019
4020# qhasm: uint32323232 b0 >>= 25
4021# asm 1: psrld $25,<b0=int6464#7
4022# asm 2: psrld $25,<b0=%xmm6
4023psrld $25,%xmm6
4024
4025# qhasm: diag3 ^= a0
4026# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4027# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4028pxor %xmm4,%xmm3
4029
4030# qhasm: diag3 ^= b0
4031# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4032# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4033pxor %xmm6,%xmm3
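# (pslld $7 together with psrld $25 and the two pxors emulate xoring a 32-bit left-rotate by 7 into diag3,
#  the first step of the Salsa20 quarter-round; the 9, 13 and 18 rotations below follow the same pattern)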
4034
4035# qhasm: uint32323232 a1 += diag3
4036# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4037# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4038paddd %xmm3,%xmm5
4039
4040# qhasm: a2 = diag3
4041# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4042# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4043movdqa %xmm3,%xmm4
4044
4045# qhasm: b1 = a1
4046# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4047# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4048movdqa %xmm5,%xmm6
4049
4050# qhasm: uint32323232 a1 <<= 9
4051# asm 1: pslld $9,<a1=int6464#6
4052# asm 2: pslld $9,<a1=%xmm5
4053pslld $9,%xmm5
4054
4055# qhasm: uint32323232 b1 >>= 23
4056# asm 1: psrld $23,<b1=int6464#7
4057# asm 2: psrld $23,<b1=%xmm6
4058psrld $23,%xmm6
4059
4060# qhasm: diag2 ^= a1
4061# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4062# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4063pxor %xmm5,%xmm2
4064
4065# qhasm: diag3 <<<= 32
4066# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4067# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4068pshufd $0x93,%xmm3,%xmm3
4069
4070# qhasm: diag2 ^= b1
4071# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4072# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4073pxor %xmm6,%xmm2
4074
4075# qhasm: uint32323232 a2 += diag2
4076# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4077# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4078paddd %xmm2,%xmm4
4079
4080# qhasm: a3 = diag2
4081# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4082# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4083movdqa %xmm2,%xmm5
4084
4085# qhasm: b2 = a2
4086# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4087# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4088movdqa %xmm4,%xmm6
4089
4090# qhasm: uint32323232 a2 <<= 13
4091# asm 1: pslld $13,<a2=int6464#5
4092# asm 2: pslld $13,<a2=%xmm4
4093pslld $13,%xmm4
4094
4095# qhasm: uint32323232 b2 >>= 19
4096# asm 1: psrld $19,<b2=int6464#7
4097# asm 2: psrld $19,<b2=%xmm6
4098psrld $19,%xmm6
4099
4100# qhasm: diag1 ^= a2
4101# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4102# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4103pxor %xmm4,%xmm1
4104
4105# qhasm: diag2 <<<= 64
4106# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4107# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4108pshufd $0x4e,%xmm2,%xmm2
4109
4110# qhasm: diag1 ^= b2
4111# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4112# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4113pxor %xmm6,%xmm1
4114
4115# qhasm: uint32323232 a3 += diag1
4116# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4117# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4118paddd %xmm1,%xmm5
4119
4120# qhasm: a4 = diag3
4121# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4122# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4123movdqa %xmm3,%xmm4
4124
4125# qhasm: b3 = a3
4126# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4127# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4128movdqa %xmm5,%xmm6
4129
4130# qhasm: uint32323232 a3 <<= 18
4131# asm 1: pslld $18,<a3=int6464#6
4132# asm 2: pslld $18,<a3=%xmm5
4133pslld $18,%xmm5
4134
4135# qhasm: uint32323232 b3 >>= 14
4136# asm 1: psrld $14,<b3=int6464#7
4137# asm 2: psrld $14,<b3=%xmm6
4138psrld $14,%xmm6
4139
4140# qhasm: diag0 ^= a3
4141# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4142# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4143pxor %xmm5,%xmm0
4144
4145# qhasm: diag1 <<<= 96
4146# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4147# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4148pshufd $0x39,%xmm1,%xmm1
4149
4150# qhasm: diag0 ^= b3
4151# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4152# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4153pxor %xmm6,%xmm0
4154
4155# qhasm: uint32323232 a4 += diag0
4156# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4157# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4158paddd %xmm0,%xmm4
4159
4160# qhasm: a5 = diag0
4161# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4162# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4163movdqa %xmm0,%xmm5
4164
4165# qhasm: b4 = a4
4166# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4167# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4168movdqa %xmm4,%xmm6
4169
4170# qhasm: uint32323232 a4 <<= 7
4171# asm 1: pslld $7,<a4=int6464#5
4172# asm 2: pslld $7,<a4=%xmm4
4173pslld $7,%xmm4
4174
4175# qhasm: uint32323232 b4 >>= 25
4176# asm 1: psrld $25,<b4=int6464#7
4177# asm 2: psrld $25,<b4=%xmm6
4178psrld $25,%xmm6
4179
4180# qhasm: diag1 ^= a4
4181# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4182# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4183pxor %xmm4,%xmm1
4184
4185# qhasm: diag1 ^= b4
4186# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4187# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4188pxor %xmm6,%xmm1
4189
4190# qhasm: uint32323232 a5 += diag1
4191# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4192# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4193paddd %xmm1,%xmm5
4194
4195# qhasm: a6 = diag1
4196# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4197# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4198movdqa %xmm1,%xmm4
4199
4200# qhasm: b5 = a5
4201# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4202# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4203movdqa %xmm5,%xmm6
4204
4205# qhasm: uint32323232 a5 <<= 9
4206# asm 1: pslld $9,<a5=int6464#6
4207# asm 2: pslld $9,<a5=%xmm5
4208pslld $9,%xmm5
4209
4210# qhasm: uint32323232 b5 >>= 23
4211# asm 1: psrld $23,<b5=int6464#7
4212# asm 2: psrld $23,<b5=%xmm6
4213psrld $23,%xmm6
4214
4215# qhasm: diag2 ^= a5
4216# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4217# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4218pxor %xmm5,%xmm2
4219
4220# qhasm: diag1 <<<= 32
4221# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4222# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4223pshufd $0x93,%xmm1,%xmm1
4224
4225# qhasm: diag2 ^= b5
4226# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4227# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4228pxor %xmm6,%xmm2
4229
4230# qhasm: uint32323232 a6 += diag2
4231# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4232# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4233paddd %xmm2,%xmm4
4234
4235# qhasm: a7 = diag2
4236# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4237# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4238movdqa %xmm2,%xmm5
4239
4240# qhasm: b6 = a6
4241# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4242# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4243movdqa %xmm4,%xmm6
4244
4245# qhasm: uint32323232 a6 <<= 13
4246# asm 1: pslld $13,<a6=int6464#5
4247# asm 2: pslld $13,<a6=%xmm4
4248pslld $13,%xmm4
4249
4250# qhasm: uint32323232 b6 >>= 19
4251# asm 1: psrld $19,<b6=int6464#7
4252# asm 2: psrld $19,<b6=%xmm6
4253psrld $19,%xmm6
4254
4255# qhasm: diag3 ^= a6
4256# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4257# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4258pxor %xmm4,%xmm3
4259
4260# qhasm: diag2 <<<= 64
4261# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4262# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4263pshufd $0x4e,%xmm2,%xmm2
4264
4265# qhasm: diag3 ^= b6
4266# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4267# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4268pxor %xmm6,%xmm3
4269
4270# qhasm: uint32323232 a7 += diag3
4271# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4272# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4273paddd %xmm3,%xmm5
4274
4275# qhasm: a0 = diag1
4276# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4277# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4278movdqa %xmm1,%xmm4
4279
4280# qhasm: b7 = a7
4281# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4282# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4283movdqa %xmm5,%xmm6
4284
4285# qhasm: uint32323232 a7 <<= 18
4286# asm 1: pslld $18,<a7=int6464#6
4287# asm 2: pslld $18,<a7=%xmm5
4288pslld $18,%xmm5
4289
4290# qhasm: uint32323232 b7 >>= 14
4291# asm 1: psrld $14,<b7=int6464#7
4292# asm 2: psrld $14,<b7=%xmm6
4293psrld $14,%xmm6
4294
4295# qhasm: diag0 ^= a7
4296# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4297# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4298pxor %xmm5,%xmm0
4299
4300# qhasm: diag3 <<<= 96
4301# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4302# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4303pshufd $0x39,%xmm3,%xmm3
4304
4305# qhasm: diag0 ^= b7
4306# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4307# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4308pxor %xmm6,%xmm0
4309
4310# qhasm: uint32323232 a0 += diag0
4311# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4312# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4313paddd %xmm0,%xmm4
4314
4315# qhasm: a1 = diag0
4316# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4317# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4318movdqa %xmm0,%xmm5
4319
4320# qhasm: b0 = a0
4321# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4322# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4323movdqa %xmm4,%xmm6
4324
4325# qhasm: uint32323232 a0 <<= 7
4326# asm 1: pslld $7,<a0=int6464#5
4327# asm 2: pslld $7,<a0=%xmm4
4328pslld $7,%xmm4
4329
4330# qhasm: uint32323232 b0 >>= 25
4331# asm 1: psrld $25,<b0=int6464#7
4332# asm 2: psrld $25,<b0=%xmm6
4333psrld $25,%xmm6
4334
4335# qhasm: diag3 ^= a0
4336# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4337# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4338pxor %xmm4,%xmm3
4339
4340# qhasm: diag3 ^= b0
4341# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4342# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4343pxor %xmm6,%xmm3
4344
4345# qhasm: uint32323232 a1 += diag3
4346# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4347# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4348paddd %xmm3,%xmm5
4349
4350# qhasm: a2 = diag3
4351# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4352# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4353movdqa %xmm3,%xmm4
4354
4355# qhasm: b1 = a1
4356# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4357# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4358movdqa %xmm5,%xmm6
4359
4360# qhasm: uint32323232 a1 <<= 9
4361# asm 1: pslld $9,<a1=int6464#6
4362# asm 2: pslld $9,<a1=%xmm5
4363pslld $9,%xmm5
4364
4365# qhasm: uint32323232 b1 >>= 23
4366# asm 1: psrld $23,<b1=int6464#7
4367# asm 2: psrld $23,<b1=%xmm6
4368psrld $23,%xmm6
4369
4370# qhasm: diag2 ^= a1
4371# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4372# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4373pxor %xmm5,%xmm2
4374
4375# qhasm: diag3 <<<= 32
4376# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4377# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4378pshufd $0x93,%xmm3,%xmm3
4379
4380# qhasm: diag2 ^= b1
4381# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4382# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4383pxor %xmm6,%xmm2
4384
4385# qhasm: uint32323232 a2 += diag2
4386# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4387# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4388paddd %xmm2,%xmm4
4389
4390# qhasm: a3 = diag2
4391# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4392# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4393movdqa %xmm2,%xmm5
4394
4395# qhasm: b2 = a2
4396# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4397# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4398movdqa %xmm4,%xmm6
4399
4400# qhasm: uint32323232 a2 <<= 13
4401# asm 1: pslld $13,<a2=int6464#5
4402# asm 2: pslld $13,<a2=%xmm4
4403pslld $13,%xmm4
4404
4405# qhasm: uint32323232 b2 >>= 19
4406# asm 1: psrld $19,<b2=int6464#7
4407# asm 2: psrld $19,<b2=%xmm6
4408psrld $19,%xmm6
4409
4410# qhasm: diag1 ^= a2
4411# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4412# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4413pxor %xmm4,%xmm1
4414
4415# qhasm: diag2 <<<= 64
4416# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4417# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4418pshufd $0x4e,%xmm2,%xmm2
4419
4420# qhasm: diag1 ^= b2
4421# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4422# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4423pxor %xmm6,%xmm1
4424
4425# qhasm: uint32323232 a3 += diag1
4426# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4427# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4428paddd %xmm1,%xmm5
4429
4430# qhasm: a4 = diag3
4431# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4432# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4433movdqa %xmm3,%xmm4
4434
4435# qhasm: b3 = a3
4436# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4437# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4438movdqa %xmm5,%xmm6
4439
4440# qhasm: uint32323232 a3 <<= 18
4441# asm 1: pslld $18,<a3=int6464#6
4442# asm 2: pslld $18,<a3=%xmm5
4443pslld $18,%xmm5
4444
4445# qhasm: uint32323232 b3 >>= 14
4446# asm 1: psrld $14,<b3=int6464#7
4447# asm 2: psrld $14,<b3=%xmm6
4448psrld $14,%xmm6
4449
4450# qhasm: diag0 ^= a3
4451# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4452# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4453pxor %xmm5,%xmm0
4454
4455# qhasm: diag1 <<<= 96
4456# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4457# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4458pshufd $0x39,%xmm1,%xmm1
4459
4460# qhasm: diag0 ^= b3
4461# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4462# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4463pxor %xmm6,%xmm0
4464
4465# qhasm: uint32323232 a4 += diag0
4466# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4467# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4468paddd %xmm0,%xmm4
4469
4470# qhasm: a5 = diag0
4471# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4472# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4473movdqa %xmm0,%xmm5
4474
4475# qhasm: b4 = a4
4476# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4477# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4478movdqa %xmm4,%xmm6
4479
4480# qhasm: uint32323232 a4 <<= 7
4481# asm 1: pslld $7,<a4=int6464#5
4482# asm 2: pslld $7,<a4=%xmm4
4483pslld $7,%xmm4
4484
4485# qhasm: uint32323232 b4 >>= 25
4486# asm 1: psrld $25,<b4=int6464#7
4487# asm 2: psrld $25,<b4=%xmm6
4488psrld $25,%xmm6
4489
4490# qhasm: diag1 ^= a4
4491# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4492# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4493pxor %xmm4,%xmm1
4494
4495# qhasm: diag1 ^= b4
4496# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4497# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4498pxor %xmm6,%xmm1
4499
4500# qhasm: uint32323232 a5 += diag1
4501# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4502# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4503paddd %xmm1,%xmm5
4504
4505# qhasm: a6 = diag1
4506# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4507# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4508movdqa %xmm1,%xmm4
4509
4510# qhasm: b5 = a5
4511# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4512# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4513movdqa %xmm5,%xmm6
4514
4515# qhasm: uint32323232 a5 <<= 9
4516# asm 1: pslld $9,<a5=int6464#6
4517# asm 2: pslld $9,<a5=%xmm5
4518pslld $9,%xmm5
4519
4520# qhasm: uint32323232 b5 >>= 23
4521# asm 1: psrld $23,<b5=int6464#7
4522# asm 2: psrld $23,<b5=%xmm6
4523psrld $23,%xmm6
4524
4525# qhasm: diag2 ^= a5
4526# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4527# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4528pxor %xmm5,%xmm2
4529
4530# qhasm: diag1 <<<= 32
4531# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4532# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4533pshufd $0x93,%xmm1,%xmm1
4534
4535# qhasm: diag2 ^= b5
4536# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4537# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4538pxor %xmm6,%xmm2
4539
4540# qhasm: uint32323232 a6 += diag2
4541# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4542# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4543paddd %xmm2,%xmm4
4544
4545# qhasm: a7 = diag2
4546# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4547# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4548movdqa %xmm2,%xmm5
4549
4550# qhasm: b6 = a6
4551# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4552# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4553movdqa %xmm4,%xmm6
4554
4555# qhasm: uint32323232 a6 <<= 13
4556# asm 1: pslld $13,<a6=int6464#5
4557# asm 2: pslld $13,<a6=%xmm4
4558pslld $13,%xmm4
4559
4560# qhasm: uint32323232 b6 >>= 19
4561# asm 1: psrld $19,<b6=int6464#7
4562# asm 2: psrld $19,<b6=%xmm6
4563psrld $19,%xmm6
4564
4565# qhasm: diag3 ^= a6
4566# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4567# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4568pxor %xmm4,%xmm3
4569
4570# qhasm: diag2 <<<= 64
4571# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4572# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4573pshufd $0x4e,%xmm2,%xmm2
4574
4575# qhasm: diag3 ^= b6
4576# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4577# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4578pxor %xmm6,%xmm3
4579
4580# qhasm: unsigned>? i -= 4
4581# asm 1: sub $4,<i=int32#1
4582# asm 2: sub $4,<i=%eax
4583sub $4,%eax
4584
4585# qhasm: uint32323232 a7 += diag3
4586# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4587# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4588paddd %xmm3,%xmm5
4589
4590# qhasm: a0 = diag1
4591# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4592# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4593movdqa %xmm1,%xmm4
4594
4595# qhasm: b7 = a7
4596# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4597# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4598movdqa %xmm5,%xmm6
4599
4600# qhasm: uint32323232 a7 <<= 18
4601# asm 1: pslld $18,<a7=int6464#6
4602# asm 2: pslld $18,<a7=%xmm5
4603pslld $18,%xmm5
4604
4605# qhasm: b0 = 0
4606# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4607# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4608pxor %xmm7,%xmm7
4609
4610# qhasm: uint32323232 b7 >>= 14
4611# asm 1: psrld $14,<b7=int6464#7
4612# asm 2: psrld $14,<b7=%xmm6
4613psrld $14,%xmm6
4614
4615# qhasm: diag0 ^= a7
4616# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4617# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4618pxor %xmm5,%xmm0
4619
4620# qhasm: diag3 <<<= 96
4621# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4622# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4623pshufd $0x39,%xmm3,%xmm3
4624
4625# qhasm: diag0 ^= b7
4626# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4627# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4628pxor %xmm6,%xmm0
4629# comment:fp stack unchanged by jump
4630
4631# qhasm: goto mainloop2 if unsigned>
4632ja ._mainloop2
4633
4634# qhasm: uint32323232 diag0 += x0
4635# asm 1: paddd <x0=stack128#3,<diag0=int6464#1
4636# asm 2: paddd <x0=64(%esp),<diag0=%xmm0
4637paddd 64(%esp),%xmm0
4638
4639# qhasm: uint32323232 diag1 += x1
4640# asm 1: paddd <x1=stack128#2,<diag1=int6464#2
4641# asm 2: paddd <x1=48(%esp),<diag1=%xmm1
4642paddd 48(%esp),%xmm1
4643
4644# qhasm: uint32323232 diag2 += x2
4645# asm 1: paddd <x2=stack128#4,<diag2=int6464#3
4646# asm 2: paddd <x2=80(%esp),<diag2=%xmm2
4647paddd 80(%esp),%xmm2
4648
4649# qhasm: uint32323232 diag3 += x3
4650# asm 1: paddd <x3=stack128#1,<diag3=int6464#4
4651# asm 2: paddd <x3=32(%esp),<diag3=%xmm3
4652paddd 32(%esp),%xmm3
4653
4654# qhasm: in0 = diag0
4655# asm 1: movd <diag0=int6464#1,>in0=int32#1
4656# asm 2: movd <diag0=%xmm0,>in0=%eax
4657movd %xmm0,%eax
4658
4659# qhasm: in12 = diag1
4660# asm 1: movd <diag1=int6464#2,>in12=int32#2
4661# asm 2: movd <diag1=%xmm1,>in12=%ecx
4662movd %xmm1,%ecx
4663
4664# qhasm: in8 = diag2
4665# asm 1: movd <diag2=int6464#3,>in8=int32#3
4666# asm 2: movd <diag2=%xmm2,>in8=%edx
4667movd %xmm2,%edx
4668
4669# qhasm: in4 = diag3
4670# asm 1: movd <diag3=int6464#4,>in4=int32#4
4671# asm 2: movd <diag3=%xmm3,>in4=%ebx
4672movd %xmm3,%ebx
4673
4674# qhasm: diag0 <<<= 96
4675# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4676# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4677pshufd $0x39,%xmm0,%xmm0
4678
4679# qhasm: diag1 <<<= 96
4680# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4681# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4682pshufd $0x39,%xmm1,%xmm1
4683
4684# qhasm: diag2 <<<= 96
4685# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4686# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4687pshufd $0x39,%xmm2,%xmm2
4688
4689# qhasm: diag3 <<<= 96
4690# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4691# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4692pshufd $0x39,%xmm3,%xmm3
4693
4694# qhasm: in0 ^= *(uint32 *) (m + 0)
4695# asm 1: xorl 0(<m=int32#5),<in0=int32#1
4696# asm 2: xorl 0(<m=%esi),<in0=%eax
4697xorl 0(%esi),%eax
4698
4699# qhasm: in12 ^= *(uint32 *) (m + 48)
4700# asm 1: xorl 48(<m=int32#5),<in12=int32#2
4701# asm 2: xorl 48(<m=%esi),<in12=%ecx
4702xorl 48(%esi),%ecx
4703
4704# qhasm: in8 ^= *(uint32 *) (m + 32)
4705# asm 1: xorl 32(<m=int32#5),<in8=int32#3
4706# asm 2: xorl 32(<m=%esi),<in8=%edx
4707xorl 32(%esi),%edx
4708
4709# qhasm: in4 ^= *(uint32 *) (m + 16)
4710# asm 1: xorl 16(<m=int32#5),<in4=int32#4
4711# asm 2: xorl 16(<m=%esi),<in4=%ebx
4712xorl 16(%esi),%ebx
4713
4714# qhasm: *(uint32 *) (out + 0) = in0
4715# asm 1: movl <in0=int32#1,0(<out=int32#6)
4716# asm 2: movl <in0=%eax,0(<out=%edi)
4717movl %eax,0(%edi)
4718
4719# qhasm: *(uint32 *) (out + 48) = in12
4720# asm 1: movl <in12=int32#2,48(<out=int32#6)
4721# asm 2: movl <in12=%ecx,48(<out=%edi)
4722movl %ecx,48(%edi)
4723
4724# qhasm: *(uint32 *) (out + 32) = in8
4725# asm 1: movl <in8=int32#3,32(<out=int32#6)
4726# asm 2: movl <in8=%edx,32(<out=%edi)
4727movl %edx,32(%edi)
4728
4729# qhasm: *(uint32 *) (out + 16) = in4
4730# asm 1: movl <in4=int32#4,16(<out=int32#6)
4731# asm 2: movl <in4=%ebx,16(<out=%edi)
4732movl %ebx,16(%edi)
4733
4734# qhasm: in5 = diag0
4735# asm 1: movd <diag0=int6464#1,>in5=int32#1
4736# asm 2: movd <diag0=%xmm0,>in5=%eax
4737movd %xmm0,%eax
4738
4739# qhasm: in1 = diag1
4740# asm 1: movd <diag1=int6464#2,>in1=int32#2
4741# asm 2: movd <diag1=%xmm1,>in1=%ecx
4742movd %xmm1,%ecx
4743
4744# qhasm: in13 = diag2
4745# asm 1: movd <diag2=int6464#3,>in13=int32#3
4746# asm 2: movd <diag2=%xmm2,>in13=%edx
4747movd %xmm2,%edx
4748
4749# qhasm: in9 = diag3
4750# asm 1: movd <diag3=int6464#4,>in9=int32#4
4751# asm 2: movd <diag3=%xmm3,>in9=%ebx
4752movd %xmm3,%ebx
4753
4754# qhasm: diag0 <<<= 96
4755# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4756# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4757pshufd $0x39,%xmm0,%xmm0
4758
4759# qhasm: diag1 <<<= 96
4760# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4761# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4762pshufd $0x39,%xmm1,%xmm1
4763
4764# qhasm: diag2 <<<= 96
4765# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4766# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4767pshufd $0x39,%xmm2,%xmm2
4768
4769# qhasm: diag3 <<<= 96
4770# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4771# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4772pshufd $0x39,%xmm3,%xmm3
4773
4774# qhasm: in5 ^= *(uint32 *) (m + 20)
4775# asm 1: xorl 20(<m=int32#5),<in5=int32#1
4776# asm 2: xorl 20(<m=%esi),<in5=%eax
4777xorl 20(%esi),%eax
4778
4779# qhasm: in1 ^= *(uint32 *) (m + 4)
4780# asm 1: xorl 4(<m=int32#5),<in1=int32#2
4781# asm 2: xorl 4(<m=%esi),<in1=%ecx
4782xorl 4(%esi),%ecx
4783
4784# qhasm: in13 ^= *(uint32 *) (m + 52)
4785# asm 1: xorl 52(<m=int32#5),<in13=int32#3
4786# asm 2: xorl 52(<m=%esi),<in13=%edx
4787xorl 52(%esi),%edx
4788
4789# qhasm: in9 ^= *(uint32 *) (m + 36)
4790# asm 1: xorl 36(<m=int32#5),<in9=int32#4
4791# asm 2: xorl 36(<m=%esi),<in9=%ebx
4792xorl 36(%esi),%ebx
4793
4794# qhasm: *(uint32 *) (out + 20) = in5
4795# asm 1: movl <in5=int32#1,20(<out=int32#6)
4796# asm 2: movl <in5=%eax,20(<out=%edi)
4797movl %eax,20(%edi)
4798
4799# qhasm: *(uint32 *) (out + 4) = in1
4800# asm 1: movl <in1=int32#2,4(<out=int32#6)
4801# asm 2: movl <in1=%ecx,4(<out=%edi)
4802movl %ecx,4(%edi)
4803
4804# qhasm: *(uint32 *) (out + 52) = in13
4805# asm 1: movl <in13=int32#3,52(<out=int32#6)
4806# asm 2: movl <in13=%edx,52(<out=%edi)
4807movl %edx,52(%edi)
4808
4809# qhasm: *(uint32 *) (out + 36) = in9
4810# asm 1: movl <in9=int32#4,36(<out=int32#6)
4811# asm 2: movl <in9=%ebx,36(<out=%edi)
4812movl %ebx,36(%edi)
4813
4814# qhasm: in10 = diag0
4815# asm 1: movd <diag0=int6464#1,>in10=int32#1
4816# asm 2: movd <diag0=%xmm0,>in10=%eax
4817movd %xmm0,%eax
4818
4819# qhasm: in6 = diag1
4820# asm 1: movd <diag1=int6464#2,>in6=int32#2
4821# asm 2: movd <diag1=%xmm1,>in6=%ecx
4822movd %xmm1,%ecx
4823
4824# qhasm: in2 = diag2
4825# asm 1: movd <diag2=int6464#3,>in2=int32#3
4826# asm 2: movd <diag2=%xmm2,>in2=%edx
4827movd %xmm2,%edx
4828
4829# qhasm: in14 = diag3
4830# asm 1: movd <diag3=int6464#4,>in14=int32#4
4831# asm 2: movd <diag3=%xmm3,>in14=%ebx
4832movd %xmm3,%ebx
4833
4834# qhasm: diag0 <<<= 96
4835# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4836# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4837pshufd $0x39,%xmm0,%xmm0
4838
4839# qhasm: diag1 <<<= 96
4840# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4841# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4842pshufd $0x39,%xmm1,%xmm1
4843
4844# qhasm: diag2 <<<= 96
4845# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4846# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4847pshufd $0x39,%xmm2,%xmm2
4848
4849# qhasm: diag3 <<<= 96
4850# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4851# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4852pshufd $0x39,%xmm3,%xmm3
4853
4854# qhasm: in10 ^= *(uint32 *) (m + 40)
4855# asm 1: xorl 40(<m=int32#5),<in10=int32#1
4856# asm 2: xorl 40(<m=%esi),<in10=%eax
4857xorl 40(%esi),%eax
4858
4859# qhasm: in6 ^= *(uint32 *) (m + 24)
4860# asm 1: xorl 24(<m=int32#5),<in6=int32#2
4861# asm 2: xorl 24(<m=%esi),<in6=%ecx
4862xorl 24(%esi),%ecx
4863
4864# qhasm: in2 ^= *(uint32 *) (m + 8)
4865# asm 1: xorl 8(<m=int32#5),<in2=int32#3
4866# asm 2: xorl 8(<m=%esi),<in2=%edx
4867xorl 8(%esi),%edx
4868
4869# qhasm: in14 ^= *(uint32 *) (m + 56)
4870# asm 1: xorl 56(<m=int32#5),<in14=int32#4
4871# asm 2: xorl 56(<m=%esi),<in14=%ebx
4872xorl 56(%esi),%ebx
4873
4874# qhasm: *(uint32 *) (out + 40) = in10
4875# asm 1: movl <in10=int32#1,40(<out=int32#6)
4876# asm 2: movl <in10=%eax,40(<out=%edi)
4877movl %eax,40(%edi)
4878
4879# qhasm: *(uint32 *) (out + 24) = in6
4880# asm 1: movl <in6=int32#2,24(<out=int32#6)
4881# asm 2: movl <in6=%ecx,24(<out=%edi)
4882movl %ecx,24(%edi)
4883
4884# qhasm: *(uint32 *) (out + 8) = in2
4885# asm 1: movl <in2=int32#3,8(<out=int32#6)
4886# asm 2: movl <in2=%edx,8(<out=%edi)
4887movl %edx,8(%edi)
4888
4889# qhasm: *(uint32 *) (out + 56) = in14
4890# asm 1: movl <in14=int32#4,56(<out=int32#6)
4891# asm 2: movl <in14=%ebx,56(<out=%edi)
4892movl %ebx,56(%edi)
4893
4894# qhasm: in15 = diag0
4895# asm 1: movd <diag0=int6464#1,>in15=int32#1
4896# asm 2: movd <diag0=%xmm0,>in15=%eax
4897movd %xmm0,%eax
4898
4899# qhasm: in11 = diag1
4900# asm 1: movd <diag1=int6464#2,>in11=int32#2
4901# asm 2: movd <diag1=%xmm1,>in11=%ecx
4902movd %xmm1,%ecx
4903
4904# qhasm: in7 = diag2
4905# asm 1: movd <diag2=int6464#3,>in7=int32#3
4906# asm 2: movd <diag2=%xmm2,>in7=%edx
4907movd %xmm2,%edx
4908
4909# qhasm: in3 = diag3
4910# asm 1: movd <diag3=int6464#4,>in3=int32#4
4911# asm 2: movd <diag3=%xmm3,>in3=%ebx
4912movd %xmm3,%ebx
4913
4914# qhasm: in15 ^= *(uint32 *) (m + 60)
4915# asm 1: xorl 60(<m=int32#5),<in15=int32#1
4916# asm 2: xorl 60(<m=%esi),<in15=%eax
4917xorl 60(%esi),%eax
4918
4919# qhasm: in11 ^= *(uint32 *) (m + 44)
4920# asm 1: xorl 44(<m=int32#5),<in11=int32#2
4921# asm 2: xorl 44(<m=%esi),<in11=%ecx
4922xorl 44(%esi),%ecx
4923
4924# qhasm: in7 ^= *(uint32 *) (m + 28)
4925# asm 1: xorl 28(<m=int32#5),<in7=int32#3
4926# asm 2: xorl 28(<m=%esi),<in7=%edx
4927xorl 28(%esi),%edx
4928
4929# qhasm: in3 ^= *(uint32 *) (m + 12)
4930# asm 1: xorl 12(<m=int32#5),<in3=int32#4
4931# asm 2: xorl 12(<m=%esi),<in3=%ebx
4932xorl 12(%esi),%ebx
4933
4934# qhasm: *(uint32 *) (out + 60) = in15
4935# asm 1: movl <in15=int32#1,60(<out=int32#6)
4936# asm 2: movl <in15=%eax,60(<out=%edi)
4937movl %eax,60(%edi)
4938
4939# qhasm: *(uint32 *) (out + 44) = in11
4940# asm 1: movl <in11=int32#2,44(<out=int32#6)
4941# asm 2: movl <in11=%ecx,44(<out=%edi)
4942movl %ecx,44(%edi)
4943
4944# qhasm: *(uint32 *) (out + 28) = in7
4945# asm 1: movl <in7=int32#3,28(<out=int32#6)
4946# asm 2: movl <in7=%edx,28(<out=%edi)
4947movl %edx,28(%edi)
4948
4949# qhasm: *(uint32 *) (out + 12) = in3
4950# asm 1: movl <in3=int32#4,12(<out=int32#6)
4951# asm 2: movl <in3=%ebx,12(<out=%edi)
4952movl %ebx,12(%edi)
4953
4954# qhasm: bytes = bytes_stack
4955# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
4956# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
4957movl 24(%esp),%eax
4958
4959# qhasm: in8 = ((uint32 *)&x2)[0]
4960# asm 1: movl <x2=stack128#4,>in8=int32#2
4961# asm 2: movl <x2=80(%esp),>in8=%ecx
4962movl 80(%esp),%ecx
4963
4964# qhasm: in9 = ((uint32 *)&x3)[1]
4965# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
4966# asm 2: movl 4+<x3=32(%esp),>in9=%edx
4967movl 4+32(%esp),%edx
4968
4969# qhasm: carry? in8 += 1
4970# asm 1: add $1,<in8=int32#2
4971# asm 2: add $1,<in8=%ecx
4972add $1,%ecx
4973
4974# qhasm: in9 += 0 + carry
4975# asm 1: adc $0,<in9=int32#3
4976# asm 2: adc $0,<in9=%edx
4977adc $0,%edx
4978
4979# qhasm: ((uint32 *)&x2)[0] = in8
4980# asm 1: movl <in8=int32#2,>x2=stack128#4
4981# asm 2: movl <in8=%ecx,>x2=80(%esp)
4982movl %ecx,80(%esp)
4983
4984# qhasm: ((uint32 *)&x3)[1] = in9
4985# asm 1: movl <in9=int32#3,4+<x3=stack128#1
4986# asm 2: movl <in9=%edx,4+<x3=32(%esp)
4987movl %edx,4+32(%esp)
4988
4989# qhasm: unsigned>? unsigned<? bytes - 64
4990# asm 1: cmp $64,<bytes=int32#1
4991# asm 2: cmp $64,<bytes=%eax
4992cmp $64,%eax
4993# comment:fp stack unchanged by jump
4994
4995# qhasm: goto bytesatleast65 if unsigned>
4996ja ._bytesatleast65
4997# comment:fp stack unchanged by jump
4998
4999# qhasm: goto bytesatleast64 if !unsigned<
5000jae ._bytesatleast64
5001
5002# qhasm: m = out
5003# asm 1: mov <out=int32#6,>m=int32#5
5004# asm 2: mov <out=%edi,>m=%esi
5005mov %edi,%esi
5006
5007# qhasm: out = ctarget
5008# asm 1: movl <ctarget=stack32#6,>out=int32#6
5009# asm 2: movl <ctarget=20(%esp),>out=%edi
5010movl 20(%esp),%edi
5011
5012# qhasm: i = bytes
5013# asm 1: mov <bytes=int32#1,>i=int32#2
5014# asm 2: mov <bytes=%eax,>i=%ecx
5015mov %eax,%ecx
5016
5017# qhasm: while (i) { *out++ = *m++; --i }
5018rep movsb
5019# comment:fp stack unchanged by fallthrough
5020
5021# qhasm: bytesatleast64:
5022._bytesatleast64:
5023# comment:fp stack unchanged by fallthrough
5024
5025# qhasm: done:
5026._done:
5027
5028# qhasm: eax = eax_stack
5029# asm 1: movl <eax_stack=stack32#1,>eax=int32#1
5030# asm 2: movl <eax_stack=0(%esp),>eax=%eax
5031movl 0(%esp),%eax
5032
5033# qhasm: ebx = ebx_stack
5034# asm 1: movl <ebx_stack=stack32#2,>ebx=int32#4
5035# asm 2: movl <ebx_stack=4(%esp),>ebx=%ebx
5036movl 4(%esp),%ebx
5037
5038# qhasm: esi = esi_stack
5039# asm 1: movl <esi_stack=stack32#3,>esi=int32#5
5040# asm 2: movl <esi_stack=8(%esp),>esi=%esi
5041movl 8(%esp),%esi
5042
5043# qhasm: edi = edi_stack
5044# asm 1: movl <edi_stack=stack32#4,>edi=int32#6
5045# asm 2: movl <edi_stack=12(%esp),>edi=%edi
5046movl 12(%esp),%edi
5047
5048# qhasm: ebp = ebp_stack
5049# asm 1: movl <ebp_stack=stack32#5,>ebp=int32#7
5050# asm 2: movl <ebp_stack=16(%esp),>ebp=%ebp
5051movl 16(%esp),%ebp
5052
5053# qhasm: leave
5054add %eax,%esp
5055xor %eax,%eax
5056ret
5057
5058# qhasm: bytesatleast65:
5059._bytesatleast65:
5060
5061# qhasm: bytes -= 64
5062# asm 1: sub $64,<bytes=int32#1
5063# asm 2: sub $64,<bytes=%eax
5064sub $64,%eax
5065
5066# qhasm: out += 64
5067# asm 1: add $64,<out=int32#6
5068# asm 2: add $64,<out=%edi
5069add $64,%edi
5070
5071# qhasm: m += 64
5072# asm 1: add $64,<m=int32#5
5073# asm 2: add $64,<m=%esi
5074add $64,%esi
5075# comment:fp stack unchanged by jump
5076
5077# qhasm: goto bytesbetween1and255
5078jmp ._bytesbetween1and255
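
The add/rotate/xor pattern repeated throughout the SSE code above (a paddd, then a pslld/psrld pair feeding two pxor instructions) is the Salsa20 quarter-round applied to four state words at a time; the pslld constants 7, 9, 13, 18 are the rotation distances, and the pshufd $0x93/$0x4e/$0x39 shuffles realign the diagonals between the two halves of each double round. Below is a minimal scalar sketch of that quarter-round for reference; the rotl32 helper and variable names are illustrative and not part of the NaCl sources.

#include <stdint.h>

/* Rotate left by n bits: the pslld/psrld/pxor triple in the assembly
   above computes this on four 32-bit lanes at once (illustrative helper). */
static uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

/* One Salsa20 quarter-round; the rotation distances 7, 9, 13, 18 match
   the pslld immediates above (each paired psrld uses 32 minus them). */
static void quarterround(uint32_t *y0, uint32_t *y1, uint32_t *y2, uint32_t *y3)
{
    *y1 ^= rotl32(*y0 + *y3, 7);
    *y2 ^= rotl32(*y1 + *y0, 9);
    *y3 ^= rotl32(*y2 + *y1, 13);
    *y0 ^= rotl32(*y3 + *y2, 18);
}
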
diff --git a/nacl/crypto_stream/salsa208/amd64_xmm6/api.h b/nacl/crypto_stream/salsa208/amd64_xmm6/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/amd64_xmm6/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
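
For context, the two constants above give the Salsa20/8 stream cipher its NaCl parameter sizes: a 32-byte key and an 8-byte nonce. A minimal, hand-written usage sketch follows, assuming the argument order visible in the register assignments of the stream.s further below (out, len, nonce, key for the stream call; out, in, len, nonce, key for the xor call) and the conventional int return; NaCl normally exposes these as crypto_stream_salsa208 and crypto_stream_salsa208_xor once an implementation is selected, so the prototypes and buffer names here are illustrative only.

/* Illustrative prototypes matching the symbols defined in stream.s below. */
extern int crypto_stream_salsa208_amd64_xmm6(
        unsigned char *c, unsigned long long clen,
        const unsigned char *n, const unsigned char *k);
extern int crypto_stream_salsa208_amd64_xmm6_xor(
        unsigned char *c, const unsigned char *m, unsigned long long mlen,
        const unsigned char *n, const unsigned char *k);

void example(void)
{
    unsigned char k[32];   /* CRYPTO_KEYBYTES   = 32 */
    unsigned char n[8];    /* CRYPTO_NONCEBYTES = 8  */
    unsigned char pad[64]; /* keystream output */
    unsigned char m[64];   /* plaintext */
    unsigned char c[64];   /* ciphertext */

    /* ... fill k with a secret key and n with a fresh nonce ... */

    /* Produce 64 bytes of keystream. */
    crypto_stream_salsa208_amd64_xmm6(pad, sizeof pad, n, k);

    /* Or encrypt directly: c = m XOR keystream(k, n). */
    crypto_stream_salsa208_amd64_xmm6_xor(c, m, sizeof m, n, k);
}
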
diff --git a/nacl/crypto_stream/salsa208/amd64_xmm6/implementors b/nacl/crypto_stream/salsa208/amd64_xmm6/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/amd64_xmm6/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa208/amd64_xmm6/stream.s b/nacl/crypto_stream/salsa208/amd64_xmm6/stream.s
new file mode 100644
index 00000000..f27411fe
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/amd64_xmm6/stream.s
@@ -0,0 +1,4823 @@
1
2# qhasm: int64 r11_caller
3
4# qhasm: int64 r12_caller
5
6# qhasm: int64 r13_caller
7
8# qhasm: int64 r14_caller
9
10# qhasm: int64 r15_caller
11
12# qhasm: int64 rbx_caller
13
14# qhasm: int64 rbp_caller
15
16# qhasm: caller r11_caller
17
18# qhasm: caller r12_caller
19
20# qhasm: caller r13_caller
21
22# qhasm: caller r14_caller
23
24# qhasm: caller r15_caller
25
26# qhasm: caller rbx_caller
27
28# qhasm: caller rbp_caller
29
30# qhasm: stack64 r11_stack
31
32# qhasm: stack64 r12_stack
33
34# qhasm: stack64 r13_stack
35
36# qhasm: stack64 r14_stack
37
38# qhasm: stack64 r15_stack
39
40# qhasm: stack64 rbx_stack
41
42# qhasm: stack64 rbp_stack
43
44# qhasm: int64 a
45
46# qhasm: int64 arg1
47
48# qhasm: int64 arg2
49
50# qhasm: int64 arg3
51
52# qhasm: int64 arg4
53
54# qhasm: int64 arg5
55
56# qhasm: input arg1
57
58# qhasm: input arg2
59
60# qhasm: input arg3
61
62# qhasm: input arg4
63
64# qhasm: input arg5
65
66# qhasm: int64 k
67
68# qhasm: int64 kbits
69
70# qhasm: int64 iv
71
72# qhasm: int64 i
73
74# qhasm: stack128 x0
75
76# qhasm: stack128 x1
77
78# qhasm: stack128 x2
79
80# qhasm: stack128 x3
81
82# qhasm: int64 m
83
84# qhasm: int64 out
85
86# qhasm: int64 bytes
87
88# qhasm: stack32 eax_stack
89
90# qhasm: stack32 ebx_stack
91
92# qhasm: stack32 esi_stack
93
94# qhasm: stack32 edi_stack
95
96# qhasm: stack32 ebp_stack
97
98# qhasm: int6464 diag0
99
100# qhasm: int6464 diag1
101
102# qhasm: int6464 diag2
103
104# qhasm: int6464 diag3
105
106# qhasm: int6464 a0
107
108# qhasm: int6464 a1
109
110# qhasm: int6464 a2
111
112# qhasm: int6464 a3
113
114# qhasm: int6464 a4
115
116# qhasm: int6464 a5
117
118# qhasm: int6464 a6
119
120# qhasm: int6464 a7
121
122# qhasm: int6464 b0
123
124# qhasm: int6464 b1
125
126# qhasm: int6464 b2
127
128# qhasm: int6464 b3
129
130# qhasm: int6464 b4
131
132# qhasm: int6464 b5
133
134# qhasm: int6464 b6
135
136# qhasm: int6464 b7
137
138# qhasm: int6464 z0
139
140# qhasm: int6464 z1
141
142# qhasm: int6464 z2
143
144# qhasm: int6464 z3
145
146# qhasm: int6464 z4
147
148# qhasm: int6464 z5
149
150# qhasm: int6464 z6
151
152# qhasm: int6464 z7
153
154# qhasm: int6464 z8
155
156# qhasm: int6464 z9
157
158# qhasm: int6464 z10
159
160# qhasm: int6464 z11
161
162# qhasm: int6464 z12
163
164# qhasm: int6464 z13
165
166# qhasm: int6464 z14
167
168# qhasm: int6464 z15
169
170# qhasm: stack128 z0_stack
171
172# qhasm: stack128 z1_stack
173
174# qhasm: stack128 z2_stack
175
176# qhasm: stack128 z3_stack
177
178# qhasm: stack128 z4_stack
179
180# qhasm: stack128 z5_stack
181
182# qhasm: stack128 z6_stack
183
184# qhasm: stack128 z7_stack
185
186# qhasm: stack128 z8_stack
187
188# qhasm: stack128 z9_stack
189
190# qhasm: stack128 z10_stack
191
192# qhasm: stack128 z11_stack
193
194# qhasm: stack128 z12_stack
195
196# qhasm: stack128 z13_stack
197
198# qhasm: stack128 z14_stack
199
200# qhasm: stack128 z15_stack
201
202# qhasm: int6464 y0
203
204# qhasm: int6464 y1
205
206# qhasm: int6464 y2
207
208# qhasm: int6464 y3
209
210# qhasm: int6464 y4
211
212# qhasm: int6464 y5
213
214# qhasm: int6464 y6
215
216# qhasm: int6464 y7
217
218# qhasm: int6464 y8
219
220# qhasm: int6464 y9
221
222# qhasm: int6464 y10
223
224# qhasm: int6464 y11
225
226# qhasm: int6464 y12
227
228# qhasm: int6464 y13
229
230# qhasm: int6464 y14
231
232# qhasm: int6464 y15
233
234# qhasm: int6464 r0
235
236# qhasm: int6464 r1
237
238# qhasm: int6464 r2
239
240# qhasm: int6464 r3
241
242# qhasm: int6464 r4
243
244# qhasm: int6464 r5
245
246# qhasm: int6464 r6
247
248# qhasm: int6464 r7
249
250# qhasm: int6464 r8
251
252# qhasm: int6464 r9
253
254# qhasm: int6464 r10
255
256# qhasm: int6464 r11
257
258# qhasm: int6464 r12
259
260# qhasm: int6464 r13
261
262# qhasm: int6464 r14
263
264# qhasm: int6464 r15
265
266# qhasm: stack128 orig0
267
268# qhasm: stack128 orig1
269
270# qhasm: stack128 orig2
271
272# qhasm: stack128 orig3
273
274# qhasm: stack128 orig4
275
276# qhasm: stack128 orig5
277
278# qhasm: stack128 orig6
279
280# qhasm: stack128 orig7
281
282# qhasm: stack128 orig8
283
284# qhasm: stack128 orig9
285
286# qhasm: stack128 orig10
287
288# qhasm: stack128 orig11
289
290# qhasm: stack128 orig12
291
292# qhasm: stack128 orig13
293
294# qhasm: stack128 orig14
295
296# qhasm: stack128 orig15
297
298# qhasm: int64 in0
299
300# qhasm: int64 in1
301
302# qhasm: int64 in2
303
304# qhasm: int64 in3
305
306# qhasm: int64 in4
307
308# qhasm: int64 in5
309
310# qhasm: int64 in6
311
312# qhasm: int64 in7
313
314# qhasm: int64 in8
315
316# qhasm: int64 in9
317
318# qhasm: int64 in10
319
320# qhasm: int64 in11
321
322# qhasm: int64 in12
323
324# qhasm: int64 in13
325
326# qhasm: int64 in14
327
328# qhasm: int64 in15
329
330# qhasm: stack512 tmp
331
332# qhasm: int64 ctarget
333
334# qhasm: stack64 bytes_backup
335
336# qhasm: enter crypto_stream_salsa208_amd64_xmm6
337.text
338.p2align 5
339.globl _crypto_stream_salsa208_amd64_xmm6
340.globl crypto_stream_salsa208_amd64_xmm6
341_crypto_stream_salsa208_amd64_xmm6:
342crypto_stream_salsa208_amd64_xmm6:
343mov %rsp,%r11
344and $31,%r11
345add $480,%r11
346sub %r11,%rsp
347
348# qhasm: r11_stack = r11_caller
349# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
350# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
351movq %r11,352(%rsp)
352
353# qhasm: r12_stack = r12_caller
354# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
355# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
356movq %r12,360(%rsp)
357
358# qhasm: r13_stack = r13_caller
359# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
360# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
361movq %r13,368(%rsp)
362
363# qhasm: r14_stack = r14_caller
364# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
365# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
366movq %r14,376(%rsp)
367
368# qhasm: r15_stack = r15_caller
369# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
370# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
371movq %r15,384(%rsp)
372
373# qhasm: rbx_stack = rbx_caller
374# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
375# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
376movq %rbx,392(%rsp)
377
378# qhasm: rbp_stack = rbp_caller
379# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
380# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
381movq %rbp,400(%rsp)
382
383# qhasm: bytes = arg2
384# asm 1: mov <arg2=int64#2,>bytes=int64#6
385# asm 2: mov <arg2=%rsi,>bytes=%r9
386mov %rsi,%r9
387
388# qhasm: out = arg1
389# asm 1: mov <arg1=int64#1,>out=int64#1
390# asm 2: mov <arg1=%rdi,>out=%rdi
391mov %rdi,%rdi
392
393# qhasm: m = out
394# asm 1: mov <out=int64#1,>m=int64#2
395# asm 2: mov <out=%rdi,>m=%rsi
396mov %rdi,%rsi
397
398# qhasm: iv = arg3
399# asm 1: mov <arg3=int64#3,>iv=int64#3
400# asm 2: mov <arg3=%rdx,>iv=%rdx
401mov %rdx,%rdx
402
403# qhasm: k = arg4
404# asm 1: mov <arg4=int64#4,>k=int64#8
405# asm 2: mov <arg4=%rcx,>k=%r10
406mov %rcx,%r10
407
408# qhasm: unsigned>? bytes - 0
409# asm 1: cmp $0,<bytes=int64#6
410# asm 2: cmp $0,<bytes=%r9
411cmp $0,%r9
412# comment:fp stack unchanged by jump
413
414# qhasm: goto done if !unsigned>
415jbe ._done
416
417# qhasm: a = 0
418# asm 1: mov $0,>a=int64#7
419# asm 2: mov $0,>a=%rax
420mov $0,%rax
421
422# qhasm: i = bytes
423# asm 1: mov <bytes=int64#6,>i=int64#4
424# asm 2: mov <bytes=%r9,>i=%rcx
425mov %r9,%rcx
426
427# qhasm: while (i) { *out++ = a; --i }
428rep stosb
429
430# qhasm: out -= bytes
431# asm 1: sub <bytes=int64#6,<out=int64#1
432# asm 2: sub <bytes=%r9,<out=%rdi
433sub %r9,%rdi
434# comment:fp stack unchanged by jump
435
436# qhasm: goto start
437jmp ._start
438
439# qhasm: enter crypto_stream_salsa208_amd64_xmm6_xor
440.text
441.p2align 5
442.globl _crypto_stream_salsa208_amd64_xmm6_xor
443.globl crypto_stream_salsa208_amd64_xmm6_xor
444_crypto_stream_salsa208_amd64_xmm6_xor:
445crypto_stream_salsa208_amd64_xmm6_xor:
446mov %rsp,%r11
447and $31,%r11
448add $480,%r11
449sub %r11,%rsp
450
451# qhasm: r11_stack = r11_caller
452# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
453# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
454movq %r11,352(%rsp)
455
456# qhasm: r12_stack = r12_caller
457# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
458# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
459movq %r12,360(%rsp)
460
461# qhasm: r13_stack = r13_caller
462# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
463# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
464movq %r13,368(%rsp)
465
466# qhasm: r14_stack = r14_caller
467# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
468# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
469movq %r14,376(%rsp)
470
471# qhasm: r15_stack = r15_caller
472# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
473# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
474movq %r15,384(%rsp)
475
476# qhasm: rbx_stack = rbx_caller
477# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
478# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
479movq %rbx,392(%rsp)
480
481# qhasm: rbp_stack = rbp_caller
482# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
483# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
484movq %rbp,400(%rsp)
485
486# qhasm: out = arg1
487# asm 1: mov <arg1=int64#1,>out=int64#1
488# asm 2: mov <arg1=%rdi,>out=%rdi
489mov %rdi,%rdi
490
491# qhasm: m = arg2
492# asm 1: mov <arg2=int64#2,>m=int64#2
493# asm 2: mov <arg2=%rsi,>m=%rsi
494mov %rsi,%rsi
495
496# qhasm: bytes = arg3
497# asm 1: mov <arg3=int64#3,>bytes=int64#6
498# asm 2: mov <arg3=%rdx,>bytes=%r9
499mov %rdx,%r9
500
501# qhasm: iv = arg4
502# asm 1: mov <arg4=int64#4,>iv=int64#3
503# asm 2: mov <arg4=%rcx,>iv=%rdx
504mov %rcx,%rdx
505
506# qhasm: k = arg5
507# asm 1: mov <arg5=int64#5,>k=int64#8
508# asm 2: mov <arg5=%r8,>k=%r10
509mov %r8,%r10
510
511# qhasm: unsigned>? bytes - 0
512# asm 1: cmp $0,<bytes=int64#6
513# asm 2: cmp $0,<bytes=%r9
514cmp $0,%r9
515# comment:fp stack unchanged by jump
516
517# qhasm: goto done if !unsigned>
518jbe ._done
519# comment:fp stack unchanged by fallthrough
520
521# qhasm: start:
522._start:
523
524# qhasm: in12 = *(uint32 *) (k + 20)
525# asm 1: movl 20(<k=int64#8),>in12=int64#4d
526# asm 2: movl 20(<k=%r10),>in12=%ecx
527movl 20(%r10),%ecx
528
529# qhasm: in1 = *(uint32 *) (k + 0)
530# asm 1: movl 0(<k=int64#8),>in1=int64#5d
531# asm 2: movl 0(<k=%r10),>in1=%r8d
532movl 0(%r10),%r8d
533
534# qhasm: in6 = *(uint32 *) (iv + 0)
535# asm 1: movl 0(<iv=int64#3),>in6=int64#7d
536# asm 2: movl 0(<iv=%rdx),>in6=%eax
537movl 0(%rdx),%eax
538
539# qhasm: in11 = *(uint32 *) (k + 16)
540# asm 1: movl 16(<k=int64#8),>in11=int64#9d
541# asm 2: movl 16(<k=%r10),>in11=%r11d
542movl 16(%r10),%r11d
543
544# qhasm: ((uint32 *)&x1)[0] = in12
545# asm 1: movl <in12=int64#4d,>x1=stack128#1
546# asm 2: movl <in12=%ecx,>x1=0(%rsp)
547movl %ecx,0(%rsp)
548
549# qhasm: ((uint32 *)&x1)[1] = in1
550# asm 1: movl <in1=int64#5d,4+<x1=stack128#1
551# asm 2: movl <in1=%r8d,4+<x1=0(%rsp)
552movl %r8d,4+0(%rsp)
553
554# qhasm: ((uint32 *)&x1)[2] = in6
555# asm 1: movl <in6=int64#7d,8+<x1=stack128#1
556# asm 2: movl <in6=%eax,8+<x1=0(%rsp)
557movl %eax,8+0(%rsp)
558
559# qhasm: ((uint32 *)&x1)[3] = in11
560# asm 1: movl <in11=int64#9d,12+<x1=stack128#1
561# asm 2: movl <in11=%r11d,12+<x1=0(%rsp)
562movl %r11d,12+0(%rsp)
563
564# qhasm: in8 = 0
565# asm 1: mov $0,>in8=int64#4
566# asm 2: mov $0,>in8=%rcx
567mov $0,%rcx
568
569# qhasm: in13 = *(uint32 *) (k + 24)
570# asm 1: movl 24(<k=int64#8),>in13=int64#5d
571# asm 2: movl 24(<k=%r10),>in13=%r8d
572movl 24(%r10),%r8d
573
574# qhasm: in2 = *(uint32 *) (k + 4)
575# asm 1: movl 4(<k=int64#8),>in2=int64#7d
576# asm 2: movl 4(<k=%r10),>in2=%eax
577movl 4(%r10),%eax
578
579# qhasm: in7 = *(uint32 *) (iv + 4)
580# asm 1: movl 4(<iv=int64#3),>in7=int64#3d
581# asm 2: movl 4(<iv=%rdx),>in7=%edx
582movl 4(%rdx),%edx
583
584# qhasm: ((uint32 *)&x2)[0] = in8
585# asm 1: movl <in8=int64#4d,>x2=stack128#2
586# asm 2: movl <in8=%ecx,>x2=16(%rsp)
587movl %ecx,16(%rsp)
588
589# qhasm: ((uint32 *)&x2)[1] = in13
590# asm 1: movl <in13=int64#5d,4+<x2=stack128#2
591# asm 2: movl <in13=%r8d,4+<x2=16(%rsp)
592movl %r8d,4+16(%rsp)
593
594# qhasm: ((uint32 *)&x2)[2] = in2
595# asm 1: movl <in2=int64#7d,8+<x2=stack128#2
596# asm 2: movl <in2=%eax,8+<x2=16(%rsp)
597movl %eax,8+16(%rsp)
598
599# qhasm: ((uint32 *)&x2)[3] = in7
600# asm 1: movl <in7=int64#3d,12+<x2=stack128#2
601# asm 2: movl <in7=%edx,12+<x2=16(%rsp)
602movl %edx,12+16(%rsp)
603
604# qhasm: in4 = *(uint32 *) (k + 12)
605# asm 1: movl 12(<k=int64#8),>in4=int64#3d
606# asm 2: movl 12(<k=%r10),>in4=%edx
607movl 12(%r10),%edx
608
609# qhasm: in9 = 0
610# asm 1: mov $0,>in9=int64#4
611# asm 2: mov $0,>in9=%rcx
612mov $0,%rcx
613
614# qhasm: in14 = *(uint32 *) (k + 28)
615# asm 1: movl 28(<k=int64#8),>in14=int64#5d
616# asm 2: movl 28(<k=%r10),>in14=%r8d
617movl 28(%r10),%r8d
618
619# qhasm: in3 = *(uint32 *) (k + 8)
620# asm 1: movl 8(<k=int64#8),>in3=int64#7d
621# asm 2: movl 8(<k=%r10),>in3=%eax
622movl 8(%r10),%eax
623
624# qhasm: ((uint32 *)&x3)[0] = in4
625# asm 1: movl <in4=int64#3d,>x3=stack128#3
626# asm 2: movl <in4=%edx,>x3=32(%rsp)
627movl %edx,32(%rsp)
628
629# qhasm: ((uint32 *)&x3)[1] = in9
630# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
631# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
632movl %ecx,4+32(%rsp)
633
634# qhasm: ((uint32 *)&x3)[2] = in14
635# asm 1: movl <in14=int64#5d,8+<x3=stack128#3
636# asm 2: movl <in14=%r8d,8+<x3=32(%rsp)
637movl %r8d,8+32(%rsp)
638
639# qhasm: ((uint32 *)&x3)[3] = in3
640# asm 1: movl <in3=int64#7d,12+<x3=stack128#3
641# asm 2: movl <in3=%eax,12+<x3=32(%rsp)
642movl %eax,12+32(%rsp)
643
644# qhasm: in0 = 1634760805
645# asm 1: mov $1634760805,>in0=int64#3
646# asm 2: mov $1634760805,>in0=%rdx
647mov $1634760805,%rdx
648
649# qhasm: in5 = 857760878
650# asm 1: mov $857760878,>in5=int64#4
651# asm 2: mov $857760878,>in5=%rcx
652mov $857760878,%rcx
653
654# qhasm: in10 = 2036477234
655# asm 1: mov $2036477234,>in10=int64#5
656# asm 2: mov $2036477234,>in10=%r8
657mov $2036477234,%r8
658
659# qhasm: in15 = 1797285236
660# asm 1: mov $1797285236,>in15=int64#7
661# asm 2: mov $1797285236,>in15=%rax
662mov $1797285236,%rax
663
664# qhasm: ((uint32 *)&x0)[0] = in0
665# asm 1: movl <in0=int64#3d,>x0=stack128#4
666# asm 2: movl <in0=%edx,>x0=48(%rsp)
667movl %edx,48(%rsp)
668
669# qhasm: ((uint32 *)&x0)[1] = in5
670# asm 1: movl <in5=int64#4d,4+<x0=stack128#4
671# asm 2: movl <in5=%ecx,4+<x0=48(%rsp)
672movl %ecx,4+48(%rsp)
673
674# qhasm: ((uint32 *)&x0)[2] = in10
675# asm 1: movl <in10=int64#5d,8+<x0=stack128#4
676# asm 2: movl <in10=%r8d,8+<x0=48(%rsp)
677movl %r8d,8+48(%rsp)
678
679# qhasm: ((uint32 *)&x0)[3] = in15
680# asm 1: movl <in15=int64#7d,12+<x0=stack128#4
681# asm 2: movl <in15=%eax,12+<x0=48(%rsp)
682movl %eax,12+48(%rsp)
683
684# qhasm: unsigned<? bytes - 256
685# asm 1: cmp $256,<bytes=int64#6
686# asm 2: cmp $256,<bytes=%r9
687cmp $256,%r9
688# comment:fp stack unchanged by jump
689
690# qhasm: goto bytesbetween1and255 if unsigned<
691jb ._bytesbetween1and255
692
693# qhasm: z0 = x0
694# asm 1: movdqa <x0=stack128#4,>z0=int6464#1
695# asm 2: movdqa <x0=48(%rsp),>z0=%xmm0
696movdqa 48(%rsp),%xmm0
697
698# qhasm: z5 = z0[1,1,1,1]
699# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
700# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
701pshufd $0x55,%xmm0,%xmm1
702
703# qhasm: z10 = z0[2,2,2,2]
704# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
705# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
706pshufd $0xaa,%xmm0,%xmm2
707
708# qhasm: z15 = z0[3,3,3,3]
709# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
710# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
711pshufd $0xff,%xmm0,%xmm3
712
713# qhasm: z0 = z0[0,0,0,0]
714# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
715# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
716pshufd $0x00,%xmm0,%xmm0
717
718# qhasm: orig5 = z5
719# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
720# asm 2: movdqa <z5=%xmm1,>orig5=64(%rsp)
721movdqa %xmm1,64(%rsp)
722
723# qhasm: orig10 = z10
724# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
725# asm 2: movdqa <z10=%xmm2,>orig10=80(%rsp)
726movdqa %xmm2,80(%rsp)
727
728# qhasm: orig15 = z15
729# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
730# asm 2: movdqa <z15=%xmm3,>orig15=96(%rsp)
731movdqa %xmm3,96(%rsp)
732
733# qhasm: orig0 = z0
734# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
735# asm 2: movdqa <z0=%xmm0,>orig0=112(%rsp)
736movdqa %xmm0,112(%rsp)
737
738# qhasm: z1 = x1
739# asm 1: movdqa <x1=stack128#1,>z1=int6464#1
740# asm 2: movdqa <x1=0(%rsp),>z1=%xmm0
741movdqa 0(%rsp),%xmm0
742
743# qhasm: z6 = z1[2,2,2,2]
744# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
745# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
746pshufd $0xaa,%xmm0,%xmm1
747
748# qhasm: z11 = z1[3,3,3,3]
749# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
750# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
751pshufd $0xff,%xmm0,%xmm2
752
753# qhasm: z12 = z1[0,0,0,0]
754# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
755# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
756pshufd $0x00,%xmm0,%xmm3
757
758# qhasm: z1 = z1[1,1,1,1]
759# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
760# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
761pshufd $0x55,%xmm0,%xmm0
762
763# qhasm: orig6 = z6
764# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
765# asm 2: movdqa <z6=%xmm1,>orig6=128(%rsp)
766movdqa %xmm1,128(%rsp)
767
768# qhasm: orig11 = z11
769# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
770# asm 2: movdqa <z11=%xmm2,>orig11=144(%rsp)
771movdqa %xmm2,144(%rsp)
772
773# qhasm: orig12 = z12
774# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
775# asm 2: movdqa <z12=%xmm3,>orig12=160(%rsp)
776movdqa %xmm3,160(%rsp)
777
778# qhasm: orig1 = z1
779# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
780# asm 2: movdqa <z1=%xmm0,>orig1=176(%rsp)
781movdqa %xmm0,176(%rsp)
782
783# qhasm: z2 = x2
784# asm 1: movdqa <x2=stack128#2,>z2=int6464#1
785# asm 2: movdqa <x2=16(%rsp),>z2=%xmm0
786movdqa 16(%rsp),%xmm0
787
788# qhasm: z7 = z2[3,3,3,3]
789# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
790# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
791pshufd $0xff,%xmm0,%xmm1
792
793# qhasm: z13 = z2[1,1,1,1]
794# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
795# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
796pshufd $0x55,%xmm0,%xmm2
797
798# qhasm: z2 = z2[2,2,2,2]
799# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
800# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
801pshufd $0xaa,%xmm0,%xmm0
802
803# qhasm: orig7 = z7
804# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
805# asm 2: movdqa <z7=%xmm1,>orig7=192(%rsp)
806movdqa %xmm1,192(%rsp)
807
808# qhasm: orig13 = z13
809# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
810# asm 2: movdqa <z13=%xmm2,>orig13=208(%rsp)
811movdqa %xmm2,208(%rsp)
812
813# qhasm: orig2 = z2
814# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
815# asm 2: movdqa <z2=%xmm0,>orig2=224(%rsp)
816movdqa %xmm0,224(%rsp)
817
818# qhasm: z3 = x3
819# asm 1: movdqa <x3=stack128#3,>z3=int6464#1
820# asm 2: movdqa <x3=32(%rsp),>z3=%xmm0
821movdqa 32(%rsp),%xmm0
822
823# qhasm: z4 = z3[0,0,0,0]
824# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
825# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
826pshufd $0x00,%xmm0,%xmm1
827
828# qhasm: z14 = z3[2,2,2,2]
829# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
830# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
831pshufd $0xaa,%xmm0,%xmm2
832
833# qhasm: z3 = z3[3,3,3,3]
834# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
835# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
836pshufd $0xff,%xmm0,%xmm0
837
838# qhasm: orig4 = z4
839# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
840# asm 2: movdqa <z4=%xmm1,>orig4=240(%rsp)
841movdqa %xmm1,240(%rsp)
842
843# qhasm: orig14 = z14
844# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
845# asm 2: movdqa <z14=%xmm2,>orig14=256(%rsp)
846movdqa %xmm2,256(%rsp)
847
848# qhasm: orig3 = z3
849# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
850# asm 2: movdqa <z3=%xmm0,>orig3=272(%rsp)
851movdqa %xmm0,272(%rsp)
852
853# qhasm: bytesatleast256:
854._bytesatleast256:
855
856# qhasm: in8 = ((uint32 *)&x2)[0]
857# asm 1: movl <x2=stack128#2,>in8=int64#3d
858# asm 2: movl <x2=16(%rsp),>in8=%edx
859movl 16(%rsp),%edx
860
861# qhasm: in9 = ((uint32 *)&x3)[1]
862# asm 1: movl 4+<x3=stack128#3,>in9=int64#4d
863# asm 2: movl 4+<x3=32(%rsp),>in9=%ecx
864movl 4+32(%rsp),%ecx
865
866# qhasm: ((uint32 *) &orig8)[0] = in8
867# asm 1: movl <in8=int64#3d,>orig8=stack128#19
868# asm 2: movl <in8=%edx,>orig8=288(%rsp)
869movl %edx,288(%rsp)
870
871# qhasm: ((uint32 *) &orig9)[0] = in9
872# asm 1: movl <in9=int64#4d,>orig9=stack128#20
873# asm 2: movl <in9=%ecx,>orig9=304(%rsp)
874movl %ecx,304(%rsp)
875
876# qhasm: in8 += 1
877# asm 1: add $1,<in8=int64#3
878# asm 2: add $1,<in8=%rdx
879add $1,%rdx
880
881# qhasm: in9 <<= 32
882# asm 1: shl $32,<in9=int64#4
883# asm 2: shl $32,<in9=%rcx
884shl $32,%rcx
885
886# qhasm: in8 += in9
887# asm 1: add <in9=int64#4,<in8=int64#3
888# asm 2: add <in9=%rcx,<in8=%rdx
889add %rcx,%rdx
890
891# qhasm: in9 = in8
892# asm 1: mov <in8=int64#3,>in9=int64#4
893# asm 2: mov <in8=%rdx,>in9=%rcx
894mov %rdx,%rcx
895
896# qhasm: (uint64) in9 >>= 32
897# asm 1: shr $32,<in9=int64#4
898# asm 2: shr $32,<in9=%rcx
899shr $32,%rcx
900
901# qhasm: ((uint32 *) &orig8)[1] = in8
902# asm 1: movl <in8=int64#3d,4+<orig8=stack128#19
903# asm 2: movl <in8=%edx,4+<orig8=288(%rsp)
904movl %edx,4+288(%rsp)
905
906# qhasm: ((uint32 *) &orig9)[1] = in9
907# asm 1: movl <in9=int64#4d,4+<orig9=stack128#20
908# asm 2: movl <in9=%ecx,4+<orig9=304(%rsp)
909movl %ecx,4+304(%rsp)
910
911# qhasm: in8 += 1
912# asm 1: add $1,<in8=int64#3
913# asm 2: add $1,<in8=%rdx
914add $1,%rdx
915
916# qhasm: in9 <<= 32
917# asm 1: shl $32,<in9=int64#4
918# asm 2: shl $32,<in9=%rcx
919shl $32,%rcx
920
921# qhasm: in8 += in9
922# asm 1: add <in9=int64#4,<in8=int64#3
923# asm 2: add <in9=%rcx,<in8=%rdx
924add %rcx,%rdx
925
926# qhasm: in9 = in8
927# asm 1: mov <in8=int64#3,>in9=int64#4
928# asm 2: mov <in8=%rdx,>in9=%rcx
929mov %rdx,%rcx
930
931# qhasm: (uint64) in9 >>= 32
932# asm 1: shr $32,<in9=int64#4
933# asm 2: shr $32,<in9=%rcx
934shr $32,%rcx
935
936# qhasm: ((uint32 *) &orig8)[2] = in8
937# asm 1: movl <in8=int64#3d,8+<orig8=stack128#19
938# asm 2: movl <in8=%edx,8+<orig8=288(%rsp)
939movl %edx,8+288(%rsp)
940
941# qhasm: ((uint32 *) &orig9)[2] = in9
942# asm 1: movl <in9=int64#4d,8+<orig9=stack128#20
943# asm 2: movl <in9=%ecx,8+<orig9=304(%rsp)
944movl %ecx,8+304(%rsp)
945
946# qhasm: in8 += 1
947# asm 1: add $1,<in8=int64#3
948# asm 2: add $1,<in8=%rdx
949add $1,%rdx
950
951# qhasm: in9 <<= 32
952# asm 1: shl $32,<in9=int64#4
953# asm 2: shl $32,<in9=%rcx
954shl $32,%rcx
955
956# qhasm: in8 += in9
957# asm 1: add <in9=int64#4,<in8=int64#3
958# asm 2: add <in9=%rcx,<in8=%rdx
959add %rcx,%rdx
960
961# qhasm: in9 = in8
962# asm 1: mov <in8=int64#3,>in9=int64#4
963# asm 2: mov <in8=%rdx,>in9=%rcx
964mov %rdx,%rcx
965
966# qhasm: (uint64) in9 >>= 32
967# asm 1: shr $32,<in9=int64#4
968# asm 2: shr $32,<in9=%rcx
969shr $32,%rcx
970
971# qhasm: ((uint32 *) &orig8)[3] = in8
972# asm 1: movl <in8=int64#3d,12+<orig8=stack128#19
973# asm 2: movl <in8=%edx,12+<orig8=288(%rsp)
974movl %edx,12+288(%rsp)
975
976# qhasm: ((uint32 *) &orig9)[3] = in9
977# asm 1: movl <in9=int64#4d,12+<orig9=stack128#20
978# asm 2: movl <in9=%ecx,12+<orig9=304(%rsp)
979movl %ecx,12+304(%rsp)
980
981# qhasm: in8 += 1
982# asm 1: add $1,<in8=int64#3
983# asm 2: add $1,<in8=%rdx
984add $1,%rdx
985
986# qhasm: in9 <<= 32
987# asm 1: shl $32,<in9=int64#4
988# asm 2: shl $32,<in9=%rcx
989shl $32,%rcx
990
991# qhasm: in8 += in9
992# asm 1: add <in9=int64#4,<in8=int64#3
993# asm 2: add <in9=%rcx,<in8=%rdx
994add %rcx,%rdx
995
996# qhasm: in9 = in8
997# asm 1: mov <in8=int64#3,>in9=int64#4
998# asm 2: mov <in8=%rdx,>in9=%rcx
999mov %rdx,%rcx
1000
1001# qhasm: (uint64) in9 >>= 32
1002# asm 1: shr $32,<in9=int64#4
1003# asm 2: shr $32,<in9=%rcx
1004shr $32,%rcx
1005
1006# qhasm: ((uint32 *)&x2)[0] = in8
1007# asm 1: movl <in8=int64#3d,>x2=stack128#2
1008# asm 2: movl <in8=%edx,>x2=16(%rsp)
1009movl %edx,16(%rsp)
1010
1011# qhasm: ((uint32 *)&x3)[1] = in9
1012# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
1013# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
1014movl %ecx,4+32(%rsp)
1015
1016# qhasm: bytes_backup = bytes
1017# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
1018# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
1019movq %r9,408(%rsp)
1020
1021# qhasm: i = 8
1022# asm 1: mov $8,>i=int64#3
1023# asm 2: mov $8,>i=%rdx
1024mov $8,%rdx
1025
1026# qhasm: z5 = orig5
1027# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
1028# asm 2: movdqa <orig5=64(%rsp),>z5=%xmm0
1029movdqa 64(%rsp),%xmm0
1030
1031# qhasm: z10 = orig10
1032# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
1033# asm 2: movdqa <orig10=80(%rsp),>z10=%xmm1
1034movdqa 80(%rsp),%xmm1
1035
1036# qhasm: z15 = orig15
1037# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
1038# asm 2: movdqa <orig15=96(%rsp),>z15=%xmm2
1039movdqa 96(%rsp),%xmm2
1040
1041# qhasm: z14 = orig14
1042# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
1043# asm 2: movdqa <orig14=256(%rsp),>z14=%xmm3
1044movdqa 256(%rsp),%xmm3
1045
1046# qhasm: z3 = orig3
1047# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
1048# asm 2: movdqa <orig3=272(%rsp),>z3=%xmm4
1049movdqa 272(%rsp),%xmm4
1050
1051# qhasm: z6 = orig6
1052# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
1053# asm 2: movdqa <orig6=128(%rsp),>z6=%xmm5
1054movdqa 128(%rsp),%xmm5
1055
1056# qhasm: z11 = orig11
1057# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
1058# asm 2: movdqa <orig11=144(%rsp),>z11=%xmm6
1059movdqa 144(%rsp),%xmm6
1060
1061# qhasm: z1 = orig1
1062# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
1063# asm 2: movdqa <orig1=176(%rsp),>z1=%xmm7
1064movdqa 176(%rsp),%xmm7
1065
1066# qhasm: z7 = orig7
1067# asm 1: movdqa <orig7=stack128#13,>z7=int6464#9
1068# asm 2: movdqa <orig7=192(%rsp),>z7=%xmm8
1069movdqa 192(%rsp),%xmm8
1070
1071# qhasm: z13 = orig13
1072# asm 1: movdqa <orig13=stack128#14,>z13=int6464#10
1073# asm 2: movdqa <orig13=208(%rsp),>z13=%xmm9
1074movdqa 208(%rsp),%xmm9
1075
1076# qhasm: z2 = orig2
1077# asm 1: movdqa <orig2=stack128#15,>z2=int6464#11
1078# asm 2: movdqa <orig2=224(%rsp),>z2=%xmm10
1079movdqa 224(%rsp),%xmm10
1080
1081# qhasm: z9 = orig9
1082# asm 1: movdqa <orig9=stack128#20,>z9=int6464#12
1083# asm 2: movdqa <orig9=304(%rsp),>z9=%xmm11
1084movdqa 304(%rsp),%xmm11
1085
1086# qhasm: z0 = orig0
1087# asm 1: movdqa <orig0=stack128#8,>z0=int6464#13
1088# asm 2: movdqa <orig0=112(%rsp),>z0=%xmm12
1089movdqa 112(%rsp),%xmm12
1090
1091# qhasm: z12 = orig12
1092# asm 1: movdqa <orig12=stack128#11,>z12=int6464#14
1093# asm 2: movdqa <orig12=160(%rsp),>z12=%xmm13
1094movdqa 160(%rsp),%xmm13
1095
1096# qhasm: z4 = orig4
1097# asm 1: movdqa <orig4=stack128#16,>z4=int6464#15
1098# asm 2: movdqa <orig4=240(%rsp),>z4=%xmm14
1099movdqa 240(%rsp),%xmm14
1100
1101# qhasm: z8 = orig8
1102# asm 1: movdqa <orig8=stack128#19,>z8=int6464#16
1103# asm 2: movdqa <orig8=288(%rsp),>z8=%xmm15
1104movdqa 288(%rsp),%xmm15
1105
1106# qhasm: mainloop1:
1107._mainloop1:
1108
1109# qhasm: z10_stack = z10
1110# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1111# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1112movdqa %xmm1,320(%rsp)
1113
1114# qhasm: z15_stack = z15
1115# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1116# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1117movdqa %xmm2,336(%rsp)
1118
1119# qhasm: y4 = z12
1120# asm 1: movdqa <z12=int6464#14,>y4=int6464#2
1121# asm 2: movdqa <z12=%xmm13,>y4=%xmm1
1122movdqa %xmm13,%xmm1
1123
1124# qhasm: uint32323232 y4 += z0
1125# asm 1: paddd <z0=int6464#13,<y4=int6464#2
1126# asm 2: paddd <z0=%xmm12,<y4=%xmm1
1127paddd %xmm12,%xmm1
1128
1129# qhasm: r4 = y4
1130# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1131# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1132movdqa %xmm1,%xmm2
1133
1134# qhasm: uint32323232 y4 <<= 7
1135# asm 1: pslld $7,<y4=int6464#2
1136# asm 2: pslld $7,<y4=%xmm1
1137pslld $7,%xmm1
1138
1139# qhasm: z4 ^= y4
1140# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1141# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1142pxor %xmm1,%xmm14
1143
1144# qhasm: uint32323232 r4 >>= 25
1145# asm 1: psrld $25,<r4=int6464#3
1146# asm 2: psrld $25,<r4=%xmm2
1147psrld $25,%xmm2
1148
1149# qhasm: z4 ^= r4
1150# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1151# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1152pxor %xmm2,%xmm14
1153
1154# qhasm: y9 = z1
1155# asm 1: movdqa <z1=int6464#8,>y9=int6464#2
1156# asm 2: movdqa <z1=%xmm7,>y9=%xmm1
1157movdqa %xmm7,%xmm1
1158
1159# qhasm: uint32323232 y9 += z5
1160# asm 1: paddd <z5=int6464#1,<y9=int6464#2
1161# asm 2: paddd <z5=%xmm0,<y9=%xmm1
1162paddd %xmm0,%xmm1
1163
1164# qhasm: r9 = y9
1165# asm 1: movdqa <y9=int6464#2,>r9=int6464#3
1166# asm 2: movdqa <y9=%xmm1,>r9=%xmm2
1167movdqa %xmm1,%xmm2
1168
1169# qhasm: uint32323232 y9 <<= 7
1170# asm 1: pslld $7,<y9=int6464#2
1171# asm 2: pslld $7,<y9=%xmm1
1172pslld $7,%xmm1
1173
1174# qhasm: z9 ^= y9
1175# asm 1: pxor <y9=int6464#2,<z9=int6464#12
1176# asm 2: pxor <y9=%xmm1,<z9=%xmm11
1177pxor %xmm1,%xmm11
1178
1179# qhasm: uint32323232 r9 >>= 25
1180# asm 1: psrld $25,<r9=int6464#3
1181# asm 2: psrld $25,<r9=%xmm2
1182psrld $25,%xmm2
1183
1184# qhasm: z9 ^= r9
1185# asm 1: pxor <r9=int6464#3,<z9=int6464#12
1186# asm 2: pxor <r9=%xmm2,<z9=%xmm11
1187pxor %xmm2,%xmm11
1188
1189# qhasm: y8 = z0
1190# asm 1: movdqa <z0=int6464#13,>y8=int6464#2
1191# asm 2: movdqa <z0=%xmm12,>y8=%xmm1
1192movdqa %xmm12,%xmm1
1193
1194# qhasm: uint32323232 y8 += z4
1195# asm 1: paddd <z4=int6464#15,<y8=int6464#2
1196# asm 2: paddd <z4=%xmm14,<y8=%xmm1
1197paddd %xmm14,%xmm1
1198
1199# qhasm: r8 = y8
1200# asm 1: movdqa <y8=int6464#2,>r8=int6464#3
1201# asm 2: movdqa <y8=%xmm1,>r8=%xmm2
1202movdqa %xmm1,%xmm2
1203
1204# qhasm: uint32323232 y8 <<= 9
1205# asm 1: pslld $9,<y8=int6464#2
1206# asm 2: pslld $9,<y8=%xmm1
1207pslld $9,%xmm1
1208
1209# qhasm: z8 ^= y8
1210# asm 1: pxor <y8=int6464#2,<z8=int6464#16
1211# asm 2: pxor <y8=%xmm1,<z8=%xmm15
1212pxor %xmm1,%xmm15
1213
1214# qhasm: uint32323232 r8 >>= 23
1215# asm 1: psrld $23,<r8=int6464#3
1216# asm 2: psrld $23,<r8=%xmm2
1217psrld $23,%xmm2
1218
1219# qhasm: z8 ^= r8
1220# asm 1: pxor <r8=int6464#3,<z8=int6464#16
1221# asm 2: pxor <r8=%xmm2,<z8=%xmm15
1222pxor %xmm2,%xmm15
1223
1224# qhasm: y13 = z5
1225# asm 1: movdqa <z5=int6464#1,>y13=int6464#2
1226# asm 2: movdqa <z5=%xmm0,>y13=%xmm1
1227movdqa %xmm0,%xmm1
1228
1229# qhasm: uint32323232 y13 += z9
1230# asm 1: paddd <z9=int6464#12,<y13=int6464#2
1231# asm 2: paddd <z9=%xmm11,<y13=%xmm1
1232paddd %xmm11,%xmm1
1233
1234# qhasm: r13 = y13
1235# asm 1: movdqa <y13=int6464#2,>r13=int6464#3
1236# asm 2: movdqa <y13=%xmm1,>r13=%xmm2
1237movdqa %xmm1,%xmm2
1238
1239# qhasm: uint32323232 y13 <<= 9
1240# asm 1: pslld $9,<y13=int6464#2
1241# asm 2: pslld $9,<y13=%xmm1
1242pslld $9,%xmm1
1243
1244# qhasm: z13 ^= y13
1245# asm 1: pxor <y13=int6464#2,<z13=int6464#10
1246# asm 2: pxor <y13=%xmm1,<z13=%xmm9
1247pxor %xmm1,%xmm9
1248
1249# qhasm: uint32323232 r13 >>= 23
1250# asm 1: psrld $23,<r13=int6464#3
1251# asm 2: psrld $23,<r13=%xmm2
1252psrld $23,%xmm2
1253
1254# qhasm: z13 ^= r13
1255# asm 1: pxor <r13=int6464#3,<z13=int6464#10
1256# asm 2: pxor <r13=%xmm2,<z13=%xmm9
1257pxor %xmm2,%xmm9
1258
1259# qhasm: y12 = z4
1260# asm 1: movdqa <z4=int6464#15,>y12=int6464#2
1261# asm 2: movdqa <z4=%xmm14,>y12=%xmm1
1262movdqa %xmm14,%xmm1
1263
1264# qhasm: uint32323232 y12 += z8
1265# asm 1: paddd <z8=int6464#16,<y12=int6464#2
1266# asm 2: paddd <z8=%xmm15,<y12=%xmm1
1267paddd %xmm15,%xmm1
1268
1269# qhasm: r12 = y12
1270# asm 1: movdqa <y12=int6464#2,>r12=int6464#3
1271# asm 2: movdqa <y12=%xmm1,>r12=%xmm2
1272movdqa %xmm1,%xmm2
1273
1274# qhasm: uint32323232 y12 <<= 13
1275# asm 1: pslld $13,<y12=int6464#2
1276# asm 2: pslld $13,<y12=%xmm1
1277pslld $13,%xmm1
1278
1279# qhasm: z12 ^= y12
1280# asm 1: pxor <y12=int6464#2,<z12=int6464#14
1281# asm 2: pxor <y12=%xmm1,<z12=%xmm13
1282pxor %xmm1,%xmm13
1283
1284# qhasm: uint32323232 r12 >>= 19
1285# asm 1: psrld $19,<r12=int6464#3
1286# asm 2: psrld $19,<r12=%xmm2
1287psrld $19,%xmm2
1288
1289# qhasm: z12 ^= r12
1290# asm 1: pxor <r12=int6464#3,<z12=int6464#14
1291# asm 2: pxor <r12=%xmm2,<z12=%xmm13
1292pxor %xmm2,%xmm13
1293
1294# qhasm: y1 = z9
1295# asm 1: movdqa <z9=int6464#12,>y1=int6464#2
1296# asm 2: movdqa <z9=%xmm11,>y1=%xmm1
1297movdqa %xmm11,%xmm1
1298
1299# qhasm: uint32323232 y1 += z13
1300# asm 1: paddd <z13=int6464#10,<y1=int6464#2
1301# asm 2: paddd <z13=%xmm9,<y1=%xmm1
1302paddd %xmm9,%xmm1
1303
1304# qhasm: r1 = y1
1305# asm 1: movdqa <y1=int6464#2,>r1=int6464#3
1306# asm 2: movdqa <y1=%xmm1,>r1=%xmm2
1307movdqa %xmm1,%xmm2
1308
1309# qhasm: uint32323232 y1 <<= 13
1310# asm 1: pslld $13,<y1=int6464#2
1311# asm 2: pslld $13,<y1=%xmm1
1312pslld $13,%xmm1
1313
1314# qhasm: z1 ^= y1
1315# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1316# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1317pxor %xmm1,%xmm7
1318
1319# qhasm: uint32323232 r1 >>= 19
1320# asm 1: psrld $19,<r1=int6464#3
1321# asm 2: psrld $19,<r1=%xmm2
1322psrld $19,%xmm2
1323
1324# qhasm: z1 ^= r1
1325# asm 1: pxor <r1=int6464#3,<z1=int6464#8
1326# asm 2: pxor <r1=%xmm2,<z1=%xmm7
1327pxor %xmm2,%xmm7
1328
1329# qhasm: y0 = z8
1330# asm 1: movdqa <z8=int6464#16,>y0=int6464#2
1331# asm 2: movdqa <z8=%xmm15,>y0=%xmm1
1332movdqa %xmm15,%xmm1
1333
1334# qhasm: uint32323232 y0 += z12
1335# asm 1: paddd <z12=int6464#14,<y0=int6464#2
1336# asm 2: paddd <z12=%xmm13,<y0=%xmm1
1337paddd %xmm13,%xmm1
1338
1339# qhasm: r0 = y0
1340# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1341# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1342movdqa %xmm1,%xmm2
1343
1344# qhasm: uint32323232 y0 <<= 18
1345# asm 1: pslld $18,<y0=int6464#2
1346# asm 2: pslld $18,<y0=%xmm1
1347pslld $18,%xmm1
1348
1349# qhasm: z0 ^= y0
1350# asm 1: pxor <y0=int6464#2,<z0=int6464#13
1351# asm 2: pxor <y0=%xmm1,<z0=%xmm12
1352pxor %xmm1,%xmm12
1353
1354# qhasm: uint32323232 r0 >>= 14
1355# asm 1: psrld $14,<r0=int6464#3
1356# asm 2: psrld $14,<r0=%xmm2
1357psrld $14,%xmm2
1358
1359# qhasm: z0 ^= r0
1360# asm 1: pxor <r0=int6464#3,<z0=int6464#13
1361# asm 2: pxor <r0=%xmm2,<z0=%xmm12
1362pxor %xmm2,%xmm12
1363
1364# qhasm: z10 = z10_stack
1365# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1366# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1367movdqa 320(%rsp),%xmm1
1368
1369# qhasm: z0_stack = z0
1370# asm 1: movdqa <z0=int6464#13,>z0_stack=stack128#21
1371# asm 2: movdqa <z0=%xmm12,>z0_stack=320(%rsp)
1372movdqa %xmm12,320(%rsp)
1373
1374# qhasm: y5 = z13
1375# asm 1: movdqa <z13=int6464#10,>y5=int6464#3
1376# asm 2: movdqa <z13=%xmm9,>y5=%xmm2
1377movdqa %xmm9,%xmm2
1378
1379# qhasm: uint32323232 y5 += z1
1380# asm 1: paddd <z1=int6464#8,<y5=int6464#3
1381# asm 2: paddd <z1=%xmm7,<y5=%xmm2
1382paddd %xmm7,%xmm2
1383
1384# qhasm: r5 = y5
1385# asm 1: movdqa <y5=int6464#3,>r5=int6464#13
1386# asm 2: movdqa <y5=%xmm2,>r5=%xmm12
1387movdqa %xmm2,%xmm12
1388
1389# qhasm: uint32323232 y5 <<= 18
1390# asm 1: pslld $18,<y5=int6464#3
1391# asm 2: pslld $18,<y5=%xmm2
1392pslld $18,%xmm2
1393
1394# qhasm: z5 ^= y5
1395# asm 1: pxor <y5=int6464#3,<z5=int6464#1
1396# asm 2: pxor <y5=%xmm2,<z5=%xmm0
1397pxor %xmm2,%xmm0
1398
1399# qhasm: uint32323232 r5 >>= 14
1400# asm 1: psrld $14,<r5=int6464#13
1401# asm 2: psrld $14,<r5=%xmm12
1402psrld $14,%xmm12
1403
1404# qhasm: z5 ^= r5
1405# asm 1: pxor <r5=int6464#13,<z5=int6464#1
1406# asm 2: pxor <r5=%xmm12,<z5=%xmm0
1407pxor %xmm12,%xmm0
1408
1409# qhasm: y14 = z6
1410# asm 1: movdqa <z6=int6464#6,>y14=int6464#3
1411# asm 2: movdqa <z6=%xmm5,>y14=%xmm2
1412movdqa %xmm5,%xmm2
1413
1414# qhasm: uint32323232 y14 += z10
1415# asm 1: paddd <z10=int6464#2,<y14=int6464#3
1416# asm 2: paddd <z10=%xmm1,<y14=%xmm2
1417paddd %xmm1,%xmm2
1418
1419# qhasm: r14 = y14
1420# asm 1: movdqa <y14=int6464#3,>r14=int6464#13
1421# asm 2: movdqa <y14=%xmm2,>r14=%xmm12
1422movdqa %xmm2,%xmm12
1423
1424# qhasm: uint32323232 y14 <<= 7
1425# asm 1: pslld $7,<y14=int6464#3
1426# asm 2: pslld $7,<y14=%xmm2
1427pslld $7,%xmm2
1428
1429# qhasm: z14 ^= y14
1430# asm 1: pxor <y14=int6464#3,<z14=int6464#4
1431# asm 2: pxor <y14=%xmm2,<z14=%xmm3
1432pxor %xmm2,%xmm3
1433
1434# qhasm: uint32323232 r14 >>= 25
1435# asm 1: psrld $25,<r14=int6464#13
1436# asm 2: psrld $25,<r14=%xmm12
1437psrld $25,%xmm12
1438
1439# qhasm: z14 ^= r14
1440# asm 1: pxor <r14=int6464#13,<z14=int6464#4
1441# asm 2: pxor <r14=%xmm12,<z14=%xmm3
1442pxor %xmm12,%xmm3
1443
1444# qhasm: z15 = z15_stack
1445# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
1446# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
1447movdqa 336(%rsp),%xmm2
1448
1449# qhasm: z5_stack = z5
1450# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#22
1451# asm 2: movdqa <z5=%xmm0,>z5_stack=336(%rsp)
1452movdqa %xmm0,336(%rsp)
1453
1454# qhasm: y3 = z11
1455# asm 1: movdqa <z11=int6464#7,>y3=int6464#1
1456# asm 2: movdqa <z11=%xmm6,>y3=%xmm0
1457movdqa %xmm6,%xmm0
1458
1459# qhasm: uint32323232 y3 += z15
1460# asm 1: paddd <z15=int6464#3,<y3=int6464#1
1461# asm 2: paddd <z15=%xmm2,<y3=%xmm0
1462paddd %xmm2,%xmm0
1463
1464# qhasm: r3 = y3
1465# asm 1: movdqa <y3=int6464#1,>r3=int6464#13
1466# asm 2: movdqa <y3=%xmm0,>r3=%xmm12
1467movdqa %xmm0,%xmm12
1468
1469# qhasm: uint32323232 y3 <<= 7
1470# asm 1: pslld $7,<y3=int6464#1
1471# asm 2: pslld $7,<y3=%xmm0
1472pslld $7,%xmm0
1473
1474# qhasm: z3 ^= y3
1475# asm 1: pxor <y3=int6464#1,<z3=int6464#5
1476# asm 2: pxor <y3=%xmm0,<z3=%xmm4
1477pxor %xmm0,%xmm4
1478
1479# qhasm: uint32323232 r3 >>= 25
1480# asm 1: psrld $25,<r3=int6464#13
1481# asm 2: psrld $25,<r3=%xmm12
1482psrld $25,%xmm12
1483
1484# qhasm: z3 ^= r3
1485# asm 1: pxor <r3=int6464#13,<z3=int6464#5
1486# asm 2: pxor <r3=%xmm12,<z3=%xmm4
1487pxor %xmm12,%xmm4
1488
1489# qhasm: y2 = z10
1490# asm 1: movdqa <z10=int6464#2,>y2=int6464#1
1491# asm 2: movdqa <z10=%xmm1,>y2=%xmm0
1492movdqa %xmm1,%xmm0
1493
1494# qhasm: uint32323232 y2 += z14
1495# asm 1: paddd <z14=int6464#4,<y2=int6464#1
1496# asm 2: paddd <z14=%xmm3,<y2=%xmm0
1497paddd %xmm3,%xmm0
1498
1499# qhasm: r2 = y2
1500# asm 1: movdqa <y2=int6464#1,>r2=int6464#13
1501# asm 2: movdqa <y2=%xmm0,>r2=%xmm12
1502movdqa %xmm0,%xmm12
1503
1504# qhasm: uint32323232 y2 <<= 9
1505# asm 1: pslld $9,<y2=int6464#1
1506# asm 2: pslld $9,<y2=%xmm0
1507pslld $9,%xmm0
1508
1509# qhasm: z2 ^= y2
1510# asm 1: pxor <y2=int6464#1,<z2=int6464#11
1511# asm 2: pxor <y2=%xmm0,<z2=%xmm10
1512pxor %xmm0,%xmm10
1513
1514# qhasm: uint32323232 r2 >>= 23
1515# asm 1: psrld $23,<r2=int6464#13
1516# asm 2: psrld $23,<r2=%xmm12
1517psrld $23,%xmm12
1518
1519# qhasm: z2 ^= r2
1520# asm 1: pxor <r2=int6464#13,<z2=int6464#11
1521# asm 2: pxor <r2=%xmm12,<z2=%xmm10
1522pxor %xmm12,%xmm10
1523
1524# qhasm: y7 = z15
1525# asm 1: movdqa <z15=int6464#3,>y7=int6464#1
1526# asm 2: movdqa <z15=%xmm2,>y7=%xmm0
1527movdqa %xmm2,%xmm0
1528
1529# qhasm: uint32323232 y7 += z3
1530# asm 1: paddd <z3=int6464#5,<y7=int6464#1
1531# asm 2: paddd <z3=%xmm4,<y7=%xmm0
1532paddd %xmm4,%xmm0
1533
1534# qhasm: r7 = y7
1535# asm 1: movdqa <y7=int6464#1,>r7=int6464#13
1536# asm 2: movdqa <y7=%xmm0,>r7=%xmm12
1537movdqa %xmm0,%xmm12
1538
1539# qhasm: uint32323232 y7 <<= 9
1540# asm 1: pslld $9,<y7=int6464#1
1541# asm 2: pslld $9,<y7=%xmm0
1542pslld $9,%xmm0
1543
1544# qhasm: z7 ^= y7
1545# asm 1: pxor <y7=int6464#1,<z7=int6464#9
1546# asm 2: pxor <y7=%xmm0,<z7=%xmm8
1547pxor %xmm0,%xmm8
1548
1549# qhasm: uint32323232 r7 >>= 23
1550# asm 1: psrld $23,<r7=int6464#13
1551# asm 2: psrld $23,<r7=%xmm12
1552psrld $23,%xmm12
1553
1554# qhasm: z7 ^= r7
1555# asm 1: pxor <r7=int6464#13,<z7=int6464#9
1556# asm 2: pxor <r7=%xmm12,<z7=%xmm8
1557pxor %xmm12,%xmm8
1558
1559# qhasm: y6 = z14
1560# asm 1: movdqa <z14=int6464#4,>y6=int6464#1
1561# asm 2: movdqa <z14=%xmm3,>y6=%xmm0
1562movdqa %xmm3,%xmm0
1563
1564# qhasm: uint32323232 y6 += z2
1565# asm 1: paddd <z2=int6464#11,<y6=int6464#1
1566# asm 2: paddd <z2=%xmm10,<y6=%xmm0
1567paddd %xmm10,%xmm0
1568
1569# qhasm: r6 = y6
1570# asm 1: movdqa <y6=int6464#1,>r6=int6464#13
1571# asm 2: movdqa <y6=%xmm0,>r6=%xmm12
1572movdqa %xmm0,%xmm12
1573
1574# qhasm: uint32323232 y6 <<= 13
1575# asm 1: pslld $13,<y6=int6464#1
1576# asm 2: pslld $13,<y6=%xmm0
1577pslld $13,%xmm0
1578
1579# qhasm: z6 ^= y6
1580# asm 1: pxor <y6=int6464#1,<z6=int6464#6
1581# asm 2: pxor <y6=%xmm0,<z6=%xmm5
1582pxor %xmm0,%xmm5
1583
1584# qhasm: uint32323232 r6 >>= 19
1585# asm 1: psrld $19,<r6=int6464#13
1586# asm 2: psrld $19,<r6=%xmm12
1587psrld $19,%xmm12
1588
1589# qhasm: z6 ^= r6
1590# asm 1: pxor <r6=int6464#13,<z6=int6464#6
1591# asm 2: pxor <r6=%xmm12,<z6=%xmm5
1592pxor %xmm12,%xmm5
1593
1594# qhasm: y11 = z3
1595# asm 1: movdqa <z3=int6464#5,>y11=int6464#1
1596# asm 2: movdqa <z3=%xmm4,>y11=%xmm0
1597movdqa %xmm4,%xmm0
1598
1599# qhasm: uint32323232 y11 += z7
1600# asm 1: paddd <z7=int6464#9,<y11=int6464#1
1601# asm 2: paddd <z7=%xmm8,<y11=%xmm0
1602paddd %xmm8,%xmm0
1603
1604# qhasm: r11 = y11
1605# asm 1: movdqa <y11=int6464#1,>r11=int6464#13
1606# asm 2: movdqa <y11=%xmm0,>r11=%xmm12
1607movdqa %xmm0,%xmm12
1608
1609# qhasm: uint32323232 y11 <<= 13
1610# asm 1: pslld $13,<y11=int6464#1
1611# asm 2: pslld $13,<y11=%xmm0
1612pslld $13,%xmm0
1613
1614# qhasm: z11 ^= y11
1615# asm 1: pxor <y11=int6464#1,<z11=int6464#7
1616# asm 2: pxor <y11=%xmm0,<z11=%xmm6
1617pxor %xmm0,%xmm6
1618
1619# qhasm: uint32323232 r11 >>= 19
1620# asm 1: psrld $19,<r11=int6464#13
1621# asm 2: psrld $19,<r11=%xmm12
1622psrld $19,%xmm12
1623
1624# qhasm: z11 ^= r11
1625# asm 1: pxor <r11=int6464#13,<z11=int6464#7
1626# asm 2: pxor <r11=%xmm12,<z11=%xmm6
1627pxor %xmm12,%xmm6
1628
1629# qhasm: y10 = z2
1630# asm 1: movdqa <z2=int6464#11,>y10=int6464#1
1631# asm 2: movdqa <z2=%xmm10,>y10=%xmm0
1632movdqa %xmm10,%xmm0
1633
1634# qhasm: uint32323232 y10 += z6
1635# asm 1: paddd <z6=int6464#6,<y10=int6464#1
1636# asm 2: paddd <z6=%xmm5,<y10=%xmm0
1637paddd %xmm5,%xmm0
1638
1639# qhasm: r10 = y10
1640# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
1641# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
1642movdqa %xmm0,%xmm12
1643
1644# qhasm: uint32323232 y10 <<= 18
1645# asm 1: pslld $18,<y10=int6464#1
1646# asm 2: pslld $18,<y10=%xmm0
1647pslld $18,%xmm0
1648
1649# qhasm: z10 ^= y10
1650# asm 1: pxor <y10=int6464#1,<z10=int6464#2
1651# asm 2: pxor <y10=%xmm0,<z10=%xmm1
1652pxor %xmm0,%xmm1
1653
1654# qhasm: uint32323232 r10 >>= 14
1655# asm 1: psrld $14,<r10=int6464#13
1656# asm 2: psrld $14,<r10=%xmm12
1657psrld $14,%xmm12
1658
1659# qhasm: z10 ^= r10
1660# asm 1: pxor <r10=int6464#13,<z10=int6464#2
1661# asm 2: pxor <r10=%xmm12,<z10=%xmm1
1662pxor %xmm12,%xmm1
1663
1664# qhasm: z0 = z0_stack
1665# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#1
1666# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm0
1667movdqa 320(%rsp),%xmm0
1668
1669# qhasm: z10_stack = z10
1670# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
1671# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
1672movdqa %xmm1,320(%rsp)
1673
1674# qhasm: y1 = z3
1675# asm 1: movdqa <z3=int6464#5,>y1=int6464#2
1676# asm 2: movdqa <z3=%xmm4,>y1=%xmm1
1677movdqa %xmm4,%xmm1
1678
1679# qhasm: uint32323232 y1 += z0
1680# asm 1: paddd <z0=int6464#1,<y1=int6464#2
1681# asm 2: paddd <z0=%xmm0,<y1=%xmm1
1682paddd %xmm0,%xmm1
1683
1684# qhasm: r1 = y1
1685# asm 1: movdqa <y1=int6464#2,>r1=int6464#13
1686# asm 2: movdqa <y1=%xmm1,>r1=%xmm12
1687movdqa %xmm1,%xmm12
1688
1689# qhasm: uint32323232 y1 <<= 7
1690# asm 1: pslld $7,<y1=int6464#2
1691# asm 2: pslld $7,<y1=%xmm1
1692pslld $7,%xmm1
1693
1694# qhasm: z1 ^= y1
1695# asm 1: pxor <y1=int6464#2,<z1=int6464#8
1696# asm 2: pxor <y1=%xmm1,<z1=%xmm7
1697pxor %xmm1,%xmm7
1698
1699# qhasm: uint32323232 r1 >>= 25
1700# asm 1: psrld $25,<r1=int6464#13
1701# asm 2: psrld $25,<r1=%xmm12
1702psrld $25,%xmm12
1703
1704# qhasm: z1 ^= r1
1705# asm 1: pxor <r1=int6464#13,<z1=int6464#8
1706# asm 2: pxor <r1=%xmm12,<z1=%xmm7
1707pxor %xmm12,%xmm7
1708
1709# qhasm: y15 = z7
1710# asm 1: movdqa <z7=int6464#9,>y15=int6464#2
1711# asm 2: movdqa <z7=%xmm8,>y15=%xmm1
1712movdqa %xmm8,%xmm1
1713
1714# qhasm: uint32323232 y15 += z11
1715# asm 1: paddd <z11=int6464#7,<y15=int6464#2
1716# asm 2: paddd <z11=%xmm6,<y15=%xmm1
1717paddd %xmm6,%xmm1
1718
1719# qhasm: r15 = y15
1720# asm 1: movdqa <y15=int6464#2,>r15=int6464#13
1721# asm 2: movdqa <y15=%xmm1,>r15=%xmm12
1722movdqa %xmm1,%xmm12
1723
1724# qhasm: uint32323232 y15 <<= 18
1725# asm 1: pslld $18,<y15=int6464#2
1726# asm 2: pslld $18,<y15=%xmm1
1727pslld $18,%xmm1
1728
1729# qhasm: z15 ^= y15
1730# asm 1: pxor <y15=int6464#2,<z15=int6464#3
1731# asm 2: pxor <y15=%xmm1,<z15=%xmm2
1732pxor %xmm1,%xmm2
1733
1734# qhasm: uint32323232 r15 >>= 14
1735# asm 1: psrld $14,<r15=int6464#13
1736# asm 2: psrld $14,<r15=%xmm12
1737psrld $14,%xmm12
1738
1739# qhasm: z15 ^= r15
1740# asm 1: pxor <r15=int6464#13,<z15=int6464#3
1741# asm 2: pxor <r15=%xmm12,<z15=%xmm2
1742pxor %xmm12,%xmm2
1743
1744# qhasm: z5 = z5_stack
1745# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#13
1746# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm12
1747movdqa 336(%rsp),%xmm12
1748
1749# qhasm: z15_stack = z15
1750# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
1751# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
1752movdqa %xmm2,336(%rsp)
1753
1754# qhasm: y6 = z4
1755# asm 1: movdqa <z4=int6464#15,>y6=int6464#2
1756# asm 2: movdqa <z4=%xmm14,>y6=%xmm1
1757movdqa %xmm14,%xmm1
1758
1759# qhasm: uint32323232 y6 += z5
1760# asm 1: paddd <z5=int6464#13,<y6=int6464#2
1761# asm 2: paddd <z5=%xmm12,<y6=%xmm1
1762paddd %xmm12,%xmm1
1763
1764# qhasm: r6 = y6
1765# asm 1: movdqa <y6=int6464#2,>r6=int6464#3
1766# asm 2: movdqa <y6=%xmm1,>r6=%xmm2
1767movdqa %xmm1,%xmm2
1768
1769# qhasm: uint32323232 y6 <<= 7
1770# asm 1: pslld $7,<y6=int6464#2
1771# asm 2: pslld $7,<y6=%xmm1
1772pslld $7,%xmm1
1773
1774# qhasm: z6 ^= y6
1775# asm 1: pxor <y6=int6464#2,<z6=int6464#6
1776# asm 2: pxor <y6=%xmm1,<z6=%xmm5
1777pxor %xmm1,%xmm5
1778
1779# qhasm: uint32323232 r6 >>= 25
1780# asm 1: psrld $25,<r6=int6464#3
1781# asm 2: psrld $25,<r6=%xmm2
1782psrld $25,%xmm2
1783
1784# qhasm: z6 ^= r6
1785# asm 1: pxor <r6=int6464#3,<z6=int6464#6
1786# asm 2: pxor <r6=%xmm2,<z6=%xmm5
1787pxor %xmm2,%xmm5
1788
1789# qhasm: y2 = z0
1790# asm 1: movdqa <z0=int6464#1,>y2=int6464#2
1791# asm 2: movdqa <z0=%xmm0,>y2=%xmm1
1792movdqa %xmm0,%xmm1
1793
1794# qhasm: uint32323232 y2 += z1
1795# asm 1: paddd <z1=int6464#8,<y2=int6464#2
1796# asm 2: paddd <z1=%xmm7,<y2=%xmm1
1797paddd %xmm7,%xmm1
1798
1799# qhasm: r2 = y2
1800# asm 1: movdqa <y2=int6464#2,>r2=int6464#3
1801# asm 2: movdqa <y2=%xmm1,>r2=%xmm2
1802movdqa %xmm1,%xmm2
1803
1804# qhasm: uint32323232 y2 <<= 9
1805# asm 1: pslld $9,<y2=int6464#2
1806# asm 2: pslld $9,<y2=%xmm1
1807pslld $9,%xmm1
1808
1809# qhasm: z2 ^= y2
1810# asm 1: pxor <y2=int6464#2,<z2=int6464#11
1811# asm 2: pxor <y2=%xmm1,<z2=%xmm10
1812pxor %xmm1,%xmm10
1813
1814# qhasm: uint32323232 r2 >>= 23
1815# asm 1: psrld $23,<r2=int6464#3
1816# asm 2: psrld $23,<r2=%xmm2
1817psrld $23,%xmm2
1818
1819# qhasm: z2 ^= r2
1820# asm 1: pxor <r2=int6464#3,<z2=int6464#11
1821# asm 2: pxor <r2=%xmm2,<z2=%xmm10
1822pxor %xmm2,%xmm10
1823
1824# qhasm: y7 = z5
1825# asm 1: movdqa <z5=int6464#13,>y7=int6464#2
1826# asm 2: movdqa <z5=%xmm12,>y7=%xmm1
1827movdqa %xmm12,%xmm1
1828
1829# qhasm: uint32323232 y7 += z6
1830# asm 1: paddd <z6=int6464#6,<y7=int6464#2
1831# asm 2: paddd <z6=%xmm5,<y7=%xmm1
1832paddd %xmm5,%xmm1
1833
1834# qhasm: r7 = y7
1835# asm 1: movdqa <y7=int6464#2,>r7=int6464#3
1836# asm 2: movdqa <y7=%xmm1,>r7=%xmm2
1837movdqa %xmm1,%xmm2
1838
1839# qhasm: uint32323232 y7 <<= 9
1840# asm 1: pslld $9,<y7=int6464#2
1841# asm 2: pslld $9,<y7=%xmm1
1842pslld $9,%xmm1
1843
1844# qhasm: z7 ^= y7
1845# asm 1: pxor <y7=int6464#2,<z7=int6464#9
1846# asm 2: pxor <y7=%xmm1,<z7=%xmm8
1847pxor %xmm1,%xmm8
1848
1849# qhasm: uint32323232 r7 >>= 23
1850# asm 1: psrld $23,<r7=int6464#3
1851# asm 2: psrld $23,<r7=%xmm2
1852psrld $23,%xmm2
1853
1854# qhasm: z7 ^= r7
1855# asm 1: pxor <r7=int6464#3,<z7=int6464#9
1856# asm 2: pxor <r7=%xmm2,<z7=%xmm8
1857pxor %xmm2,%xmm8
1858
1859# qhasm: y3 = z1
1860# asm 1: movdqa <z1=int6464#8,>y3=int6464#2
1861# asm 2: movdqa <z1=%xmm7,>y3=%xmm1
1862movdqa %xmm7,%xmm1
1863
1864# qhasm: uint32323232 y3 += z2
1865# asm 1: paddd <z2=int6464#11,<y3=int6464#2
1866# asm 2: paddd <z2=%xmm10,<y3=%xmm1
1867paddd %xmm10,%xmm1
1868
1869# qhasm: r3 = y3
1870# asm 1: movdqa <y3=int6464#2,>r3=int6464#3
1871# asm 2: movdqa <y3=%xmm1,>r3=%xmm2
1872movdqa %xmm1,%xmm2
1873
1874# qhasm: uint32323232 y3 <<= 13
1875# asm 1: pslld $13,<y3=int6464#2
1876# asm 2: pslld $13,<y3=%xmm1
1877pslld $13,%xmm1
1878
1879# qhasm: z3 ^= y3
1880# asm 1: pxor <y3=int6464#2,<z3=int6464#5
1881# asm 2: pxor <y3=%xmm1,<z3=%xmm4
1882pxor %xmm1,%xmm4
1883
1884# qhasm: uint32323232 r3 >>= 19
1885# asm 1: psrld $19,<r3=int6464#3
1886# asm 2: psrld $19,<r3=%xmm2
1887psrld $19,%xmm2
1888
1889# qhasm: z3 ^= r3
1890# asm 1: pxor <r3=int6464#3,<z3=int6464#5
1891# asm 2: pxor <r3=%xmm2,<z3=%xmm4
1892pxor %xmm2,%xmm4
1893
1894# qhasm: y4 = z6
1895# asm 1: movdqa <z6=int6464#6,>y4=int6464#2
1896# asm 2: movdqa <z6=%xmm5,>y4=%xmm1
1897movdqa %xmm5,%xmm1
1898
1899# qhasm: uint32323232 y4 += z7
1900# asm 1: paddd <z7=int6464#9,<y4=int6464#2
1901# asm 2: paddd <z7=%xmm8,<y4=%xmm1
1902paddd %xmm8,%xmm1
1903
1904# qhasm: r4 = y4
1905# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
1906# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
1907movdqa %xmm1,%xmm2
1908
1909# qhasm: uint32323232 y4 <<= 13
1910# asm 1: pslld $13,<y4=int6464#2
1911# asm 2: pslld $13,<y4=%xmm1
1912pslld $13,%xmm1
1913
1914# qhasm: z4 ^= y4
1915# asm 1: pxor <y4=int6464#2,<z4=int6464#15
1916# asm 2: pxor <y4=%xmm1,<z4=%xmm14
1917pxor %xmm1,%xmm14
1918
1919# qhasm: uint32323232 r4 >>= 19
1920# asm 1: psrld $19,<r4=int6464#3
1921# asm 2: psrld $19,<r4=%xmm2
1922psrld $19,%xmm2
1923
1924# qhasm: z4 ^= r4
1925# asm 1: pxor <r4=int6464#3,<z4=int6464#15
1926# asm 2: pxor <r4=%xmm2,<z4=%xmm14
1927pxor %xmm2,%xmm14
1928
1929# qhasm: y0 = z2
1930# asm 1: movdqa <z2=int6464#11,>y0=int6464#2
1931# asm 2: movdqa <z2=%xmm10,>y0=%xmm1
1932movdqa %xmm10,%xmm1
1933
1934# qhasm: uint32323232 y0 += z3
1935# asm 1: paddd <z3=int6464#5,<y0=int6464#2
1936# asm 2: paddd <z3=%xmm4,<y0=%xmm1
1937paddd %xmm4,%xmm1
1938
1939# qhasm: r0 = y0
1940# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
1941# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
1942movdqa %xmm1,%xmm2
1943
1944# qhasm: uint32323232 y0 <<= 18
1945# asm 1: pslld $18,<y0=int6464#2
1946# asm 2: pslld $18,<y0=%xmm1
1947pslld $18,%xmm1
1948
1949# qhasm: z0 ^= y0
1950# asm 1: pxor <y0=int6464#2,<z0=int6464#1
1951# asm 2: pxor <y0=%xmm1,<z0=%xmm0
1952pxor %xmm1,%xmm0
1953
1954# qhasm: uint32323232 r0 >>= 14
1955# asm 1: psrld $14,<r0=int6464#3
1956# asm 2: psrld $14,<r0=%xmm2
1957psrld $14,%xmm2
1958
1959# qhasm: z0 ^= r0
1960# asm 1: pxor <r0=int6464#3,<z0=int6464#1
1961# asm 2: pxor <r0=%xmm2,<z0=%xmm0
1962pxor %xmm2,%xmm0
1963
1964# qhasm: z10 = z10_stack
1965# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
1966# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
1967movdqa 320(%rsp),%xmm1
1968
1969# qhasm: z0_stack = z0
1970# asm 1: movdqa <z0=int6464#1,>z0_stack=stack128#21
1971# asm 2: movdqa <z0=%xmm0,>z0_stack=320(%rsp)
1972movdqa %xmm0,320(%rsp)
1973
1974# qhasm: y5 = z7
1975# asm 1: movdqa <z7=int6464#9,>y5=int6464#1
1976# asm 2: movdqa <z7=%xmm8,>y5=%xmm0
1977movdqa %xmm8,%xmm0
1978
1979# qhasm: uint32323232 y5 += z4
1980# asm 1: paddd <z4=int6464#15,<y5=int6464#1
1981# asm 2: paddd <z4=%xmm14,<y5=%xmm0
1982paddd %xmm14,%xmm0
1983
1984# qhasm: r5 = y5
1985# asm 1: movdqa <y5=int6464#1,>r5=int6464#3
1986# asm 2: movdqa <y5=%xmm0,>r5=%xmm2
1987movdqa %xmm0,%xmm2
1988
1989# qhasm: uint32323232 y5 <<= 18
1990# asm 1: pslld $18,<y5=int6464#1
1991# asm 2: pslld $18,<y5=%xmm0
1992pslld $18,%xmm0
1993
1994# qhasm: z5 ^= y5
1995# asm 1: pxor <y5=int6464#1,<z5=int6464#13
1996# asm 2: pxor <y5=%xmm0,<z5=%xmm12
1997pxor %xmm0,%xmm12
1998
1999# qhasm: uint32323232 r5 >>= 14
2000# asm 1: psrld $14,<r5=int6464#3
2001# asm 2: psrld $14,<r5=%xmm2
2002psrld $14,%xmm2
2003
2004# qhasm: z5 ^= r5
2005# asm 1: pxor <r5=int6464#3,<z5=int6464#13
2006# asm 2: pxor <r5=%xmm2,<z5=%xmm12
2007pxor %xmm2,%xmm12
2008
2009# qhasm: y11 = z9
2010# asm 1: movdqa <z9=int6464#12,>y11=int6464#1
2011# asm 2: movdqa <z9=%xmm11,>y11=%xmm0
2012movdqa %xmm11,%xmm0
2013
2014# qhasm: uint32323232 y11 += z10
2015# asm 1: paddd <z10=int6464#2,<y11=int6464#1
2016# asm 2: paddd <z10=%xmm1,<y11=%xmm0
2017paddd %xmm1,%xmm0
2018
2019# qhasm: r11 = y11
2020# asm 1: movdqa <y11=int6464#1,>r11=int6464#3
2021# asm 2: movdqa <y11=%xmm0,>r11=%xmm2
2022movdqa %xmm0,%xmm2
2023
2024# qhasm: uint32323232 y11 <<= 7
2025# asm 1: pslld $7,<y11=int6464#1
2026# asm 2: pslld $7,<y11=%xmm0
2027pslld $7,%xmm0
2028
2029# qhasm: z11 ^= y11
2030# asm 1: pxor <y11=int6464#1,<z11=int6464#7
2031# asm 2: pxor <y11=%xmm0,<z11=%xmm6
2032pxor %xmm0,%xmm6
2033
2034# qhasm: uint32323232 r11 >>= 25
2035# asm 1: psrld $25,<r11=int6464#3
2036# asm 2: psrld $25,<r11=%xmm2
2037psrld $25,%xmm2
2038
2039# qhasm: z11 ^= r11
2040# asm 1: pxor <r11=int6464#3,<z11=int6464#7
2041# asm 2: pxor <r11=%xmm2,<z11=%xmm6
2042pxor %xmm2,%xmm6
2043
2044# qhasm: z15 = z15_stack
2045# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
2046# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
2047movdqa 336(%rsp),%xmm2
2048
2049# qhasm: z5_stack = z5
2050# asm 1: movdqa <z5=int6464#13,>z5_stack=stack128#22
2051# asm 2: movdqa <z5=%xmm12,>z5_stack=336(%rsp)
2052movdqa %xmm12,336(%rsp)
2053
2054# qhasm: y12 = z14
2055# asm 1: movdqa <z14=int6464#4,>y12=int6464#1
2056# asm 2: movdqa <z14=%xmm3,>y12=%xmm0
2057movdqa %xmm3,%xmm0
2058
2059# qhasm: uint32323232 y12 += z15
2060# asm 1: paddd <z15=int6464#3,<y12=int6464#1
2061# asm 2: paddd <z15=%xmm2,<y12=%xmm0
2062paddd %xmm2,%xmm0
2063
2064# qhasm: r12 = y12
2065# asm 1: movdqa <y12=int6464#1,>r12=int6464#13
2066# asm 2: movdqa <y12=%xmm0,>r12=%xmm12
2067movdqa %xmm0,%xmm12
2068
2069# qhasm: uint32323232 y12 <<= 7
2070# asm 1: pslld $7,<y12=int6464#1
2071# asm 2: pslld $7,<y12=%xmm0
2072pslld $7,%xmm0
2073
2074# qhasm: z12 ^= y12
2075# asm 1: pxor <y12=int6464#1,<z12=int6464#14
2076# asm 2: pxor <y12=%xmm0,<z12=%xmm13
2077pxor %xmm0,%xmm13
2078
2079# qhasm: uint32323232 r12 >>= 25
2080# asm 1: psrld $25,<r12=int6464#13
2081# asm 2: psrld $25,<r12=%xmm12
2082psrld $25,%xmm12
2083
2084# qhasm: z12 ^= r12
2085# asm 1: pxor <r12=int6464#13,<z12=int6464#14
2086# asm 2: pxor <r12=%xmm12,<z12=%xmm13
2087pxor %xmm12,%xmm13
2088
2089# qhasm: y8 = z10
2090# asm 1: movdqa <z10=int6464#2,>y8=int6464#1
2091# asm 2: movdqa <z10=%xmm1,>y8=%xmm0
2092movdqa %xmm1,%xmm0
2093
2094# qhasm: uint32323232 y8 += z11
2095# asm 1: paddd <z11=int6464#7,<y8=int6464#1
2096# asm 2: paddd <z11=%xmm6,<y8=%xmm0
2097paddd %xmm6,%xmm0
2098
2099# qhasm: r8 = y8
2100# asm 1: movdqa <y8=int6464#1,>r8=int6464#13
2101# asm 2: movdqa <y8=%xmm0,>r8=%xmm12
2102movdqa %xmm0,%xmm12
2103
2104# qhasm: uint32323232 y8 <<= 9
2105# asm 1: pslld $9,<y8=int6464#1
2106# asm 2: pslld $9,<y8=%xmm0
2107pslld $9,%xmm0
2108
2109# qhasm: z8 ^= y8
2110# asm 1: pxor <y8=int6464#1,<z8=int6464#16
2111# asm 2: pxor <y8=%xmm0,<z8=%xmm15
2112pxor %xmm0,%xmm15
2113
2114# qhasm: uint32323232 r8 >>= 23
2115# asm 1: psrld $23,<r8=int6464#13
2116# asm 2: psrld $23,<r8=%xmm12
2117psrld $23,%xmm12
2118
2119# qhasm: z8 ^= r8
2120# asm 1: pxor <r8=int6464#13,<z8=int6464#16
2121# asm 2: pxor <r8=%xmm12,<z8=%xmm15
2122pxor %xmm12,%xmm15
2123
2124# qhasm: y13 = z15
2125# asm 1: movdqa <z15=int6464#3,>y13=int6464#1
2126# asm 2: movdqa <z15=%xmm2,>y13=%xmm0
2127movdqa %xmm2,%xmm0
2128
2129# qhasm: uint32323232 y13 += z12
2130# asm 1: paddd <z12=int6464#14,<y13=int6464#1
2131# asm 2: paddd <z12=%xmm13,<y13=%xmm0
2132paddd %xmm13,%xmm0
2133
2134# qhasm: r13 = y13
2135# asm 1: movdqa <y13=int6464#1,>r13=int6464#13
2136# asm 2: movdqa <y13=%xmm0,>r13=%xmm12
2137movdqa %xmm0,%xmm12
2138
2139# qhasm: uint32323232 y13 <<= 9
2140# asm 1: pslld $9,<y13=int6464#1
2141# asm 2: pslld $9,<y13=%xmm0
2142pslld $9,%xmm0
2143
2144# qhasm: z13 ^= y13
2145# asm 1: pxor <y13=int6464#1,<z13=int6464#10
2146# asm 2: pxor <y13=%xmm0,<z13=%xmm9
2147pxor %xmm0,%xmm9
2148
2149# qhasm: uint32323232 r13 >>= 23
2150# asm 1: psrld $23,<r13=int6464#13
2151# asm 2: psrld $23,<r13=%xmm12
2152psrld $23,%xmm12
2153
2154# qhasm: z13 ^= r13
2155# asm 1: pxor <r13=int6464#13,<z13=int6464#10
2156# asm 2: pxor <r13=%xmm12,<z13=%xmm9
2157pxor %xmm12,%xmm9
2158
2159# qhasm: y9 = z11
2160# asm 1: movdqa <z11=int6464#7,>y9=int6464#1
2161# asm 2: movdqa <z11=%xmm6,>y9=%xmm0
2162movdqa %xmm6,%xmm0
2163
2164# qhasm: uint32323232 y9 += z8
2165# asm 1: paddd <z8=int6464#16,<y9=int6464#1
2166# asm 2: paddd <z8=%xmm15,<y9=%xmm0
2167paddd %xmm15,%xmm0
2168
2169# qhasm: r9 = y9
2170# asm 1: movdqa <y9=int6464#1,>r9=int6464#13
2171# asm 2: movdqa <y9=%xmm0,>r9=%xmm12
2172movdqa %xmm0,%xmm12
2173
2174# qhasm: uint32323232 y9 <<= 13
2175# asm 1: pslld $13,<y9=int6464#1
2176# asm 2: pslld $13,<y9=%xmm0
2177pslld $13,%xmm0
2178
2179# qhasm: z9 ^= y9
2180# asm 1: pxor <y9=int6464#1,<z9=int6464#12
2181# asm 2: pxor <y9=%xmm0,<z9=%xmm11
2182pxor %xmm0,%xmm11
2183
2184# qhasm: uint32323232 r9 >>= 19
2185# asm 1: psrld $19,<r9=int6464#13
2186# asm 2: psrld $19,<r9=%xmm12
2187psrld $19,%xmm12
2188
2189# qhasm: z9 ^= r9
2190# asm 1: pxor <r9=int6464#13,<z9=int6464#12
2191# asm 2: pxor <r9=%xmm12,<z9=%xmm11
2192pxor %xmm12,%xmm11
2193
2194# qhasm: y14 = z12
2195# asm 1: movdqa <z12=int6464#14,>y14=int6464#1
2196# asm 2: movdqa <z12=%xmm13,>y14=%xmm0
2197movdqa %xmm13,%xmm0
2198
2199# qhasm: uint32323232 y14 += z13
2200# asm 1: paddd <z13=int6464#10,<y14=int6464#1
2201# asm 2: paddd <z13=%xmm9,<y14=%xmm0
2202paddd %xmm9,%xmm0
2203
2204# qhasm: r14 = y14
2205# asm 1: movdqa <y14=int6464#1,>r14=int6464#13
2206# asm 2: movdqa <y14=%xmm0,>r14=%xmm12
2207movdqa %xmm0,%xmm12
2208
2209# qhasm: uint32323232 y14 <<= 13
2210# asm 1: pslld $13,<y14=int6464#1
2211# asm 2: pslld $13,<y14=%xmm0
2212pslld $13,%xmm0
2213
2214# qhasm: z14 ^= y14
2215# asm 1: pxor <y14=int6464#1,<z14=int6464#4
2216# asm 2: pxor <y14=%xmm0,<z14=%xmm3
2217pxor %xmm0,%xmm3
2218
2219# qhasm: uint32323232 r14 >>= 19
2220# asm 1: psrld $19,<r14=int6464#13
2221# asm 2: psrld $19,<r14=%xmm12
2222psrld $19,%xmm12
2223
2224# qhasm: z14 ^= r14
2225# asm 1: pxor <r14=int6464#13,<z14=int6464#4
2226# asm 2: pxor <r14=%xmm12,<z14=%xmm3
2227pxor %xmm12,%xmm3
2228
2229# qhasm: y10 = z8
2230# asm 1: movdqa <z8=int6464#16,>y10=int6464#1
2231# asm 2: movdqa <z8=%xmm15,>y10=%xmm0
2232movdqa %xmm15,%xmm0
2233
2234# qhasm: uint32323232 y10 += z9
2235# asm 1: paddd <z9=int6464#12,<y10=int6464#1
2236# asm 2: paddd <z9=%xmm11,<y10=%xmm0
2237paddd %xmm11,%xmm0
2238
2239# qhasm: r10 = y10
2240# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
2241# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
2242movdqa %xmm0,%xmm12
2243
2244# qhasm: uint32323232 y10 <<= 18
2245# asm 1: pslld $18,<y10=int6464#1
2246# asm 2: pslld $18,<y10=%xmm0
2247pslld $18,%xmm0
2248
2249# qhasm: z10 ^= y10
2250# asm 1: pxor <y10=int6464#1,<z10=int6464#2
2251# asm 2: pxor <y10=%xmm0,<z10=%xmm1
2252pxor %xmm0,%xmm1
2253
2254# qhasm: uint32323232 r10 >>= 14
2255# asm 1: psrld $14,<r10=int6464#13
2256# asm 2: psrld $14,<r10=%xmm12
2257psrld $14,%xmm12
2258
2259# qhasm: z10 ^= r10
2260# asm 1: pxor <r10=int6464#13,<z10=int6464#2
2261# asm 2: pxor <r10=%xmm12,<z10=%xmm1
2262pxor %xmm12,%xmm1
2263
2264# qhasm: y15 = z13
2265# asm 1: movdqa <z13=int6464#10,>y15=int6464#1
2266# asm 2: movdqa <z13=%xmm9,>y15=%xmm0
2267movdqa %xmm9,%xmm0
2268
2269# qhasm: uint32323232 y15 += z14
2270# asm 1: paddd <z14=int6464#4,<y15=int6464#1
2271# asm 2: paddd <z14=%xmm3,<y15=%xmm0
2272paddd %xmm3,%xmm0
2273
2274# qhasm: r15 = y15
2275# asm 1: movdqa <y15=int6464#1,>r15=int6464#13
2276# asm 2: movdqa <y15=%xmm0,>r15=%xmm12
2277movdqa %xmm0,%xmm12
2278
2279# qhasm: uint32323232 y15 <<= 18
2280# asm 1: pslld $18,<y15=int6464#1
2281# asm 2: pslld $18,<y15=%xmm0
2282pslld $18,%xmm0
2283
2284# qhasm: z15 ^= y15
2285# asm 1: pxor <y15=int6464#1,<z15=int6464#3
2286# asm 2: pxor <y15=%xmm0,<z15=%xmm2
2287pxor %xmm0,%xmm2
2288
2289# qhasm: uint32323232 r15 >>= 14
2290# asm 1: psrld $14,<r15=int6464#13
2291# asm 2: psrld $14,<r15=%xmm12
2292psrld $14,%xmm12
2293
2294# qhasm: z15 ^= r15
2295# asm 1: pxor <r15=int6464#13,<z15=int6464#3
2296# asm 2: pxor <r15=%xmm12,<z15=%xmm2
2297pxor %xmm12,%xmm2
2298
2299# qhasm: z0 = z0_stack
2300# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#13
2301# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm12
2302movdqa 320(%rsp),%xmm12
2303
2304# qhasm: z5 = z5_stack
2305# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#1
2306# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm0
2307movdqa 336(%rsp),%xmm0
2308
2309# qhasm: unsigned>? i -= 2
2310# asm 1: sub $2,<i=int64#3
2311# asm 2: sub $2,<i=%rdx
2312sub $2,%rdx
2313# comment:fp stack unchanged by jump
2314
2315# qhasm: goto mainloop1 if unsigned>
2316ja ._mainloop1
2317
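# note: each pass through mainloop1 performs two rounds, so the counter i is
# note: decremented by 2 and the loop repeats while it stays above zero.  Once
# note: the rounds are done, the code below adds the saved input words
# note: (orig0..orig15) back into the working state before any output is
# note: produced (the usual Salsa20 feedforward).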
2318# qhasm: uint32323232 z0 += orig0
2319# asm 1: paddd <orig0=stack128#8,<z0=int6464#13
2320# asm 2: paddd <orig0=112(%rsp),<z0=%xmm12
2321paddd 112(%rsp),%xmm12
2322
2323# qhasm: uint32323232 z1 += orig1
2324# asm 1: paddd <orig1=stack128#12,<z1=int6464#8
2325# asm 2: paddd <orig1=176(%rsp),<z1=%xmm7
2326paddd 176(%rsp),%xmm7
2327
2328# qhasm: uint32323232 z2 += orig2
2329# asm 1: paddd <orig2=stack128#15,<z2=int6464#11
2330# asm 2: paddd <orig2=224(%rsp),<z2=%xmm10
2331paddd 224(%rsp),%xmm10
2332
2333# qhasm: uint32323232 z3 += orig3
2334# asm 1: paddd <orig3=stack128#18,<z3=int6464#5
2335# asm 2: paddd <orig3=272(%rsp),<z3=%xmm4
2336paddd 272(%rsp),%xmm4
2337
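# note: output pattern for each group of four state words: movd extracts the
# note: low lane (the word belonging to the first block), pshufd $0x39 rotates
# note: the lanes so the next block's word drops into the low position, and
# note: each extracted word is XORed with the matching message word (xorl)
# note: and stored to out (movl).  Because the four blocks are 64 bytes apart,
# note: the same state word is applied at offsets n, n+64, n+128 and n+192.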
2338# qhasm: in0 = z0
2339# asm 1: movd <z0=int6464#13,>in0=int64#3
2340# asm 2: movd <z0=%xmm12,>in0=%rdx
2341movd %xmm12,%rdx
2342
2343# qhasm: in1 = z1
2344# asm 1: movd <z1=int6464#8,>in1=int64#4
2345# asm 2: movd <z1=%xmm7,>in1=%rcx
2346movd %xmm7,%rcx
2347
2348# qhasm: in2 = z2
2349# asm 1: movd <z2=int6464#11,>in2=int64#5
2350# asm 2: movd <z2=%xmm10,>in2=%r8
2351movd %xmm10,%r8
2352
2353# qhasm: in3 = z3
2354# asm 1: movd <z3=int6464#5,>in3=int64#6
2355# asm 2: movd <z3=%xmm4,>in3=%r9
2356movd %xmm4,%r9
2357
2358# qhasm: z0 <<<= 96
2359# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2360# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2361pshufd $0x39,%xmm12,%xmm12
2362
2363# qhasm: z1 <<<= 96
2364# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2365# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2366pshufd $0x39,%xmm7,%xmm7
2367
2368# qhasm: z2 <<<= 96
2369# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2370# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2371pshufd $0x39,%xmm10,%xmm10
2372
2373# qhasm: z3 <<<= 96
2374# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2375# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2376pshufd $0x39,%xmm4,%xmm4
2377
2378# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
2379# asm 1: xorl 0(<m=int64#2),<in0=int64#3d
2380# asm 2: xorl 0(<m=%rsi),<in0=%edx
2381xorl 0(%rsi),%edx
2382
2383# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
2384# asm 1: xorl 4(<m=int64#2),<in1=int64#4d
2385# asm 2: xorl 4(<m=%rsi),<in1=%ecx
2386xorl 4(%rsi),%ecx
2387
2388# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
2389# asm 1: xorl 8(<m=int64#2),<in2=int64#5d
2390# asm 2: xorl 8(<m=%rsi),<in2=%r8d
2391xorl 8(%rsi),%r8d
2392
2393# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
2394# asm 1: xorl 12(<m=int64#2),<in3=int64#6d
2395# asm 2: xorl 12(<m=%rsi),<in3=%r9d
2396xorl 12(%rsi),%r9d
2397
2398# qhasm: *(uint32 *) (out + 0) = in0
2399# asm 1: movl <in0=int64#3d,0(<out=int64#1)
2400# asm 2: movl <in0=%edx,0(<out=%rdi)
2401movl %edx,0(%rdi)
2402
2403# qhasm: *(uint32 *) (out + 4) = in1
2404# asm 1: movl <in1=int64#4d,4(<out=int64#1)
2405# asm 2: movl <in1=%ecx,4(<out=%rdi)
2406movl %ecx,4(%rdi)
2407
2408# qhasm: *(uint32 *) (out + 8) = in2
2409# asm 1: movl <in2=int64#5d,8(<out=int64#1)
2410# asm 2: movl <in2=%r8d,8(<out=%rdi)
2411movl %r8d,8(%rdi)
2412
2413# qhasm: *(uint32 *) (out + 12) = in3
2414# asm 1: movl <in3=int64#6d,12(<out=int64#1)
2415# asm 2: movl <in3=%r9d,12(<out=%rdi)
2416movl %r9d,12(%rdi)
2417
2418# qhasm: in0 = z0
2419# asm 1: movd <z0=int6464#13,>in0=int64#3
2420# asm 2: movd <z0=%xmm12,>in0=%rdx
2421movd %xmm12,%rdx
2422
2423# qhasm: in1 = z1
2424# asm 1: movd <z1=int6464#8,>in1=int64#4
2425# asm 2: movd <z1=%xmm7,>in1=%rcx
2426movd %xmm7,%rcx
2427
2428# qhasm: in2 = z2
2429# asm 1: movd <z2=int6464#11,>in2=int64#5
2430# asm 2: movd <z2=%xmm10,>in2=%r8
2431movd %xmm10,%r8
2432
2433# qhasm: in3 = z3
2434# asm 1: movd <z3=int6464#5,>in3=int64#6
2435# asm 2: movd <z3=%xmm4,>in3=%r9
2436movd %xmm4,%r9
2437
2438# qhasm: z0 <<<= 96
2439# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2440# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2441pshufd $0x39,%xmm12,%xmm12
2442
2443# qhasm: z1 <<<= 96
2444# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2445# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2446pshufd $0x39,%xmm7,%xmm7
2447
2448# qhasm: z2 <<<= 96
2449# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2450# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2451pshufd $0x39,%xmm10,%xmm10
2452
2453# qhasm: z3 <<<= 96
2454# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2455# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2456pshufd $0x39,%xmm4,%xmm4
2457
2458# qhasm: (uint32) in0 ^= *(uint32 *) (m + 64)
2459# asm 1: xorl 64(<m=int64#2),<in0=int64#3d
2460# asm 2: xorl 64(<m=%rsi),<in0=%edx
2461xorl 64(%rsi),%edx
2462
2463# qhasm: (uint32) in1 ^= *(uint32 *) (m + 68)
2464# asm 1: xorl 68(<m=int64#2),<in1=int64#4d
2465# asm 2: xorl 68(<m=%rsi),<in1=%ecx
2466xorl 68(%rsi),%ecx
2467
2468# qhasm: (uint32) in2 ^= *(uint32 *) (m + 72)
2469# asm 1: xorl 72(<m=int64#2),<in2=int64#5d
2470# asm 2: xorl 72(<m=%rsi),<in2=%r8d
2471xorl 72(%rsi),%r8d
2472
2473# qhasm: (uint32) in3 ^= *(uint32 *) (m + 76)
2474# asm 1: xorl 76(<m=int64#2),<in3=int64#6d
2475# asm 2: xorl 76(<m=%rsi),<in3=%r9d
2476xorl 76(%rsi),%r9d
2477
2478# qhasm: *(uint32 *) (out + 64) = in0
2479# asm 1: movl <in0=int64#3d,64(<out=int64#1)
2480# asm 2: movl <in0=%edx,64(<out=%rdi)
2481movl %edx,64(%rdi)
2482
2483# qhasm: *(uint32 *) (out + 68) = in1
2484# asm 1: movl <in1=int64#4d,68(<out=int64#1)
2485# asm 2: movl <in1=%ecx,68(<out=%rdi)
2486movl %ecx,68(%rdi)
2487
2488# qhasm: *(uint32 *) (out + 72) = in2
2489# asm 1: movl <in2=int64#5d,72(<out=int64#1)
2490# asm 2: movl <in2=%r8d,72(<out=%rdi)
2491movl %r8d,72(%rdi)
2492
2493# qhasm: *(uint32 *) (out + 76) = in3
2494# asm 1: movl <in3=int64#6d,76(<out=int64#1)
2495# asm 2: movl <in3=%r9d,76(<out=%rdi)
2496movl %r9d,76(%rdi)
2497
2498# qhasm: in0 = z0
2499# asm 1: movd <z0=int6464#13,>in0=int64#3
2500# asm 2: movd <z0=%xmm12,>in0=%rdx
2501movd %xmm12,%rdx
2502
2503# qhasm: in1 = z1
2504# asm 1: movd <z1=int6464#8,>in1=int64#4
2505# asm 2: movd <z1=%xmm7,>in1=%rcx
2506movd %xmm7,%rcx
2507
2508# qhasm: in2 = z2
2509# asm 1: movd <z2=int6464#11,>in2=int64#5
2510# asm 2: movd <z2=%xmm10,>in2=%r8
2511movd %xmm10,%r8
2512
2513# qhasm: in3 = z3
2514# asm 1: movd <z3=int6464#5,>in3=int64#6
2515# asm 2: movd <z3=%xmm4,>in3=%r9
2516movd %xmm4,%r9
2517
2518# qhasm: z0 <<<= 96
2519# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2520# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2521pshufd $0x39,%xmm12,%xmm12
2522
2523# qhasm: z1 <<<= 96
2524# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2525# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2526pshufd $0x39,%xmm7,%xmm7
2527
2528# qhasm: z2 <<<= 96
2529# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2530# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2531pshufd $0x39,%xmm10,%xmm10
2532
2533# qhasm: z3 <<<= 96
2534# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2535# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2536pshufd $0x39,%xmm4,%xmm4
2537
2538# qhasm: (uint32) in0 ^= *(uint32 *) (m + 128)
2539# asm 1: xorl 128(<m=int64#2),<in0=int64#3d
2540# asm 2: xorl 128(<m=%rsi),<in0=%edx
2541xorl 128(%rsi),%edx
2542
2543# qhasm: (uint32) in1 ^= *(uint32 *) (m + 132)
2544# asm 1: xorl 132(<m=int64#2),<in1=int64#4d
2545# asm 2: xorl 132(<m=%rsi),<in1=%ecx
2546xorl 132(%rsi),%ecx
2547
2548# qhasm: (uint32) in2 ^= *(uint32 *) (m + 136)
2549# asm 1: xorl 136(<m=int64#2),<in2=int64#5d
2550# asm 2: xorl 136(<m=%rsi),<in2=%r8d
2551xorl 136(%rsi),%r8d
2552
2553# qhasm: (uint32) in3 ^= *(uint32 *) (m + 140)
2554# asm 1: xorl 140(<m=int64#2),<in3=int64#6d
2555# asm 2: xorl 140(<m=%rsi),<in3=%r9d
2556xorl 140(%rsi),%r9d
2557
2558# qhasm: *(uint32 *) (out + 128) = in0
2559# asm 1: movl <in0=int64#3d,128(<out=int64#1)
2560# asm 2: movl <in0=%edx,128(<out=%rdi)
2561movl %edx,128(%rdi)
2562
2563# qhasm: *(uint32 *) (out + 132) = in1
2564# asm 1: movl <in1=int64#4d,132(<out=int64#1)
2565# asm 2: movl <in1=%ecx,132(<out=%rdi)
2566movl %ecx,132(%rdi)
2567
2568# qhasm: *(uint32 *) (out + 136) = in2
2569# asm 1: movl <in2=int64#5d,136(<out=int64#1)
2570# asm 2: movl <in2=%r8d,136(<out=%rdi)
2571movl %r8d,136(%rdi)
2572
2573# qhasm: *(uint32 *) (out + 140) = in3
2574# asm 1: movl <in3=int64#6d,140(<out=int64#1)
2575# asm 2: movl <in3=%r9d,140(<out=%rdi)
2576movl %r9d,140(%rdi)
2577
2578# qhasm: in0 = z0
2579# asm 1: movd <z0=int6464#13,>in0=int64#3
2580# asm 2: movd <z0=%xmm12,>in0=%rdx
2581movd %xmm12,%rdx
2582
2583# qhasm: in1 = z1
2584# asm 1: movd <z1=int6464#8,>in1=int64#4
2585# asm 2: movd <z1=%xmm7,>in1=%rcx
2586movd %xmm7,%rcx
2587
2588# qhasm: in2 = z2
2589# asm 1: movd <z2=int6464#11,>in2=int64#5
2590# asm 2: movd <z2=%xmm10,>in2=%r8
2591movd %xmm10,%r8
2592
2593# qhasm: in3 = z3
2594# asm 1: movd <z3=int6464#5,>in3=int64#6
2595# asm 2: movd <z3=%xmm4,>in3=%r9
2596movd %xmm4,%r9
2597
2598# qhasm: (uint32) in0 ^= *(uint32 *) (m + 192)
2599# asm 1: xorl 192(<m=int64#2),<in0=int64#3d
2600# asm 2: xorl 192(<m=%rsi),<in0=%edx
2601xorl 192(%rsi),%edx
2602
2603# qhasm: (uint32) in1 ^= *(uint32 *) (m + 196)
2604# asm 1: xorl 196(<m=int64#2),<in1=int64#4d
2605# asm 2: xorl 196(<m=%rsi),<in1=%ecx
2606xorl 196(%rsi),%ecx
2607
2608# qhasm: (uint32) in2 ^= *(uint32 *) (m + 200)
2609# asm 1: xorl 200(<m=int64#2),<in2=int64#5d
2610# asm 2: xorl 200(<m=%rsi),<in2=%r8d
2611xorl 200(%rsi),%r8d
2612
2613# qhasm: (uint32) in3 ^= *(uint32 *) (m + 204)
2614# asm 1: xorl 204(<m=int64#2),<in3=int64#6d
2615# asm 2: xorl 204(<m=%rsi),<in3=%r9d
2616xorl 204(%rsi),%r9d
2617
2618# qhasm: *(uint32 *) (out + 192) = in0
2619# asm 1: movl <in0=int64#3d,192(<out=int64#1)
2620# asm 2: movl <in0=%edx,192(<out=%rdi)
2621movl %edx,192(%rdi)
2622
2623# qhasm: *(uint32 *) (out + 196) = in1
2624# asm 1: movl <in1=int64#4d,196(<out=int64#1)
2625# asm 2: movl <in1=%ecx,196(<out=%rdi)
2626movl %ecx,196(%rdi)
2627
2628# qhasm: *(uint32 *) (out + 200) = in2
2629# asm 1: movl <in2=int64#5d,200(<out=int64#1)
2630# asm 2: movl <in2=%r8d,200(<out=%rdi)
2631movl %r8d,200(%rdi)
2632
2633# qhasm: *(uint32 *) (out + 204) = in3
2634# asm 1: movl <in3=int64#6d,204(<out=int64#1)
2635# asm 2: movl <in3=%r9d,204(<out=%rdi)
2636movl %r9d,204(%rdi)
2637
2638# qhasm: uint32323232 z4 += orig4
2639# asm 1: paddd <orig4=stack128#16,<z4=int6464#15
2640# asm 2: paddd <orig4=240(%rsp),<z4=%xmm14
2641paddd 240(%rsp),%xmm14
2642
2643# qhasm: uint32323232 z5 += orig5
2644# asm 1: paddd <orig5=stack128#5,<z5=int6464#1
2645# asm 2: paddd <orig5=64(%rsp),<z5=%xmm0
2646paddd 64(%rsp),%xmm0
2647
2648# qhasm: uint32323232 z6 += orig6
2649# asm 1: paddd <orig6=stack128#9,<z6=int6464#6
2650# asm 2: paddd <orig6=128(%rsp),<z6=%xmm5
2651paddd 128(%rsp),%xmm5
2652
2653# qhasm: uint32323232 z7 += orig7
2654# asm 1: paddd <orig7=stack128#13,<z7=int6464#9
2655# asm 2: paddd <orig7=192(%rsp),<z7=%xmm8
2656paddd 192(%rsp),%xmm8
2657
2658# qhasm: in4 = z4
2659# asm 1: movd <z4=int6464#15,>in4=int64#3
2660# asm 2: movd <z4=%xmm14,>in4=%rdx
2661movd %xmm14,%rdx
2662
2663# qhasm: in5 = z5
2664# asm 1: movd <z5=int6464#1,>in5=int64#4
2665# asm 2: movd <z5=%xmm0,>in5=%rcx
2666movd %xmm0,%rcx
2667
2668# qhasm: in6 = z6
2669# asm 1: movd <z6=int6464#6,>in6=int64#5
2670# asm 2: movd <z6=%xmm5,>in6=%r8
2671movd %xmm5,%r8
2672
2673# qhasm: in7 = z7
2674# asm 1: movd <z7=int6464#9,>in7=int64#6
2675# asm 2: movd <z7=%xmm8,>in7=%r9
2676movd %xmm8,%r9
2677
2678# qhasm: z4 <<<= 96
2679# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2680# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2681pshufd $0x39,%xmm14,%xmm14
2682
2683# qhasm: z5 <<<= 96
2684# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2685# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2686pshufd $0x39,%xmm0,%xmm0
2687
2688# qhasm: z6 <<<= 96
2689# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2690# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2691pshufd $0x39,%xmm5,%xmm5
2692
2693# qhasm: z7 <<<= 96
2694# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2695# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2696pshufd $0x39,%xmm8,%xmm8
2697
2698# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
2699# asm 1: xorl 16(<m=int64#2),<in4=int64#3d
2700# asm 2: xorl 16(<m=%rsi),<in4=%edx
2701xorl 16(%rsi),%edx
2702
2703# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
2704# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
2705# asm 2: xorl 20(<m=%rsi),<in5=%ecx
2706xorl 20(%rsi),%ecx
2707
2708# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
2709# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
2710# asm 2: xorl 24(<m=%rsi),<in6=%r8d
2711xorl 24(%rsi),%r8d
2712
2713# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
2714# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
2715# asm 2: xorl 28(<m=%rsi),<in7=%r9d
2716xorl 28(%rsi),%r9d
2717
2718# qhasm: *(uint32 *) (out + 16) = in4
2719# asm 1: movl <in4=int64#3d,16(<out=int64#1)
2720# asm 2: movl <in4=%edx,16(<out=%rdi)
2721movl %edx,16(%rdi)
2722
2723# qhasm: *(uint32 *) (out + 20) = in5
2724# asm 1: movl <in5=int64#4d,20(<out=int64#1)
2725# asm 2: movl <in5=%ecx,20(<out=%rdi)
2726movl %ecx,20(%rdi)
2727
2728# qhasm: *(uint32 *) (out + 24) = in6
2729# asm 1: movl <in6=int64#5d,24(<out=int64#1)
2730# asm 2: movl <in6=%r8d,24(<out=%rdi)
2731movl %r8d,24(%rdi)
2732
2733# qhasm: *(uint32 *) (out + 28) = in7
2734# asm 1: movl <in7=int64#6d,28(<out=int64#1)
2735# asm 2: movl <in7=%r9d,28(<out=%rdi)
2736movl %r9d,28(%rdi)
2737
2738# qhasm: in4 = z4
2739# asm 1: movd <z4=int6464#15,>in4=int64#3
2740# asm 2: movd <z4=%xmm14,>in4=%rdx
2741movd %xmm14,%rdx
2742
2743# qhasm: in5 = z5
2744# asm 1: movd <z5=int6464#1,>in5=int64#4
2745# asm 2: movd <z5=%xmm0,>in5=%rcx
2746movd %xmm0,%rcx
2747
2748# qhasm: in6 = z6
2749# asm 1: movd <z6=int6464#6,>in6=int64#5
2750# asm 2: movd <z6=%xmm5,>in6=%r8
2751movd %xmm5,%r8
2752
2753# qhasm: in7 = z7
2754# asm 1: movd <z7=int6464#9,>in7=int64#6
2755# asm 2: movd <z7=%xmm8,>in7=%r9
2756movd %xmm8,%r9
2757
2758# qhasm: z4 <<<= 96
2759# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2760# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2761pshufd $0x39,%xmm14,%xmm14
2762
2763# qhasm: z5 <<<= 96
2764# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2765# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2766pshufd $0x39,%xmm0,%xmm0
2767
2768# qhasm: z6 <<<= 96
2769# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2770# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2771pshufd $0x39,%xmm5,%xmm5
2772
2773# qhasm: z7 <<<= 96
2774# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2775# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2776pshufd $0x39,%xmm8,%xmm8
2777
2778# qhasm: (uint32) in4 ^= *(uint32 *) (m + 80)
2779# asm 1: xorl 80(<m=int64#2),<in4=int64#3d
2780# asm 2: xorl 80(<m=%rsi),<in4=%edx
2781xorl 80(%rsi),%edx
2782
2783# qhasm: (uint32) in5 ^= *(uint32 *) (m + 84)
2784# asm 1: xorl 84(<m=int64#2),<in5=int64#4d
2785# asm 2: xorl 84(<m=%rsi),<in5=%ecx
2786xorl 84(%rsi),%ecx
2787
2788# qhasm: (uint32) in6 ^= *(uint32 *) (m + 88)
2789# asm 1: xorl 88(<m=int64#2),<in6=int64#5d
2790# asm 2: xorl 88(<m=%rsi),<in6=%r8d
2791xorl 88(%rsi),%r8d
2792
2793# qhasm: (uint32) in7 ^= *(uint32 *) (m + 92)
2794# asm 1: xorl 92(<m=int64#2),<in7=int64#6d
2795# asm 2: xorl 92(<m=%rsi),<in7=%r9d
2796xorl 92(%rsi),%r9d
2797
2798# qhasm: *(uint32 *) (out + 80) = in4
2799# asm 1: movl <in4=int64#3d,80(<out=int64#1)
2800# asm 2: movl <in4=%edx,80(<out=%rdi)
2801movl %edx,80(%rdi)
2802
2803# qhasm: *(uint32 *) (out + 84) = in5
2804# asm 1: movl <in5=int64#4d,84(<out=int64#1)
2805# asm 2: movl <in5=%ecx,84(<out=%rdi)
2806movl %ecx,84(%rdi)
2807
2808# qhasm: *(uint32 *) (out + 88) = in6
2809# asm 1: movl <in6=int64#5d,88(<out=int64#1)
2810# asm 2: movl <in6=%r8d,88(<out=%rdi)
2811movl %r8d,88(%rdi)
2812
2813# qhasm: *(uint32 *) (out + 92) = in7
2814# asm 1: movl <in7=int64#6d,92(<out=int64#1)
2815# asm 2: movl <in7=%r9d,92(<out=%rdi)
2816movl %r9d,92(%rdi)
2817
2818# qhasm: in4 = z4
2819# asm 1: movd <z4=int6464#15,>in4=int64#3
2820# asm 2: movd <z4=%xmm14,>in4=%rdx
2821movd %xmm14,%rdx
2822
2823# qhasm: in5 = z5
2824# asm 1: movd <z5=int6464#1,>in5=int64#4
2825# asm 2: movd <z5=%xmm0,>in5=%rcx
2826movd %xmm0,%rcx
2827
2828# qhasm: in6 = z6
2829# asm 1: movd <z6=int6464#6,>in6=int64#5
2830# asm 2: movd <z6=%xmm5,>in6=%r8
2831movd %xmm5,%r8
2832
2833# qhasm: in7 = z7
2834# asm 1: movd <z7=int6464#9,>in7=int64#6
2835# asm 2: movd <z7=%xmm8,>in7=%r9
2836movd %xmm8,%r9
2837
2838# qhasm: z4 <<<= 96
2839# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2840# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2841pshufd $0x39,%xmm14,%xmm14
2842
2843# qhasm: z5 <<<= 96
2844# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2845# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2846pshufd $0x39,%xmm0,%xmm0
2847
2848# qhasm: z6 <<<= 96
2849# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2850# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2851pshufd $0x39,%xmm5,%xmm5
2852
2853# qhasm: z7 <<<= 96
2854# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2855# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2856pshufd $0x39,%xmm8,%xmm8
2857
2858# qhasm: (uint32) in4 ^= *(uint32 *) (m + 144)
2859# asm 1: xorl 144(<m=int64#2),<in4=int64#3d
2860# asm 2: xorl 144(<m=%rsi),<in4=%edx
2861xorl 144(%rsi),%edx
2862
2863# qhasm: (uint32) in5 ^= *(uint32 *) (m + 148)
2864# asm 1: xorl 148(<m=int64#2),<in5=int64#4d
2865# asm 2: xorl 148(<m=%rsi),<in5=%ecx
2866xorl 148(%rsi),%ecx
2867
2868# qhasm: (uint32) in6 ^= *(uint32 *) (m + 152)
2869# asm 1: xorl 152(<m=int64#2),<in6=int64#5d
2870# asm 2: xorl 152(<m=%rsi),<in6=%r8d
2871xorl 152(%rsi),%r8d
2872
2873# qhasm: (uint32) in7 ^= *(uint32 *) (m + 156)
2874# asm 1: xorl 156(<m=int64#2),<in7=int64#6d
2875# asm 2: xorl 156(<m=%rsi),<in7=%r9d
2876xorl 156(%rsi),%r9d
2877
2878# qhasm: *(uint32 *) (out + 144) = in4
2879# asm 1: movl <in4=int64#3d,144(<out=int64#1)
2880# asm 2: movl <in4=%edx,144(<out=%rdi)
2881movl %edx,144(%rdi)
2882
2883# qhasm: *(uint32 *) (out + 148) = in5
2884# asm 1: movl <in5=int64#4d,148(<out=int64#1)
2885# asm 2: movl <in5=%ecx,148(<out=%rdi)
2886movl %ecx,148(%rdi)
2887
2888# qhasm: *(uint32 *) (out + 152) = in6
2889# asm 1: movl <in6=int64#5d,152(<out=int64#1)
2890# asm 2: movl <in6=%r8d,152(<out=%rdi)
2891movl %r8d,152(%rdi)
2892
2893# qhasm: *(uint32 *) (out + 156) = in7
2894# asm 1: movl <in7=int64#6d,156(<out=int64#1)
2895# asm 2: movl <in7=%r9d,156(<out=%rdi)
2896movl %r9d,156(%rdi)
2897
2898# qhasm: in4 = z4
2899# asm 1: movd <z4=int6464#15,>in4=int64#3
2900# asm 2: movd <z4=%xmm14,>in4=%rdx
2901movd %xmm14,%rdx
2902
2903# qhasm: in5 = z5
2904# asm 1: movd <z5=int6464#1,>in5=int64#4
2905# asm 2: movd <z5=%xmm0,>in5=%rcx
2906movd %xmm0,%rcx
2907
2908# qhasm: in6 = z6
2909# asm 1: movd <z6=int6464#6,>in6=int64#5
2910# asm 2: movd <z6=%xmm5,>in6=%r8
2911movd %xmm5,%r8
2912
2913# qhasm: in7 = z7
2914# asm 1: movd <z7=int6464#9,>in7=int64#6
2915# asm 2: movd <z7=%xmm8,>in7=%r9
2916movd %xmm8,%r9
2917
2918# qhasm: (uint32) in4 ^= *(uint32 *) (m + 208)
2919# asm 1: xorl 208(<m=int64#2),<in4=int64#3d
2920# asm 2: xorl 208(<m=%rsi),<in4=%edx
2921xorl 208(%rsi),%edx
2922
2923# qhasm: (uint32) in5 ^= *(uint32 *) (m + 212)
2924# asm 1: xorl 212(<m=int64#2),<in5=int64#4d
2925# asm 2: xorl 212(<m=%rsi),<in5=%ecx
2926xorl 212(%rsi),%ecx
2927
2928# qhasm: (uint32) in6 ^= *(uint32 *) (m + 216)
2929# asm 1: xorl 216(<m=int64#2),<in6=int64#5d
2930# asm 2: xorl 216(<m=%rsi),<in6=%r8d
2931xorl 216(%rsi),%r8d
2932
2933# qhasm: (uint32) in7 ^= *(uint32 *) (m + 220)
2934# asm 1: xorl 220(<m=int64#2),<in7=int64#6d
2935# asm 2: xorl 220(<m=%rsi),<in7=%r9d
2936xorl 220(%rsi),%r9d
2937
2938# qhasm: *(uint32 *) (out + 208) = in4
2939# asm 1: movl <in4=int64#3d,208(<out=int64#1)
2940# asm 2: movl <in4=%edx,208(<out=%rdi)
2941movl %edx,208(%rdi)
2942
2943# qhasm: *(uint32 *) (out + 212) = in5
2944# asm 1: movl <in5=int64#4d,212(<out=int64#1)
2945# asm 2: movl <in5=%ecx,212(<out=%rdi)
2946movl %ecx,212(%rdi)
2947
2948# qhasm: *(uint32 *) (out + 216) = in6
2949# asm 1: movl <in6=int64#5d,216(<out=int64#1)
2950# asm 2: movl <in6=%r8d,216(<out=%rdi)
2951movl %r8d,216(%rdi)
2952
2953# qhasm: *(uint32 *) (out + 220) = in7
2954# asm 1: movl <in7=int64#6d,220(<out=int64#1)
2955# asm 2: movl <in7=%r9d,220(<out=%rdi)
2956movl %r9d,220(%rdi)
2957
2958# qhasm: uint32323232 z8 += orig8
2959# asm 1: paddd <orig8=stack128#19,<z8=int6464#16
2960# asm 2: paddd <orig8=288(%rsp),<z8=%xmm15
2961paddd 288(%rsp),%xmm15
2962
2963# qhasm: uint32323232 z9 += orig9
2964# asm 1: paddd <orig9=stack128#20,<z9=int6464#12
2965# asm 2: paddd <orig9=304(%rsp),<z9=%xmm11
2966paddd 304(%rsp),%xmm11
2967
2968# qhasm: uint32323232 z10 += orig10
2969# asm 1: paddd <orig10=stack128#6,<z10=int6464#2
2970# asm 2: paddd <orig10=80(%rsp),<z10=%xmm1
2971paddd 80(%rsp),%xmm1
2972
2973# qhasm: uint32323232 z11 += orig11
2974# asm 1: paddd <orig11=stack128#10,<z11=int6464#7
2975# asm 2: paddd <orig11=144(%rsp),<z11=%xmm6
2976paddd 144(%rsp),%xmm6
2977
2978# qhasm: in8 = z8
2979# asm 1: movd <z8=int6464#16,>in8=int64#3
2980# asm 2: movd <z8=%xmm15,>in8=%rdx
2981movd %xmm15,%rdx
2982
2983# qhasm: in9 = z9
2984# asm 1: movd <z9=int6464#12,>in9=int64#4
2985# asm 2: movd <z9=%xmm11,>in9=%rcx
2986movd %xmm11,%rcx
2987
2988# qhasm: in10 = z10
2989# asm 1: movd <z10=int6464#2,>in10=int64#5
2990# asm 2: movd <z10=%xmm1,>in10=%r8
2991movd %xmm1,%r8
2992
2993# qhasm: in11 = z11
2994# asm 1: movd <z11=int6464#7,>in11=int64#6
2995# asm 2: movd <z11=%xmm6,>in11=%r9
2996movd %xmm6,%r9
2997
2998# qhasm: z8 <<<= 96
2999# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3000# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3001pshufd $0x39,%xmm15,%xmm15
3002
3003# qhasm: z9 <<<= 96
3004# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3005# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3006pshufd $0x39,%xmm11,%xmm11
3007
3008# qhasm: z10 <<<= 96
3009# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3010# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3011pshufd $0x39,%xmm1,%xmm1
3012
3013# qhasm: z11 <<<= 96
3014# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3015# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3016pshufd $0x39,%xmm6,%xmm6
3017
3018# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
3019# asm 1: xorl 32(<m=int64#2),<in8=int64#3d
3020# asm 2: xorl 32(<m=%rsi),<in8=%edx
3021xorl 32(%rsi),%edx
3022
3023# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
3024# asm 1: xorl 36(<m=int64#2),<in9=int64#4d
3025# asm 2: xorl 36(<m=%rsi),<in9=%ecx
3026xorl 36(%rsi),%ecx
3027
3028# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
3029# asm 1: xorl 40(<m=int64#2),<in10=int64#5d
3030# asm 2: xorl 40(<m=%rsi),<in10=%r8d
3031xorl 40(%rsi),%r8d
3032
3033# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
3034# asm 1: xorl 44(<m=int64#2),<in11=int64#6d
3035# asm 2: xorl 44(<m=%rsi),<in11=%r9d
3036xorl 44(%rsi),%r9d
3037
3038# qhasm: *(uint32 *) (out + 32) = in8
3039# asm 1: movl <in8=int64#3d,32(<out=int64#1)
3040# asm 2: movl <in8=%edx,32(<out=%rdi)
3041movl %edx,32(%rdi)
3042
3043# qhasm: *(uint32 *) (out + 36) = in9
3044# asm 1: movl <in9=int64#4d,36(<out=int64#1)
3045# asm 2: movl <in9=%ecx,36(<out=%rdi)
3046movl %ecx,36(%rdi)
3047
3048# qhasm: *(uint32 *) (out + 40) = in10
3049# asm 1: movl <in10=int64#5d,40(<out=int64#1)
3050# asm 2: movl <in10=%r8d,40(<out=%rdi)
3051movl %r8d,40(%rdi)
3052
3053# qhasm: *(uint32 *) (out + 44) = in11
3054# asm 1: movl <in11=int64#6d,44(<out=int64#1)
3055# asm 2: movl <in11=%r9d,44(<out=%rdi)
3056movl %r9d,44(%rdi)
3057
3058# qhasm: in8 = z8
3059# asm 1: movd <z8=int6464#16,>in8=int64#3
3060# asm 2: movd <z8=%xmm15,>in8=%rdx
3061movd %xmm15,%rdx
3062
3063# qhasm: in9 = z9
3064# asm 1: movd <z9=int6464#12,>in9=int64#4
3065# asm 2: movd <z9=%xmm11,>in9=%rcx
3066movd %xmm11,%rcx
3067
3068# qhasm: in10 = z10
3069# asm 1: movd <z10=int6464#2,>in10=int64#5
3070# asm 2: movd <z10=%xmm1,>in10=%r8
3071movd %xmm1,%r8
3072
3073# qhasm: in11 = z11
3074# asm 1: movd <z11=int6464#7,>in11=int64#6
3075# asm 2: movd <z11=%xmm6,>in11=%r9
3076movd %xmm6,%r9
3077
3078# qhasm: z8 <<<= 96
3079# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3080# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3081pshufd $0x39,%xmm15,%xmm15
3082
3083# qhasm: z9 <<<= 96
3084# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3085# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3086pshufd $0x39,%xmm11,%xmm11
3087
3088# qhasm: z10 <<<= 96
3089# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3090# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3091pshufd $0x39,%xmm1,%xmm1
3092
3093# qhasm: z11 <<<= 96
3094# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3095# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3096pshufd $0x39,%xmm6,%xmm6
3097
3098# qhasm: (uint32) in8 ^= *(uint32 *) (m + 96)
3099# asm 1: xorl 96(<m=int64#2),<in8=int64#3d
3100# asm 2: xorl 96(<m=%rsi),<in8=%edx
3101xorl 96(%rsi),%edx
3102
3103# qhasm: (uint32) in9 ^= *(uint32 *) (m + 100)
3104# asm 1: xorl 100(<m=int64#2),<in9=int64#4d
3105# asm 2: xorl 100(<m=%rsi),<in9=%ecx
3106xorl 100(%rsi),%ecx
3107
3108# qhasm: (uint32) in10 ^= *(uint32 *) (m + 104)
3109# asm 1: xorl 104(<m=int64#2),<in10=int64#5d
3110# asm 2: xorl 104(<m=%rsi),<in10=%r8d
3111xorl 104(%rsi),%r8d
3112
3113# qhasm: (uint32) in11 ^= *(uint32 *) (m + 108)
3114# asm 1: xorl 108(<m=int64#2),<in11=int64#6d
3115# asm 2: xorl 108(<m=%rsi),<in11=%r9d
3116xorl 108(%rsi),%r9d
3117
3118# qhasm: *(uint32 *) (out + 96) = in8
3119# asm 1: movl <in8=int64#3d,96(<out=int64#1)
3120# asm 2: movl <in8=%edx,96(<out=%rdi)
3121movl %edx,96(%rdi)
3122
3123# qhasm: *(uint32 *) (out + 100) = in9
3124# asm 1: movl <in9=int64#4d,100(<out=int64#1)
3125# asm 2: movl <in9=%ecx,100(<out=%rdi)
3126movl %ecx,100(%rdi)
3127
3128# qhasm: *(uint32 *) (out + 104) = in10
3129# asm 1: movl <in10=int64#5d,104(<out=int64#1)
3130# asm 2: movl <in10=%r8d,104(<out=%rdi)
3131movl %r8d,104(%rdi)
3132
3133# qhasm: *(uint32 *) (out + 108) = in11
3134# asm 1: movl <in11=int64#6d,108(<out=int64#1)
3135# asm 2: movl <in11=%r9d,108(<out=%rdi)
3136movl %r9d,108(%rdi)
3137
3138# qhasm: in8 = z8
3139# asm 1: movd <z8=int6464#16,>in8=int64#3
3140# asm 2: movd <z8=%xmm15,>in8=%rdx
3141movd %xmm15,%rdx
3142
3143# qhasm: in9 = z9
3144# asm 1: movd <z9=int6464#12,>in9=int64#4
3145# asm 2: movd <z9=%xmm11,>in9=%rcx
3146movd %xmm11,%rcx
3147
3148# qhasm: in10 = z10
3149# asm 1: movd <z10=int6464#2,>in10=int64#5
3150# asm 2: movd <z10=%xmm1,>in10=%r8
3151movd %xmm1,%r8
3152
3153# qhasm: in11 = z11
3154# asm 1: movd <z11=int6464#7,>in11=int64#6
3155# asm 2: movd <z11=%xmm6,>in11=%r9
3156movd %xmm6,%r9
3157
3158# qhasm: z8 <<<= 96
3159# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3160# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3161pshufd $0x39,%xmm15,%xmm15
3162
3163# qhasm: z9 <<<= 96
3164# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3165# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3166pshufd $0x39,%xmm11,%xmm11
3167
3168# qhasm: z10 <<<= 96
3169# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3170# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3171pshufd $0x39,%xmm1,%xmm1
3172
3173# qhasm: z11 <<<= 96
3174# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3175# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3176pshufd $0x39,%xmm6,%xmm6
3177
3178# qhasm: (uint32) in8 ^= *(uint32 *) (m + 160)
3179# asm 1: xorl 160(<m=int64#2),<in8=int64#3d
3180# asm 2: xorl 160(<m=%rsi),<in8=%edx
3181xorl 160(%rsi),%edx
3182
3183# qhasm: (uint32) in9 ^= *(uint32 *) (m + 164)
3184# asm 1: xorl 164(<m=int64#2),<in9=int64#4d
3185# asm 2: xorl 164(<m=%rsi),<in9=%ecx
3186xorl 164(%rsi),%ecx
3187
3188# qhasm: (uint32) in10 ^= *(uint32 *) (m + 168)
3189# asm 1: xorl 168(<m=int64#2),<in10=int64#5d
3190# asm 2: xorl 168(<m=%rsi),<in10=%r8d
3191xorl 168(%rsi),%r8d
3192
3193# qhasm: (uint32) in11 ^= *(uint32 *) (m + 172)
3194# asm 1: xorl 172(<m=int64#2),<in11=int64#6d
3195# asm 2: xorl 172(<m=%rsi),<in11=%r9d
3196xorl 172(%rsi),%r9d
3197
3198# qhasm: *(uint32 *) (out + 160) = in8
3199# asm 1: movl <in8=int64#3d,160(<out=int64#1)
3200# asm 2: movl <in8=%edx,160(<out=%rdi)
3201movl %edx,160(%rdi)
3202
3203# qhasm: *(uint32 *) (out + 164) = in9
3204# asm 1: movl <in9=int64#4d,164(<out=int64#1)
3205# asm 2: movl <in9=%ecx,164(<out=%rdi)
3206movl %ecx,164(%rdi)
3207
3208# qhasm: *(uint32 *) (out + 168) = in10
3209# asm 1: movl <in10=int64#5d,168(<out=int64#1)
3210# asm 2: movl <in10=%r8d,168(<out=%rdi)
3211movl %r8d,168(%rdi)
3212
3213# qhasm: *(uint32 *) (out + 172) = in11
3214# asm 1: movl <in11=int64#6d,172(<out=int64#1)
3215# asm 2: movl <in11=%r9d,172(<out=%rdi)
3216movl %r9d,172(%rdi)
3217
3218# qhasm: in8 = z8
3219# asm 1: movd <z8=int6464#16,>in8=int64#3
3220# asm 2: movd <z8=%xmm15,>in8=%rdx
3221movd %xmm15,%rdx
3222
3223# qhasm: in9 = z9
3224# asm 1: movd <z9=int6464#12,>in9=int64#4
3225# asm 2: movd <z9=%xmm11,>in9=%rcx
3226movd %xmm11,%rcx
3227
3228# qhasm: in10 = z10
3229# asm 1: movd <z10=int6464#2,>in10=int64#5
3230# asm 2: movd <z10=%xmm1,>in10=%r8
3231movd %xmm1,%r8
3232
3233# qhasm: in11 = z11
3234# asm 1: movd <z11=int6464#7,>in11=int64#6
3235# asm 2: movd <z11=%xmm6,>in11=%r9
3236movd %xmm6,%r9
3237
3238# qhasm: (uint32) in8 ^= *(uint32 *) (m + 224)
3239# asm 1: xorl 224(<m=int64#2),<in8=int64#3d
3240# asm 2: xorl 224(<m=%rsi),<in8=%edx
3241xorl 224(%rsi),%edx
3242
3243# qhasm: (uint32) in9 ^= *(uint32 *) (m + 228)
3244# asm 1: xorl 228(<m=int64#2),<in9=int64#4d
3245# asm 2: xorl 228(<m=%rsi),<in9=%ecx
3246xorl 228(%rsi),%ecx
3247
3248# qhasm: (uint32) in10 ^= *(uint32 *) (m + 232)
3249# asm 1: xorl 232(<m=int64#2),<in10=int64#5d
3250# asm 2: xorl 232(<m=%rsi),<in10=%r8d
3251xorl 232(%rsi),%r8d
3252
3253# qhasm: (uint32) in11 ^= *(uint32 *) (m + 236)
3254# asm 1: xorl 236(<m=int64#2),<in11=int64#6d
3255# asm 2: xorl 236(<m=%rsi),<in11=%r9d
3256xorl 236(%rsi),%r9d
3257
3258# qhasm: *(uint32 *) (out + 224) = in8
3259# asm 1: movl <in8=int64#3d,224(<out=int64#1)
3260# asm 2: movl <in8=%edx,224(<out=%rdi)
3261movl %edx,224(%rdi)
3262
3263# qhasm: *(uint32 *) (out + 228) = in9
3264# asm 1: movl <in9=int64#4d,228(<out=int64#1)
3265# asm 2: movl <in9=%ecx,228(<out=%rdi)
3266movl %ecx,228(%rdi)
3267
3268# qhasm: *(uint32 *) (out + 232) = in10
3269# asm 1: movl <in10=int64#5d,232(<out=int64#1)
3270# asm 2: movl <in10=%r8d,232(<out=%rdi)
3271movl %r8d,232(%rdi)
3272
3273# qhasm: *(uint32 *) (out + 236) = in11
3274# asm 1: movl <in11=int64#6d,236(<out=int64#1)
3275# asm 2: movl <in11=%r9d,236(<out=%rdi)
3276movl %r9d,236(%rdi)
3277
3278# qhasm: uint32323232 z12 += orig12
3279# asm 1: paddd <orig12=stack128#11,<z12=int6464#14
3280# asm 2: paddd <orig12=160(%rsp),<z12=%xmm13
3281paddd 160(%rsp),%xmm13
3282
3283# qhasm: uint32323232 z13 += orig13
3284# asm 1: paddd <orig13=stack128#14,<z13=int6464#10
3285# asm 2: paddd <orig13=208(%rsp),<z13=%xmm9
3286paddd 208(%rsp),%xmm9
3287
3288# qhasm: uint32323232 z14 += orig14
3289# asm 1: paddd <orig14=stack128#17,<z14=int6464#4
3290# asm 2: paddd <orig14=256(%rsp),<z14=%xmm3
3291paddd 256(%rsp),%xmm3
3292
3293# qhasm: uint32323232 z15 += orig15
3294# asm 1: paddd <orig15=stack128#7,<z15=int6464#3
3295# asm 2: paddd <orig15=96(%rsp),<z15=%xmm2
3296paddd 96(%rsp),%xmm2
3297
3298# qhasm: in12 = z12
3299# asm 1: movd <z12=int6464#14,>in12=int64#3
3300# asm 2: movd <z12=%xmm13,>in12=%rdx
3301movd %xmm13,%rdx
3302
3303# qhasm: in13 = z13
3304# asm 1: movd <z13=int6464#10,>in13=int64#4
3305# asm 2: movd <z13=%xmm9,>in13=%rcx
3306movd %xmm9,%rcx
3307
3308# qhasm: in14 = z14
3309# asm 1: movd <z14=int6464#4,>in14=int64#5
3310# asm 2: movd <z14=%xmm3,>in14=%r8
3311movd %xmm3,%r8
3312
3313# qhasm: in15 = z15
3314# asm 1: movd <z15=int6464#3,>in15=int64#6
3315# asm 2: movd <z15=%xmm2,>in15=%r9
3316movd %xmm2,%r9
3317
3318# qhasm: z12 <<<= 96
3319# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3320# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3321pshufd $0x39,%xmm13,%xmm13
3322
3323# qhasm: z13 <<<= 96
3324# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3325# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3326pshufd $0x39,%xmm9,%xmm9
3327
3328# qhasm: z14 <<<= 96
3329# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3330# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3331pshufd $0x39,%xmm3,%xmm3
3332
3333# qhasm: z15 <<<= 96
3334# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3335# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3336pshufd $0x39,%xmm2,%xmm2
3337
3338# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
3339# asm 1: xorl 48(<m=int64#2),<in12=int64#3d
3340# asm 2: xorl 48(<m=%rsi),<in12=%edx
3341xorl 48(%rsi),%edx
3342
3343# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
3344# asm 1: xorl 52(<m=int64#2),<in13=int64#4d
3345# asm 2: xorl 52(<m=%rsi),<in13=%ecx
3346xorl 52(%rsi),%ecx
3347
3348# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
3349# asm 1: xorl 56(<m=int64#2),<in14=int64#5d
3350# asm 2: xorl 56(<m=%rsi),<in14=%r8d
3351xorl 56(%rsi),%r8d
3352
3353# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
3354# asm 1: xorl 60(<m=int64#2),<in15=int64#6d
3355# asm 2: xorl 60(<m=%rsi),<in15=%r9d
3356xorl 60(%rsi),%r9d
3357
3358# qhasm: *(uint32 *) (out + 48) = in12
3359# asm 1: movl <in12=int64#3d,48(<out=int64#1)
3360# asm 2: movl <in12=%edx,48(<out=%rdi)
3361movl %edx,48(%rdi)
3362
3363# qhasm: *(uint32 *) (out + 52) = in13
3364# asm 1: movl <in13=int64#4d,52(<out=int64#1)
3365# asm 2: movl <in13=%ecx,52(<out=%rdi)
3366movl %ecx,52(%rdi)
3367
3368# qhasm: *(uint32 *) (out + 56) = in14
3369# asm 1: movl <in14=int64#5d,56(<out=int64#1)
3370# asm 2: movl <in14=%r8d,56(<out=%rdi)
3371movl %r8d,56(%rdi)
3372
3373# qhasm: *(uint32 *) (out + 60) = in15
3374# asm 1: movl <in15=int64#6d,60(<out=int64#1)
3375# asm 2: movl <in15=%r9d,60(<out=%rdi)
3376movl %r9d,60(%rdi)
3377
3378# qhasm: in12 = z12
3379# asm 1: movd <z12=int6464#14,>in12=int64#3
3380# asm 2: movd <z12=%xmm13,>in12=%rdx
3381movd %xmm13,%rdx
3382
3383# qhasm: in13 = z13
3384# asm 1: movd <z13=int6464#10,>in13=int64#4
3385# asm 2: movd <z13=%xmm9,>in13=%rcx
3386movd %xmm9,%rcx
3387
3388# qhasm: in14 = z14
3389# asm 1: movd <z14=int6464#4,>in14=int64#5
3390# asm 2: movd <z14=%xmm3,>in14=%r8
3391movd %xmm3,%r8
3392
3393# qhasm: in15 = z15
3394# asm 1: movd <z15=int6464#3,>in15=int64#6
3395# asm 2: movd <z15=%xmm2,>in15=%r9
3396movd %xmm2,%r9
3397
3398# qhasm: z12 <<<= 96
3399# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3400# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3401pshufd $0x39,%xmm13,%xmm13
3402
3403# qhasm: z13 <<<= 96
3404# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3405# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3406pshufd $0x39,%xmm9,%xmm9
3407
3408# qhasm: z14 <<<= 96
3409# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3410# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3411pshufd $0x39,%xmm3,%xmm3
3412
3413# qhasm: z15 <<<= 96
3414# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3415# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3416pshufd $0x39,%xmm2,%xmm2
3417
3418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 112)
3419# asm 1: xorl 112(<m=int64#2),<in12=int64#3d
3420# asm 2: xorl 112(<m=%rsi),<in12=%edx
3421xorl 112(%rsi),%edx
3422
3423# qhasm: (uint32) in13 ^= *(uint32 *) (m + 116)
3424# asm 1: xorl 116(<m=int64#2),<in13=int64#4d
3425# asm 2: xorl 116(<m=%rsi),<in13=%ecx
3426xorl 116(%rsi),%ecx
3427
3428# qhasm: (uint32) in14 ^= *(uint32 *) (m + 120)
3429# asm 1: xorl 120(<m=int64#2),<in14=int64#5d
3430# asm 2: xorl 120(<m=%rsi),<in14=%r8d
3431xorl 120(%rsi),%r8d
3432
3433# qhasm: (uint32) in15 ^= *(uint32 *) (m + 124)
3434# asm 1: xorl 124(<m=int64#2),<in15=int64#6d
3435# asm 2: xorl 124(<m=%rsi),<in15=%r9d
3436xorl 124(%rsi),%r9d
3437
3438# qhasm: *(uint32 *) (out + 112) = in12
3439# asm 1: movl <in12=int64#3d,112(<out=int64#1)
3440# asm 2: movl <in12=%edx,112(<out=%rdi)
3441movl %edx,112(%rdi)
3442
3443# qhasm: *(uint32 *) (out + 116) = in13
3444# asm 1: movl <in13=int64#4d,116(<out=int64#1)
3445# asm 2: movl <in13=%ecx,116(<out=%rdi)
3446movl %ecx,116(%rdi)
3447
3448# qhasm: *(uint32 *) (out + 120) = in14
3449# asm 1: movl <in14=int64#5d,120(<out=int64#1)
3450# asm 2: movl <in14=%r8d,120(<out=%rdi)
3451movl %r8d,120(%rdi)
3452
3453# qhasm: *(uint32 *) (out + 124) = in15
3454# asm 1: movl <in15=int64#6d,124(<out=int64#1)
3455# asm 2: movl <in15=%r9d,124(<out=%rdi)
3456movl %r9d,124(%rdi)
3457
3458# qhasm: in12 = z12
3459# asm 1: movd <z12=int6464#14,>in12=int64#3
3460# asm 2: movd <z12=%xmm13,>in12=%rdx
3461movd %xmm13,%rdx
3462
3463# qhasm: in13 = z13
3464# asm 1: movd <z13=int6464#10,>in13=int64#4
3465# asm 2: movd <z13=%xmm9,>in13=%rcx
3466movd %xmm9,%rcx
3467
3468# qhasm: in14 = z14
3469# asm 1: movd <z14=int6464#4,>in14=int64#5
3470# asm 2: movd <z14=%xmm3,>in14=%r8
3471movd %xmm3,%r8
3472
3473# qhasm: in15 = z15
3474# asm 1: movd <z15=int6464#3,>in15=int64#6
3475# asm 2: movd <z15=%xmm2,>in15=%r9
3476movd %xmm2,%r9
3477
3478# qhasm: z12 <<<= 96
3479# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3480# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3481pshufd $0x39,%xmm13,%xmm13
3482
3483# qhasm: z13 <<<= 96
3484# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3485# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3486pshufd $0x39,%xmm9,%xmm9
3487
3488# qhasm: z14 <<<= 96
3489# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3490# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3491pshufd $0x39,%xmm3,%xmm3
3492
3493# qhasm: z15 <<<= 96
3494# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3495# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3496pshufd $0x39,%xmm2,%xmm2
3497
3498# qhasm: (uint32) in12 ^= *(uint32 *) (m + 176)
3499# asm 1: xorl 176(<m=int64#2),<in12=int64#3d
3500# asm 2: xorl 176(<m=%rsi),<in12=%edx
3501xorl 176(%rsi),%edx
3502
3503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 180)
3504# asm 1: xorl 180(<m=int64#2),<in13=int64#4d
3505# asm 2: xorl 180(<m=%rsi),<in13=%ecx
3506xorl 180(%rsi),%ecx
3507
3508# qhasm: (uint32) in14 ^= *(uint32 *) (m + 184)
3509# asm 1: xorl 184(<m=int64#2),<in14=int64#5d
3510# asm 2: xorl 184(<m=%rsi),<in14=%r8d
3511xorl 184(%rsi),%r8d
3512
3513# qhasm: (uint32) in15 ^= *(uint32 *) (m + 188)
3514# asm 1: xorl 188(<m=int64#2),<in15=int64#6d
3515# asm 2: xorl 188(<m=%rsi),<in15=%r9d
3516xorl 188(%rsi),%r9d
3517
3518# qhasm: *(uint32 *) (out + 176) = in12
3519# asm 1: movl <in12=int64#3d,176(<out=int64#1)
3520# asm 2: movl <in12=%edx,176(<out=%rdi)
3521movl %edx,176(%rdi)
3522
3523# qhasm: *(uint32 *) (out + 180) = in13
3524# asm 1: movl <in13=int64#4d,180(<out=int64#1)
3525# asm 2: movl <in13=%ecx,180(<out=%rdi)
3526movl %ecx,180(%rdi)
3527
3528# qhasm: *(uint32 *) (out + 184) = in14
3529# asm 1: movl <in14=int64#5d,184(<out=int64#1)
3530# asm 2: movl <in14=%r8d,184(<out=%rdi)
3531movl %r8d,184(%rdi)
3532
3533# qhasm: *(uint32 *) (out + 188) = in15
3534# asm 1: movl <in15=int64#6d,188(<out=int64#1)
3535# asm 2: movl <in15=%r9d,188(<out=%rdi)
3536movl %r9d,188(%rdi)
3537
3538# qhasm: in12 = z12
3539# asm 1: movd <z12=int6464#14,>in12=int64#3
3540# asm 2: movd <z12=%xmm13,>in12=%rdx
3541movd %xmm13,%rdx
3542
3543# qhasm: in13 = z13
3544# asm 1: movd <z13=int6464#10,>in13=int64#4
3545# asm 2: movd <z13=%xmm9,>in13=%rcx
3546movd %xmm9,%rcx
3547
3548# qhasm: in14 = z14
3549# asm 1: movd <z14=int6464#4,>in14=int64#5
3550# asm 2: movd <z14=%xmm3,>in14=%r8
3551movd %xmm3,%r8
3552
3553# qhasm: in15 = z15
3554# asm 1: movd <z15=int6464#3,>in15=int64#6
3555# asm 2: movd <z15=%xmm2,>in15=%r9
3556movd %xmm2,%r9
3557
3558# qhasm: (uint32) in12 ^= *(uint32 *) (m + 240)
3559# asm 1: xorl 240(<m=int64#2),<in12=int64#3d
3560# asm 2: xorl 240(<m=%rsi),<in12=%edx
3561xorl 240(%rsi),%edx
3562
3563# qhasm: (uint32) in13 ^= *(uint32 *) (m + 244)
3564# asm 1: xorl 244(<m=int64#2),<in13=int64#4d
3565# asm 2: xorl 244(<m=%rsi),<in13=%ecx
3566xorl 244(%rsi),%ecx
3567
3568# qhasm: (uint32) in14 ^= *(uint32 *) (m + 248)
3569# asm 1: xorl 248(<m=int64#2),<in14=int64#5d
3570# asm 2: xorl 248(<m=%rsi),<in14=%r8d
3571xorl 248(%rsi),%r8d
3572
3573# qhasm: (uint32) in15 ^= *(uint32 *) (m + 252)
3574# asm 1: xorl 252(<m=int64#2),<in15=int64#6d
3575# asm 2: xorl 252(<m=%rsi),<in15=%r9d
3576xorl 252(%rsi),%r9d
3577
3578# qhasm: *(uint32 *) (out + 240) = in12
3579# asm 1: movl <in12=int64#3d,240(<out=int64#1)
3580# asm 2: movl <in12=%edx,240(<out=%rdi)
3581movl %edx,240(%rdi)
3582
3583# qhasm: *(uint32 *) (out + 244) = in13
3584# asm 1: movl <in13=int64#4d,244(<out=int64#1)
3585# asm 2: movl <in13=%ecx,244(<out=%rdi)
3586movl %ecx,244(%rdi)
3587
3588# qhasm: *(uint32 *) (out + 248) = in14
3589# asm 1: movl <in14=int64#5d,248(<out=int64#1)
3590# asm 2: movl <in14=%r8d,248(<out=%rdi)
3591movl %r8d,248(%rdi)
3592
3593# qhasm: *(uint32 *) (out + 252) = in15
3594# asm 1: movl <in15=int64#6d,252(<out=int64#1)
3595# asm 2: movl <in15=%r9d,252(<out=%rdi)
3596movl %r9d,252(%rdi)
3597
3598# qhasm: bytes = bytes_backup
3599# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
3600# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
3601movq 408(%rsp),%r9
3602
3603# qhasm: bytes -= 256
3604# asm 1: sub $256,<bytes=int64#6
3605# asm 2: sub $256,<bytes=%r9
3606sub $256,%r9
3607
3608# qhasm: m += 256
3609# asm 1: add $256,<m=int64#2
3610# asm 2: add $256,<m=%rsi
3611add $256,%rsi
3612
3613# qhasm: out += 256
3614# asm 1: add $256,<out=int64#1
3615# asm 2: add $256,<out=%rdi
3616add $256,%rdi
3617
3618# qhasm: unsigned<? bytes - 256
3619# asm 1: cmp $256,<bytes=int64#6
3620# asm 2: cmp $256,<bytes=%r9
3621cmp $256,%r9
3622# comment:fp stack unchanged by jump
3623
3624# qhasm: goto bytesatleast256 if !unsigned<
3625jae ._bytesatleast256
3626
3627# qhasm: unsigned>? bytes - 0
3628# asm 1: cmp $0,<bytes=int64#6
3629# asm 2: cmp $0,<bytes=%r9
3630cmp $0,%r9
3631# comment:fp stack unchanged by jump
3632
3633# qhasm: goto done if !unsigned>
3634jbe ._done
3635# comment:fp stack unchanged by fallthrough
3636
3637# qhasm: bytesbetween1and255:
3638._bytesbetween1and255:
3639
3640# qhasm: unsigned<? bytes - 64
3641# asm 1: cmp $64,<bytes=int64#6
3642# asm 2: cmp $64,<bytes=%r9
3643cmp $64,%r9
3644# comment:fp stack unchanged by jump
3645
3646# qhasm: goto nocopy if !unsigned<
3647jae ._nocopy
3648
3649# qhasm: ctarget = out
3650# asm 1: mov <out=int64#1,>ctarget=int64#3
3651# asm 2: mov <out=%rdi,>ctarget=%rdx
3652mov %rdi,%rdx
3653
3654# qhasm: out = &tmp
3655# asm 1: leaq <tmp=stack512#1,>out=int64#1
3656# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3657leaq 416(%rsp),%rdi
3658
3659# qhasm: i = bytes
3660# asm 1: mov <bytes=int64#6,>i=int64#4
3661# asm 2: mov <bytes=%r9,>i=%rcx
3662mov %r9,%rcx
3663
3664# qhasm: while (i) { *out++ = *m++; --i }
3665rep movsb
3666
3667# qhasm: out = &tmp
3668# asm 1: leaq <tmp=stack512#1,>out=int64#1
3669# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3670leaq 416(%rsp),%rdi
3671
3672# qhasm: m = &tmp
3673# asm 1: leaq <tmp=stack512#1,>m=int64#2
3674# asm 2: leaq <tmp=416(%rsp),>m=%rsi
3675leaq 416(%rsp),%rsi
3676# comment:fp stack unchanged by fallthrough
3677
3678# qhasm: nocopy:
3679._nocopy:
3680
3681# qhasm: bytes_backup = bytes
3682# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
3683# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
3684movq %r9,408(%rsp)
3685
3686# qhasm: diag0 = x0
3687# asm 1: movdqa <x0=stack128#4,>diag0=int6464#1
3688# asm 2: movdqa <x0=48(%rsp),>diag0=%xmm0
3689movdqa 48(%rsp),%xmm0
3690
3691# qhasm: diag1 = x1
3692# asm 1: movdqa <x1=stack128#1,>diag1=int6464#2
3693# asm 2: movdqa <x1=0(%rsp),>diag1=%xmm1
3694movdqa 0(%rsp),%xmm1
3695
3696# qhasm: diag2 = x2
3697# asm 1: movdqa <x2=stack128#2,>diag2=int6464#3
3698# asm 2: movdqa <x2=16(%rsp),>diag2=%xmm2
3699movdqa 16(%rsp),%xmm2
3700
3701# qhasm: diag3 = x3
3702# asm 1: movdqa <x3=stack128#3,>diag3=int6464#4
3703# asm 2: movdqa <x3=32(%rsp),>diag3=%xmm3
3704movdqa 32(%rsp),%xmm3
3705
3706# qhasm: a0 = diag1
3707# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3708# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3709movdqa %xmm1,%xmm4
3710
3711# qhasm: i = 8
3712# asm 1: mov $8,>i=int64#4
3713# asm 2: mov $8,>i=%rcx
3714mov $8,%rcx
3715
3716# qhasm: mainloop2:
3717._mainloop2:
3718
3719# qhasm: uint32323232 a0 += diag0
3720# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
3721# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
3722paddd %xmm0,%xmm4
3723
3724# qhasm: a1 = diag0
3725# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
3726# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
3727movdqa %xmm0,%xmm5
3728
3729# qhasm: b0 = a0
3730# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
3731# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
3732movdqa %xmm4,%xmm6
3733
3734# qhasm: uint32323232 a0 <<= 7
3735# asm 1: pslld $7,<a0=int6464#5
3736# asm 2: pslld $7,<a0=%xmm4
3737pslld $7,%xmm4
3738
3739# qhasm: uint32323232 b0 >>= 25
3740# asm 1: psrld $25,<b0=int6464#7
3741# asm 2: psrld $25,<b0=%xmm6
3742psrld $25,%xmm6
3743
3744# qhasm: diag3 ^= a0
3745# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
3746# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
3747pxor %xmm4,%xmm3
3748
3749# qhasm: diag3 ^= b0
3750# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
3751# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
3752pxor %xmm6,%xmm3
3753
3754# qhasm: uint32323232 a1 += diag3
3755# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
3756# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
3757paddd %xmm3,%xmm5
3758
3759# qhasm: a2 = diag3
3760# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
3761# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
3762movdqa %xmm3,%xmm4
3763
3764# qhasm: b1 = a1
3765# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
3766# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
3767movdqa %xmm5,%xmm6
3768
3769# qhasm: uint32323232 a1 <<= 9
3770# asm 1: pslld $9,<a1=int6464#6
3771# asm 2: pslld $9,<a1=%xmm5
3772pslld $9,%xmm5
3773
3774# qhasm: uint32323232 b1 >>= 23
3775# asm 1: psrld $23,<b1=int6464#7
3776# asm 2: psrld $23,<b1=%xmm6
3777psrld $23,%xmm6
3778
3779# qhasm: diag2 ^= a1
3780# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
3781# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
3782pxor %xmm5,%xmm2
3783
3784# qhasm: diag3 <<<= 32
3785# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
3786# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
3787pshufd $0x93,%xmm3,%xmm3
3788
3789# qhasm: diag2 ^= b1
3790# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
3791# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
3792pxor %xmm6,%xmm2
3793
3794# qhasm: uint32323232 a2 += diag2
3795# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
3796# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
3797paddd %xmm2,%xmm4
3798
3799# qhasm: a3 = diag2
3800# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
3801# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
3802movdqa %xmm2,%xmm5
3803
3804# qhasm: b2 = a2
3805# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
3806# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
3807movdqa %xmm4,%xmm6
3808
3809# qhasm: uint32323232 a2 <<= 13
3810# asm 1: pslld $13,<a2=int6464#5
3811# asm 2: pslld $13,<a2=%xmm4
3812pslld $13,%xmm4
3813
3814# qhasm: uint32323232 b2 >>= 19
3815# asm 1: psrld $19,<b2=int6464#7
3816# asm 2: psrld $19,<b2=%xmm6
3817psrld $19,%xmm6
3818
3819# qhasm: diag1 ^= a2
3820# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
3821# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
3822pxor %xmm4,%xmm1
3823
3824# qhasm: diag2 <<<= 64
3825# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3826# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3827pshufd $0x4e,%xmm2,%xmm2
3828
3829# qhasm: diag1 ^= b2
3830# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
3831# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
3832pxor %xmm6,%xmm1
3833
3834# qhasm: uint32323232 a3 += diag1
3835# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
3836# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
3837paddd %xmm1,%xmm5
3838
3839# qhasm: a4 = diag3
3840# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
3841# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
3842movdqa %xmm3,%xmm4
3843
3844# qhasm: b3 = a3
3845# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
3846# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
3847movdqa %xmm5,%xmm6
3848
3849# qhasm: uint32323232 a3 <<= 18
3850# asm 1: pslld $18,<a3=int6464#6
3851# asm 2: pslld $18,<a3=%xmm5
3852pslld $18,%xmm5
3853
3854# qhasm: uint32323232 b3 >>= 14
3855# asm 1: psrld $14,<b3=int6464#7
3856# asm 2: psrld $14,<b3=%xmm6
3857psrld $14,%xmm6
3858
3859# qhasm: diag0 ^= a3
3860# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
3861# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
3862pxor %xmm5,%xmm0
3863
3864# qhasm: diag1 <<<= 96
3865# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
3866# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
3867pshufd $0x39,%xmm1,%xmm1
3868
3869# qhasm: diag0 ^= b3
3870# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
3871# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
3872pxor %xmm6,%xmm0
3873
3874# qhasm: uint32323232 a4 += diag0
3875# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
3876# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
3877paddd %xmm0,%xmm4
3878
3879# qhasm: a5 = diag0
3880# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
3881# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
3882movdqa %xmm0,%xmm5
3883
3884# qhasm: b4 = a4
3885# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
3886# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
3887movdqa %xmm4,%xmm6
3888
3889# qhasm: uint32323232 a4 <<= 7
3890# asm 1: pslld $7,<a4=int6464#5
3891# asm 2: pslld $7,<a4=%xmm4
3892pslld $7,%xmm4
3893
3894# qhasm: uint32323232 b4 >>= 25
3895# asm 1: psrld $25,<b4=int6464#7
3896# asm 2: psrld $25,<b4=%xmm6
3897psrld $25,%xmm6
3898
3899# qhasm: diag1 ^= a4
3900# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
3901# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
3902pxor %xmm4,%xmm1
3903
3904# qhasm: diag1 ^= b4
3905# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
3906# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
3907pxor %xmm6,%xmm1
3908
3909# qhasm: uint32323232 a5 += diag1
3910# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
3911# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
3912paddd %xmm1,%xmm5
3913
3914# qhasm: a6 = diag1
3915# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
3916# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
3917movdqa %xmm1,%xmm4
3918
3919# qhasm: b5 = a5
3920# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
3921# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
3922movdqa %xmm5,%xmm6
3923
3924# qhasm: uint32323232 a5 <<= 9
3925# asm 1: pslld $9,<a5=int6464#6
3926# asm 2: pslld $9,<a5=%xmm5
3927pslld $9,%xmm5
3928
3929# qhasm: uint32323232 b5 >>= 23
3930# asm 1: psrld $23,<b5=int6464#7
3931# asm 2: psrld $23,<b5=%xmm6
3932psrld $23,%xmm6
3933
3934# qhasm: diag2 ^= a5
3935# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
3936# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
3937pxor %xmm5,%xmm2
3938
3939# qhasm: diag1 <<<= 32
3940# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
3941# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
3942pshufd $0x93,%xmm1,%xmm1
3943
3944# qhasm: diag2 ^= b5
3945# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
3946# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
3947pxor %xmm6,%xmm2
3948
3949# qhasm: uint32323232 a6 += diag2
3950# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
3951# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
3952paddd %xmm2,%xmm4
3953
3954# qhasm: a7 = diag2
3955# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
3956# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
3957movdqa %xmm2,%xmm5
3958
3959# qhasm: b6 = a6
3960# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
3961# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
3962movdqa %xmm4,%xmm6
3963
3964# qhasm: uint32323232 a6 <<= 13
3965# asm 1: pslld $13,<a6=int6464#5
3966# asm 2: pslld $13,<a6=%xmm4
3967pslld $13,%xmm4
3968
3969# qhasm: uint32323232 b6 >>= 19
3970# asm 1: psrld $19,<b6=int6464#7
3971# asm 2: psrld $19,<b6=%xmm6
3972psrld $19,%xmm6
3973
3974# qhasm: diag3 ^= a6
3975# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
3976# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
3977pxor %xmm4,%xmm3
3978
3979# qhasm: diag2 <<<= 64
3980# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3981# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3982pshufd $0x4e,%xmm2,%xmm2
3983
3984# qhasm: diag3 ^= b6
3985# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
3986# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
3987pxor %xmm6,%xmm3
3988
3989# qhasm: uint32323232 a7 += diag3
3990# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
3991# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
3992paddd %xmm3,%xmm5
3993
3994# qhasm: a0 = diag1
3995# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3996# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3997movdqa %xmm1,%xmm4
3998
3999# qhasm: b7 = a7
4000# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4001# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4002movdqa %xmm5,%xmm6
4003
4004# qhasm: uint32323232 a7 <<= 18
4005# asm 1: pslld $18,<a7=int6464#6
4006# asm 2: pslld $18,<a7=%xmm5
4007pslld $18,%xmm5
4008
4009# qhasm: uint32323232 b7 >>= 14
4010# asm 1: psrld $14,<b7=int6464#7
4011# asm 2: psrld $14,<b7=%xmm6
4012psrld $14,%xmm6
4013
4014# qhasm: diag0 ^= a7
4015# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4016# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4017pxor %xmm5,%xmm0
4018
4019# qhasm: diag3 <<<= 96
4020# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4021# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4022pshufd $0x39,%xmm3,%xmm3
4023
4024# qhasm: diag0 ^= b7
4025# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4026# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4027pxor %xmm6,%xmm0
4028
4029# qhasm: uint32323232 a0 += diag0
4030# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4031# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4032paddd %xmm0,%xmm4
4033
4034# qhasm: a1 = diag0
4035# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4036# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4037movdqa %xmm0,%xmm5
4038
4039# qhasm: b0 = a0
4040# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4041# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4042movdqa %xmm4,%xmm6
4043
4044# qhasm: uint32323232 a0 <<= 7
4045# asm 1: pslld $7,<a0=int6464#5
4046# asm 2: pslld $7,<a0=%xmm4
4047pslld $7,%xmm4
4048
4049# qhasm: uint32323232 b0 >>= 25
4050# asm 1: psrld $25,<b0=int6464#7
4051# asm 2: psrld $25,<b0=%xmm6
4052psrld $25,%xmm6
4053
4054# qhasm: diag3 ^= a0
4055# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4056# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4057pxor %xmm4,%xmm3
4058
4059# qhasm: diag3 ^= b0
4060# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4061# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4062pxor %xmm6,%xmm3
4063
4064# qhasm: uint32323232 a1 += diag3
4065# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4066# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4067paddd %xmm3,%xmm5
4068
4069# qhasm: a2 = diag3
4070# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4071# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4072movdqa %xmm3,%xmm4
4073
4074# qhasm: b1 = a1
4075# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4076# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4077movdqa %xmm5,%xmm6
4078
4079# qhasm: uint32323232 a1 <<= 9
4080# asm 1: pslld $9,<a1=int6464#6
4081# asm 2: pslld $9,<a1=%xmm5
4082pslld $9,%xmm5
4083
4084# qhasm: uint32323232 b1 >>= 23
4085# asm 1: psrld $23,<b1=int6464#7
4086# asm 2: psrld $23,<b1=%xmm6
4087psrld $23,%xmm6
4088
4089# qhasm: diag2 ^= a1
4090# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4091# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4092pxor %xmm5,%xmm2
4093
4094# qhasm: diag3 <<<= 32
4095# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4096# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4097pshufd $0x93,%xmm3,%xmm3
4098
4099# qhasm: diag2 ^= b1
4100# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4101# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4102pxor %xmm6,%xmm2
4103
4104# qhasm: uint32323232 a2 += diag2
4105# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4106# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4107paddd %xmm2,%xmm4
4108
4109# qhasm: a3 = diag2
4110# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4111# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4112movdqa %xmm2,%xmm5
4113
4114# qhasm: b2 = a2
4115# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4116# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4117movdqa %xmm4,%xmm6
4118
4119# qhasm: uint32323232 a2 <<= 13
4120# asm 1: pslld $13,<a2=int6464#5
4121# asm 2: pslld $13,<a2=%xmm4
4122pslld $13,%xmm4
4123
4124# qhasm: uint32323232 b2 >>= 19
4125# asm 1: psrld $19,<b2=int6464#7
4126# asm 2: psrld $19,<b2=%xmm6
4127psrld $19,%xmm6
4128
4129# qhasm: diag1 ^= a2
4130# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4131# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4132pxor %xmm4,%xmm1
4133
4134# qhasm: diag2 <<<= 64
4135# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4136# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4137pshufd $0x4e,%xmm2,%xmm2
4138
4139# qhasm: diag1 ^= b2
4140# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4141# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4142pxor %xmm6,%xmm1
4143
4144# qhasm: uint32323232 a3 += diag1
4145# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4146# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4147paddd %xmm1,%xmm5
4148
4149# qhasm: a4 = diag3
4150# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4151# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4152movdqa %xmm3,%xmm4
4153
4154# qhasm: b3 = a3
4155# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4156# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4157movdqa %xmm5,%xmm6
4158
4159# qhasm: uint32323232 a3 <<= 18
4160# asm 1: pslld $18,<a3=int6464#6
4161# asm 2: pslld $18,<a3=%xmm5
4162pslld $18,%xmm5
4163
4164# qhasm: uint32323232 b3 >>= 14
4165# asm 1: psrld $14,<b3=int6464#7
4166# asm 2: psrld $14,<b3=%xmm6
4167psrld $14,%xmm6
4168
4169# qhasm: diag0 ^= a3
4170# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4171# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4172pxor %xmm5,%xmm0
4173
4174# qhasm: diag1 <<<= 96
4175# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4176# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4177pshufd $0x39,%xmm1,%xmm1
4178
4179# qhasm: diag0 ^= b3
4180# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4181# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4182pxor %xmm6,%xmm0
4183
4184# qhasm: uint32323232 a4 += diag0
4185# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4186# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4187paddd %xmm0,%xmm4
4188
4189# qhasm: a5 = diag0
4190# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4191# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4192movdqa %xmm0,%xmm5
4193
4194# qhasm: b4 = a4
4195# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4196# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4197movdqa %xmm4,%xmm6
4198
4199# qhasm: uint32323232 a4 <<= 7
4200# asm 1: pslld $7,<a4=int6464#5
4201# asm 2: pslld $7,<a4=%xmm4
4202pslld $7,%xmm4
4203
4204# qhasm: uint32323232 b4 >>= 25
4205# asm 1: psrld $25,<b4=int6464#7
4206# asm 2: psrld $25,<b4=%xmm6
4207psrld $25,%xmm6
4208
4209# qhasm: diag1 ^= a4
4210# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4211# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4212pxor %xmm4,%xmm1
4213
4214# qhasm: diag1 ^= b4
4215# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4216# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4217pxor %xmm6,%xmm1
4218
4219# qhasm: uint32323232 a5 += diag1
4220# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4221# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4222paddd %xmm1,%xmm5
4223
4224# qhasm: a6 = diag1
4225# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4226# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4227movdqa %xmm1,%xmm4
4228
4229# qhasm: b5 = a5
4230# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4231# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4232movdqa %xmm5,%xmm6
4233
4234# qhasm: uint32323232 a5 <<= 9
4235# asm 1: pslld $9,<a5=int6464#6
4236# asm 2: pslld $9,<a5=%xmm5
4237pslld $9,%xmm5
4238
4239# qhasm: uint32323232 b5 >>= 23
4240# asm 1: psrld $23,<b5=int6464#7
4241# asm 2: psrld $23,<b5=%xmm6
4242psrld $23,%xmm6
4243
4244# qhasm: diag2 ^= a5
4245# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4246# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4247pxor %xmm5,%xmm2
4248
4249# qhasm: diag1 <<<= 32
4250# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4251# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4252pshufd $0x93,%xmm1,%xmm1
4253
4254# qhasm: diag2 ^= b5
4255# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4256# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4257pxor %xmm6,%xmm2
4258
4259# qhasm: uint32323232 a6 += diag2
4260# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4261# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4262paddd %xmm2,%xmm4
4263
4264# qhasm: a7 = diag2
4265# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4266# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4267movdqa %xmm2,%xmm5
4268
4269# qhasm: b6 = a6
4270# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4271# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4272movdqa %xmm4,%xmm6
4273
4274# qhasm: uint32323232 a6 <<= 13
4275# asm 1: pslld $13,<a6=int6464#5
4276# asm 2: pslld $13,<a6=%xmm4
4277pslld $13,%xmm4
4278
4279# qhasm: uint32323232 b6 >>= 19
4280# asm 1: psrld $19,<b6=int6464#7
4281# asm 2: psrld $19,<b6=%xmm6
4282psrld $19,%xmm6
4283
4284# qhasm: diag3 ^= a6
4285# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4286# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4287pxor %xmm4,%xmm3
4288
4289# qhasm: diag2 <<<= 64
4290# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4291# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4292pshufd $0x4e,%xmm2,%xmm2
4293
4294# qhasm: diag3 ^= b6
4295# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4296# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4297pxor %xmm6,%xmm3
4298
4299# qhasm: unsigned>? i -= 4
4300# asm 1: sub $4,<i=int64#4
4301# asm 2: sub $4,<i=%rcx
4302sub $4,%rcx
4303
4304# qhasm: uint32323232 a7 += diag3
4305# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4306# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4307paddd %xmm3,%xmm5
4308
4309# qhasm: a0 = diag1
4310# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4311# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4312movdqa %xmm1,%xmm4
4313
4314# qhasm: b7 = a7
4315# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4316# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4317movdqa %xmm5,%xmm6
4318
4319# qhasm: uint32323232 a7 <<= 18
4320# asm 1: pslld $18,<a7=int6464#6
4321# asm 2: pslld $18,<a7=%xmm5
4322pslld $18,%xmm5
4323
4324# qhasm: b0 = 0
4325# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4326# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4327pxor %xmm7,%xmm7
4328
4329# qhasm: uint32323232 b7 >>= 14
4330# asm 1: psrld $14,<b7=int6464#7
4331# asm 2: psrld $14,<b7=%xmm6
4332psrld $14,%xmm6
4333
4334# qhasm: diag0 ^= a7
4335# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4336# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4337pxor %xmm5,%xmm0
4338
4339# qhasm: diag3 <<<= 96
4340# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4341# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4342pshufd $0x39,%xmm3,%xmm3
4343
4344# qhasm: diag0 ^= b7
4345# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4346# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4347pxor %xmm6,%xmm0
4348# comment:fp stack unchanged by jump
4349
4350# qhasm: goto mainloop2 if unsigned>
4351ja ._mainloop2
4352
4353# qhasm: uint32323232 diag0 += x0
4354# asm 1: paddd <x0=stack128#4,<diag0=int6464#1
4355# asm 2: paddd <x0=48(%rsp),<diag0=%xmm0
4356paddd 48(%rsp),%xmm0
4357
4358# qhasm: uint32323232 diag1 += x1
4359# asm 1: paddd <x1=stack128#1,<diag1=int6464#2
4360# asm 2: paddd <x1=0(%rsp),<diag1=%xmm1
4361paddd 0(%rsp),%xmm1
4362
4363# qhasm: uint32323232 diag2 += x2
4364# asm 1: paddd <x2=stack128#2,<diag2=int6464#3
4365# asm 2: paddd <x2=16(%rsp),<diag2=%xmm2
4366paddd 16(%rsp),%xmm2
4367
4368# qhasm: uint32323232 diag3 += x3
4369# asm 1: paddd <x3=stack128#3,<diag3=int6464#4
4370# asm 2: paddd <x3=32(%rsp),<diag3=%xmm3
4371paddd 32(%rsp),%xmm3
4372
4373# qhasm: in0 = diag0
4374# asm 1: movd <diag0=int6464#1,>in0=int64#4
4375# asm 2: movd <diag0=%xmm0,>in0=%rcx
4376movd %xmm0,%rcx
4377
4378# qhasm: in12 = diag1
4379# asm 1: movd <diag1=int6464#2,>in12=int64#5
4380# asm 2: movd <diag1=%xmm1,>in12=%r8
4381movd %xmm1,%r8
4382
4383# qhasm: in8 = diag2
4384# asm 1: movd <diag2=int6464#3,>in8=int64#6
4385# asm 2: movd <diag2=%xmm2,>in8=%r9
4386movd %xmm2,%r9
4387
4388# qhasm: in4 = diag3
4389# asm 1: movd <diag3=int6464#4,>in4=int64#7
4390# asm 2: movd <diag3=%xmm3,>in4=%rax
4391movd %xmm3,%rax
4392
4393# qhasm: diag0 <<<= 96
4394# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4395# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4396pshufd $0x39,%xmm0,%xmm0
4397
4398# qhasm: diag1 <<<= 96
4399# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4400# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4401pshufd $0x39,%xmm1,%xmm1
4402
4403# qhasm: diag2 <<<= 96
4404# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4405# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4406pshufd $0x39,%xmm2,%xmm2
4407
4408# qhasm: diag3 <<<= 96
4409# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4410# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4411pshufd $0x39,%xmm3,%xmm3
4412
4413# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
4414# asm 1: xorl 0(<m=int64#2),<in0=int64#4d
4415# asm 2: xorl 0(<m=%rsi),<in0=%ecx
4416xorl 0(%rsi),%ecx
4417
4418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
4419# asm 1: xorl 48(<m=int64#2),<in12=int64#5d
4420# asm 2: xorl 48(<m=%rsi),<in12=%r8d
4421xorl 48(%rsi),%r8d
4422
4423# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
4424# asm 1: xorl 32(<m=int64#2),<in8=int64#6d
4425# asm 2: xorl 32(<m=%rsi),<in8=%r9d
4426xorl 32(%rsi),%r9d
4427
4428# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
4429# asm 1: xorl 16(<m=int64#2),<in4=int64#7d
4430# asm 2: xorl 16(<m=%rsi),<in4=%eax
4431xorl 16(%rsi),%eax
4432
4433# qhasm: *(uint32 *) (out + 0) = in0
4434# asm 1: movl <in0=int64#4d,0(<out=int64#1)
4435# asm 2: movl <in0=%ecx,0(<out=%rdi)
4436movl %ecx,0(%rdi)
4437
4438# qhasm: *(uint32 *) (out + 48) = in12
4439# asm 1: movl <in12=int64#5d,48(<out=int64#1)
4440# asm 2: movl <in12=%r8d,48(<out=%rdi)
4441movl %r8d,48(%rdi)
4442
4443# qhasm: *(uint32 *) (out + 32) = in8
4444# asm 1: movl <in8=int64#6d,32(<out=int64#1)
4445# asm 2: movl <in8=%r9d,32(<out=%rdi)
4446movl %r9d,32(%rdi)
4447
4448# qhasm: *(uint32 *) (out + 16) = in4
4449# asm 1: movl <in4=int64#7d,16(<out=int64#1)
4450# asm 2: movl <in4=%eax,16(<out=%rdi)
4451movl %eax,16(%rdi)
4452
4453# qhasm: in5 = diag0
4454# asm 1: movd <diag0=int6464#1,>in5=int64#4
4455# asm 2: movd <diag0=%xmm0,>in5=%rcx
4456movd %xmm0,%rcx
4457
4458# qhasm: in1 = diag1
4459# asm 1: movd <diag1=int6464#2,>in1=int64#5
4460# asm 2: movd <diag1=%xmm1,>in1=%r8
4461movd %xmm1,%r8
4462
4463# qhasm: in13 = diag2
4464# asm 1: movd <diag2=int6464#3,>in13=int64#6
4465# asm 2: movd <diag2=%xmm2,>in13=%r9
4466movd %xmm2,%r9
4467
4468# qhasm: in9 = diag3
4469# asm 1: movd <diag3=int6464#4,>in9=int64#7
4470# asm 2: movd <diag3=%xmm3,>in9=%rax
4471movd %xmm3,%rax
4472
4473# qhasm: diag0 <<<= 96
4474# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4475# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4476pshufd $0x39,%xmm0,%xmm0
4477
4478# qhasm: diag1 <<<= 96
4479# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4480# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4481pshufd $0x39,%xmm1,%xmm1
4482
4483# qhasm: diag2 <<<= 96
4484# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4485# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4486pshufd $0x39,%xmm2,%xmm2
4487
4488# qhasm: diag3 <<<= 96
4489# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4490# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4491pshufd $0x39,%xmm3,%xmm3
4492
4493# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
4494# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
4495# asm 2: xorl 20(<m=%rsi),<in5=%ecx
4496xorl 20(%rsi),%ecx
4497
4498# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
4499# asm 1: xorl 4(<m=int64#2),<in1=int64#5d
4500# asm 2: xorl 4(<m=%rsi),<in1=%r8d
4501xorl 4(%rsi),%r8d
4502
4503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
4504# asm 1: xorl 52(<m=int64#2),<in13=int64#6d
4505# asm 2: xorl 52(<m=%rsi),<in13=%r9d
4506xorl 52(%rsi),%r9d
4507
4508# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
4509# asm 1: xorl 36(<m=int64#2),<in9=int64#7d
4510# asm 2: xorl 36(<m=%rsi),<in9=%eax
4511xorl 36(%rsi),%eax
4512
4513# qhasm: *(uint32 *) (out + 20) = in5
4514# asm 1: movl <in5=int64#4d,20(<out=int64#1)
4515# asm 2: movl <in5=%ecx,20(<out=%rdi)
4516movl %ecx,20(%rdi)
4517
4518# qhasm: *(uint32 *) (out + 4) = in1
4519# asm 1: movl <in1=int64#5d,4(<out=int64#1)
4520# asm 2: movl <in1=%r8d,4(<out=%rdi)
4521movl %r8d,4(%rdi)
4522
4523# qhasm: *(uint32 *) (out + 52) = in13
4524# asm 1: movl <in13=int64#6d,52(<out=int64#1)
4525# asm 2: movl <in13=%r9d,52(<out=%rdi)
4526movl %r9d,52(%rdi)
4527
4528# qhasm: *(uint32 *) (out + 36) = in9
4529# asm 1: movl <in9=int64#7d,36(<out=int64#1)
4530# asm 2: movl <in9=%eax,36(<out=%rdi)
4531movl %eax,36(%rdi)
4532
4533# qhasm: in10 = diag0
4534# asm 1: movd <diag0=int6464#1,>in10=int64#4
4535# asm 2: movd <diag0=%xmm0,>in10=%rcx
4536movd %xmm0,%rcx
4537
4538# qhasm: in6 = diag1
4539# asm 1: movd <diag1=int6464#2,>in6=int64#5
4540# asm 2: movd <diag1=%xmm1,>in6=%r8
4541movd %xmm1,%r8
4542
4543# qhasm: in2 = diag2
4544# asm 1: movd <diag2=int6464#3,>in2=int64#6
4545# asm 2: movd <diag2=%xmm2,>in2=%r9
4546movd %xmm2,%r9
4547
4548# qhasm: in14 = diag3
4549# asm 1: movd <diag3=int6464#4,>in14=int64#7
4550# asm 2: movd <diag3=%xmm3,>in14=%rax
4551movd %xmm3,%rax
4552
4553# qhasm: diag0 <<<= 96
4554# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4555# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4556pshufd $0x39,%xmm0,%xmm0
4557
4558# qhasm: diag1 <<<= 96
4559# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4560# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4561pshufd $0x39,%xmm1,%xmm1
4562
4563# qhasm: diag2 <<<= 96
4564# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4565# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4566pshufd $0x39,%xmm2,%xmm2
4567
4568# qhasm: diag3 <<<= 96
4569# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4570# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4571pshufd $0x39,%xmm3,%xmm3
4572
4573# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
4574# asm 1: xorl 40(<m=int64#2),<in10=int64#4d
4575# asm 2: xorl 40(<m=%rsi),<in10=%ecx
4576xorl 40(%rsi),%ecx
4577
4578# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
4579# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
4580# asm 2: xorl 24(<m=%rsi),<in6=%r8d
4581xorl 24(%rsi),%r8d
4582
4583# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
4584# asm 1: xorl 8(<m=int64#2),<in2=int64#6d
4585# asm 2: xorl 8(<m=%rsi),<in2=%r9d
4586xorl 8(%rsi),%r9d
4587
4588# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
4589# asm 1: xorl 56(<m=int64#2),<in14=int64#7d
4590# asm 2: xorl 56(<m=%rsi),<in14=%eax
4591xorl 56(%rsi),%eax
4592
4593# qhasm: *(uint32 *) (out + 40) = in10
4594# asm 1: movl <in10=int64#4d,40(<out=int64#1)
4595# asm 2: movl <in10=%ecx,40(<out=%rdi)
4596movl %ecx,40(%rdi)
4597
4598# qhasm: *(uint32 *) (out + 24) = in6
4599# asm 1: movl <in6=int64#5d,24(<out=int64#1)
4600# asm 2: movl <in6=%r8d,24(<out=%rdi)
4601movl %r8d,24(%rdi)
4602
4603# qhasm: *(uint32 *) (out + 8) = in2
4604# asm 1: movl <in2=int64#6d,8(<out=int64#1)
4605# asm 2: movl <in2=%r9d,8(<out=%rdi)
4606movl %r9d,8(%rdi)
4607
4608# qhasm: *(uint32 *) (out + 56) = in14
4609# asm 1: movl <in14=int64#7d,56(<out=int64#1)
4610# asm 2: movl <in14=%eax,56(<out=%rdi)
4611movl %eax,56(%rdi)
4612
4613# qhasm: in15 = diag0
4614# asm 1: movd <diag0=int6464#1,>in15=int64#4
4615# asm 2: movd <diag0=%xmm0,>in15=%rcx
4616movd %xmm0,%rcx
4617
4618# qhasm: in11 = diag1
4619# asm 1: movd <diag1=int6464#2,>in11=int64#5
4620# asm 2: movd <diag1=%xmm1,>in11=%r8
4621movd %xmm1,%r8
4622
4623# qhasm: in7 = diag2
4624# asm 1: movd <diag2=int6464#3,>in7=int64#6
4625# asm 2: movd <diag2=%xmm2,>in7=%r9
4626movd %xmm2,%r9
4627
4628# qhasm: in3 = diag3
4629# asm 1: movd <diag3=int6464#4,>in3=int64#7
4630# asm 2: movd <diag3=%xmm3,>in3=%rax
4631movd %xmm3,%rax
4632
4633# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
4634# asm 1: xorl 60(<m=int64#2),<in15=int64#4d
4635# asm 2: xorl 60(<m=%rsi),<in15=%ecx
4636xorl 60(%rsi),%ecx
4637
4638# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
4639# asm 1: xorl 44(<m=int64#2),<in11=int64#5d
4640# asm 2: xorl 44(<m=%rsi),<in11=%r8d
4641xorl 44(%rsi),%r8d
4642
4643# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
4644# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
4645# asm 2: xorl 28(<m=%rsi),<in7=%r9d
4646xorl 28(%rsi),%r9d
4647
4648# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
4649# asm 1: xorl 12(<m=int64#2),<in3=int64#7d
4650# asm 2: xorl 12(<m=%rsi),<in3=%eax
4651xorl 12(%rsi),%eax
4652
4653# qhasm: *(uint32 *) (out + 60) = in15
4654# asm 1: movl <in15=int64#4d,60(<out=int64#1)
4655# asm 2: movl <in15=%ecx,60(<out=%rdi)
4656movl %ecx,60(%rdi)
4657
4658# qhasm: *(uint32 *) (out + 44) = in11
4659# asm 1: movl <in11=int64#5d,44(<out=int64#1)
4660# asm 2: movl <in11=%r8d,44(<out=%rdi)
4661movl %r8d,44(%rdi)
4662
4663# qhasm: *(uint32 *) (out + 28) = in7
4664# asm 1: movl <in7=int64#6d,28(<out=int64#1)
4665# asm 2: movl <in7=%r9d,28(<out=%rdi)
4666movl %r9d,28(%rdi)
4667
4668# qhasm: *(uint32 *) (out + 12) = in3
4669# asm 1: movl <in3=int64#7d,12(<out=int64#1)
4670# asm 2: movl <in3=%eax,12(<out=%rdi)
4671movl %eax,12(%rdi)
4672
4673# qhasm: bytes = bytes_backup
4674# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
4675# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
4676movq 408(%rsp),%r9
4677
4678# qhasm: in8 = ((uint32 *)&x2)[0]
4679# asm 1: movl <x2=stack128#2,>in8=int64#4d
4680# asm 2: movl <x2=16(%rsp),>in8=%ecx
4681movl 16(%rsp),%ecx
4682
4683# qhasm: in9 = ((uint32 *)&x3)[1]
4684# asm 1: movl 4+<x3=stack128#3,>in9=int64#5d
4685# asm 2: movl 4+<x3=32(%rsp),>in9=%r8d
4686movl 4+32(%rsp),%r8d
4687
4688# qhasm: in8 += 1
4689# asm 1: add $1,<in8=int64#4
4690# asm 2: add $1,<in8=%rcx
4691add $1,%rcx
4692
4693# qhasm: in9 <<= 32
4694# asm 1: shl $32,<in9=int64#5
4695# asm 2: shl $32,<in9=%r8
4696shl $32,%r8
4697
4698# qhasm: in8 += in9
4699# asm 1: add <in9=int64#5,<in8=int64#4
4700# asm 2: add <in9=%r8,<in8=%rcx
4701add %r8,%rcx
4702
4703# qhasm: in9 = in8
4704# asm 1: mov <in8=int64#4,>in9=int64#5
4705# asm 2: mov <in8=%rcx,>in9=%r8
4706mov %rcx,%r8
4707
4708# qhasm: (uint64) in9 >>= 32
4709# asm 1: shr $32,<in9=int64#5
4710# asm 2: shr $32,<in9=%r8
4711shr $32,%r8
4712
4713# qhasm: ((uint32 *)&x2)[0] = in8
4714# asm 1: movl <in8=int64#4d,>x2=stack128#2
4715# asm 2: movl <in8=%ecx,>x2=16(%rsp)
4716movl %ecx,16(%rsp)
4717
4718# qhasm: ((uint32 *)&x3)[1] = in9
4719# asm 1: movl <in9=int64#5d,4+<x3=stack128#3
4720# asm 2: movl <in9=%r8d,4+<x3=32(%rsp)
4721movl %r8d,4+32(%rsp)
4722
4723# qhasm: unsigned>? unsigned<? bytes - 64
4724# asm 1: cmp $64,<bytes=int64#6
4725# asm 2: cmp $64,<bytes=%r9
4726cmp $64,%r9
4727# comment:fp stack unchanged by jump
4728
4729# qhasm: goto bytesatleast65 if unsigned>
4730ja ._bytesatleast65
4731# comment:fp stack unchanged by jump
4732
4733# qhasm: goto bytesatleast64 if !unsigned<
4734jae ._bytesatleast64
4735
4736# qhasm: m = out
4737# asm 1: mov <out=int64#1,>m=int64#2
4738# asm 2: mov <out=%rdi,>m=%rsi
4739mov %rdi,%rsi
4740
4741# qhasm: out = ctarget
4742# asm 1: mov <ctarget=int64#3,>out=int64#1
4743# asm 2: mov <ctarget=%rdx,>out=%rdi
4744mov %rdx,%rdi
4745
4746# qhasm: i = bytes
4747# asm 1: mov <bytes=int64#6,>i=int64#4
4748# asm 2: mov <bytes=%r9,>i=%rcx
4749mov %r9,%rcx
4750
4751# qhasm: while (i) { *out++ = *m++; --i }
4752rep movsb
4753# comment:fp stack unchanged by fallthrough
4754
4755# qhasm: bytesatleast64:
4756._bytesatleast64:
4757# comment:fp stack unchanged by fallthrough
4758
4759# qhasm: done:
4760._done:
4761
4762# qhasm: r11_caller = r11_stack
4763# asm 1: movq <r11_stack=stack64#1,>r11_caller=int64#9
4764# asm 2: movq <r11_stack=352(%rsp),>r11_caller=%r11
4765movq 352(%rsp),%r11
4766
4767# qhasm: r12_caller = r12_stack
4768# asm 1: movq <r12_stack=stack64#2,>r12_caller=int64#10
4769# asm 2: movq <r12_stack=360(%rsp),>r12_caller=%r12
4770movq 360(%rsp),%r12
4771
4772# qhasm: r13_caller = r13_stack
4773# asm 1: movq <r13_stack=stack64#3,>r13_caller=int64#11
4774# asm 2: movq <r13_stack=368(%rsp),>r13_caller=%r13
4775movq 368(%rsp),%r13
4776
4777# qhasm: r14_caller = r14_stack
4778# asm 1: movq <r14_stack=stack64#4,>r14_caller=int64#12
4779# asm 2: movq <r14_stack=376(%rsp),>r14_caller=%r14
4780movq 376(%rsp),%r14
4781
4782# qhasm: r15_caller = r15_stack
4783# asm 1: movq <r15_stack=stack64#5,>r15_caller=int64#13
4784# asm 2: movq <r15_stack=384(%rsp),>r15_caller=%r15
4785movq 384(%rsp),%r15
4786
4787# qhasm: rbx_caller = rbx_stack
4788# asm 1: movq <rbx_stack=stack64#6,>rbx_caller=int64#14
4789# asm 2: movq <rbx_stack=392(%rsp),>rbx_caller=%rbx
4790movq 392(%rsp),%rbx
4791
4792# qhasm: rbp_caller = rbp_stack
4793# asm 1: movq <rbp_stack=stack64#7,>rbp_caller=int64#15
4794# asm 2: movq <rbp_stack=400(%rsp),>rbp_caller=%rbp
4795movq 400(%rsp),%rbp
4796
4797# qhasm: leave
4798add %r11,%rsp
4799xor %rax,%rax
4800xor %rdx,%rdx
4801ret
4802
4803# qhasm: bytesatleast65:
4804._bytesatleast65:
4805
4806# qhasm: bytes -= 64
4807# asm 1: sub $64,<bytes=int64#6
4808# asm 2: sub $64,<bytes=%r9
4809sub $64,%r9
4810
4811# qhasm: out += 64
4812# asm 1: add $64,<out=int64#1
4813# asm 2: add $64,<out=%rdi
4814add $64,%rdi
4815
4816# qhasm: m += 64
4817# asm 1: add $64,<m=int64#2
4818# asm 2: add $64,<m=%rsi
4819add $64,%rsi
4820# comment:fp stack unchanged by jump
4821
4822# qhasm: goto bytesbetween1and255
4823jmp ._bytesbetween1and255
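
The round loops above implement Salsa20's add-rotate-xor steps: each pslld $r / psrld $(32-r) pair followed by two pxor instructions is a 32-bit rotation by r (r = 7, 9, 13, 18) XORed into one of the diagonal registers. A minimal scalar sketch of one such column, using illustrative names that do not appear in the source:

/* Sketch only: scalar equivalent of the rotate-and-XOR steps above.
   rotl32() and quarter_column() are illustrative names, not from NaCl. */
#include <stdint.h>

static uint32_t rotl32(uint32_t x, int r)
{
    return (x << r) | (x >> (32 - r));     /* pslld $r, psrld $(32-r), pxor */
}

static void quarter_column(uint32_t x[16], int a, int b, int c, int d)
{
    x[b] ^= rotl32(x[a] + x[d], 7);
    x[c] ^= rotl32(x[b] + x[a], 9);
    x[d] ^= rotl32(x[c] + x[b], 13);
    x[a] ^= rotl32(x[d] + x[c], 18);
}
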
diff --git a/nacl/crypto_stream/salsa208/checksum b/nacl/crypto_stream/salsa208/checksum
new file mode 100644
index 00000000..c87364e6
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/checksum
@@ -0,0 +1 @@
05f32b0647417aaa446b0b3127318133cf9af32b771869eab267000bf02710cd
diff --git a/nacl/crypto_stream/salsa208/ref/api.h b/nacl/crypto_stream/salsa208/ref/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/ref/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa208/ref/implementors b/nacl/crypto_stream/salsa208/ref/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/ref/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa208/ref/stream.c b/nacl/crypto_stream/salsa208/ref/stream.c
new file mode 100644
index 00000000..cdcfbc0e
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/ref/stream.c
@@ -0,0 +1,49 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa208.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream(
15 unsigned char *c,unsigned long long clen,
16 const unsigned char *n,
17 const unsigned char *k
18)
19{
20 unsigned char in[16];
21 unsigned char block[64];
22 int i;
23 unsigned int u;
24
25 if (!clen) return 0;
26
27 for (i = 0;i < 8;++i) in[i] = n[i];
28 for (i = 8;i < 16;++i) in[i] = 0;
29
30 while (clen >= 64) {
31 crypto_core_salsa208(c,in,k,sigma);
32
33 u = 1;
34 for (i = 8;i < 16;++i) {
35 u += (unsigned int) in[i];
36 in[i] = u;
37 u >>= 8;
38 }
39
40 clen -= 64;
41 c += 64;
42 }
43
44 if (clen) {
45 crypto_core_salsa208(block,in,k,sigma);
46 for (i = 0;i < clen;++i) c[i] = block[i];
47 }
48 return 0;
49}
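
For context, the reference stream function above can be exercised as follows; this is a hedged sketch that assumes the NaCl build maps the generic crypto_stream name in crypto_stream.h to this salsa208 implementation, with the 32-byte key and 8-byte nonce sizes declared in api.h:

/* Sketch: fill a buffer with Salsa20/8 keystream through the reference API.
   Key and nonce contents are placeholders; crypto_stream is the generic name
   that NaCl's generated crypto_stream.h resolves for the chosen primitive. */
#include <string.h>
#include "crypto_stream.h"

int example_keystream(void)
{
    unsigned char k[32];              /* CRYPTO_KEYBYTES   */
    unsigned char n[8];               /* CRYPTO_NONCEBYTES */
    unsigned char keystream[128];

    memset(k, 0x42, sizeof k);        /* placeholder key   */
    memset(n, 0x24, sizeof n);        /* placeholder nonce */

    return crypto_stream(keystream, sizeof keystream, n, k);   /* 0 on success */
}
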
diff --git a/nacl/crypto_stream/salsa208/ref/xor.c b/nacl/crypto_stream/salsa208/ref/xor.c
new file mode 100644
index 00000000..c017ac42
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/ref/xor.c
@@ -0,0 +1,52 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_salsa208.h"
8#include "crypto_stream.h"
9
10typedef unsigned int uint32;
11
12static const unsigned char sigma[16] = "expand 32-byte k";
13
14int crypto_stream_xor(
15 unsigned char *c,
16 const unsigned char *m,unsigned long long mlen,
17 const unsigned char *n,
18 const unsigned char *k
19)
20{
21 unsigned char in[16];
22 unsigned char block[64];
23 int i;
24 unsigned int u;
25
26 if (!mlen) return 0;
27
28 for (i = 0;i < 8;++i) in[i] = n[i];
29 for (i = 8;i < 16;++i) in[i] = 0;
30
31 while (mlen >= 64) {
32 crypto_core_salsa208(block,in,k,sigma);
33 for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i];
34
35 u = 1;
36 for (i = 8;i < 16;++i) {
37 u += (unsigned int) in[i];
38 in[i] = u;
39 u >>= 8;
40 }
41
42 mlen -= 64;
43 c += 64;
44 m += 64;
45 }
46
47 if (mlen) {
48 crypto_core_salsa208(block,in,k,sigma);
49 for (i = 0;i < mlen;++i) c[i] = m[i] ^ block[i];
50 }
51 return 0;
52}
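
One detail shared by stream.c and xor.c above: bytes 8..15 of in[] hold the 64-byte block counter, and the u = 1 carry loop advances it by one block. It behaves like a little-endian 64-bit increment, as this illustrative sketch shows:

/* Sketch: the per-block counter update from the reference code, restated as
   an explicit little-endian 64-bit increment. Names are illustrative only. */
#include <stdint.h>

static void increment_block_counter(unsigned char in[16])
{
    uint64_t ctr = 0;
    int i;
    for (i = 0; i < 8; ++i)
        ctr |= (uint64_t) in[8 + i] << (8 * i);    /* load counter (LE) */
    ++ctr;                                         /* next 64-byte block */
    for (i = 0; i < 8; ++i)
        in[8 + i] = (unsigned char) (ctr >> (8 * i));
}
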
diff --git a/nacl/crypto_stream/salsa208/used b/nacl/crypto_stream/salsa208/used
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/used
diff --git a/nacl/crypto_stream/salsa208/x86_xmm5/api.h b/nacl/crypto_stream/salsa208/x86_xmm5/api.h
new file mode 100644
index 00000000..c2b18461
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/x86_xmm5/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 8
diff --git a/nacl/crypto_stream/salsa208/x86_xmm5/implementors b/nacl/crypto_stream/salsa208/x86_xmm5/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/x86_xmm5/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/salsa208/x86_xmm5/stream.s b/nacl/crypto_stream/salsa208/x86_xmm5/stream.s
new file mode 100644
index 00000000..065253a8
--- /dev/null
+++ b/nacl/crypto_stream/salsa208/x86_xmm5/stream.s
@@ -0,0 +1,5078 @@
1
2# qhasm: int32 a
3
4# qhasm: stack32 arg1
5
6# qhasm: stack32 arg2
7
8# qhasm: stack32 arg3
9
10# qhasm: stack32 arg4
11
12# qhasm: stack32 arg5
13
14# qhasm: stack32 arg6
15
16# qhasm: input arg1
17
18# qhasm: input arg2
19
20# qhasm: input arg3
21
22# qhasm: input arg4
23
24# qhasm: input arg5
25
26# qhasm: input arg6
27
28# qhasm: int32 eax
29
30# qhasm: int32 ebx
31
32# qhasm: int32 esi
33
34# qhasm: int32 edi
35
36# qhasm: int32 ebp
37
38# qhasm: caller eax
39
40# qhasm: caller ebx
41
42# qhasm: caller esi
43
44# qhasm: caller edi
45
46# qhasm: caller ebp
47
48# qhasm: int32 k
49
50# qhasm: int32 kbits
51
52# qhasm: int32 iv
53
54# qhasm: int32 i
55
56# qhasm: stack128 x0
57
58# qhasm: stack128 x1
59
60# qhasm: stack128 x2
61
62# qhasm: stack128 x3
63
64# qhasm: int32 m
65
66# qhasm: stack32 out_stack
67
68# qhasm: int32 out
69
70# qhasm: stack32 bytes_stack
71
72# qhasm: int32 bytes
73
74# qhasm: stack32 eax_stack
75
76# qhasm: stack32 ebx_stack
77
78# qhasm: stack32 esi_stack
79
80# qhasm: stack32 edi_stack
81
82# qhasm: stack32 ebp_stack
83
84# qhasm: int6464 diag0
85
86# qhasm: int6464 diag1
87
88# qhasm: int6464 diag2
89
90# qhasm: int6464 diag3
91
92# qhasm: int6464 a0
93
94# qhasm: int6464 a1
95
96# qhasm: int6464 a2
97
98# qhasm: int6464 a3
99
100# qhasm: int6464 a4
101
102# qhasm: int6464 a5
103
104# qhasm: int6464 a6
105
106# qhasm: int6464 a7
107
108# qhasm: int6464 b0
109
110# qhasm: int6464 b1
111
112# qhasm: int6464 b2
113
114# qhasm: int6464 b3
115
116# qhasm: int6464 b4
117
118# qhasm: int6464 b5
119
120# qhasm: int6464 b6
121
122# qhasm: int6464 b7
123
124# qhasm: int6464 z0
125
126# qhasm: int6464 z1
127
128# qhasm: int6464 z2
129
130# qhasm: int6464 z3
131
132# qhasm: int6464 z4
133
134# qhasm: int6464 z5
135
136# qhasm: int6464 z6
137
138# qhasm: int6464 z7
139
140# qhasm: int6464 z8
141
142# qhasm: int6464 z9
143
144# qhasm: int6464 z10
145
146# qhasm: int6464 z11
147
148# qhasm: int6464 z12
149
150# qhasm: int6464 z13
151
152# qhasm: int6464 z14
153
154# qhasm: int6464 z15
155
156# qhasm: stack128 z0_stack
157
158# qhasm: stack128 z1_stack
159
160# qhasm: stack128 z2_stack
161
162# qhasm: stack128 z3_stack
163
164# qhasm: stack128 z4_stack
165
166# qhasm: stack128 z5_stack
167
168# qhasm: stack128 z6_stack
169
170# qhasm: stack128 z7_stack
171
172# qhasm: stack128 z8_stack
173
174# qhasm: stack128 z9_stack
175
176# qhasm: stack128 z10_stack
177
178# qhasm: stack128 z11_stack
179
180# qhasm: stack128 z12_stack
181
182# qhasm: stack128 z13_stack
183
184# qhasm: stack128 z14_stack
185
186# qhasm: stack128 z15_stack
187
188# qhasm: stack128 orig0
189
190# qhasm: stack128 orig1
191
192# qhasm: stack128 orig2
193
194# qhasm: stack128 orig3
195
196# qhasm: stack128 orig4
197
198# qhasm: stack128 orig5
199
200# qhasm: stack128 orig6
201
202# qhasm: stack128 orig7
203
204# qhasm: stack128 orig8
205
206# qhasm: stack128 orig9
207
208# qhasm: stack128 orig10
209
210# qhasm: stack128 orig11
211
212# qhasm: stack128 orig12
213
214# qhasm: stack128 orig13
215
216# qhasm: stack128 orig14
217
218# qhasm: stack128 orig15
219
220# qhasm: int6464 p
221
222# qhasm: int6464 q
223
224# qhasm: int6464 r
225
226# qhasm: int6464 s
227
228# qhasm: int6464 t
229
230# qhasm: int6464 u
231
232# qhasm: int6464 v
233
234# qhasm: int6464 w
235
236# qhasm: int6464 mp
237
238# qhasm: int6464 mq
239
240# qhasm: int6464 mr
241
242# qhasm: int6464 ms
243
244# qhasm: int6464 mt
245
246# qhasm: int6464 mu
247
248# qhasm: int6464 mv
249
250# qhasm: int6464 mw
251
252# qhasm: int32 in0
253
254# qhasm: int32 in1
255
256# qhasm: int32 in2
257
258# qhasm: int32 in3
259
260# qhasm: int32 in4
261
262# qhasm: int32 in5
263
264# qhasm: int32 in6
265
266# qhasm: int32 in7
267
268# qhasm: int32 in8
269
270# qhasm: int32 in9
271
272# qhasm: int32 in10
273
274# qhasm: int32 in11
275
276# qhasm: int32 in12
277
278# qhasm: int32 in13
279
280# qhasm: int32 in14
281
282# qhasm: int32 in15
283
284# qhasm: stack512 tmp
285
286# qhasm: stack32 ctarget
287
288# qhasm: enter crypto_stream_salsa208_x86_xmm5
289.text
290.p2align 5
291.globl _crypto_stream_salsa208_x86_xmm5
292.globl crypto_stream_salsa208_x86_xmm5
293_crypto_stream_salsa208_x86_xmm5:
294crypto_stream_salsa208_x86_xmm5:
295mov %esp,%eax
296and $31,%eax
297add $704,%eax
298sub %eax,%esp
299
300# qhasm: eax_stack = eax
301# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
302# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
303movl %eax,0(%esp)
304
305# qhasm: ebx_stack = ebx
306# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
307# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
308movl %ebx,4(%esp)
309
310# qhasm: esi_stack = esi
311# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
312# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
313movl %esi,8(%esp)
314
315# qhasm: edi_stack = edi
316# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
317# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
318movl %edi,12(%esp)
319
320# qhasm: ebp_stack = ebp
321# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
322# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
323movl %ebp,16(%esp)
324
325# qhasm: bytes = arg2
326# asm 1: movl <arg2=stack32#-2,>bytes=int32#3
327# asm 2: movl <arg2=8(%esp,%eax),>bytes=%edx
328movl 8(%esp,%eax),%edx
329
330# qhasm: out = arg1
331# asm 1: movl <arg1=stack32#-1,>out=int32#6
332# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
333movl 4(%esp,%eax),%edi
334
335# qhasm: m = out
336# asm 1: mov <out=int32#6,>m=int32#5
337# asm 2: mov <out=%edi,>m=%esi
338mov %edi,%esi
339
340# qhasm: iv = arg4
341# asm 1: movl <arg4=stack32#-4,>iv=int32#4
342# asm 2: movl <arg4=16(%esp,%eax),>iv=%ebx
343movl 16(%esp,%eax),%ebx
344
345# qhasm: k = arg5
346# asm 1: movl <arg5=stack32#-5,>k=int32#7
347# asm 2: movl <arg5=20(%esp,%eax),>k=%ebp
348movl 20(%esp,%eax),%ebp
349
350# qhasm: unsigned>? bytes - 0
351# asm 1: cmp $0,<bytes=int32#3
352# asm 2: cmp $0,<bytes=%edx
353cmp $0,%edx
354# comment:fp stack unchanged by jump
355
356# qhasm: goto done if !unsigned>
357jbe ._done
358
359# qhasm: a = 0
360# asm 1: mov $0,>a=int32#1
361# asm 2: mov $0,>a=%eax
362mov $0,%eax
363
364# qhasm: i = bytes
365# asm 1: mov <bytes=int32#3,>i=int32#2
366# asm 2: mov <bytes=%edx,>i=%ecx
367mov %edx,%ecx
368
369# qhasm: while (i) { *out++ = a; --i }
370rep stosb
371
372# qhasm: out -= bytes
373# asm 1: subl <bytes=int32#3,<out=int32#6
374# asm 2: subl <bytes=%edx,<out=%edi
375subl %edx,%edi
376# comment:fp stack unchanged by jump
377
378# qhasm: goto start
379jmp ._start
380
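# crypto_stream (keystream only) reuses the xor path: m was aliased to out
# above, the output buffer is zero-filled with rep stosb, out is rewound by
# bytes, and control joins ._start, so xoring the keystream over the zeroed
# buffer leaves the raw keystream in out.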
381# qhasm: enter crypto_stream_salsa208_x86_xmm5_xor
382.text
383.p2align 5
384.globl _crypto_stream_salsa208_x86_xmm5_xor
385.globl crypto_stream_salsa208_x86_xmm5_xor
386_crypto_stream_salsa208_x86_xmm5_xor:
387crypto_stream_salsa208_x86_xmm5_xor:
388mov %esp,%eax
389and $31,%eax
390add $704,%eax
391sub %eax,%esp
392
393# qhasm: eax_stack = eax
394# asm 1: movl <eax=int32#1,>eax_stack=stack32#1
395# asm 2: movl <eax=%eax,>eax_stack=0(%esp)
396movl %eax,0(%esp)
397
398# qhasm: ebx_stack = ebx
399# asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
400# asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
401movl %ebx,4(%esp)
402
403# qhasm: esi_stack = esi
404# asm 1: movl <esi=int32#5,>esi_stack=stack32#3
405# asm 2: movl <esi=%esi,>esi_stack=8(%esp)
406movl %esi,8(%esp)
407
408# qhasm: edi_stack = edi
409# asm 1: movl <edi=int32#6,>edi_stack=stack32#4
410# asm 2: movl <edi=%edi,>edi_stack=12(%esp)
411movl %edi,12(%esp)
412
413# qhasm: ebp_stack = ebp
414# asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
415# asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
416movl %ebp,16(%esp)
417
418# qhasm: out = arg1
419# asm 1: movl <arg1=stack32#-1,>out=int32#6
420# asm 2: movl <arg1=4(%esp,%eax),>out=%edi
421movl 4(%esp,%eax),%edi
422
423# qhasm: m = arg2
424# asm 1: movl <arg2=stack32#-2,>m=int32#5
425# asm 2: movl <arg2=8(%esp,%eax),>m=%esi
426movl 8(%esp,%eax),%esi
427
428# qhasm: bytes = arg3
429# asm 1: movl <arg3=stack32#-3,>bytes=int32#3
430# asm 2: movl <arg3=12(%esp,%eax),>bytes=%edx
431movl 12(%esp,%eax),%edx
432
433# qhasm: iv = arg5
434# asm 1: movl <arg5=stack32#-5,>iv=int32#4
435# asm 2: movl <arg5=20(%esp,%eax),>iv=%ebx
436movl 20(%esp,%eax),%ebx
437
438# qhasm: k = arg6
439# asm 1: movl <arg6=stack32#-6,>k=int32#7
440# asm 2: movl <arg6=24(%esp,%eax),>k=%ebp
441movl 24(%esp,%eax),%ebp
442
443# qhasm: unsigned>? bytes - 0
444# asm 1: cmp $0,<bytes=int32#3
445# asm 2: cmp $0,<bytes=%edx
446cmp $0,%edx
447# comment:fp stack unchanged by jump
448
449# qhasm: goto done if !unsigned>
450jbe ._done
451# comment:fp stack unchanged by fallthrough
452
453# qhasm: start:
454._start:
455
456# qhasm: out_stack = out
457# asm 1: movl <out=int32#6,>out_stack=stack32#6
458# asm 2: movl <out=%edi,>out_stack=20(%esp)
459movl %edi,20(%esp)
460
461# qhasm: bytes_stack = bytes
462# asm 1: movl <bytes=int32#3,>bytes_stack=stack32#7
463# asm 2: movl <bytes=%edx,>bytes_stack=24(%esp)
464movl %edx,24(%esp)
465
466# qhasm: in4 = *(uint32 *) (k + 12)
467# asm 1: movl 12(<k=int32#7),>in4=int32#1
468# asm 2: movl 12(<k=%ebp),>in4=%eax
469movl 12(%ebp),%eax
470
471# qhasm: in12 = *(uint32 *) (k + 20)
472# asm 1: movl 20(<k=int32#7),>in12=int32#2
473# asm 2: movl 20(<k=%ebp),>in12=%ecx
474movl 20(%ebp),%ecx
475
476# qhasm: ((uint32 *)&x3)[0] = in4
477# asm 1: movl <in4=int32#1,>x3=stack128#1
478# asm 2: movl <in4=%eax,>x3=32(%esp)
479movl %eax,32(%esp)
480
481# qhasm: ((uint32 *)&x1)[0] = in12
482# asm 1: movl <in12=int32#2,>x1=stack128#2
483# asm 2: movl <in12=%ecx,>x1=48(%esp)
484movl %ecx,48(%esp)
485
486# qhasm: in0 = 1634760805
487# asm 1: mov $1634760805,>in0=int32#1
488# asm 2: mov $1634760805,>in0=%eax
489mov $1634760805,%eax
490
491# qhasm: in8 = 0
492# asm 1: mov $0,>in8=int32#2
493# asm 2: mov $0,>in8=%ecx
494mov $0,%ecx
495
496# qhasm: ((uint32 *)&x0)[0] = in0
497# asm 1: movl <in0=int32#1,>x0=stack128#3
498# asm 2: movl <in0=%eax,>x0=64(%esp)
499movl %eax,64(%esp)
500
501# qhasm: ((uint32 *)&x2)[0] = in8
502# asm 1: movl <in8=int32#2,>x2=stack128#4
503# asm 2: movl <in8=%ecx,>x2=80(%esp)
504movl %ecx,80(%esp)
505
506# qhasm: in6 = *(uint32 *) (iv + 0)
507# asm 1: movl 0(<iv=int32#4),>in6=int32#1
508# asm 2: movl 0(<iv=%ebx),>in6=%eax
509movl 0(%ebx),%eax
510
511# qhasm: in7 = *(uint32 *) (iv + 4)
512# asm 1: movl 4(<iv=int32#4),>in7=int32#2
513# asm 2: movl 4(<iv=%ebx),>in7=%ecx
514movl 4(%ebx),%ecx
515
516# qhasm: ((uint32 *)&x1)[2] = in6
517# asm 1: movl <in6=int32#1,8+<x1=stack128#2
518# asm 2: movl <in6=%eax,8+<x1=48(%esp)
519movl %eax,8+48(%esp)
520
521# qhasm: ((uint32 *)&x2)[3] = in7
522# asm 1: movl <in7=int32#2,12+<x2=stack128#4
523# asm 2: movl <in7=%ecx,12+<x2=80(%esp)
524movl %ecx,12+80(%esp)
525
526# qhasm: in9 = 0
527# asm 1: mov $0,>in9=int32#1
528# asm 2: mov $0,>in9=%eax
529mov $0,%eax
530
531# qhasm: in10 = 2036477234
532# asm 1: mov $2036477234,>in10=int32#2
533# asm 2: mov $2036477234,>in10=%ecx
534mov $2036477234,%ecx
535
536# qhasm: ((uint32 *)&x3)[1] = in9
537# asm 1: movl <in9=int32#1,4+<x3=stack128#1
538# asm 2: movl <in9=%eax,4+<x3=32(%esp)
539movl %eax,4+32(%esp)
540
541# qhasm: ((uint32 *)&x0)[2] = in10
542# asm 1: movl <in10=int32#2,8+<x0=stack128#3
543# asm 2: movl <in10=%ecx,8+<x0=64(%esp)
544movl %ecx,8+64(%esp)
545
546# qhasm: in1 = *(uint32 *) (k + 0)
547# asm 1: movl 0(<k=int32#7),>in1=int32#1
548# asm 2: movl 0(<k=%ebp),>in1=%eax
549movl 0(%ebp),%eax
550
551# qhasm: in2 = *(uint32 *) (k + 4)
552# asm 1: movl 4(<k=int32#7),>in2=int32#2
553# asm 2: movl 4(<k=%ebp),>in2=%ecx
554movl 4(%ebp),%ecx
555
556# qhasm: in3 = *(uint32 *) (k + 8)
557# asm 1: movl 8(<k=int32#7),>in3=int32#3
558# asm 2: movl 8(<k=%ebp),>in3=%edx
559movl 8(%ebp),%edx
560
561# qhasm: in5 = 857760878
562# asm 1: mov $857760878,>in5=int32#4
563# asm 2: mov $857760878,>in5=%ebx
564mov $857760878,%ebx
565
566# qhasm: ((uint32 *)&x1)[1] = in1
567# asm 1: movl <in1=int32#1,4+<x1=stack128#2
568# asm 2: movl <in1=%eax,4+<x1=48(%esp)
569movl %eax,4+48(%esp)
570
571# qhasm: ((uint32 *)&x2)[2] = in2
572# asm 1: movl <in2=int32#2,8+<x2=stack128#4
573# asm 2: movl <in2=%ecx,8+<x2=80(%esp)
574movl %ecx,8+80(%esp)
575
576# qhasm: ((uint32 *)&x3)[3] = in3
577# asm 1: movl <in3=int32#3,12+<x3=stack128#1
578# asm 2: movl <in3=%edx,12+<x3=32(%esp)
579movl %edx,12+32(%esp)
580
581# qhasm: ((uint32 *)&x0)[1] = in5
582# asm 1: movl <in5=int32#4,4+<x0=stack128#3
583# asm 2: movl <in5=%ebx,4+<x0=64(%esp)
584movl %ebx,4+64(%esp)
585
586# qhasm: in11 = *(uint32 *) (k + 16)
587# asm 1: movl 16(<k=int32#7),>in11=int32#1
588# asm 2: movl 16(<k=%ebp),>in11=%eax
589movl 16(%ebp),%eax
590
591# qhasm: in13 = *(uint32 *) (k + 24)
592# asm 1: movl 24(<k=int32#7),>in13=int32#2
593# asm 2: movl 24(<k=%ebp),>in13=%ecx
594movl 24(%ebp),%ecx
595
596# qhasm: in14 = *(uint32 *) (k + 28)
597# asm 1: movl 28(<k=int32#7),>in14=int32#3
598# asm 2: movl 28(<k=%ebp),>in14=%edx
599movl 28(%ebp),%edx
600
601# qhasm: in15 = 1797285236
602# asm 1: mov $1797285236,>in15=int32#4
603# asm 2: mov $1797285236,>in15=%ebx
604mov $1797285236,%ebx
605
606# qhasm: ((uint32 *)&x1)[3] = in11
607# asm 1: movl <in11=int32#1,12+<x1=stack128#2
608# asm 2: movl <in11=%eax,12+<x1=48(%esp)
609movl %eax,12+48(%esp)
610
611# qhasm: ((uint32 *)&x2)[1] = in13
612# asm 1: movl <in13=int32#2,4+<x2=stack128#4
613# asm 2: movl <in13=%ecx,4+<x2=80(%esp)
614movl %ecx,4+80(%esp)
615
616# qhasm: ((uint32 *)&x3)[2] = in14
617# asm 1: movl <in14=int32#3,8+<x3=stack128#1
618# asm 2: movl <in14=%edx,8+<x3=32(%esp)
619movl %edx,8+32(%esp)
620
621# qhasm: ((uint32 *)&x0)[3] = in15
622# asm 1: movl <in15=int32#4,12+<x0=stack128#3
623# asm 2: movl <in15=%ebx,12+<x0=64(%esp)
624movl %ebx,12+64(%esp)
625
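# the 16 Salsa20 input words are now staged in four stack128 slots, grouped
# by matrix diagonal: x0 = state words 0,5,10,15 (the "expand 32-byte k"
# constants 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574), x1 = 12,1,6,11,
# x2 = 8,13,2,7, x3 = 4,9,14,3; words 8/9 form the 64-bit block counter,
# initialised to zero.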
626# qhasm: bytes = bytes_stack
627# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
628# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
629movl 24(%esp),%eax
630
631# qhasm: unsigned<? bytes - 256
632# asm 1: cmp $256,<bytes=int32#1
633# asm 2: cmp $256,<bytes=%eax
634cmp $256,%eax
635# comment:fp stack unchanged by jump
636
637# qhasm: goto bytesbetween1and255 if unsigned<
638jb ._bytesbetween1and255
639
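# >= 256-byte path: each state word is broadcast (pshufd) into all four
# lanes of an xmm register and parked in orig0..orig15, so four 64-byte
# Salsa20/8 blocks are computed in parallel, one per 32-bit lane.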
640# qhasm: z0 = x0
641# asm 1: movdqa <x0=stack128#3,>z0=int6464#1
642# asm 2: movdqa <x0=64(%esp),>z0=%xmm0
643movdqa 64(%esp),%xmm0
644
645# qhasm: z5 = z0[1,1,1,1]
646# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
647# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
648pshufd $0x55,%xmm0,%xmm1
649
650# qhasm: z10 = z0[2,2,2,2]
651# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
652# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
653pshufd $0xaa,%xmm0,%xmm2
654
655# qhasm: z15 = z0[3,3,3,3]
656# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
657# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
658pshufd $0xff,%xmm0,%xmm3
659
660# qhasm: z0 = z0[0,0,0,0]
661# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
662# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
663pshufd $0x00,%xmm0,%xmm0
664
665# qhasm: orig5 = z5
666# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
667# asm 2: movdqa <z5=%xmm1,>orig5=96(%esp)
668movdqa %xmm1,96(%esp)
669
670# qhasm: orig10 = z10
671# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
672# asm 2: movdqa <z10=%xmm2,>orig10=112(%esp)
673movdqa %xmm2,112(%esp)
674
675# qhasm: orig15 = z15
676# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
677# asm 2: movdqa <z15=%xmm3,>orig15=128(%esp)
678movdqa %xmm3,128(%esp)
679
680# qhasm: orig0 = z0
681# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
682# asm 2: movdqa <z0=%xmm0,>orig0=144(%esp)
683movdqa %xmm0,144(%esp)
684
685# qhasm: z1 = x1
686# asm 1: movdqa <x1=stack128#2,>z1=int6464#1
687# asm 2: movdqa <x1=48(%esp),>z1=%xmm0
688movdqa 48(%esp),%xmm0
689
690# qhasm: z6 = z1[2,2,2,2]
691# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
692# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
693pshufd $0xaa,%xmm0,%xmm1
694
695# qhasm: z11 = z1[3,3,3,3]
696# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
697# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
698pshufd $0xff,%xmm0,%xmm2
699
700# qhasm: z12 = z1[0,0,0,0]
701# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
702# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
703pshufd $0x00,%xmm0,%xmm3
704
705# qhasm: z1 = z1[1,1,1,1]
706# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
707# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
708pshufd $0x55,%xmm0,%xmm0
709
710# qhasm: orig6 = z6
711# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
712# asm 2: movdqa <z6=%xmm1,>orig6=160(%esp)
713movdqa %xmm1,160(%esp)
714
715# qhasm: orig11 = z11
716# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
717# asm 2: movdqa <z11=%xmm2,>orig11=176(%esp)
718movdqa %xmm2,176(%esp)
719
720# qhasm: orig12 = z12
721# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
722# asm 2: movdqa <z12=%xmm3,>orig12=192(%esp)
723movdqa %xmm3,192(%esp)
724
725# qhasm: orig1 = z1
726# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
727# asm 2: movdqa <z1=%xmm0,>orig1=208(%esp)
728movdqa %xmm0,208(%esp)
729
730# qhasm: z2 = x2
731# asm 1: movdqa <x2=stack128#4,>z2=int6464#1
732# asm 2: movdqa <x2=80(%esp),>z2=%xmm0
733movdqa 80(%esp),%xmm0
734
735# qhasm: z7 = z2[3,3,3,3]
736# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
737# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
738pshufd $0xff,%xmm0,%xmm1
739
740# qhasm: z13 = z2[1,1,1,1]
741# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
742# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
743pshufd $0x55,%xmm0,%xmm2
744
745# qhasm: z2 = z2[2,2,2,2]
746# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
747# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
748pshufd $0xaa,%xmm0,%xmm0
749
750# qhasm: orig7 = z7
751# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
752# asm 2: movdqa <z7=%xmm1,>orig7=224(%esp)
753movdqa %xmm1,224(%esp)
754
755# qhasm: orig13 = z13
756# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
757# asm 2: movdqa <z13=%xmm2,>orig13=240(%esp)
758movdqa %xmm2,240(%esp)
759
760# qhasm: orig2 = z2
761# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
762# asm 2: movdqa <z2=%xmm0,>orig2=256(%esp)
763movdqa %xmm0,256(%esp)
764
765# qhasm: z3 = x3
766# asm 1: movdqa <x3=stack128#1,>z3=int6464#1
767# asm 2: movdqa <x3=32(%esp),>z3=%xmm0
768movdqa 32(%esp),%xmm0
769
770# qhasm: z4 = z3[0,0,0,0]
771# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
772# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
773pshufd $0x00,%xmm0,%xmm1
774
775# qhasm: z14 = z3[2,2,2,2]
776# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
777# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
778pshufd $0xaa,%xmm0,%xmm2
779
780# qhasm: z3 = z3[3,3,3,3]
781# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
782# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
783pshufd $0xff,%xmm0,%xmm0
784
785# qhasm: orig4 = z4
786# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
787# asm 2: movdqa <z4=%xmm1,>orig4=272(%esp)
788movdqa %xmm1,272(%esp)
789
790# qhasm: orig14 = z14
791# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
792# asm 2: movdqa <z14=%xmm2,>orig14=288(%esp)
793movdqa %xmm2,288(%esp)
794
795# qhasm: orig3 = z3
796# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
797# asm 2: movdqa <z3=%xmm0,>orig3=304(%esp)
798movdqa %xmm0,304(%esp)
799
800# qhasm: bytesatleast256:
801._bytesatleast256:
802
803# qhasm: in8 = ((uint32 *)&x2)[0]
804# asm 1: movl <x2=stack128#4,>in8=int32#2
805# asm 2: movl <x2=80(%esp),>in8=%ecx
806movl 80(%esp),%ecx
807
808# qhasm: in9 = ((uint32 *)&x3)[1]
809# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
810# asm 2: movl 4+<x3=32(%esp),>in9=%edx
811movl 4+32(%esp),%edx
812
813# qhasm: ((uint32 *) &orig8)[0] = in8
814# asm 1: movl <in8=int32#2,>orig8=stack128#19
815# asm 2: movl <in8=%ecx,>orig8=320(%esp)
816movl %ecx,320(%esp)
817
818# qhasm: ((uint32 *) &orig9)[0] = in9
819# asm 1: movl <in9=int32#3,>orig9=stack128#20
820# asm 2: movl <in9=%edx,>orig9=336(%esp)
821movl %edx,336(%esp)
822
823# qhasm: carry? in8 += 1
824# asm 1: add $1,<in8=int32#2
825# asm 2: add $1,<in8=%ecx
826add $1,%ecx
827
828# qhasm: in9 += 0 + carry
829# asm 1: adc $0,<in9=int32#3
830# asm 2: adc $0,<in9=%edx
831adc $0,%edx
832
833# qhasm: ((uint32 *) &orig8)[1] = in8
834# asm 1: movl <in8=int32#2,4+<orig8=stack128#19
835# asm 2: movl <in8=%ecx,4+<orig8=320(%esp)
836movl %ecx,4+320(%esp)
837
838# qhasm: ((uint32 *) &orig9)[1] = in9
839# asm 1: movl <in9=int32#3,4+<orig9=stack128#20
840# asm 2: movl <in9=%edx,4+<orig9=336(%esp)
841movl %edx,4+336(%esp)
842
843# qhasm: carry? in8 += 1
844# asm 1: add $1,<in8=int32#2
845# asm 2: add $1,<in8=%ecx
846add $1,%ecx
847
848# qhasm: in9 += 0 + carry
849# asm 1: adc $0,<in9=int32#3
850# asm 2: adc $0,<in9=%edx
851adc $0,%edx
852
853# qhasm: ((uint32 *) &orig8)[2] = in8
854# asm 1: movl <in8=int32#2,8+<orig8=stack128#19
855# asm 2: movl <in8=%ecx,8+<orig8=320(%esp)
856movl %ecx,8+320(%esp)
857
858# qhasm: ((uint32 *) &orig9)[2] = in9
859# asm 1: movl <in9=int32#3,8+<orig9=stack128#20
860# asm 2: movl <in9=%edx,8+<orig9=336(%esp)
861movl %edx,8+336(%esp)
862
863# qhasm: carry? in8 += 1
864# asm 1: add $1,<in8=int32#2
865# asm 2: add $1,<in8=%ecx
866add $1,%ecx
867
868# qhasm: in9 += 0 + carry
869# asm 1: adc $0,<in9=int32#3
870# asm 2: adc $0,<in9=%edx
871adc $0,%edx
872
873# qhasm: ((uint32 *) &orig8)[3] = in8
874# asm 1: movl <in8=int32#2,12+<orig8=stack128#19
875# asm 2: movl <in8=%ecx,12+<orig8=320(%esp)
876movl %ecx,12+320(%esp)
877
878# qhasm: ((uint32 *) &orig9)[3] = in9
879# asm 1: movl <in9=int32#3,12+<orig9=stack128#20
880# asm 2: movl <in9=%edx,12+<orig9=336(%esp)
881movl %edx,12+336(%esp)
882
883# qhasm: carry? in8 += 1
884# asm 1: add $1,<in8=int32#2
885# asm 2: add $1,<in8=%ecx
886add $1,%ecx
887
888# qhasm: in9 += 0 + carry
889# asm 1: adc $0,<in9=int32#3
890# asm 2: adc $0,<in9=%edx
891adc $0,%edx
892
893# qhasm: ((uint32 *)&x2)[0] = in8
894# asm 1: movl <in8=int32#2,>x2=stack128#4
895# asm 2: movl <in8=%ecx,>x2=80(%esp)
896movl %ecx,80(%esp)
897
898# qhasm: ((uint32 *)&x3)[1] = in9
899# asm 1: movl <in9=int32#3,4+<x3=stack128#1
900# asm 2: movl <in9=%edx,4+<x3=32(%esp)
901movl %edx,4+32(%esp)
902
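# orig8/orig9 now carry the per-lane 64-bit block counters: lane j holds
# counter+j (32-bit add with carry into the high word), and x2/x3 keep the
# counter advanced by 4 for the next 256-byte pass.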
903# qhasm: bytes_stack = bytes
904# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
905# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
906movl %eax,24(%esp)
907
908# qhasm: i = 8
909# asm 1: mov $8,>i=int32#1
910# asm 2: mov $8,>i=%eax
911mov $8,%eax
912
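# i = 8 is the round counter for Salsa20/8; ._mainloop1 below performs the
# column rounds and row rounds of the Salsa20 double round on the four
# parallel blocks.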
913# qhasm: z5 = orig5
914# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
915# asm 2: movdqa <orig5=96(%esp),>z5=%xmm0
916movdqa 96(%esp),%xmm0
917
918# qhasm: z10 = orig10
919# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
920# asm 2: movdqa <orig10=112(%esp),>z10=%xmm1
921movdqa 112(%esp),%xmm1
922
923# qhasm: z15 = orig15
924# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
925# asm 2: movdqa <orig15=128(%esp),>z15=%xmm2
926movdqa 128(%esp),%xmm2
927
928# qhasm: z14 = orig14
929# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
930# asm 2: movdqa <orig14=288(%esp),>z14=%xmm3
931movdqa 288(%esp),%xmm3
932
933# qhasm: z3 = orig3
934# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
935# asm 2: movdqa <orig3=304(%esp),>z3=%xmm4
936movdqa 304(%esp),%xmm4
937
938# qhasm: z6 = orig6
939# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
940# asm 2: movdqa <orig6=160(%esp),>z6=%xmm5
941movdqa 160(%esp),%xmm5
942
943# qhasm: z11 = orig11
944# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
945# asm 2: movdqa <orig11=176(%esp),>z11=%xmm6
946movdqa 176(%esp),%xmm6
947
948# qhasm: z1 = orig1
949# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
950# asm 2: movdqa <orig1=208(%esp),>z1=%xmm7
951movdqa 208(%esp),%xmm7
952
953# qhasm: z5_stack = z5
954# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#21
955# asm 2: movdqa <z5=%xmm0,>z5_stack=352(%esp)
956movdqa %xmm0,352(%esp)
957
958# qhasm: z10_stack = z10
959# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#22
960# asm 2: movdqa <z10=%xmm1,>z10_stack=368(%esp)
961movdqa %xmm1,368(%esp)
962
963# qhasm: z15_stack = z15
964# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#23
965# asm 2: movdqa <z15=%xmm2,>z15_stack=384(%esp)
966movdqa %xmm2,384(%esp)
967
968# qhasm: z14_stack = z14
969# asm 1: movdqa <z14=int6464#4,>z14_stack=stack128#24
970# asm 2: movdqa <z14=%xmm3,>z14_stack=400(%esp)
971movdqa %xmm3,400(%esp)
972
973# qhasm: z3_stack = z3
974# asm 1: movdqa <z3=int6464#5,>z3_stack=stack128#25
975# asm 2: movdqa <z3=%xmm4,>z3_stack=416(%esp)
976movdqa %xmm4,416(%esp)
977
978# qhasm: z6_stack = z6
979# asm 1: movdqa <z6=int6464#6,>z6_stack=stack128#26
980# asm 2: movdqa <z6=%xmm5,>z6_stack=432(%esp)
981movdqa %xmm5,432(%esp)
982
983# qhasm: z11_stack = z11
984# asm 1: movdqa <z11=int6464#7,>z11_stack=stack128#27
985# asm 2: movdqa <z11=%xmm6,>z11_stack=448(%esp)
986movdqa %xmm6,448(%esp)
987
988# qhasm: z1_stack = z1
989# asm 1: movdqa <z1=int6464#8,>z1_stack=stack128#28
990# asm 2: movdqa <z1=%xmm7,>z1_stack=464(%esp)
991movdqa %xmm7,464(%esp)
992
993# qhasm: z7 = orig7
994# asm 1: movdqa <orig7=stack128#13,>z7=int6464#5
995# asm 2: movdqa <orig7=224(%esp),>z7=%xmm4
996movdqa 224(%esp),%xmm4
997
998# qhasm: z13 = orig13
999# asm 1: movdqa <orig13=stack128#14,>z13=int6464#6
1000# asm 2: movdqa <orig13=240(%esp),>z13=%xmm5
1001movdqa 240(%esp),%xmm5
1002
1003# qhasm: z2 = orig2
1004# asm 1: movdqa <orig2=stack128#15,>z2=int6464#7
1005# asm 2: movdqa <orig2=256(%esp),>z2=%xmm6
1006movdqa 256(%esp),%xmm6
1007
1008# qhasm: z9 = orig9
1009# asm 1: movdqa <orig9=stack128#20,>z9=int6464#8
1010# asm 2: movdqa <orig9=336(%esp),>z9=%xmm7
1011movdqa 336(%esp),%xmm7
1012
1013# qhasm: p = orig0
1014# asm 1: movdqa <orig0=stack128#8,>p=int6464#1
1015# asm 2: movdqa <orig0=144(%esp),>p=%xmm0
1016movdqa 144(%esp),%xmm0
1017
1018# qhasm: t = orig12
1019# asm 1: movdqa <orig12=stack128#11,>t=int6464#3
1020# asm 2: movdqa <orig12=192(%esp),>t=%xmm2
1021movdqa 192(%esp),%xmm2
1022
1023# qhasm: q = orig4
1024# asm 1: movdqa <orig4=stack128#16,>q=int6464#4
1025# asm 2: movdqa <orig4=272(%esp),>q=%xmm3
1026movdqa 272(%esp),%xmm3
1027
1028# qhasm: r = orig8
1029# asm 1: movdqa <orig8=stack128#19,>r=int6464#2
1030# asm 2: movdqa <orig8=320(%esp),>r=%xmm1
1031movdqa 320(%esp),%xmm1
1032
1033# qhasm: z7_stack = z7
1034# asm 1: movdqa <z7=int6464#5,>z7_stack=stack128#29
1035# asm 2: movdqa <z7=%xmm4,>z7_stack=480(%esp)
1036movdqa %xmm4,480(%esp)
1037
1038# qhasm: z13_stack = z13
1039# asm 1: movdqa <z13=int6464#6,>z13_stack=stack128#30
1040# asm 2: movdqa <z13=%xmm5,>z13_stack=496(%esp)
1041movdqa %xmm5,496(%esp)
1042
1043# qhasm: z2_stack = z2
1044# asm 1: movdqa <z2=int6464#7,>z2_stack=stack128#31
1045# asm 2: movdqa <z2=%xmm6,>z2_stack=512(%esp)
1046movdqa %xmm6,512(%esp)
1047
1048# qhasm: z9_stack = z9
1049# asm 1: movdqa <z9=int6464#8,>z9_stack=stack128#32
1050# asm 2: movdqa <z9=%xmm7,>z9_stack=528(%esp)
1051movdqa %xmm7,528(%esp)
1052
1053# qhasm: z0_stack = p
1054# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1055# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1056movdqa %xmm0,544(%esp)
1057
1058# qhasm: z12_stack = t
1059# asm 1: movdqa <t=int6464#3,>z12_stack=stack128#34
1060# asm 2: movdqa <t=%xmm2,>z12_stack=560(%esp)
1061movdqa %xmm2,560(%esp)
1062
1063# qhasm: z4_stack = q
1064# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#35
1065# asm 2: movdqa <q=%xmm3,>z4_stack=576(%esp)
1066movdqa %xmm3,576(%esp)
1067
1068# qhasm: z8_stack = r
1069# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#36
1070# asm 2: movdqa <r=%xmm1,>z8_stack=592(%esp)
1071movdqa %xmm1,592(%esp)
1072
1073# qhasm: mainloop1:
1074._mainloop1:
1075
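# two independent quarter-round chains (p/q/r/s and mp/mq/mr/ms) run
# interleaved so their shifts and xors can overlap; each 32-bit rotation by
# 7, 9, 13 or 18 is built from a pslld/psrld pair plus pxor, since SSE2 has
# no packed rotate instruction.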
1076# qhasm: assign xmm0 to p
1077
1078# qhasm: assign xmm1 to r
1079
1080# qhasm: assign xmm2 to t
1081
1082# qhasm: assign xmm3 to q
1083
1084# qhasm: s = t
1085# asm 1: movdqa <t=int6464#3,>s=int6464#7
1086# asm 2: movdqa <t=%xmm2,>s=%xmm6
1087movdqa %xmm2,%xmm6
1088
1089# qhasm: uint32323232 t += p
1090# asm 1: paddd <p=int6464#1,<t=int6464#3
1091# asm 2: paddd <p=%xmm0,<t=%xmm2
1092paddd %xmm0,%xmm2
1093
1094# qhasm: u = t
1095# asm 1: movdqa <t=int6464#3,>u=int6464#5
1096# asm 2: movdqa <t=%xmm2,>u=%xmm4
1097movdqa %xmm2,%xmm4
1098
1099# qhasm: uint32323232 t >>= 25
1100# asm 1: psrld $25,<t=int6464#3
1101# asm 2: psrld $25,<t=%xmm2
1102psrld $25,%xmm2
1103
1104# qhasm: q ^= t
1105# asm 1: pxor <t=int6464#3,<q=int6464#4
1106# asm 2: pxor <t=%xmm2,<q=%xmm3
1107pxor %xmm2,%xmm3
1108
1109# qhasm: uint32323232 u <<= 7
1110# asm 1: pslld $7,<u=int6464#5
1111# asm 2: pslld $7,<u=%xmm4
1112pslld $7,%xmm4
1113
1114# qhasm: q ^= u
1115# asm 1: pxor <u=int6464#5,<q=int6464#4
1116# asm 2: pxor <u=%xmm4,<q=%xmm3
1117pxor %xmm4,%xmm3
1118
1119# qhasm: z4_stack = q
1120# asm 1: movdqa <q=int6464#4,>z4_stack=stack128#33
1121# asm 2: movdqa <q=%xmm3,>z4_stack=544(%esp)
1122movdqa %xmm3,544(%esp)
1123
1124# qhasm: t = p
1125# asm 1: movdqa <p=int6464#1,>t=int6464#3
1126# asm 2: movdqa <p=%xmm0,>t=%xmm2
1127movdqa %xmm0,%xmm2
1128
1129# qhasm: uint32323232 t += q
1130# asm 1: paddd <q=int6464#4,<t=int6464#3
1131# asm 2: paddd <q=%xmm3,<t=%xmm2
1132paddd %xmm3,%xmm2
1133
1134# qhasm: u = t
1135# asm 1: movdqa <t=int6464#3,>u=int6464#5
1136# asm 2: movdqa <t=%xmm2,>u=%xmm4
1137movdqa %xmm2,%xmm4
1138
1139# qhasm: uint32323232 t >>= 23
1140# asm 1: psrld $23,<t=int6464#3
1141# asm 2: psrld $23,<t=%xmm2
1142psrld $23,%xmm2
1143
1144# qhasm: r ^= t
1145# asm 1: pxor <t=int6464#3,<r=int6464#2
1146# asm 2: pxor <t=%xmm2,<r=%xmm1
1147pxor %xmm2,%xmm1
1148
1149# qhasm: uint32323232 u <<= 9
1150# asm 1: pslld $9,<u=int6464#5
1151# asm 2: pslld $9,<u=%xmm4
1152pslld $9,%xmm4
1153
1154# qhasm: r ^= u
1155# asm 1: pxor <u=int6464#5,<r=int6464#2
1156# asm 2: pxor <u=%xmm4,<r=%xmm1
1157pxor %xmm4,%xmm1
1158
1159# qhasm: z8_stack = r
1160# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#34
1161# asm 2: movdqa <r=%xmm1,>z8_stack=560(%esp)
1162movdqa %xmm1,560(%esp)
1163
1164# qhasm: uint32323232 q += r
1165# asm 1: paddd <r=int6464#2,<q=int6464#4
1166# asm 2: paddd <r=%xmm1,<q=%xmm3
1167paddd %xmm1,%xmm3
1168
1169# qhasm: u = q
1170# asm 1: movdqa <q=int6464#4,>u=int6464#3
1171# asm 2: movdqa <q=%xmm3,>u=%xmm2
1172movdqa %xmm3,%xmm2
1173
1174# qhasm: uint32323232 q >>= 19
1175# asm 1: psrld $19,<q=int6464#4
1176# asm 2: psrld $19,<q=%xmm3
1177psrld $19,%xmm3
1178
1179# qhasm: s ^= q
1180# asm 1: pxor <q=int6464#4,<s=int6464#7
1181# asm 2: pxor <q=%xmm3,<s=%xmm6
1182pxor %xmm3,%xmm6
1183
1184# qhasm: uint32323232 u <<= 13
1185# asm 1: pslld $13,<u=int6464#3
1186# asm 2: pslld $13,<u=%xmm2
1187pslld $13,%xmm2
1188
1189# qhasm: s ^= u
1190# asm 1: pxor <u=int6464#3,<s=int6464#7
1191# asm 2: pxor <u=%xmm2,<s=%xmm6
1192pxor %xmm2,%xmm6
1193
1194# qhasm: mt = z1_stack
1195# asm 1: movdqa <z1_stack=stack128#28,>mt=int6464#3
1196# asm 2: movdqa <z1_stack=464(%esp),>mt=%xmm2
1197movdqa 464(%esp),%xmm2
1198
1199# qhasm: mp = z5_stack
1200# asm 1: movdqa <z5_stack=stack128#21,>mp=int6464#5
1201# asm 2: movdqa <z5_stack=352(%esp),>mp=%xmm4
1202movdqa 352(%esp),%xmm4
1203
1204# qhasm: mq = z9_stack
1205# asm 1: movdqa <z9_stack=stack128#32,>mq=int6464#4
1206# asm 2: movdqa <z9_stack=528(%esp),>mq=%xmm3
1207movdqa 528(%esp),%xmm3
1208
1209# qhasm: mr = z13_stack
1210# asm 1: movdqa <z13_stack=stack128#30,>mr=int6464#6
1211# asm 2: movdqa <z13_stack=496(%esp),>mr=%xmm5
1212movdqa 496(%esp),%xmm5
1213
1214# qhasm: z12_stack = s
1215# asm 1: movdqa <s=int6464#7,>z12_stack=stack128#30
1216# asm 2: movdqa <s=%xmm6,>z12_stack=496(%esp)
1217movdqa %xmm6,496(%esp)
1218
1219# qhasm: uint32323232 r += s
1220# asm 1: paddd <s=int6464#7,<r=int6464#2
1221# asm 2: paddd <s=%xmm6,<r=%xmm1
1222paddd %xmm6,%xmm1
1223
1224# qhasm: u = r
1225# asm 1: movdqa <r=int6464#2,>u=int6464#7
1226# asm 2: movdqa <r=%xmm1,>u=%xmm6
1227movdqa %xmm1,%xmm6
1228
1229# qhasm: uint32323232 r >>= 14
1230# asm 1: psrld $14,<r=int6464#2
1231# asm 2: psrld $14,<r=%xmm1
1232psrld $14,%xmm1
1233
1234# qhasm: p ^= r
1235# asm 1: pxor <r=int6464#2,<p=int6464#1
1236# asm 2: pxor <r=%xmm1,<p=%xmm0
1237pxor %xmm1,%xmm0
1238
1239# qhasm: uint32323232 u <<= 18
1240# asm 1: pslld $18,<u=int6464#7
1241# asm 2: pslld $18,<u=%xmm6
1242pslld $18,%xmm6
1243
1244# qhasm: p ^= u
1245# asm 1: pxor <u=int6464#7,<p=int6464#1
1246# asm 2: pxor <u=%xmm6,<p=%xmm0
1247pxor %xmm6,%xmm0
1248
1249# qhasm: z0_stack = p
1250# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#21
1251# asm 2: movdqa <p=%xmm0,>z0_stack=352(%esp)
1252movdqa %xmm0,352(%esp)
1253
1254# qhasm: assign xmm2 to mt
1255
1256# qhasm: assign xmm3 to mq
1257
1258# qhasm: assign xmm4 to mp
1259
1260# qhasm: assign xmm5 to mr
1261
1262# qhasm: ms = mt
1263# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1264# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1265movdqa %xmm2,%xmm6
1266
1267# qhasm: uint32323232 mt += mp
1268# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1269# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1270paddd %xmm4,%xmm2
1271
1272# qhasm: mu = mt
1273# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1274# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1275movdqa %xmm2,%xmm0
1276
1277# qhasm: uint32323232 mt >>= 25
1278# asm 1: psrld $25,<mt=int6464#3
1279# asm 2: psrld $25,<mt=%xmm2
1280psrld $25,%xmm2
1281
1282# qhasm: mq ^= mt
1283# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1284# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1285pxor %xmm2,%xmm3
1286
1287# qhasm: uint32323232 mu <<= 7
1288# asm 1: pslld $7,<mu=int6464#1
1289# asm 2: pslld $7,<mu=%xmm0
1290pslld $7,%xmm0
1291
1292# qhasm: mq ^= mu
1293# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1294# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1295pxor %xmm0,%xmm3
1296
1297# qhasm: z9_stack = mq
1298# asm 1: movdqa <mq=int6464#4,>z9_stack=stack128#32
1299# asm 2: movdqa <mq=%xmm3,>z9_stack=528(%esp)
1300movdqa %xmm3,528(%esp)
1301
1302# qhasm: mt = mp
1303# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1304# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1305movdqa %xmm4,%xmm0
1306
1307# qhasm: uint32323232 mt += mq
1308# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1309# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1310paddd %xmm3,%xmm0
1311
1312# qhasm: mu = mt
1313# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1314# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1315movdqa %xmm0,%xmm1
1316
1317# qhasm: uint32323232 mt >>= 23
1318# asm 1: psrld $23,<mt=int6464#1
1319# asm 2: psrld $23,<mt=%xmm0
1320psrld $23,%xmm0
1321
1322# qhasm: mr ^= mt
1323# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1324# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1325pxor %xmm0,%xmm5
1326
1327# qhasm: uint32323232 mu <<= 9
1328# asm 1: pslld $9,<mu=int6464#2
1329# asm 2: pslld $9,<mu=%xmm1
1330pslld $9,%xmm1
1331
1332# qhasm: mr ^= mu
1333# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1334# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1335pxor %xmm1,%xmm5
1336
1337# qhasm: z13_stack = mr
1338# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#35
1339# asm 2: movdqa <mr=%xmm5,>z13_stack=576(%esp)
1340movdqa %xmm5,576(%esp)
1341
1342# qhasm: uint32323232 mq += mr
1343# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1344# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1345paddd %xmm5,%xmm3
1346
1347# qhasm: mu = mq
1348# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1349# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1350movdqa %xmm3,%xmm0
1351
1352# qhasm: uint32323232 mq >>= 19
1353# asm 1: psrld $19,<mq=int6464#4
1354# asm 2: psrld $19,<mq=%xmm3
1355psrld $19,%xmm3
1356
1357# qhasm: ms ^= mq
1358# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1359# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1360pxor %xmm3,%xmm6
1361
1362# qhasm: uint32323232 mu <<= 13
1363# asm 1: pslld $13,<mu=int6464#1
1364# asm 2: pslld $13,<mu=%xmm0
1365pslld $13,%xmm0
1366
1367# qhasm: ms ^= mu
1368# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1369# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1370pxor %xmm0,%xmm6
1371
1372# qhasm: t = z6_stack
1373# asm 1: movdqa <z6_stack=stack128#26,>t=int6464#3
1374# asm 2: movdqa <z6_stack=432(%esp),>t=%xmm2
1375movdqa 432(%esp),%xmm2
1376
1377# qhasm: p = z10_stack
1378# asm 1: movdqa <z10_stack=stack128#22,>p=int6464#1
1379# asm 2: movdqa <z10_stack=368(%esp),>p=%xmm0
1380movdqa 368(%esp),%xmm0
1381
1382# qhasm: q = z14_stack
1383# asm 1: movdqa <z14_stack=stack128#24,>q=int6464#4
1384# asm 2: movdqa <z14_stack=400(%esp),>q=%xmm3
1385movdqa 400(%esp),%xmm3
1386
1387# qhasm: r = z2_stack
1388# asm 1: movdqa <z2_stack=stack128#31,>r=int6464#2
1389# asm 2: movdqa <z2_stack=512(%esp),>r=%xmm1
1390movdqa 512(%esp),%xmm1
1391
1392# qhasm: z1_stack = ms
1393# asm 1: movdqa <ms=int6464#7,>z1_stack=stack128#22
1394# asm 2: movdqa <ms=%xmm6,>z1_stack=368(%esp)
1395movdqa %xmm6,368(%esp)
1396
1397# qhasm: uint32323232 mr += ms
1398# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1399# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1400paddd %xmm6,%xmm5
1401
1402# qhasm: mu = mr
1403# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1404# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1405movdqa %xmm5,%xmm6
1406
1407# qhasm: uint32323232 mr >>= 14
1408# asm 1: psrld $14,<mr=int6464#6
1409# asm 2: psrld $14,<mr=%xmm5
1410psrld $14,%xmm5
1411
1412# qhasm: mp ^= mr
1413# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1414# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1415pxor %xmm5,%xmm4
1416
1417# qhasm: uint32323232 mu <<= 18
1418# asm 1: pslld $18,<mu=int6464#7
1419# asm 2: pslld $18,<mu=%xmm6
1420pslld $18,%xmm6
1421
1422# qhasm: mp ^= mu
1423# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1424# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1425pxor %xmm6,%xmm4
1426
1427# qhasm: z5_stack = mp
1428# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#24
1429# asm 2: movdqa <mp=%xmm4,>z5_stack=400(%esp)
1430movdqa %xmm4,400(%esp)
1431
1432# qhasm: assign xmm0 to p
1433
1434# qhasm: assign xmm1 to r
1435
1436# qhasm: assign xmm2 to t
1437
1438# qhasm: assign xmm3 to q
1439
1440# qhasm: s = t
1441# asm 1: movdqa <t=int6464#3,>s=int6464#7
1442# asm 2: movdqa <t=%xmm2,>s=%xmm6
1443movdqa %xmm2,%xmm6
1444
1445# qhasm: uint32323232 t += p
1446# asm 1: paddd <p=int6464#1,<t=int6464#3
1447# asm 2: paddd <p=%xmm0,<t=%xmm2
1448paddd %xmm0,%xmm2
1449
1450# qhasm: u = t
1451# asm 1: movdqa <t=int6464#3,>u=int6464#5
1452# asm 2: movdqa <t=%xmm2,>u=%xmm4
1453movdqa %xmm2,%xmm4
1454
1455# qhasm: uint32323232 t >>= 25
1456# asm 1: psrld $25,<t=int6464#3
1457# asm 2: psrld $25,<t=%xmm2
1458psrld $25,%xmm2
1459
1460# qhasm: q ^= t
1461# asm 1: pxor <t=int6464#3,<q=int6464#4
1462# asm 2: pxor <t=%xmm2,<q=%xmm3
1463pxor %xmm2,%xmm3
1464
1465# qhasm: uint32323232 u <<= 7
1466# asm 1: pslld $7,<u=int6464#5
1467# asm 2: pslld $7,<u=%xmm4
1468pslld $7,%xmm4
1469
1470# qhasm: q ^= u
1471# asm 1: pxor <u=int6464#5,<q=int6464#4
1472# asm 2: pxor <u=%xmm4,<q=%xmm3
1473pxor %xmm4,%xmm3
1474
1475# qhasm: z14_stack = q
1476# asm 1: movdqa <q=int6464#4,>z14_stack=stack128#36
1477# asm 2: movdqa <q=%xmm3,>z14_stack=592(%esp)
1478movdqa %xmm3,592(%esp)
1479
1480# qhasm: t = p
1481# asm 1: movdqa <p=int6464#1,>t=int6464#3
1482# asm 2: movdqa <p=%xmm0,>t=%xmm2
1483movdqa %xmm0,%xmm2
1484
1485# qhasm: uint32323232 t += q
1486# asm 1: paddd <q=int6464#4,<t=int6464#3
1487# asm 2: paddd <q=%xmm3,<t=%xmm2
1488paddd %xmm3,%xmm2
1489
1490# qhasm: u = t
1491# asm 1: movdqa <t=int6464#3,>u=int6464#5
1492# asm 2: movdqa <t=%xmm2,>u=%xmm4
1493movdqa %xmm2,%xmm4
1494
1495# qhasm: uint32323232 t >>= 23
1496# asm 1: psrld $23,<t=int6464#3
1497# asm 2: psrld $23,<t=%xmm2
1498psrld $23,%xmm2
1499
1500# qhasm: r ^= t
1501# asm 1: pxor <t=int6464#3,<r=int6464#2
1502# asm 2: pxor <t=%xmm2,<r=%xmm1
1503pxor %xmm2,%xmm1
1504
1505# qhasm: uint32323232 u <<= 9
1506# asm 1: pslld $9,<u=int6464#5
1507# asm 2: pslld $9,<u=%xmm4
1508pslld $9,%xmm4
1509
1510# qhasm: r ^= u
1511# asm 1: pxor <u=int6464#5,<r=int6464#2
1512# asm 2: pxor <u=%xmm4,<r=%xmm1
1513pxor %xmm4,%xmm1
1514
1515# qhasm: z2_stack = r
1516# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#26
1517# asm 2: movdqa <r=%xmm1,>z2_stack=432(%esp)
1518movdqa %xmm1,432(%esp)
1519
1520# qhasm: uint32323232 q += r
1521# asm 1: paddd <r=int6464#2,<q=int6464#4
1522# asm 2: paddd <r=%xmm1,<q=%xmm3
1523paddd %xmm1,%xmm3
1524
1525# qhasm: u = q
1526# asm 1: movdqa <q=int6464#4,>u=int6464#3
1527# asm 2: movdqa <q=%xmm3,>u=%xmm2
1528movdqa %xmm3,%xmm2
1529
1530# qhasm: uint32323232 q >>= 19
1531# asm 1: psrld $19,<q=int6464#4
1532# asm 2: psrld $19,<q=%xmm3
1533psrld $19,%xmm3
1534
1535# qhasm: s ^= q
1536# asm 1: pxor <q=int6464#4,<s=int6464#7
1537# asm 2: pxor <q=%xmm3,<s=%xmm6
1538pxor %xmm3,%xmm6
1539
1540# qhasm: uint32323232 u <<= 13
1541# asm 1: pslld $13,<u=int6464#3
1542# asm 2: pslld $13,<u=%xmm2
1543pslld $13,%xmm2
1544
1545# qhasm: s ^= u
1546# asm 1: pxor <u=int6464#3,<s=int6464#7
1547# asm 2: pxor <u=%xmm2,<s=%xmm6
1548pxor %xmm2,%xmm6
1549
1550# qhasm: mt = z11_stack
1551# asm 1: movdqa <z11_stack=stack128#27,>mt=int6464#3
1552# asm 2: movdqa <z11_stack=448(%esp),>mt=%xmm2
1553movdqa 448(%esp),%xmm2
1554
1555# qhasm: mp = z15_stack
1556# asm 1: movdqa <z15_stack=stack128#23,>mp=int6464#5
1557# asm 2: movdqa <z15_stack=384(%esp),>mp=%xmm4
1558movdqa 384(%esp),%xmm4
1559
1560# qhasm: mq = z3_stack
1561# asm 1: movdqa <z3_stack=stack128#25,>mq=int6464#4
1562# asm 2: movdqa <z3_stack=416(%esp),>mq=%xmm3
1563movdqa 416(%esp),%xmm3
1564
1565# qhasm: mr = z7_stack
1566# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1567# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1568movdqa 480(%esp),%xmm5
1569
1570# qhasm: z6_stack = s
1571# asm 1: movdqa <s=int6464#7,>z6_stack=stack128#23
1572# asm 2: movdqa <s=%xmm6,>z6_stack=384(%esp)
1573movdqa %xmm6,384(%esp)
1574
1575# qhasm: uint32323232 r += s
1576# asm 1: paddd <s=int6464#7,<r=int6464#2
1577# asm 2: paddd <s=%xmm6,<r=%xmm1
1578paddd %xmm6,%xmm1
1579
1580# qhasm: u = r
1581# asm 1: movdqa <r=int6464#2,>u=int6464#7
1582# asm 2: movdqa <r=%xmm1,>u=%xmm6
1583movdqa %xmm1,%xmm6
1584
1585# qhasm: uint32323232 r >>= 14
1586# asm 1: psrld $14,<r=int6464#2
1587# asm 2: psrld $14,<r=%xmm1
1588psrld $14,%xmm1
1589
1590# qhasm: p ^= r
1591# asm 1: pxor <r=int6464#2,<p=int6464#1
1592# asm 2: pxor <r=%xmm1,<p=%xmm0
1593pxor %xmm1,%xmm0
1594
1595# qhasm: uint32323232 u <<= 18
1596# asm 1: pslld $18,<u=int6464#7
1597# asm 2: pslld $18,<u=%xmm6
1598pslld $18,%xmm6
1599
1600# qhasm: p ^= u
1601# asm 1: pxor <u=int6464#7,<p=int6464#1
1602# asm 2: pxor <u=%xmm6,<p=%xmm0
1603pxor %xmm6,%xmm0
1604
1605# qhasm: z10_stack = p
1606# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#27
1607# asm 2: movdqa <p=%xmm0,>z10_stack=448(%esp)
1608movdqa %xmm0,448(%esp)
1609
1610# qhasm: assign xmm2 to mt
1611
1612# qhasm: assign xmm3 to mq
1613
1614# qhasm: assign xmm4 to mp
1615
1616# qhasm: assign xmm5 to mr
1617
1618# qhasm: ms = mt
1619# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1620# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1621movdqa %xmm2,%xmm6
1622
1623# qhasm: uint32323232 mt += mp
1624# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1625# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1626paddd %xmm4,%xmm2
1627
1628# qhasm: mu = mt
1629# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1630# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1631movdqa %xmm2,%xmm0
1632
1633# qhasm: uint32323232 mt >>= 25
1634# asm 1: psrld $25,<mt=int6464#3
1635# asm 2: psrld $25,<mt=%xmm2
1636psrld $25,%xmm2
1637
1638# qhasm: mq ^= mt
1639# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1640# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1641pxor %xmm2,%xmm3
1642
1643# qhasm: uint32323232 mu <<= 7
1644# asm 1: pslld $7,<mu=int6464#1
1645# asm 2: pslld $7,<mu=%xmm0
1646pslld $7,%xmm0
1647
1648# qhasm: mq ^= mu
1649# asm 1: pxor <mu=int6464#1,<mq=int6464#4
1650# asm 2: pxor <mu=%xmm0,<mq=%xmm3
1651pxor %xmm0,%xmm3
1652
1653# qhasm: z3_stack = mq
1654# asm 1: movdqa <mq=int6464#4,>z3_stack=stack128#25
1655# asm 2: movdqa <mq=%xmm3,>z3_stack=416(%esp)
1656movdqa %xmm3,416(%esp)
1657
1658# qhasm: mt = mp
1659# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
1660# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
1661movdqa %xmm4,%xmm0
1662
1663# qhasm: uint32323232 mt += mq
1664# asm 1: paddd <mq=int6464#4,<mt=int6464#1
1665# asm 2: paddd <mq=%xmm3,<mt=%xmm0
1666paddd %xmm3,%xmm0
1667
1668# qhasm: mu = mt
1669# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
1670# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
1671movdqa %xmm0,%xmm1
1672
1673# qhasm: uint32323232 mt >>= 23
1674# asm 1: psrld $23,<mt=int6464#1
1675# asm 2: psrld $23,<mt=%xmm0
1676psrld $23,%xmm0
1677
1678# qhasm: mr ^= mt
1679# asm 1: pxor <mt=int6464#1,<mr=int6464#6
1680# asm 2: pxor <mt=%xmm0,<mr=%xmm5
1681pxor %xmm0,%xmm5
1682
1683# qhasm: uint32323232 mu <<= 9
1684# asm 1: pslld $9,<mu=int6464#2
1685# asm 2: pslld $9,<mu=%xmm1
1686pslld $9,%xmm1
1687
1688# qhasm: mr ^= mu
1689# asm 1: pxor <mu=int6464#2,<mr=int6464#6
1690# asm 2: pxor <mu=%xmm1,<mr=%xmm5
1691pxor %xmm1,%xmm5
1692
1693# qhasm: z7_stack = mr
1694# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
1695# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
1696movdqa %xmm5,480(%esp)
1697
1698# qhasm: uint32323232 mq += mr
1699# asm 1: paddd <mr=int6464#6,<mq=int6464#4
1700# asm 2: paddd <mr=%xmm5,<mq=%xmm3
1701paddd %xmm5,%xmm3
1702
1703# qhasm: mu = mq
1704# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
1705# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
1706movdqa %xmm3,%xmm0
1707
1708# qhasm: uint32323232 mq >>= 19
1709# asm 1: psrld $19,<mq=int6464#4
1710# asm 2: psrld $19,<mq=%xmm3
1711psrld $19,%xmm3
1712
1713# qhasm: ms ^= mq
1714# asm 1: pxor <mq=int6464#4,<ms=int6464#7
1715# asm 2: pxor <mq=%xmm3,<ms=%xmm6
1716pxor %xmm3,%xmm6
1717
1718# qhasm: uint32323232 mu <<= 13
1719# asm 1: pslld $13,<mu=int6464#1
1720# asm 2: pslld $13,<mu=%xmm0
1721pslld $13,%xmm0
1722
1723# qhasm: ms ^= mu
1724# asm 1: pxor <mu=int6464#1,<ms=int6464#7
1725# asm 2: pxor <mu=%xmm0,<ms=%xmm6
1726pxor %xmm0,%xmm6
1727
1728# qhasm: t = z3_stack
1729# asm 1: movdqa <z3_stack=stack128#25,>t=int6464#3
1730# asm 2: movdqa <z3_stack=416(%esp),>t=%xmm2
1731movdqa 416(%esp),%xmm2
1732
1733# qhasm: p = z0_stack
1734# asm 1: movdqa <z0_stack=stack128#21,>p=int6464#1
1735# asm 2: movdqa <z0_stack=352(%esp),>p=%xmm0
1736movdqa 352(%esp),%xmm0
1737
1738# qhasm: q = z1_stack
1739# asm 1: movdqa <z1_stack=stack128#22,>q=int6464#4
1740# asm 2: movdqa <z1_stack=368(%esp),>q=%xmm3
1741movdqa 368(%esp),%xmm3
1742
1743# qhasm: r = z2_stack
1744# asm 1: movdqa <z2_stack=stack128#26,>r=int6464#2
1745# asm 2: movdqa <z2_stack=432(%esp),>r=%xmm1
1746movdqa 432(%esp),%xmm1
1747
1748# qhasm: z11_stack = ms
1749# asm 1: movdqa <ms=int6464#7,>z11_stack=stack128#21
1750# asm 2: movdqa <ms=%xmm6,>z11_stack=352(%esp)
1751movdqa %xmm6,352(%esp)
1752
1753# qhasm: uint32323232 mr += ms
1754# asm 1: paddd <ms=int6464#7,<mr=int6464#6
1755# asm 2: paddd <ms=%xmm6,<mr=%xmm5
1756paddd %xmm6,%xmm5
1757
1758# qhasm: mu = mr
1759# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
1760# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
1761movdqa %xmm5,%xmm6
1762
1763# qhasm: uint32323232 mr >>= 14
1764# asm 1: psrld $14,<mr=int6464#6
1765# asm 2: psrld $14,<mr=%xmm5
1766psrld $14,%xmm5
1767
1768# qhasm: mp ^= mr
1769# asm 1: pxor <mr=int6464#6,<mp=int6464#5
1770# asm 2: pxor <mr=%xmm5,<mp=%xmm4
1771pxor %xmm5,%xmm4
1772
1773# qhasm: uint32323232 mu <<= 18
1774# asm 1: pslld $18,<mu=int6464#7
1775# asm 2: pslld $18,<mu=%xmm6
1776pslld $18,%xmm6
1777
1778# qhasm: mp ^= mu
1779# asm 1: pxor <mu=int6464#7,<mp=int6464#5
1780# asm 2: pxor <mu=%xmm6,<mp=%xmm4
1781pxor %xmm6,%xmm4
1782
1783# qhasm: z15_stack = mp
1784# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#22
1785# asm 2: movdqa <mp=%xmm4,>z15_stack=368(%esp)
1786movdqa %xmm4,368(%esp)
1787
1788# qhasm: assign xmm0 to p
1789
1790# qhasm: assign xmm1 to r
1791
1792# qhasm: assign xmm2 to t
1793
1794# qhasm: assign xmm3 to q
1795
1796# qhasm: s = t
1797# asm 1: movdqa <t=int6464#3,>s=int6464#7
1798# asm 2: movdqa <t=%xmm2,>s=%xmm6
1799movdqa %xmm2,%xmm6
1800
1801# qhasm: uint32323232 t += p
1802# asm 1: paddd <p=int6464#1,<t=int6464#3
1803# asm 2: paddd <p=%xmm0,<t=%xmm2
1804paddd %xmm0,%xmm2
1805
1806# qhasm: u = t
1807# asm 1: movdqa <t=int6464#3,>u=int6464#5
1808# asm 2: movdqa <t=%xmm2,>u=%xmm4
1809movdqa %xmm2,%xmm4
1810
1811# qhasm: uint32323232 t >>= 25
1812# asm 1: psrld $25,<t=int6464#3
1813# asm 2: psrld $25,<t=%xmm2
1814psrld $25,%xmm2
1815
1816# qhasm: q ^= t
1817# asm 1: pxor <t=int6464#3,<q=int6464#4
1818# asm 2: pxor <t=%xmm2,<q=%xmm3
1819pxor %xmm2,%xmm3
1820
1821# qhasm: uint32323232 u <<= 7
1822# asm 1: pslld $7,<u=int6464#5
1823# asm 2: pslld $7,<u=%xmm4
1824pslld $7,%xmm4
1825
1826# qhasm: q ^= u
1827# asm 1: pxor <u=int6464#5,<q=int6464#4
1828# asm 2: pxor <u=%xmm4,<q=%xmm3
1829pxor %xmm4,%xmm3
1830
1831# qhasm: z1_stack = q
1832# asm 1: movdqa <q=int6464#4,>z1_stack=stack128#28
1833# asm 2: movdqa <q=%xmm3,>z1_stack=464(%esp)
1834movdqa %xmm3,464(%esp)
1835
1836# qhasm: t = p
1837# asm 1: movdqa <p=int6464#1,>t=int6464#3
1838# asm 2: movdqa <p=%xmm0,>t=%xmm2
1839movdqa %xmm0,%xmm2
1840
1841# qhasm: uint32323232 t += q
1842# asm 1: paddd <q=int6464#4,<t=int6464#3
1843# asm 2: paddd <q=%xmm3,<t=%xmm2
1844paddd %xmm3,%xmm2
1845
1846# qhasm: u = t
1847# asm 1: movdqa <t=int6464#3,>u=int6464#5
1848# asm 2: movdqa <t=%xmm2,>u=%xmm4
1849movdqa %xmm2,%xmm4
1850
1851# qhasm: uint32323232 t >>= 23
1852# asm 1: psrld $23,<t=int6464#3
1853# asm 2: psrld $23,<t=%xmm2
1854psrld $23,%xmm2
1855
1856# qhasm: r ^= t
1857# asm 1: pxor <t=int6464#3,<r=int6464#2
1858# asm 2: pxor <t=%xmm2,<r=%xmm1
1859pxor %xmm2,%xmm1
1860
1861# qhasm: uint32323232 u <<= 9
1862# asm 1: pslld $9,<u=int6464#5
1863# asm 2: pslld $9,<u=%xmm4
1864pslld $9,%xmm4
1865
1866# qhasm: r ^= u
1867# asm 1: pxor <u=int6464#5,<r=int6464#2
1868# asm 2: pxor <u=%xmm4,<r=%xmm1
1869pxor %xmm4,%xmm1
1870
1871# qhasm: z2_stack = r
1872# asm 1: movdqa <r=int6464#2,>z2_stack=stack128#31
1873# asm 2: movdqa <r=%xmm1,>z2_stack=512(%esp)
1874movdqa %xmm1,512(%esp)
1875
1876# qhasm: uint32323232 q += r
1877# asm 1: paddd <r=int6464#2,<q=int6464#4
1878# asm 2: paddd <r=%xmm1,<q=%xmm3
1879paddd %xmm1,%xmm3
1880
1881# qhasm: u = q
1882# asm 1: movdqa <q=int6464#4,>u=int6464#3
1883# asm 2: movdqa <q=%xmm3,>u=%xmm2
1884movdqa %xmm3,%xmm2
1885
1886# qhasm: uint32323232 q >>= 19
1887# asm 1: psrld $19,<q=int6464#4
1888# asm 2: psrld $19,<q=%xmm3
1889psrld $19,%xmm3
1890
1891# qhasm: s ^= q
1892# asm 1: pxor <q=int6464#4,<s=int6464#7
1893# asm 2: pxor <q=%xmm3,<s=%xmm6
1894pxor %xmm3,%xmm6
1895
1896# qhasm: uint32323232 u <<= 13
1897# asm 1: pslld $13,<u=int6464#3
1898# asm 2: pslld $13,<u=%xmm2
1899pslld $13,%xmm2
1900
1901# qhasm: s ^= u
1902# asm 1: pxor <u=int6464#3,<s=int6464#7
1903# asm 2: pxor <u=%xmm2,<s=%xmm6
1904pxor %xmm2,%xmm6
1905
1906# qhasm: mt = z4_stack
1907# asm 1: movdqa <z4_stack=stack128#33,>mt=int6464#3
1908# asm 2: movdqa <z4_stack=544(%esp),>mt=%xmm2
1909movdqa 544(%esp),%xmm2
1910
1911# qhasm: mp = z5_stack
1912# asm 1: movdqa <z5_stack=stack128#24,>mp=int6464#5
1913# asm 2: movdqa <z5_stack=400(%esp),>mp=%xmm4
1914movdqa 400(%esp),%xmm4
1915
1916# qhasm: mq = z6_stack
1917# asm 1: movdqa <z6_stack=stack128#23,>mq=int6464#4
1918# asm 2: movdqa <z6_stack=384(%esp),>mq=%xmm3
1919movdqa 384(%esp),%xmm3
1920
1921# qhasm: mr = z7_stack
1922# asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
1923# asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
1924movdqa 480(%esp),%xmm5
1925
1926# qhasm: z3_stack = s
1927# asm 1: movdqa <s=int6464#7,>z3_stack=stack128#25
1928# asm 2: movdqa <s=%xmm6,>z3_stack=416(%esp)
1929movdqa %xmm6,416(%esp)
1930
1931# qhasm: uint32323232 r += s
1932# asm 1: paddd <s=int6464#7,<r=int6464#2
1933# asm 2: paddd <s=%xmm6,<r=%xmm1
1934paddd %xmm6,%xmm1
1935
1936# qhasm: u = r
1937# asm 1: movdqa <r=int6464#2,>u=int6464#7
1938# asm 2: movdqa <r=%xmm1,>u=%xmm6
1939movdqa %xmm1,%xmm6
1940
1941# qhasm: uint32323232 r >>= 14
1942# asm 1: psrld $14,<r=int6464#2
1943# asm 2: psrld $14,<r=%xmm1
1944psrld $14,%xmm1
1945
1946# qhasm: p ^= r
1947# asm 1: pxor <r=int6464#2,<p=int6464#1
1948# asm 2: pxor <r=%xmm1,<p=%xmm0
1949pxor %xmm1,%xmm0
1950
1951# qhasm: uint32323232 u <<= 18
1952# asm 1: pslld $18,<u=int6464#7
1953# asm 2: pslld $18,<u=%xmm6
1954pslld $18,%xmm6
1955
1956# qhasm: p ^= u
1957# asm 1: pxor <u=int6464#7,<p=int6464#1
1958# asm 2: pxor <u=%xmm6,<p=%xmm0
1959pxor %xmm6,%xmm0
1960
1961# qhasm: z0_stack = p
1962# asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
1963# asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
1964movdqa %xmm0,544(%esp)
1965
1966# qhasm: assign xmm2 to mt
1967
1968# qhasm: assign xmm3 to mq
1969
1970# qhasm: assign xmm4 to mp
1971
1972# qhasm: assign xmm5 to mr
1973
1974# qhasm: ms = mt
1975# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
1976# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
1977movdqa %xmm2,%xmm6
1978
1979# qhasm: uint32323232 mt += mp
1980# asm 1: paddd <mp=int6464#5,<mt=int6464#3
1981# asm 2: paddd <mp=%xmm4,<mt=%xmm2
1982paddd %xmm4,%xmm2
1983
1984# qhasm: mu = mt
1985# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
1986# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
1987movdqa %xmm2,%xmm0
1988
1989# qhasm: uint32323232 mt >>= 25
1990# asm 1: psrld $25,<mt=int6464#3
1991# asm 2: psrld $25,<mt=%xmm2
1992psrld $25,%xmm2
1993
1994# qhasm: mq ^= mt
1995# asm 1: pxor <mt=int6464#3,<mq=int6464#4
1996# asm 2: pxor <mt=%xmm2,<mq=%xmm3
1997pxor %xmm2,%xmm3
1998
1999# qhasm: uint32323232 mu <<= 7
2000# asm 1: pslld $7,<mu=int6464#1
2001# asm 2: pslld $7,<mu=%xmm0
2002pslld $7,%xmm0
2003
2004# qhasm: mq ^= mu
2005# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2006# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2007pxor %xmm0,%xmm3
2008
2009# qhasm: z6_stack = mq
2010# asm 1: movdqa <mq=int6464#4,>z6_stack=stack128#26
2011# asm 2: movdqa <mq=%xmm3,>z6_stack=432(%esp)
2012movdqa %xmm3,432(%esp)
2013
2014# qhasm: mt = mp
2015# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2016# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2017movdqa %xmm4,%xmm0
2018
2019# qhasm: uint32323232 mt += mq
2020# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2021# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2022paddd %xmm3,%xmm0
2023
2024# qhasm: mu = mt
2025# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2026# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2027movdqa %xmm0,%xmm1
2028
2029# qhasm: uint32323232 mt >>= 23
2030# asm 1: psrld $23,<mt=int6464#1
2031# asm 2: psrld $23,<mt=%xmm0
2032psrld $23,%xmm0
2033
2034# qhasm: mr ^= mt
2035# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2036# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2037pxor %xmm0,%xmm5
2038
2039# qhasm: uint32323232 mu <<= 9
2040# asm 1: pslld $9,<mu=int6464#2
2041# asm 2: pslld $9,<mu=%xmm1
2042pslld $9,%xmm1
2043
2044# qhasm: mr ^= mu
2045# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2046# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2047pxor %xmm1,%xmm5
2048
2049# qhasm: z7_stack = mr
2050# asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
2051# asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
2052movdqa %xmm5,480(%esp)
2053
2054# qhasm: uint32323232 mq += mr
2055# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2056# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2057paddd %xmm5,%xmm3
2058
2059# qhasm: mu = mq
2060# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2061# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2062movdqa %xmm3,%xmm0
2063
2064# qhasm: uint32323232 mq >>= 19
2065# asm 1: psrld $19,<mq=int6464#4
2066# asm 2: psrld $19,<mq=%xmm3
2067psrld $19,%xmm3
2068
2069# qhasm: ms ^= mq
2070# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2071# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2072pxor %xmm3,%xmm6
2073
2074# qhasm: uint32323232 mu <<= 13
2075# asm 1: pslld $13,<mu=int6464#1
2076# asm 2: pslld $13,<mu=%xmm0
2077pslld $13,%xmm0
2078
2079# qhasm: ms ^= mu
2080# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2081# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2082pxor %xmm0,%xmm6
2083
2084# qhasm: t = z9_stack
2085# asm 1: movdqa <z9_stack=stack128#32,>t=int6464#3
2086# asm 2: movdqa <z9_stack=528(%esp),>t=%xmm2
2087movdqa 528(%esp),%xmm2
2088
2089# qhasm: p = z10_stack
2090# asm 1: movdqa <z10_stack=stack128#27,>p=int6464#1
2091# asm 2: movdqa <z10_stack=448(%esp),>p=%xmm0
2092movdqa 448(%esp),%xmm0
2093
2094# qhasm: q = z11_stack
2095# asm 1: movdqa <z11_stack=stack128#21,>q=int6464#4
2096# asm 2: movdqa <z11_stack=352(%esp),>q=%xmm3
2097movdqa 352(%esp),%xmm3
2098
2099# qhasm: r = z8_stack
2100# asm 1: movdqa <z8_stack=stack128#34,>r=int6464#2
2101# asm 2: movdqa <z8_stack=560(%esp),>r=%xmm1
2102movdqa 560(%esp),%xmm1
2103
2104# qhasm: z4_stack = ms
2105# asm 1: movdqa <ms=int6464#7,>z4_stack=stack128#34
2106# asm 2: movdqa <ms=%xmm6,>z4_stack=560(%esp)
2107movdqa %xmm6,560(%esp)
2108
2109# qhasm: uint32323232 mr += ms
2110# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2111# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2112paddd %xmm6,%xmm5
2113
2114# qhasm: mu = mr
2115# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2116# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2117movdqa %xmm5,%xmm6
2118
2119# qhasm: uint32323232 mr >>= 14
2120# asm 1: psrld $14,<mr=int6464#6
2121# asm 2: psrld $14,<mr=%xmm5
2122psrld $14,%xmm5
2123
2124# qhasm: mp ^= mr
2125# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2126# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2127pxor %xmm5,%xmm4
2128
2129# qhasm: uint32323232 mu <<= 18
2130# asm 1: pslld $18,<mu=int6464#7
2131# asm 2: pslld $18,<mu=%xmm6
2132pslld $18,%xmm6
2133
2134# qhasm: mp ^= mu
2135# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2136# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2137pxor %xmm6,%xmm4
2138
2139# qhasm: z5_stack = mp
2140# asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#21
2141# asm 2: movdqa <mp=%xmm4,>z5_stack=352(%esp)
2142movdqa %xmm4,352(%esp)
2143
2144# qhasm: assign xmm0 to p
2145
2146# qhasm: assign xmm1 to r
2147
2148# qhasm: assign xmm2 to t
2149
2150# qhasm: assign xmm3 to q
2151
2152# qhasm: s = t
2153# asm 1: movdqa <t=int6464#3,>s=int6464#7
2154# asm 2: movdqa <t=%xmm2,>s=%xmm6
2155movdqa %xmm2,%xmm6
2156
2157# qhasm: uint32323232 t += p
2158# asm 1: paddd <p=int6464#1,<t=int6464#3
2159# asm 2: paddd <p=%xmm0,<t=%xmm2
2160paddd %xmm0,%xmm2
2161
2162# qhasm: u = t
2163# asm 1: movdqa <t=int6464#3,>u=int6464#5
2164# asm 2: movdqa <t=%xmm2,>u=%xmm4
2165movdqa %xmm2,%xmm4
2166
2167# qhasm: uint32323232 t >>= 25
2168# asm 1: psrld $25,<t=int6464#3
2169# asm 2: psrld $25,<t=%xmm2
2170psrld $25,%xmm2
2171
2172# qhasm: q ^= t
2173# asm 1: pxor <t=int6464#3,<q=int6464#4
2174# asm 2: pxor <t=%xmm2,<q=%xmm3
2175pxor %xmm2,%xmm3
2176
2177# qhasm: uint32323232 u <<= 7
2178# asm 1: pslld $7,<u=int6464#5
2179# asm 2: pslld $7,<u=%xmm4
2180pslld $7,%xmm4
2181
2182# qhasm: q ^= u
2183# asm 1: pxor <u=int6464#5,<q=int6464#4
2184# asm 2: pxor <u=%xmm4,<q=%xmm3
2185pxor %xmm4,%xmm3
2186
2187# qhasm: z11_stack = q
2188# asm 1: movdqa <q=int6464#4,>z11_stack=stack128#27
2189# asm 2: movdqa <q=%xmm3,>z11_stack=448(%esp)
2190movdqa %xmm3,448(%esp)
2191
2192# qhasm: t = p
2193# asm 1: movdqa <p=int6464#1,>t=int6464#3
2194# asm 2: movdqa <p=%xmm0,>t=%xmm2
2195movdqa %xmm0,%xmm2
2196
2197# qhasm: uint32323232 t += q
2198# asm 1: paddd <q=int6464#4,<t=int6464#3
2199# asm 2: paddd <q=%xmm3,<t=%xmm2
2200paddd %xmm3,%xmm2
2201
2202# qhasm: u = t
2203# asm 1: movdqa <t=int6464#3,>u=int6464#5
2204# asm 2: movdqa <t=%xmm2,>u=%xmm4
2205movdqa %xmm2,%xmm4
2206
2207# qhasm: uint32323232 t >>= 23
2208# asm 1: psrld $23,<t=int6464#3
2209# asm 2: psrld $23,<t=%xmm2
2210psrld $23,%xmm2
2211
2212# qhasm: r ^= t
2213# asm 1: pxor <t=int6464#3,<r=int6464#2
2214# asm 2: pxor <t=%xmm2,<r=%xmm1
2215pxor %xmm2,%xmm1
2216
2217# qhasm: uint32323232 u <<= 9
2218# asm 1: pslld $9,<u=int6464#5
2219# asm 2: pslld $9,<u=%xmm4
2220pslld $9,%xmm4
2221
2222# qhasm: r ^= u
2223# asm 1: pxor <u=int6464#5,<r=int6464#2
2224# asm 2: pxor <u=%xmm4,<r=%xmm1
2225pxor %xmm4,%xmm1
2226
2227# qhasm: z8_stack = r
2228# asm 1: movdqa <r=int6464#2,>z8_stack=stack128#37
2229# asm 2: movdqa <r=%xmm1,>z8_stack=608(%esp)
2230movdqa %xmm1,608(%esp)
2231
2232# qhasm: uint32323232 q += r
2233# asm 1: paddd <r=int6464#2,<q=int6464#4
2234# asm 2: paddd <r=%xmm1,<q=%xmm3
2235paddd %xmm1,%xmm3
2236
2237# qhasm: u = q
2238# asm 1: movdqa <q=int6464#4,>u=int6464#3
2239# asm 2: movdqa <q=%xmm3,>u=%xmm2
2240movdqa %xmm3,%xmm2
2241
2242# qhasm: uint32323232 q >>= 19
2243# asm 1: psrld $19,<q=int6464#4
2244# asm 2: psrld $19,<q=%xmm3
2245psrld $19,%xmm3
2246
2247# qhasm: s ^= q
2248# asm 1: pxor <q=int6464#4,<s=int6464#7
2249# asm 2: pxor <q=%xmm3,<s=%xmm6
2250pxor %xmm3,%xmm6
2251
2252# qhasm: uint32323232 u <<= 13
2253# asm 1: pslld $13,<u=int6464#3
2254# asm 2: pslld $13,<u=%xmm2
2255pslld $13,%xmm2
2256
2257# qhasm: s ^= u
2258# asm 1: pxor <u=int6464#3,<s=int6464#7
2259# asm 2: pxor <u=%xmm2,<s=%xmm6
2260pxor %xmm2,%xmm6
2261
2262# qhasm: mt = z14_stack
2263# asm 1: movdqa <z14_stack=stack128#36,>mt=int6464#3
2264# asm 2: movdqa <z14_stack=592(%esp),>mt=%xmm2
2265movdqa 592(%esp),%xmm2
2266
2267# qhasm: mp = z15_stack
2268# asm 1: movdqa <z15_stack=stack128#22,>mp=int6464#5
2269# asm 2: movdqa <z15_stack=368(%esp),>mp=%xmm4
2270movdqa 368(%esp),%xmm4
2271
2272# qhasm: mq = z12_stack
2273# asm 1: movdqa <z12_stack=stack128#30,>mq=int6464#4
2274# asm 2: movdqa <z12_stack=496(%esp),>mq=%xmm3
2275movdqa 496(%esp),%xmm3
2276
2277# qhasm: mr = z13_stack
2278# asm 1: movdqa <z13_stack=stack128#35,>mr=int6464#6
2279# asm 2: movdqa <z13_stack=576(%esp),>mr=%xmm5
2280movdqa 576(%esp),%xmm5
2281
2282# qhasm: z9_stack = s
2283# asm 1: movdqa <s=int6464#7,>z9_stack=stack128#32
2284# asm 2: movdqa <s=%xmm6,>z9_stack=528(%esp)
2285movdqa %xmm6,528(%esp)
2286
2287# qhasm: uint32323232 r += s
2288# asm 1: paddd <s=int6464#7,<r=int6464#2
2289# asm 2: paddd <s=%xmm6,<r=%xmm1
2290paddd %xmm6,%xmm1
2291
2292# qhasm: u = r
2293# asm 1: movdqa <r=int6464#2,>u=int6464#7
2294# asm 2: movdqa <r=%xmm1,>u=%xmm6
2295movdqa %xmm1,%xmm6
2296
2297# qhasm: uint32323232 r >>= 14
2298# asm 1: psrld $14,<r=int6464#2
2299# asm 2: psrld $14,<r=%xmm1
2300psrld $14,%xmm1
2301
2302# qhasm: p ^= r
2303# asm 1: pxor <r=int6464#2,<p=int6464#1
2304# asm 2: pxor <r=%xmm1,<p=%xmm0
2305pxor %xmm1,%xmm0
2306
2307# qhasm: uint32323232 u <<= 18
2308# asm 1: pslld $18,<u=int6464#7
2309# asm 2: pslld $18,<u=%xmm6
2310pslld $18,%xmm6
2311
2312# qhasm: p ^= u
2313# asm 1: pxor <u=int6464#7,<p=int6464#1
2314# asm 2: pxor <u=%xmm6,<p=%xmm0
2315pxor %xmm6,%xmm0
2316
2317# qhasm: z10_stack = p
2318# asm 1: movdqa <p=int6464#1,>z10_stack=stack128#22
2319# asm 2: movdqa <p=%xmm0,>z10_stack=368(%esp)
2320movdqa %xmm0,368(%esp)
2321
2322# qhasm: assign xmm2 to mt
2323
2324# qhasm: assign xmm3 to mq
2325
2326# qhasm: assign xmm4 to mp
2327
2328# qhasm: assign xmm5 to mr
2329
2330# qhasm: ms = mt
2331# asm 1: movdqa <mt=int6464#3,>ms=int6464#7
2332# asm 2: movdqa <mt=%xmm2,>ms=%xmm6
2333movdqa %xmm2,%xmm6
2334
2335# qhasm: uint32323232 mt += mp
2336# asm 1: paddd <mp=int6464#5,<mt=int6464#3
2337# asm 2: paddd <mp=%xmm4,<mt=%xmm2
2338paddd %xmm4,%xmm2
2339
2340# qhasm: mu = mt
2341# asm 1: movdqa <mt=int6464#3,>mu=int6464#1
2342# asm 2: movdqa <mt=%xmm2,>mu=%xmm0
2343movdqa %xmm2,%xmm0
2344
2345# qhasm: uint32323232 mt >>= 25
2346# asm 1: psrld $25,<mt=int6464#3
2347# asm 2: psrld $25,<mt=%xmm2
2348psrld $25,%xmm2
2349
2350# qhasm: mq ^= mt
2351# asm 1: pxor <mt=int6464#3,<mq=int6464#4
2352# asm 2: pxor <mt=%xmm2,<mq=%xmm3
2353pxor %xmm2,%xmm3
2354
2355# qhasm: uint32323232 mu <<= 7
2356# asm 1: pslld $7,<mu=int6464#1
2357# asm 2: pslld $7,<mu=%xmm0
2358pslld $7,%xmm0
2359
2360# qhasm: mq ^= mu
2361# asm 1: pxor <mu=int6464#1,<mq=int6464#4
2362# asm 2: pxor <mu=%xmm0,<mq=%xmm3
2363pxor %xmm0,%xmm3
2364
2365# qhasm: z12_stack = mq
2366# asm 1: movdqa <mq=int6464#4,>z12_stack=stack128#35
2367# asm 2: movdqa <mq=%xmm3,>z12_stack=576(%esp)
2368movdqa %xmm3,576(%esp)
2369
2370# qhasm: mt = mp
2371# asm 1: movdqa <mp=int6464#5,>mt=int6464#1
2372# asm 2: movdqa <mp=%xmm4,>mt=%xmm0
2373movdqa %xmm4,%xmm0
2374
2375# qhasm: uint32323232 mt += mq
2376# asm 1: paddd <mq=int6464#4,<mt=int6464#1
2377# asm 2: paddd <mq=%xmm3,<mt=%xmm0
2378paddd %xmm3,%xmm0
2379
2380# qhasm: mu = mt
2381# asm 1: movdqa <mt=int6464#1,>mu=int6464#2
2382# asm 2: movdqa <mt=%xmm0,>mu=%xmm1
2383movdqa %xmm0,%xmm1
2384
2385# qhasm: uint32323232 mt >>= 23
2386# asm 1: psrld $23,<mt=int6464#1
2387# asm 2: psrld $23,<mt=%xmm0
2388psrld $23,%xmm0
2389
2390# qhasm: mr ^= mt
2391# asm 1: pxor <mt=int6464#1,<mr=int6464#6
2392# asm 2: pxor <mt=%xmm0,<mr=%xmm5
2393pxor %xmm0,%xmm5
2394
2395# qhasm: uint32323232 mu <<= 9
2396# asm 1: pslld $9,<mu=int6464#2
2397# asm 2: pslld $9,<mu=%xmm1
2398pslld $9,%xmm1
2399
2400# qhasm: mr ^= mu
2401# asm 1: pxor <mu=int6464#2,<mr=int6464#6
2402# asm 2: pxor <mu=%xmm1,<mr=%xmm5
2403pxor %xmm1,%xmm5
2404
2405# qhasm: z13_stack = mr
2406# asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#30
2407# asm 2: movdqa <mr=%xmm5,>z13_stack=496(%esp)
2408movdqa %xmm5,496(%esp)
2409
2410# qhasm: uint32323232 mq += mr
2411# asm 1: paddd <mr=int6464#6,<mq=int6464#4
2412# asm 2: paddd <mr=%xmm5,<mq=%xmm3
2413paddd %xmm5,%xmm3
2414
2415# qhasm: mu = mq
2416# asm 1: movdqa <mq=int6464#4,>mu=int6464#1
2417# asm 2: movdqa <mq=%xmm3,>mu=%xmm0
2418movdqa %xmm3,%xmm0
2419
2420# qhasm: uint32323232 mq >>= 19
2421# asm 1: psrld $19,<mq=int6464#4
2422# asm 2: psrld $19,<mq=%xmm3
2423psrld $19,%xmm3
2424
2425# qhasm: ms ^= mq
2426# asm 1: pxor <mq=int6464#4,<ms=int6464#7
2427# asm 2: pxor <mq=%xmm3,<ms=%xmm6
2428pxor %xmm3,%xmm6
2429
2430# qhasm: uint32323232 mu <<= 13
2431# asm 1: pslld $13,<mu=int6464#1
2432# asm 2: pslld $13,<mu=%xmm0
2433pslld $13,%xmm0
2434
2435# qhasm: ms ^= mu
2436# asm 1: pxor <mu=int6464#1,<ms=int6464#7
2437# asm 2: pxor <mu=%xmm0,<ms=%xmm6
2438pxor %xmm0,%xmm6
2439
2440# qhasm: t = z12_stack
2441# asm 1: movdqa <z12_stack=stack128#35,>t=int6464#3
2442# asm 2: movdqa <z12_stack=576(%esp),>t=%xmm2
2443movdqa 576(%esp),%xmm2
2444
2445# qhasm: p = z0_stack
2446# asm 1: movdqa <z0_stack=stack128#33,>p=int6464#1
2447# asm 2: movdqa <z0_stack=544(%esp),>p=%xmm0
2448movdqa 544(%esp),%xmm0
2449
2450# qhasm: q = z4_stack
2451# asm 1: movdqa <z4_stack=stack128#34,>q=int6464#4
2452# asm 2: movdqa <z4_stack=560(%esp),>q=%xmm3
2453movdqa 560(%esp),%xmm3
2454
2455# qhasm: r = z8_stack
2456# asm 1: movdqa <z8_stack=stack128#37,>r=int6464#2
2457# asm 2: movdqa <z8_stack=608(%esp),>r=%xmm1
2458movdqa 608(%esp),%xmm1
2459
2460# qhasm: z14_stack = ms
2461# asm 1: movdqa <ms=int6464#7,>z14_stack=stack128#24
2462# asm 2: movdqa <ms=%xmm6,>z14_stack=400(%esp)
2463movdqa %xmm6,400(%esp)
2464
2465# qhasm: uint32323232 mr += ms
2466# asm 1: paddd <ms=int6464#7,<mr=int6464#6
2467# asm 2: paddd <ms=%xmm6,<mr=%xmm5
2468paddd %xmm6,%xmm5
2469
2470# qhasm: mu = mr
2471# asm 1: movdqa <mr=int6464#6,>mu=int6464#7
2472# asm 2: movdqa <mr=%xmm5,>mu=%xmm6
2473movdqa %xmm5,%xmm6
2474
2475# qhasm: uint32323232 mr >>= 14
2476# asm 1: psrld $14,<mr=int6464#6
2477# asm 2: psrld $14,<mr=%xmm5
2478psrld $14,%xmm5
2479
2480# qhasm: mp ^= mr
2481# asm 1: pxor <mr=int6464#6,<mp=int6464#5
2482# asm 2: pxor <mr=%xmm5,<mp=%xmm4
2483pxor %xmm5,%xmm4
2484
2485# qhasm: uint32323232 mu <<= 18
2486# asm 1: pslld $18,<mu=int6464#7
2487# asm 2: pslld $18,<mu=%xmm6
2488pslld $18,%xmm6
2489
2490# qhasm: mp ^= mu
2491# asm 1: pxor <mu=int6464#7,<mp=int6464#5
2492# asm 2: pxor <mu=%xmm6,<mp=%xmm4
2493pxor %xmm6,%xmm4
2494
2495# qhasm: z15_stack = mp
2496# asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#23
2497# asm 2: movdqa <mp=%xmm4,>z15_stack=384(%esp)
2498movdqa %xmm4,384(%esp)
2499
2500# qhasm: unsigned>? i -= 2
2501# asm 1: sub $2,<i=int32#1
2502# asm 2: sub $2,<i=%eax
2503sub $2,%eax
2504# comment:fp stack unchanged by jump
2505
2506# qhasm: goto mainloop1 if unsigned>
2507ja ._mainloop1
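# Editor's note (summary comment, not generated by qhasm): the unrolled loop
# above completes the vectorized rounds for four 64-byte blocks kept in the
# zN_stack slots.  Below, each zN vector is reloaded, the saved input words
# (origN) are added back, the four 32-bit lanes are extracted with
# movd/pshufd, XORed with the matching message word in each of the four
# 64-byte blocks, and the resulting 256 bytes of output are stored.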
2508
2509# qhasm: out = out_stack
2510# asm 1: movl <out_stack=stack32#6,>out=int32#6
2511# asm 2: movl <out_stack=20(%esp),>out=%edi
2512movl 20(%esp),%edi
2513
2514# qhasm: z0 = z0_stack
2515# asm 1: movdqa <z0_stack=stack128#33,>z0=int6464#1
2516# asm 2: movdqa <z0_stack=544(%esp),>z0=%xmm0
2517movdqa 544(%esp),%xmm0
2518
2519# qhasm: z1 = z1_stack
2520# asm 1: movdqa <z1_stack=stack128#28,>z1=int6464#2
2521# asm 2: movdqa <z1_stack=464(%esp),>z1=%xmm1
2522movdqa 464(%esp),%xmm1
2523
2524# qhasm: z2 = z2_stack
2525# asm 1: movdqa <z2_stack=stack128#31,>z2=int6464#3
2526# asm 2: movdqa <z2_stack=512(%esp),>z2=%xmm2
2527movdqa 512(%esp),%xmm2
2528
2529# qhasm: z3 = z3_stack
2530# asm 1: movdqa <z3_stack=stack128#25,>z3=int6464#4
2531# asm 2: movdqa <z3_stack=416(%esp),>z3=%xmm3
2532movdqa 416(%esp),%xmm3
2533
2534# qhasm: uint32323232 z0 += orig0
2535# asm 1: paddd <orig0=stack128#8,<z0=int6464#1
2536# asm 2: paddd <orig0=144(%esp),<z0=%xmm0
2537paddd 144(%esp),%xmm0
2538
2539# qhasm: uint32323232 z1 += orig1
2540# asm 1: paddd <orig1=stack128#12,<z1=int6464#2
2541# asm 2: paddd <orig1=208(%esp),<z1=%xmm1
2542paddd 208(%esp),%xmm1
2543
2544# qhasm: uint32323232 z2 += orig2
2545# asm 1: paddd <orig2=stack128#15,<z2=int6464#3
2546# asm 2: paddd <orig2=256(%esp),<z2=%xmm2
2547paddd 256(%esp),%xmm2
2548
2549# qhasm: uint32323232 z3 += orig3
2550# asm 1: paddd <orig3=stack128#18,<z3=int6464#4
2551# asm 2: paddd <orig3=304(%esp),<z3=%xmm3
2552paddd 304(%esp),%xmm3
2553
2554# qhasm: in0 = z0
2555# asm 1: movd <z0=int6464#1,>in0=int32#1
2556# asm 2: movd <z0=%xmm0,>in0=%eax
2557movd %xmm0,%eax
2558
2559# qhasm: in1 = z1
2560# asm 1: movd <z1=int6464#2,>in1=int32#2
2561# asm 2: movd <z1=%xmm1,>in1=%ecx
2562movd %xmm1,%ecx
2563
2564# qhasm: in2 = z2
2565# asm 1: movd <z2=int6464#3,>in2=int32#3
2566# asm 2: movd <z2=%xmm2,>in2=%edx
2567movd %xmm2,%edx
2568
2569# qhasm: in3 = z3
2570# asm 1: movd <z3=int6464#4,>in3=int32#4
2571# asm 2: movd <z3=%xmm3,>in3=%ebx
2572movd %xmm3,%ebx
2573
2574# qhasm: z0 <<<= 96
2575# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2576# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2577pshufd $0x39,%xmm0,%xmm0
2578
2579# qhasm: z1 <<<= 96
2580# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2581# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2582pshufd $0x39,%xmm1,%xmm1
2583
2584# qhasm: z2 <<<= 96
2585# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2586# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2587pshufd $0x39,%xmm2,%xmm2
2588
2589# qhasm: z3 <<<= 96
2590# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2591# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2592pshufd $0x39,%xmm3,%xmm3
2593
2594# qhasm: in0 ^= *(uint32 *) (m + 0)
2595# asm 1: xorl 0(<m=int32#5),<in0=int32#1
2596# asm 2: xorl 0(<m=%esi),<in0=%eax
2597xorl 0(%esi),%eax
2598
2599# qhasm: in1 ^= *(uint32 *) (m + 4)
2600# asm 1: xorl 4(<m=int32#5),<in1=int32#2
2601# asm 2: xorl 4(<m=%esi),<in1=%ecx
2602xorl 4(%esi),%ecx
2603
2604# qhasm: in2 ^= *(uint32 *) (m + 8)
2605# asm 1: xorl 8(<m=int32#5),<in2=int32#3
2606# asm 2: xorl 8(<m=%esi),<in2=%edx
2607xorl 8(%esi),%edx
2608
2609# qhasm: in3 ^= *(uint32 *) (m + 12)
2610# asm 1: xorl 12(<m=int32#5),<in3=int32#4
2611# asm 2: xorl 12(<m=%esi),<in3=%ebx
2612xorl 12(%esi),%ebx
2613
2614# qhasm: *(uint32 *) (out + 0) = in0
2615# asm 1: movl <in0=int32#1,0(<out=int32#6)
2616# asm 2: movl <in0=%eax,0(<out=%edi)
2617movl %eax,0(%edi)
2618
2619# qhasm: *(uint32 *) (out + 4) = in1
2620# asm 1: movl <in1=int32#2,4(<out=int32#6)
2621# asm 2: movl <in1=%ecx,4(<out=%edi)
2622movl %ecx,4(%edi)
2623
2624# qhasm: *(uint32 *) (out + 8) = in2
2625# asm 1: movl <in2=int32#3,8(<out=int32#6)
2626# asm 2: movl <in2=%edx,8(<out=%edi)
2627movl %edx,8(%edi)
2628
2629# qhasm: *(uint32 *) (out + 12) = in3
2630# asm 1: movl <in3=int32#4,12(<out=int32#6)
2631# asm 2: movl <in3=%ebx,12(<out=%edi)
2632movl %ebx,12(%edi)
2633
2634# qhasm: in0 = z0
2635# asm 1: movd <z0=int6464#1,>in0=int32#1
2636# asm 2: movd <z0=%xmm0,>in0=%eax
2637movd %xmm0,%eax
2638
2639# qhasm: in1 = z1
2640# asm 1: movd <z1=int6464#2,>in1=int32#2
2641# asm 2: movd <z1=%xmm1,>in1=%ecx
2642movd %xmm1,%ecx
2643
2644# qhasm: in2 = z2
2645# asm 1: movd <z2=int6464#3,>in2=int32#3
2646# asm 2: movd <z2=%xmm2,>in2=%edx
2647movd %xmm2,%edx
2648
2649# qhasm: in3 = z3
2650# asm 1: movd <z3=int6464#4,>in3=int32#4
2651# asm 2: movd <z3=%xmm3,>in3=%ebx
2652movd %xmm3,%ebx
2653
2654# qhasm: z0 <<<= 96
2655# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2656# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2657pshufd $0x39,%xmm0,%xmm0
2658
2659# qhasm: z1 <<<= 96
2660# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2661# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2662pshufd $0x39,%xmm1,%xmm1
2663
2664# qhasm: z2 <<<= 96
2665# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2666# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2667pshufd $0x39,%xmm2,%xmm2
2668
2669# qhasm: z3 <<<= 96
2670# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2671# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2672pshufd $0x39,%xmm3,%xmm3
2673
2674# qhasm: in0 ^= *(uint32 *) (m + 64)
2675# asm 1: xorl 64(<m=int32#5),<in0=int32#1
2676# asm 2: xorl 64(<m=%esi),<in0=%eax
2677xorl 64(%esi),%eax
2678
2679# qhasm: in1 ^= *(uint32 *) (m + 68)
2680# asm 1: xorl 68(<m=int32#5),<in1=int32#2
2681# asm 2: xorl 68(<m=%esi),<in1=%ecx
2682xorl 68(%esi),%ecx
2683
2684# qhasm: in2 ^= *(uint32 *) (m + 72)
2685# asm 1: xorl 72(<m=int32#5),<in2=int32#3
2686# asm 2: xorl 72(<m=%esi),<in2=%edx
2687xorl 72(%esi),%edx
2688
2689# qhasm: in3 ^= *(uint32 *) (m + 76)
2690# asm 1: xorl 76(<m=int32#5),<in3=int32#4
2691# asm 2: xorl 76(<m=%esi),<in3=%ebx
2692xorl 76(%esi),%ebx
2693
2694# qhasm: *(uint32 *) (out + 64) = in0
2695# asm 1: movl <in0=int32#1,64(<out=int32#6)
2696# asm 2: movl <in0=%eax,64(<out=%edi)
2697movl %eax,64(%edi)
2698
2699# qhasm: *(uint32 *) (out + 68) = in1
2700# asm 1: movl <in1=int32#2,68(<out=int32#6)
2701# asm 2: movl <in1=%ecx,68(<out=%edi)
2702movl %ecx,68(%edi)
2703
2704# qhasm: *(uint32 *) (out + 72) = in2
2705# asm 1: movl <in2=int32#3,72(<out=int32#6)
2706# asm 2: movl <in2=%edx,72(<out=%edi)
2707movl %edx,72(%edi)
2708
2709# qhasm: *(uint32 *) (out + 76) = in3
2710# asm 1: movl <in3=int32#4,76(<out=int32#6)
2711# asm 2: movl <in3=%ebx,76(<out=%edi)
2712movl %ebx,76(%edi)
2713
2714# qhasm: in0 = z0
2715# asm 1: movd <z0=int6464#1,>in0=int32#1
2716# asm 2: movd <z0=%xmm0,>in0=%eax
2717movd %xmm0,%eax
2718
2719# qhasm: in1 = z1
2720# asm 1: movd <z1=int6464#2,>in1=int32#2
2721# asm 2: movd <z1=%xmm1,>in1=%ecx
2722movd %xmm1,%ecx
2723
2724# qhasm: in2 = z2
2725# asm 1: movd <z2=int6464#3,>in2=int32#3
2726# asm 2: movd <z2=%xmm2,>in2=%edx
2727movd %xmm2,%edx
2728
2729# qhasm: in3 = z3
2730# asm 1: movd <z3=int6464#4,>in3=int32#4
2731# asm 2: movd <z3=%xmm3,>in3=%ebx
2732movd %xmm3,%ebx
2733
2734# qhasm: z0 <<<= 96
2735# asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
2736# asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
2737pshufd $0x39,%xmm0,%xmm0
2738
2739# qhasm: z1 <<<= 96
2740# asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
2741# asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
2742pshufd $0x39,%xmm1,%xmm1
2743
2744# qhasm: z2 <<<= 96
2745# asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
2746# asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
2747pshufd $0x39,%xmm2,%xmm2
2748
2749# qhasm: z3 <<<= 96
2750# asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
2751# asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
2752pshufd $0x39,%xmm3,%xmm3
2753
2754# qhasm: in0 ^= *(uint32 *) (m + 128)
2755# asm 1: xorl 128(<m=int32#5),<in0=int32#1
2756# asm 2: xorl 128(<m=%esi),<in0=%eax
2757xorl 128(%esi),%eax
2758
2759# qhasm: in1 ^= *(uint32 *) (m + 132)
2760# asm 1: xorl 132(<m=int32#5),<in1=int32#2
2761# asm 2: xorl 132(<m=%esi),<in1=%ecx
2762xorl 132(%esi),%ecx
2763
2764# qhasm: in2 ^= *(uint32 *) (m + 136)
2765# asm 1: xorl 136(<m=int32#5),<in2=int32#3
2766# asm 2: xorl 136(<m=%esi),<in2=%edx
2767xorl 136(%esi),%edx
2768
2769# qhasm: in3 ^= *(uint32 *) (m + 140)
2770# asm 1: xorl 140(<m=int32#5),<in3=int32#4
2771# asm 2: xorl 140(<m=%esi),<in3=%ebx
2772xorl 140(%esi),%ebx
2773
2774# qhasm: *(uint32 *) (out + 128) = in0
2775# asm 1: movl <in0=int32#1,128(<out=int32#6)
2776# asm 2: movl <in0=%eax,128(<out=%edi)
2777movl %eax,128(%edi)
2778
2779# qhasm: *(uint32 *) (out + 132) = in1
2780# asm 1: movl <in1=int32#2,132(<out=int32#6)
2781# asm 2: movl <in1=%ecx,132(<out=%edi)
2782movl %ecx,132(%edi)
2783
2784# qhasm: *(uint32 *) (out + 136) = in2
2785# asm 1: movl <in2=int32#3,136(<out=int32#6)
2786# asm 2: movl <in2=%edx,136(<out=%edi)
2787movl %edx,136(%edi)
2788
2789# qhasm: *(uint32 *) (out + 140) = in3
2790# asm 1: movl <in3=int32#4,140(<out=int32#6)
2791# asm 2: movl <in3=%ebx,140(<out=%edi)
2792movl %ebx,140(%edi)
2793
2794# qhasm: in0 = z0
2795# asm 1: movd <z0=int6464#1,>in0=int32#1
2796# asm 2: movd <z0=%xmm0,>in0=%eax
2797movd %xmm0,%eax
2798
2799# qhasm: in1 = z1
2800# asm 1: movd <z1=int6464#2,>in1=int32#2
2801# asm 2: movd <z1=%xmm1,>in1=%ecx
2802movd %xmm1,%ecx
2803
2804# qhasm: in2 = z2
2805# asm 1: movd <z2=int6464#3,>in2=int32#3
2806# asm 2: movd <z2=%xmm2,>in2=%edx
2807movd %xmm2,%edx
2808
2809# qhasm: in3 = z3
2810# asm 1: movd <z3=int6464#4,>in3=int32#4
2811# asm 2: movd <z3=%xmm3,>in3=%ebx
2812movd %xmm3,%ebx
2813
2814# qhasm: in0 ^= *(uint32 *) (m + 192)
2815# asm 1: xorl 192(<m=int32#5),<in0=int32#1
2816# asm 2: xorl 192(<m=%esi),<in0=%eax
2817xorl 192(%esi),%eax
2818
2819# qhasm: in1 ^= *(uint32 *) (m + 196)
2820# asm 1: xorl 196(<m=int32#5),<in1=int32#2
2821# asm 2: xorl 196(<m=%esi),<in1=%ecx
2822xorl 196(%esi),%ecx
2823
2824# qhasm: in2 ^= *(uint32 *) (m + 200)
2825# asm 1: xorl 200(<m=int32#5),<in2=int32#3
2826# asm 2: xorl 200(<m=%esi),<in2=%edx
2827xorl 200(%esi),%edx
2828
2829# qhasm: in3 ^= *(uint32 *) (m + 204)
2830# asm 1: xorl 204(<m=int32#5),<in3=int32#4
2831# asm 2: xorl 204(<m=%esi),<in3=%ebx
2832xorl 204(%esi),%ebx
2833
2834# qhasm: *(uint32 *) (out + 192) = in0
2835# asm 1: movl <in0=int32#1,192(<out=int32#6)
2836# asm 2: movl <in0=%eax,192(<out=%edi)
2837movl %eax,192(%edi)
2838
2839# qhasm: *(uint32 *) (out + 196) = in1
2840# asm 1: movl <in1=int32#2,196(<out=int32#6)
2841# asm 2: movl <in1=%ecx,196(<out=%edi)
2842movl %ecx,196(%edi)
2843
2844# qhasm: *(uint32 *) (out + 200) = in2
2845# asm 1: movl <in2=int32#3,200(<out=int32#6)
2846# asm 2: movl <in2=%edx,200(<out=%edi)
2847movl %edx,200(%edi)
2848
2849# qhasm: *(uint32 *) (out + 204) = in3
2850# asm 1: movl <in3=int32#4,204(<out=int32#6)
2851# asm 2: movl <in3=%ebx,204(<out=%edi)
2852movl %ebx,204(%edi)
2853
2854# qhasm: z4 = z4_stack
2855# asm 1: movdqa <z4_stack=stack128#34,>z4=int6464#1
2856# asm 2: movdqa <z4_stack=560(%esp),>z4=%xmm0
2857movdqa 560(%esp),%xmm0
2858
2859# qhasm: z5 = z5_stack
2860# asm 1: movdqa <z5_stack=stack128#21,>z5=int6464#2
2861# asm 2: movdqa <z5_stack=352(%esp),>z5=%xmm1
2862movdqa 352(%esp),%xmm1
2863
2864# qhasm: z6 = z6_stack
2865# asm 1: movdqa <z6_stack=stack128#26,>z6=int6464#3
2866# asm 2: movdqa <z6_stack=432(%esp),>z6=%xmm2
2867movdqa 432(%esp),%xmm2
2868
2869# qhasm: z7 = z7_stack
2870# asm 1: movdqa <z7_stack=stack128#29,>z7=int6464#4
2871# asm 2: movdqa <z7_stack=480(%esp),>z7=%xmm3
2872movdqa 480(%esp),%xmm3
2873
2874# qhasm: uint32323232 z4 += orig4
2875# asm 1: paddd <orig4=stack128#16,<z4=int6464#1
2876# asm 2: paddd <orig4=272(%esp),<z4=%xmm0
2877paddd 272(%esp),%xmm0
2878
2879# qhasm: uint32323232 z5 += orig5
2880# asm 1: paddd <orig5=stack128#5,<z5=int6464#2
2881# asm 2: paddd <orig5=96(%esp),<z5=%xmm1
2882paddd 96(%esp),%xmm1
2883
2884# qhasm: uint32323232 z6 += orig6
2885# asm 1: paddd <orig6=stack128#9,<z6=int6464#3
2886# asm 2: paddd <orig6=160(%esp),<z6=%xmm2
2887paddd 160(%esp),%xmm2
2888
2889# qhasm: uint32323232 z7 += orig7
2890# asm 1: paddd <orig7=stack128#13,<z7=int6464#4
2891# asm 2: paddd <orig7=224(%esp),<z7=%xmm3
2892paddd 224(%esp),%xmm3
2893
2894# qhasm: in4 = z4
2895# asm 1: movd <z4=int6464#1,>in4=int32#1
2896# asm 2: movd <z4=%xmm0,>in4=%eax
2897movd %xmm0,%eax
2898
2899# qhasm: in5 = z5
2900# asm 1: movd <z5=int6464#2,>in5=int32#2
2901# asm 2: movd <z5=%xmm1,>in5=%ecx
2902movd %xmm1,%ecx
2903
2904# qhasm: in6 = z6
2905# asm 1: movd <z6=int6464#3,>in6=int32#3
2906# asm 2: movd <z6=%xmm2,>in6=%edx
2907movd %xmm2,%edx
2908
2909# qhasm: in7 = z7
2910# asm 1: movd <z7=int6464#4,>in7=int32#4
2911# asm 2: movd <z7=%xmm3,>in7=%ebx
2912movd %xmm3,%ebx
2913
2914# qhasm: z4 <<<= 96
2915# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2916# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2917pshufd $0x39,%xmm0,%xmm0
2918
2919# qhasm: z5 <<<= 96
2920# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
2921# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
2922pshufd $0x39,%xmm1,%xmm1
2923
2924# qhasm: z6 <<<= 96
2925# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
2926# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
2927pshufd $0x39,%xmm2,%xmm2
2928
2929# qhasm: z7 <<<= 96
2930# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
2931# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
2932pshufd $0x39,%xmm3,%xmm3
2933
2934# qhasm: in4 ^= *(uint32 *) (m + 16)
2935# asm 1: xorl 16(<m=int32#5),<in4=int32#1
2936# asm 2: xorl 16(<m=%esi),<in4=%eax
2937xorl 16(%esi),%eax
2938
2939# qhasm: in5 ^= *(uint32 *) (m + 20)
2940# asm 1: xorl 20(<m=int32#5),<in5=int32#2
2941# asm 2: xorl 20(<m=%esi),<in5=%ecx
2942xorl 20(%esi),%ecx
2943
2944# qhasm: in6 ^= *(uint32 *) (m + 24)
2945# asm 1: xorl 24(<m=int32#5),<in6=int32#3
2946# asm 2: xorl 24(<m=%esi),<in6=%edx
2947xorl 24(%esi),%edx
2948
2949# qhasm: in7 ^= *(uint32 *) (m + 28)
2950# asm 1: xorl 28(<m=int32#5),<in7=int32#4
2951# asm 2: xorl 28(<m=%esi),<in7=%ebx
2952xorl 28(%esi),%ebx
2953
2954# qhasm: *(uint32 *) (out + 16) = in4
2955# asm 1: movl <in4=int32#1,16(<out=int32#6)
2956# asm 2: movl <in4=%eax,16(<out=%edi)
2957movl %eax,16(%edi)
2958
2959# qhasm: *(uint32 *) (out + 20) = in5
2960# asm 1: movl <in5=int32#2,20(<out=int32#6)
2961# asm 2: movl <in5=%ecx,20(<out=%edi)
2962movl %ecx,20(%edi)
2963
2964# qhasm: *(uint32 *) (out + 24) = in6
2965# asm 1: movl <in6=int32#3,24(<out=int32#6)
2966# asm 2: movl <in6=%edx,24(<out=%edi)
2967movl %edx,24(%edi)
2968
2969# qhasm: *(uint32 *) (out + 28) = in7
2970# asm 1: movl <in7=int32#4,28(<out=int32#6)
2971# asm 2: movl <in7=%ebx,28(<out=%edi)
2972movl %ebx,28(%edi)
2973
2974# qhasm: in4 = z4
2975# asm 1: movd <z4=int6464#1,>in4=int32#1
2976# asm 2: movd <z4=%xmm0,>in4=%eax
2977movd %xmm0,%eax
2978
2979# qhasm: in5 = z5
2980# asm 1: movd <z5=int6464#2,>in5=int32#2
2981# asm 2: movd <z5=%xmm1,>in5=%ecx
2982movd %xmm1,%ecx
2983
2984# qhasm: in6 = z6
2985# asm 1: movd <z6=int6464#3,>in6=int32#3
2986# asm 2: movd <z6=%xmm2,>in6=%edx
2987movd %xmm2,%edx
2988
2989# qhasm: in7 = z7
2990# asm 1: movd <z7=int6464#4,>in7=int32#4
2991# asm 2: movd <z7=%xmm3,>in7=%ebx
2992movd %xmm3,%ebx
2993
2994# qhasm: z4 <<<= 96
2995# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2996# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2997pshufd $0x39,%xmm0,%xmm0
2998
2999# qhasm: z5 <<<= 96
3000# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3001# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3002pshufd $0x39,%xmm1,%xmm1
3003
3004# qhasm: z6 <<<= 96
3005# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3006# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3007pshufd $0x39,%xmm2,%xmm2
3008
3009# qhasm: z7 <<<= 96
3010# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3011# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3012pshufd $0x39,%xmm3,%xmm3
3013
3014# qhasm: in4 ^= *(uint32 *) (m + 80)
3015# asm 1: xorl 80(<m=int32#5),<in4=int32#1
3016# asm 2: xorl 80(<m=%esi),<in4=%eax
3017xorl 80(%esi),%eax
3018
3019# qhasm: in5 ^= *(uint32 *) (m + 84)
3020# asm 1: xorl 84(<m=int32#5),<in5=int32#2
3021# asm 2: xorl 84(<m=%esi),<in5=%ecx
3022xorl 84(%esi),%ecx
3023
3024# qhasm: in6 ^= *(uint32 *) (m + 88)
3025# asm 1: xorl 88(<m=int32#5),<in6=int32#3
3026# asm 2: xorl 88(<m=%esi),<in6=%edx
3027xorl 88(%esi),%edx
3028
3029# qhasm: in7 ^= *(uint32 *) (m + 92)
3030# asm 1: xorl 92(<m=int32#5),<in7=int32#4
3031# asm 2: xorl 92(<m=%esi),<in7=%ebx
3032xorl 92(%esi),%ebx
3033
3034# qhasm: *(uint32 *) (out + 80) = in4
3035# asm 1: movl <in4=int32#1,80(<out=int32#6)
3036# asm 2: movl <in4=%eax,80(<out=%edi)
3037movl %eax,80(%edi)
3038
3039# qhasm: *(uint32 *) (out + 84) = in5
3040# asm 1: movl <in5=int32#2,84(<out=int32#6)
3041# asm 2: movl <in5=%ecx,84(<out=%edi)
3042movl %ecx,84(%edi)
3043
3044# qhasm: *(uint32 *) (out + 88) = in6
3045# asm 1: movl <in6=int32#3,88(<out=int32#6)
3046# asm 2: movl <in6=%edx,88(<out=%edi)
3047movl %edx,88(%edi)
3048
3049# qhasm: *(uint32 *) (out + 92) = in7
3050# asm 1: movl <in7=int32#4,92(<out=int32#6)
3051# asm 2: movl <in7=%ebx,92(<out=%edi)
3052movl %ebx,92(%edi)
3053
3054# qhasm: in4 = z4
3055# asm 1: movd <z4=int6464#1,>in4=int32#1
3056# asm 2: movd <z4=%xmm0,>in4=%eax
3057movd %xmm0,%eax
3058
3059# qhasm: in5 = z5
3060# asm 1: movd <z5=int6464#2,>in5=int32#2
3061# asm 2: movd <z5=%xmm1,>in5=%ecx
3062movd %xmm1,%ecx
3063
3064# qhasm: in6 = z6
3065# asm 1: movd <z6=int6464#3,>in6=int32#3
3066# asm 2: movd <z6=%xmm2,>in6=%edx
3067movd %xmm2,%edx
3068
3069# qhasm: in7 = z7
3070# asm 1: movd <z7=int6464#4,>in7=int32#4
3071# asm 2: movd <z7=%xmm3,>in7=%ebx
3072movd %xmm3,%ebx
3073
3074# qhasm: z4 <<<= 96
3075# asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
3076# asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
3077pshufd $0x39,%xmm0,%xmm0
3078
3079# qhasm: z5 <<<= 96
3080# asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3081# asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3082pshufd $0x39,%xmm1,%xmm1
3083
3084# qhasm: z6 <<<= 96
3085# asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3086# asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3087pshufd $0x39,%xmm2,%xmm2
3088
3089# qhasm: z7 <<<= 96
3090# asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3091# asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3092pshufd $0x39,%xmm3,%xmm3
3093
3094# qhasm: in4 ^= *(uint32 *) (m + 144)
3095# asm 1: xorl 144(<m=int32#5),<in4=int32#1
3096# asm 2: xorl 144(<m=%esi),<in4=%eax
3097xorl 144(%esi),%eax
3098
3099# qhasm: in5 ^= *(uint32 *) (m + 148)
3100# asm 1: xorl 148(<m=int32#5),<in5=int32#2
3101# asm 2: xorl 148(<m=%esi),<in5=%ecx
3102xorl 148(%esi),%ecx
3103
3104# qhasm: in6 ^= *(uint32 *) (m + 152)
3105# asm 1: xorl 152(<m=int32#5),<in6=int32#3
3106# asm 2: xorl 152(<m=%esi),<in6=%edx
3107xorl 152(%esi),%edx
3108
3109# qhasm: in7 ^= *(uint32 *) (m + 156)
3110# asm 1: xorl 156(<m=int32#5),<in7=int32#4
3111# asm 2: xorl 156(<m=%esi),<in7=%ebx
3112xorl 156(%esi),%ebx
3113
3114# qhasm: *(uint32 *) (out + 144) = in4
3115# asm 1: movl <in4=int32#1,144(<out=int32#6)
3116# asm 2: movl <in4=%eax,144(<out=%edi)
3117movl %eax,144(%edi)
3118
3119# qhasm: *(uint32 *) (out + 148) = in5
3120# asm 1: movl <in5=int32#2,148(<out=int32#6)
3121# asm 2: movl <in5=%ecx,148(<out=%edi)
3122movl %ecx,148(%edi)
3123
3124# qhasm: *(uint32 *) (out + 152) = in6
3125# asm 1: movl <in6=int32#3,152(<out=int32#6)
3126# asm 2: movl <in6=%edx,152(<out=%edi)
3127movl %edx,152(%edi)
3128
3129# qhasm: *(uint32 *) (out + 156) = in7
3130# asm 1: movl <in7=int32#4,156(<out=int32#6)
3131# asm 2: movl <in7=%ebx,156(<out=%edi)
3132movl %ebx,156(%edi)
3133
3134# qhasm: in4 = z4
3135# asm 1: movd <z4=int6464#1,>in4=int32#1
3136# asm 2: movd <z4=%xmm0,>in4=%eax
3137movd %xmm0,%eax
3138
3139# qhasm: in5 = z5
3140# asm 1: movd <z5=int6464#2,>in5=int32#2
3141# asm 2: movd <z5=%xmm1,>in5=%ecx
3142movd %xmm1,%ecx
3143
3144# qhasm: in6 = z6
3145# asm 1: movd <z6=int6464#3,>in6=int32#3
3146# asm 2: movd <z6=%xmm2,>in6=%edx
3147movd %xmm2,%edx
3148
3149# qhasm: in7 = z7
3150# asm 1: movd <z7=int6464#4,>in7=int32#4
3151# asm 2: movd <z7=%xmm3,>in7=%ebx
3152movd %xmm3,%ebx
3153
3154# qhasm: in4 ^= *(uint32 *) (m + 208)
3155# asm 1: xorl 208(<m=int32#5),<in4=int32#1
3156# asm 2: xorl 208(<m=%esi),<in4=%eax
3157xorl 208(%esi),%eax
3158
3159# qhasm: in5 ^= *(uint32 *) (m + 212)
3160# asm 1: xorl 212(<m=int32#5),<in5=int32#2
3161# asm 2: xorl 212(<m=%esi),<in5=%ecx
3162xorl 212(%esi),%ecx
3163
3164# qhasm: in6 ^= *(uint32 *) (m + 216)
3165# asm 1: xorl 216(<m=int32#5),<in6=int32#3
3166# asm 2: xorl 216(<m=%esi),<in6=%edx
3167xorl 216(%esi),%edx
3168
3169# qhasm: in7 ^= *(uint32 *) (m + 220)
3170# asm 1: xorl 220(<m=int32#5),<in7=int32#4
3171# asm 2: xorl 220(<m=%esi),<in7=%ebx
3172xorl 220(%esi),%ebx
3173
3174# qhasm: *(uint32 *) (out + 208) = in4
3175# asm 1: movl <in4=int32#1,208(<out=int32#6)
3176# asm 2: movl <in4=%eax,208(<out=%edi)
3177movl %eax,208(%edi)
3178
3179# qhasm: *(uint32 *) (out + 212) = in5
3180# asm 1: movl <in5=int32#2,212(<out=int32#6)
3181# asm 2: movl <in5=%ecx,212(<out=%edi)
3182movl %ecx,212(%edi)
3183
3184# qhasm: *(uint32 *) (out + 216) = in6
3185# asm 1: movl <in6=int32#3,216(<out=int32#6)
3186# asm 2: movl <in6=%edx,216(<out=%edi)
3187movl %edx,216(%edi)
3188
3189# qhasm: *(uint32 *) (out + 220) = in7
3190# asm 1: movl <in7=int32#4,220(<out=int32#6)
3191# asm 2: movl <in7=%ebx,220(<out=%edi)
3192movl %ebx,220(%edi)
3193
3194# qhasm: z8 = z8_stack
3195# asm 1: movdqa <z8_stack=stack128#37,>z8=int6464#1
3196# asm 2: movdqa <z8_stack=608(%esp),>z8=%xmm0
3197movdqa 608(%esp),%xmm0
3198
3199# qhasm: z9 = z9_stack
3200# asm 1: movdqa <z9_stack=stack128#32,>z9=int6464#2
3201# asm 2: movdqa <z9_stack=528(%esp),>z9=%xmm1
3202movdqa 528(%esp),%xmm1
3203
3204# qhasm: z10 = z10_stack
3205# asm 1: movdqa <z10_stack=stack128#22,>z10=int6464#3
3206# asm 2: movdqa <z10_stack=368(%esp),>z10=%xmm2
3207movdqa 368(%esp),%xmm2
3208
3209# qhasm: z11 = z11_stack
3210# asm 1: movdqa <z11_stack=stack128#27,>z11=int6464#4
3211# asm 2: movdqa <z11_stack=448(%esp),>z11=%xmm3
3212movdqa 448(%esp),%xmm3
3213
3214# qhasm: uint32323232 z8 += orig8
3215# asm 1: paddd <orig8=stack128#19,<z8=int6464#1
3216# asm 2: paddd <orig8=320(%esp),<z8=%xmm0
3217paddd 320(%esp),%xmm0
3218
3219# qhasm: uint32323232 z9 += orig9
3220# asm 1: paddd <orig9=stack128#20,<z9=int6464#2
3221# asm 2: paddd <orig9=336(%esp),<z9=%xmm1
3222paddd 336(%esp),%xmm1
3223
3224# qhasm: uint32323232 z10 += orig10
3225# asm 1: paddd <orig10=stack128#6,<z10=int6464#3
3226# asm 2: paddd <orig10=112(%esp),<z10=%xmm2
3227paddd 112(%esp),%xmm2
3228
3229# qhasm: uint32323232 z11 += orig11
3230# asm 1: paddd <orig11=stack128#10,<z11=int6464#4
3231# asm 2: paddd <orig11=176(%esp),<z11=%xmm3
3232paddd 176(%esp),%xmm3
3233
3234# qhasm: in8 = z8
3235# asm 1: movd <z8=int6464#1,>in8=int32#1
3236# asm 2: movd <z8=%xmm0,>in8=%eax
3237movd %xmm0,%eax
3238
3239# qhasm: in9 = z9
3240# asm 1: movd <z9=int6464#2,>in9=int32#2
3241# asm 2: movd <z9=%xmm1,>in9=%ecx
3242movd %xmm1,%ecx
3243
3244# qhasm: in10 = z10
3245# asm 1: movd <z10=int6464#3,>in10=int32#3
3246# asm 2: movd <z10=%xmm2,>in10=%edx
3247movd %xmm2,%edx
3248
3249# qhasm: in11 = z11
3250# asm 1: movd <z11=int6464#4,>in11=int32#4
3251# asm 2: movd <z11=%xmm3,>in11=%ebx
3252movd %xmm3,%ebx
3253
3254# qhasm: z8 <<<= 96
3255# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3256# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3257pshufd $0x39,%xmm0,%xmm0
3258
3259# qhasm: z9 <<<= 96
3260# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3261# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3262pshufd $0x39,%xmm1,%xmm1
3263
3264# qhasm: z10 <<<= 96
3265# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3266# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3267pshufd $0x39,%xmm2,%xmm2
3268
3269# qhasm: z11 <<<= 96
3270# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3271# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3272pshufd $0x39,%xmm3,%xmm3
3273
3274# qhasm: in8 ^= *(uint32 *) (m + 32)
3275# asm 1: xorl 32(<m=int32#5),<in8=int32#1
3276# asm 2: xorl 32(<m=%esi),<in8=%eax
3277xorl 32(%esi),%eax
3278
3279# qhasm: in9 ^= *(uint32 *) (m + 36)
3280# asm 1: xorl 36(<m=int32#5),<in9=int32#2
3281# asm 2: xorl 36(<m=%esi),<in9=%ecx
3282xorl 36(%esi),%ecx
3283
3284# qhasm: in10 ^= *(uint32 *) (m + 40)
3285# asm 1: xorl 40(<m=int32#5),<in10=int32#3
3286# asm 2: xorl 40(<m=%esi),<in10=%edx
3287xorl 40(%esi),%edx
3288
3289# qhasm: in11 ^= *(uint32 *) (m + 44)
3290# asm 1: xorl 44(<m=int32#5),<in11=int32#4
3291# asm 2: xorl 44(<m=%esi),<in11=%ebx
3292xorl 44(%esi),%ebx
3293
3294# qhasm: *(uint32 *) (out + 32) = in8
3295# asm 1: movl <in8=int32#1,32(<out=int32#6)
3296# asm 2: movl <in8=%eax,32(<out=%edi)
3297movl %eax,32(%edi)
3298
3299# qhasm: *(uint32 *) (out + 36) = in9
3300# asm 1: movl <in9=int32#2,36(<out=int32#6)
3301# asm 2: movl <in9=%ecx,36(<out=%edi)
3302movl %ecx,36(%edi)
3303
3304# qhasm: *(uint32 *) (out + 40) = in10
3305# asm 1: movl <in10=int32#3,40(<out=int32#6)
3306# asm 2: movl <in10=%edx,40(<out=%edi)
3307movl %edx,40(%edi)
3308
3309# qhasm: *(uint32 *) (out + 44) = in11
3310# asm 1: movl <in11=int32#4,44(<out=int32#6)
3311# asm 2: movl <in11=%ebx,44(<out=%edi)
3312movl %ebx,44(%edi)
3313
3314# qhasm: in8 = z8
3315# asm 1: movd <z8=int6464#1,>in8=int32#1
3316# asm 2: movd <z8=%xmm0,>in8=%eax
3317movd %xmm0,%eax
3318
3319# qhasm: in9 = z9
3320# asm 1: movd <z9=int6464#2,>in9=int32#2
3321# asm 2: movd <z9=%xmm1,>in9=%ecx
3322movd %xmm1,%ecx
3323
3324# qhasm: in10 = z10
3325# asm 1: movd <z10=int6464#3,>in10=int32#3
3326# asm 2: movd <z10=%xmm2,>in10=%edx
3327movd %xmm2,%edx
3328
3329# qhasm: in11 = z11
3330# asm 1: movd <z11=int6464#4,>in11=int32#4
3331# asm 2: movd <z11=%xmm3,>in11=%ebx
3332movd %xmm3,%ebx
3333
3334# qhasm: z8 <<<= 96
3335# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3336# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3337pshufd $0x39,%xmm0,%xmm0
3338
3339# qhasm: z9 <<<= 96
3340# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3341# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3342pshufd $0x39,%xmm1,%xmm1
3343
3344# qhasm: z10 <<<= 96
3345# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3346# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3347pshufd $0x39,%xmm2,%xmm2
3348
3349# qhasm: z11 <<<= 96
3350# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3351# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3352pshufd $0x39,%xmm3,%xmm3
3353
3354# qhasm: in8 ^= *(uint32 *) (m + 96)
3355# asm 1: xorl 96(<m=int32#5),<in8=int32#1
3356# asm 2: xorl 96(<m=%esi),<in8=%eax
3357xorl 96(%esi),%eax
3358
3359# qhasm: in9 ^= *(uint32 *) (m + 100)
3360# asm 1: xorl 100(<m=int32#5),<in9=int32#2
3361# asm 2: xorl 100(<m=%esi),<in9=%ecx
3362xorl 100(%esi),%ecx
3363
3364# qhasm: in10 ^= *(uint32 *) (m + 104)
3365# asm 1: xorl 104(<m=int32#5),<in10=int32#3
3366# asm 2: xorl 104(<m=%esi),<in10=%edx
3367xorl 104(%esi),%edx
3368
3369# qhasm: in11 ^= *(uint32 *) (m + 108)
3370# asm 1: xorl 108(<m=int32#5),<in11=int32#4
3371# asm 2: xorl 108(<m=%esi),<in11=%ebx
3372xorl 108(%esi),%ebx
3373
3374# qhasm: *(uint32 *) (out + 96) = in8
3375# asm 1: movl <in8=int32#1,96(<out=int32#6)
3376# asm 2: movl <in8=%eax,96(<out=%edi)
3377movl %eax,96(%edi)
3378
3379# qhasm: *(uint32 *) (out + 100) = in9
3380# asm 1: movl <in9=int32#2,100(<out=int32#6)
3381# asm 2: movl <in9=%ecx,100(<out=%edi)
3382movl %ecx,100(%edi)
3383
3384# qhasm: *(uint32 *) (out + 104) = in10
3385# asm 1: movl <in10=int32#3,104(<out=int32#6)
3386# asm 2: movl <in10=%edx,104(<out=%edi)
3387movl %edx,104(%edi)
3388
3389# qhasm: *(uint32 *) (out + 108) = in11
3390# asm 1: movl <in11=int32#4,108(<out=int32#6)
3391# asm 2: movl <in11=%ebx,108(<out=%edi)
3392movl %ebx,108(%edi)
3393
3394# qhasm: in8 = z8
3395# asm 1: movd <z8=int6464#1,>in8=int32#1
3396# asm 2: movd <z8=%xmm0,>in8=%eax
3397movd %xmm0,%eax
3398
3399# qhasm: in9 = z9
3400# asm 1: movd <z9=int6464#2,>in9=int32#2
3401# asm 2: movd <z9=%xmm1,>in9=%ecx
3402movd %xmm1,%ecx
3403
3404# qhasm: in10 = z10
3405# asm 1: movd <z10=int6464#3,>in10=int32#3
3406# asm 2: movd <z10=%xmm2,>in10=%edx
3407movd %xmm2,%edx
3408
3409# qhasm: in11 = z11
3410# asm 1: movd <z11=int6464#4,>in11=int32#4
3411# asm 2: movd <z11=%xmm3,>in11=%ebx
3412movd %xmm3,%ebx
3413
3414# qhasm: z8 <<<= 96
3415# asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3416# asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3417pshufd $0x39,%xmm0,%xmm0
3418
3419# qhasm: z9 <<<= 96
3420# asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3421# asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3422pshufd $0x39,%xmm1,%xmm1
3423
3424# qhasm: z10 <<<= 96
3425# asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3426# asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3427pshufd $0x39,%xmm2,%xmm2
3428
3429# qhasm: z11 <<<= 96
3430# asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3431# asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3432pshufd $0x39,%xmm3,%xmm3
3433
3434# qhasm: in8 ^= *(uint32 *) (m + 160)
3435# asm 1: xorl 160(<m=int32#5),<in8=int32#1
3436# asm 2: xorl 160(<m=%esi),<in8=%eax
3437xorl 160(%esi),%eax
3438
3439# qhasm: in9 ^= *(uint32 *) (m + 164)
3440# asm 1: xorl 164(<m=int32#5),<in9=int32#2
3441# asm 2: xorl 164(<m=%esi),<in9=%ecx
3442xorl 164(%esi),%ecx
3443
3444# qhasm: in10 ^= *(uint32 *) (m + 168)
3445# asm 1: xorl 168(<m=int32#5),<in10=int32#3
3446# asm 2: xorl 168(<m=%esi),<in10=%edx
3447xorl 168(%esi),%edx
3448
3449# qhasm: in11 ^= *(uint32 *) (m + 172)
3450# asm 1: xorl 172(<m=int32#5),<in11=int32#4
3451# asm 2: xorl 172(<m=%esi),<in11=%ebx
3452xorl 172(%esi),%ebx
3453
3454# qhasm: *(uint32 *) (out + 160) = in8
3455# asm 1: movl <in8=int32#1,160(<out=int32#6)
3456# asm 2: movl <in8=%eax,160(<out=%edi)
3457movl %eax,160(%edi)
3458
3459# qhasm: *(uint32 *) (out + 164) = in9
3460# asm 1: movl <in9=int32#2,164(<out=int32#6)
3461# asm 2: movl <in9=%ecx,164(<out=%edi)
3462movl %ecx,164(%edi)
3463
3464# qhasm: *(uint32 *) (out + 168) = in10
3465# asm 1: movl <in10=int32#3,168(<out=int32#6)
3466# asm 2: movl <in10=%edx,168(<out=%edi)
3467movl %edx,168(%edi)
3468
3469# qhasm: *(uint32 *) (out + 172) = in11
3470# asm 1: movl <in11=int32#4,172(<out=int32#6)
3471# asm 2: movl <in11=%ebx,172(<out=%edi)
3472movl %ebx,172(%edi)
3473
3474# qhasm: in8 = z8
3475# asm 1: movd <z8=int6464#1,>in8=int32#1
3476# asm 2: movd <z8=%xmm0,>in8=%eax
3477movd %xmm0,%eax
3478
3479# qhasm: in9 = z9
3480# asm 1: movd <z9=int6464#2,>in9=int32#2
3481# asm 2: movd <z9=%xmm1,>in9=%ecx
3482movd %xmm1,%ecx
3483
3484# qhasm: in10 = z10
3485# asm 1: movd <z10=int6464#3,>in10=int32#3
3486# asm 2: movd <z10=%xmm2,>in10=%edx
3487movd %xmm2,%edx
3488
3489# qhasm: in11 = z11
3490# asm 1: movd <z11=int6464#4,>in11=int32#4
3491# asm 2: movd <z11=%xmm3,>in11=%ebx
3492movd %xmm3,%ebx
3493
3494# qhasm: in8 ^= *(uint32 *) (m + 224)
3495# asm 1: xorl 224(<m=int32#5),<in8=int32#1
3496# asm 2: xorl 224(<m=%esi),<in8=%eax
3497xorl 224(%esi),%eax
3498
3499# qhasm: in9 ^= *(uint32 *) (m + 228)
3500# asm 1: xorl 228(<m=int32#5),<in9=int32#2
3501# asm 2: xorl 228(<m=%esi),<in9=%ecx
3502xorl 228(%esi),%ecx
3503
3504# qhasm: in10 ^= *(uint32 *) (m + 232)
3505# asm 1: xorl 232(<m=int32#5),<in10=int32#3
3506# asm 2: xorl 232(<m=%esi),<in10=%edx
3507xorl 232(%esi),%edx
3508
3509# qhasm: in11 ^= *(uint32 *) (m + 236)
3510# asm 1: xorl 236(<m=int32#5),<in11=int32#4
3511# asm 2: xorl 236(<m=%esi),<in11=%ebx
3512xorl 236(%esi),%ebx
3513
3514# qhasm: *(uint32 *) (out + 224) = in8
3515# asm 1: movl <in8=int32#1,224(<out=int32#6)
3516# asm 2: movl <in8=%eax,224(<out=%edi)
3517movl %eax,224(%edi)
3518
3519# qhasm: *(uint32 *) (out + 228) = in9
3520# asm 1: movl <in9=int32#2,228(<out=int32#6)
3521# asm 2: movl <in9=%ecx,228(<out=%edi)
3522movl %ecx,228(%edi)
3523
3524# qhasm: *(uint32 *) (out + 232) = in10
3525# asm 1: movl <in10=int32#3,232(<out=int32#6)
3526# asm 2: movl <in10=%edx,232(<out=%edi)
3527movl %edx,232(%edi)
3528
3529# qhasm: *(uint32 *) (out + 236) = in11
3530# asm 1: movl <in11=int32#4,236(<out=int32#6)
3531# asm 2: movl <in11=%ebx,236(<out=%edi)
3532movl %ebx,236(%edi)
3533
3534# qhasm: z12 = z12_stack
3535# asm 1: movdqa <z12_stack=stack128#35,>z12=int6464#1
3536# asm 2: movdqa <z12_stack=576(%esp),>z12=%xmm0
3537movdqa 576(%esp),%xmm0
3538
3539# qhasm: z13 = z13_stack
3540# asm 1: movdqa <z13_stack=stack128#30,>z13=int6464#2
3541# asm 2: movdqa <z13_stack=496(%esp),>z13=%xmm1
3542movdqa 496(%esp),%xmm1
3543
3544# qhasm: z14 = z14_stack
3545# asm 1: movdqa <z14_stack=stack128#24,>z14=int6464#3
3546# asm 2: movdqa <z14_stack=400(%esp),>z14=%xmm2
3547movdqa 400(%esp),%xmm2
3548
3549# qhasm: z15 = z15_stack
3550# asm 1: movdqa <z15_stack=stack128#23,>z15=int6464#4
3551# asm 2: movdqa <z15_stack=384(%esp),>z15=%xmm3
3552movdqa 384(%esp),%xmm3
3553
3554# qhasm: uint32323232 z12 += orig12
3555# asm 1: paddd <orig12=stack128#11,<z12=int6464#1
3556# asm 2: paddd <orig12=192(%esp),<z12=%xmm0
3557paddd 192(%esp),%xmm0
3558
3559# qhasm: uint32323232 z13 += orig13
3560# asm 1: paddd <orig13=stack128#14,<z13=int6464#2
3561# asm 2: paddd <orig13=240(%esp),<z13=%xmm1
3562paddd 240(%esp),%xmm1
3563
3564# qhasm: uint32323232 z14 += orig14
3565# asm 1: paddd <orig14=stack128#17,<z14=int6464#3
3566# asm 2: paddd <orig14=288(%esp),<z14=%xmm2
3567paddd 288(%esp),%xmm2
3568
3569# qhasm: uint32323232 z15 += orig15
3570# asm 1: paddd <orig15=stack128#7,<z15=int6464#4
3571# asm 2: paddd <orig15=128(%esp),<z15=%xmm3
3572paddd 128(%esp),%xmm3
3573
3574# qhasm: in12 = z12
3575# asm 1: movd <z12=int6464#1,>in12=int32#1
3576# asm 2: movd <z12=%xmm0,>in12=%eax
3577movd %xmm0,%eax
3578
3579# qhasm: in13 = z13
3580# asm 1: movd <z13=int6464#2,>in13=int32#2
3581# asm 2: movd <z13=%xmm1,>in13=%ecx
3582movd %xmm1,%ecx
3583
3584# qhasm: in14 = z14
3585# asm 1: movd <z14=int6464#3,>in14=int32#3
3586# asm 2: movd <z14=%xmm2,>in14=%edx
3587movd %xmm2,%edx
3588
3589# qhasm: in15 = z15
3590# asm 1: movd <z15=int6464#4,>in15=int32#4
3591# asm 2: movd <z15=%xmm3,>in15=%ebx
3592movd %xmm3,%ebx
3593
3594# qhasm: z12 <<<= 96
3595# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3596# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3597pshufd $0x39,%xmm0,%xmm0
3598
3599# qhasm: z13 <<<= 96
3600# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3601# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3602pshufd $0x39,%xmm1,%xmm1
3603
3604# qhasm: z14 <<<= 96
3605# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3606# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3607pshufd $0x39,%xmm2,%xmm2
3608
3609# qhasm: z15 <<<= 96
3610# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3611# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3612pshufd $0x39,%xmm3,%xmm3
3613
3614# qhasm: in12 ^= *(uint32 *) (m + 48)
3615# asm 1: xorl 48(<m=int32#5),<in12=int32#1
3616# asm 2: xorl 48(<m=%esi),<in12=%eax
3617xorl 48(%esi),%eax
3618
3619# qhasm: in13 ^= *(uint32 *) (m + 52)
3620# asm 1: xorl 52(<m=int32#5),<in13=int32#2
3621# asm 2: xorl 52(<m=%esi),<in13=%ecx
3622xorl 52(%esi),%ecx
3623
3624# qhasm: in14 ^= *(uint32 *) (m + 56)
3625# asm 1: xorl 56(<m=int32#5),<in14=int32#3
3626# asm 2: xorl 56(<m=%esi),<in14=%edx
3627xorl 56(%esi),%edx
3628
3629# qhasm: in15 ^= *(uint32 *) (m + 60)
3630# asm 1: xorl 60(<m=int32#5),<in15=int32#4
3631# asm 2: xorl 60(<m=%esi),<in15=%ebx
3632xorl 60(%esi),%ebx
3633
3634# qhasm: *(uint32 *) (out + 48) = in12
3635# asm 1: movl <in12=int32#1,48(<out=int32#6)
3636# asm 2: movl <in12=%eax,48(<out=%edi)
3637movl %eax,48(%edi)
3638
3639# qhasm: *(uint32 *) (out + 52) = in13
3640# asm 1: movl <in13=int32#2,52(<out=int32#6)
3641# asm 2: movl <in13=%ecx,52(<out=%edi)
3642movl %ecx,52(%edi)
3643
3644# qhasm: *(uint32 *) (out + 56) = in14
3645# asm 1: movl <in14=int32#3,56(<out=int32#6)
3646# asm 2: movl <in14=%edx,56(<out=%edi)
3647movl %edx,56(%edi)
3648
3649# qhasm: *(uint32 *) (out + 60) = in15
3650# asm 1: movl <in15=int32#4,60(<out=int32#6)
3651# asm 2: movl <in15=%ebx,60(<out=%edi)
3652movl %ebx,60(%edi)
3653
3654# qhasm: in12 = z12
3655# asm 1: movd <z12=int6464#1,>in12=int32#1
3656# asm 2: movd <z12=%xmm0,>in12=%eax
3657movd %xmm0,%eax
3658
3659# qhasm: in13 = z13
3660# asm 1: movd <z13=int6464#2,>in13=int32#2
3661# asm 2: movd <z13=%xmm1,>in13=%ecx
3662movd %xmm1,%ecx
3663
3664# qhasm: in14 = z14
3665# asm 1: movd <z14=int6464#3,>in14=int32#3
3666# asm 2: movd <z14=%xmm2,>in14=%edx
3667movd %xmm2,%edx
3668
3669# qhasm: in15 = z15
3670# asm 1: movd <z15=int6464#4,>in15=int32#4
3671# asm 2: movd <z15=%xmm3,>in15=%ebx
3672movd %xmm3,%ebx
3673
3674# qhasm: z12 <<<= 96
3675# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3676# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3677pshufd $0x39,%xmm0,%xmm0
3678
3679# qhasm: z13 <<<= 96
3680# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3681# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3682pshufd $0x39,%xmm1,%xmm1
3683
3684# qhasm: z14 <<<= 96
3685# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3686# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3687pshufd $0x39,%xmm2,%xmm2
3688
3689# qhasm: z15 <<<= 96
3690# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3691# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3692pshufd $0x39,%xmm3,%xmm3
3693
3694# qhasm: in12 ^= *(uint32 *) (m + 112)
3695# asm 1: xorl 112(<m=int32#5),<in12=int32#1
3696# asm 2: xorl 112(<m=%esi),<in12=%eax
3697xorl 112(%esi),%eax
3698
3699# qhasm: in13 ^= *(uint32 *) (m + 116)
3700# asm 1: xorl 116(<m=int32#5),<in13=int32#2
3701# asm 2: xorl 116(<m=%esi),<in13=%ecx
3702xorl 116(%esi),%ecx
3703
3704# qhasm: in14 ^= *(uint32 *) (m + 120)
3705# asm 1: xorl 120(<m=int32#5),<in14=int32#3
3706# asm 2: xorl 120(<m=%esi),<in14=%edx
3707xorl 120(%esi),%edx
3708
3709# qhasm: in15 ^= *(uint32 *) (m + 124)
3710# asm 1: xorl 124(<m=int32#5),<in15=int32#4
3711# asm 2: xorl 124(<m=%esi),<in15=%ebx
3712xorl 124(%esi),%ebx
3713
3714# qhasm: *(uint32 *) (out + 112) = in12
3715# asm 1: movl <in12=int32#1,112(<out=int32#6)
3716# asm 2: movl <in12=%eax,112(<out=%edi)
3717movl %eax,112(%edi)
3718
3719# qhasm: *(uint32 *) (out + 116) = in13
3720# asm 1: movl <in13=int32#2,116(<out=int32#6)
3721# asm 2: movl <in13=%ecx,116(<out=%edi)
3722movl %ecx,116(%edi)
3723
3724# qhasm: *(uint32 *) (out + 120) = in14
3725# asm 1: movl <in14=int32#3,120(<out=int32#6)
3726# asm 2: movl <in14=%edx,120(<out=%edi)
3727movl %edx,120(%edi)
3728
3729# qhasm: *(uint32 *) (out + 124) = in15
3730# asm 1: movl <in15=int32#4,124(<out=int32#6)
3731# asm 2: movl <in15=%ebx,124(<out=%edi)
3732movl %ebx,124(%edi)
3733
3734# qhasm: in12 = z12
3735# asm 1: movd <z12=int6464#1,>in12=int32#1
3736# asm 2: movd <z12=%xmm0,>in12=%eax
3737movd %xmm0,%eax
3738
3739# qhasm: in13 = z13
3740# asm 1: movd <z13=int6464#2,>in13=int32#2
3741# asm 2: movd <z13=%xmm1,>in13=%ecx
3742movd %xmm1,%ecx
3743
3744# qhasm: in14 = z14
3745# asm 1: movd <z14=int6464#3,>in14=int32#3
3746# asm 2: movd <z14=%xmm2,>in14=%edx
3747movd %xmm2,%edx
3748
3749# qhasm: in15 = z15
3750# asm 1: movd <z15=int6464#4,>in15=int32#4
3751# asm 2: movd <z15=%xmm3,>in15=%ebx
3752movd %xmm3,%ebx
3753
3754# qhasm: z12 <<<= 96
3755# asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3756# asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3757pshufd $0x39,%xmm0,%xmm0
3758
3759# qhasm: z13 <<<= 96
3760# asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3761# asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3762pshufd $0x39,%xmm1,%xmm1
3763
3764# qhasm: z14 <<<= 96
3765# asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3766# asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3767pshufd $0x39,%xmm2,%xmm2
3768
3769# qhasm: z15 <<<= 96
3770# asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3771# asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3772pshufd $0x39,%xmm3,%xmm3
3773
3774# qhasm: in12 ^= *(uint32 *) (m + 176)
3775# asm 1: xorl 176(<m=int32#5),<in12=int32#1
3776# asm 2: xorl 176(<m=%esi),<in12=%eax
3777xorl 176(%esi),%eax
3778
3779# qhasm: in13 ^= *(uint32 *) (m + 180)
3780# asm 1: xorl 180(<m=int32#5),<in13=int32#2
3781# asm 2: xorl 180(<m=%esi),<in13=%ecx
3782xorl 180(%esi),%ecx
3783
3784# qhasm: in14 ^= *(uint32 *) (m + 184)
3785# asm 1: xorl 184(<m=int32#5),<in14=int32#3
3786# asm 2: xorl 184(<m=%esi),<in14=%edx
3787xorl 184(%esi),%edx
3788
3789# qhasm: in15 ^= *(uint32 *) (m + 188)
3790# asm 1: xorl 188(<m=int32#5),<in15=int32#4
3791# asm 2: xorl 188(<m=%esi),<in15=%ebx
3792xorl 188(%esi),%ebx
3793
3794# qhasm: *(uint32 *) (out + 176) = in12
3795# asm 1: movl <in12=int32#1,176(<out=int32#6)
3796# asm 2: movl <in12=%eax,176(<out=%edi)
3797movl %eax,176(%edi)
3798
3799# qhasm: *(uint32 *) (out + 180) = in13
3800# asm 1: movl <in13=int32#2,180(<out=int32#6)
3801# asm 2: movl <in13=%ecx,180(<out=%edi)
3802movl %ecx,180(%edi)
3803
3804# qhasm: *(uint32 *) (out + 184) = in14
3805# asm 1: movl <in14=int32#3,184(<out=int32#6)
3806# asm 2: movl <in14=%edx,184(<out=%edi)
3807movl %edx,184(%edi)
3808
3809# qhasm: *(uint32 *) (out + 188) = in15
3810# asm 1: movl <in15=int32#4,188(<out=int32#6)
3811# asm 2: movl <in15=%ebx,188(<out=%edi)
3812movl %ebx,188(%edi)
3813
3814# qhasm: in12 = z12
3815# asm 1: movd <z12=int6464#1,>in12=int32#1
3816# asm 2: movd <z12=%xmm0,>in12=%eax
3817movd %xmm0,%eax
3818
3819# qhasm: in13 = z13
3820# asm 1: movd <z13=int6464#2,>in13=int32#2
3821# asm 2: movd <z13=%xmm1,>in13=%ecx
3822movd %xmm1,%ecx
3823
3824# qhasm: in14 = z14
3825# asm 1: movd <z14=int6464#3,>in14=int32#3
3826# asm 2: movd <z14=%xmm2,>in14=%edx
3827movd %xmm2,%edx
3828
3829# qhasm: in15 = z15
3830# asm 1: movd <z15=int6464#4,>in15=int32#4
3831# asm 2: movd <z15=%xmm3,>in15=%ebx
3832movd %xmm3,%ebx
3833
3834# qhasm: in12 ^= *(uint32 *) (m + 240)
3835# asm 1: xorl 240(<m=int32#5),<in12=int32#1
3836# asm 2: xorl 240(<m=%esi),<in12=%eax
3837xorl 240(%esi),%eax
3838
3839# qhasm: in13 ^= *(uint32 *) (m + 244)
3840# asm 1: xorl 244(<m=int32#5),<in13=int32#2
3841# asm 2: xorl 244(<m=%esi),<in13=%ecx
3842xorl 244(%esi),%ecx
3843
3844# qhasm: in14 ^= *(uint32 *) (m + 248)
3845# asm 1: xorl 248(<m=int32#5),<in14=int32#3
3846# asm 2: xorl 248(<m=%esi),<in14=%edx
3847xorl 248(%esi),%edx
3848
3849# qhasm: in15 ^= *(uint32 *) (m + 252)
3850# asm 1: xorl 252(<m=int32#5),<in15=int32#4
3851# asm 2: xorl 252(<m=%esi),<in15=%ebx
3852xorl 252(%esi),%ebx
3853
3854# qhasm: *(uint32 *) (out + 240) = in12
3855# asm 1: movl <in12=int32#1,240(<out=int32#6)
3856# asm 2: movl <in12=%eax,240(<out=%edi)
3857movl %eax,240(%edi)
3858
3859# qhasm: *(uint32 *) (out + 244) = in13
3860# asm 1: movl <in13=int32#2,244(<out=int32#6)
3861# asm 2: movl <in13=%ecx,244(<out=%edi)
3862movl %ecx,244(%edi)
3863
3864# qhasm: *(uint32 *) (out + 248) = in14
3865# asm 1: movl <in14=int32#3,248(<out=int32#6)
3866# asm 2: movl <in14=%edx,248(<out=%edi)
3867movl %edx,248(%edi)
3868
3869# qhasm: *(uint32 *) (out + 252) = in15
3870# asm 1: movl <in15=int32#4,252(<out=int32#6)
3871# asm 2: movl <in15=%ebx,252(<out=%edi)
3872movl %ebx,252(%edi)
3873
3874# qhasm: bytes = bytes_stack
3875# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
3876# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
3877movl 24(%esp),%eax
3878
3879# qhasm: bytes -= 256
3880# asm 1: sub $256,<bytes=int32#1
3881# asm 2: sub $256,<bytes=%eax
3882sub $256,%eax
3883
3884# qhasm: m += 256
3885# asm 1: add $256,<m=int32#5
3886# asm 2: add $256,<m=%esi
3887add $256,%esi
3888
3889# qhasm: out += 256
3890# asm 1: add $256,<out=int32#6
3891# asm 2: add $256,<out=%edi
3892add $256,%edi
3893
3894# qhasm: out_stack = out
3895# asm 1: movl <out=int32#6,>out_stack=stack32#6
3896# asm 2: movl <out=%edi,>out_stack=20(%esp)
3897movl %edi,20(%esp)
3898
3899# qhasm: unsigned<? bytes - 256
3900# asm 1: cmp $256,<bytes=int32#1
3901# asm 2: cmp $256,<bytes=%eax
3902cmp $256,%eax
3903# comment:fp stack unchanged by jump
3904
3905# qhasm: goto bytesatleast256 if !unsigned<
3906jae ._bytesatleast256
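# Editor's note (summary comment, not generated by qhasm): fewer than 256
# bytes remain here.  If the count is zero the routine is done; otherwise
# control falls through to the per-block tail path, which, for a final
# fragment shorter than 64 bytes, first copies the message into the tmp
# buffer so that whole 64-byte blocks can be read and written safely.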
3907
3908# qhasm: unsigned>? bytes - 0
3909# asm 1: cmp $0,<bytes=int32#1
3910# asm 2: cmp $0,<bytes=%eax
3911cmp $0,%eax
3912# comment:fp stack unchanged by jump
3913
3914# qhasm: goto done if !unsigned>
3915jbe ._done
3916# comment:fp stack unchanged by fallthrough
3917
3918# qhasm: bytesbetween1and255:
3919._bytesbetween1and255:
3920
3921# qhasm: unsigned<? bytes - 64
3922# asm 1: cmp $64,<bytes=int32#1
3923# asm 2: cmp $64,<bytes=%eax
3924cmp $64,%eax
3925# comment:fp stack unchanged by jump
3926
3927# qhasm: goto nocopy if !unsigned<
3928jae ._nocopy
3929
3930# qhasm: ctarget = out
3931# asm 1: movl <out=int32#6,>ctarget=stack32#6
3932# asm 2: movl <out=%edi,>ctarget=20(%esp)
3933movl %edi,20(%esp)
3934
3935# qhasm: out = &tmp
3936# asm 1: leal <tmp=stack512#1,>out=int32#6
3937# asm 2: leal <tmp=640(%esp),>out=%edi
3938leal 640(%esp),%edi
3939
3940# qhasm: i = bytes
3941# asm 1: mov <bytes=int32#1,>i=int32#2
3942# asm 2: mov <bytes=%eax,>i=%ecx
3943mov %eax,%ecx
3944
3945# qhasm: while (i) { *out++ = *m++; --i }
3946rep movsb
3947
3948# qhasm: out = &tmp
3949# asm 1: leal <tmp=stack512#1,>out=int32#6
3950# asm 2: leal <tmp=640(%esp),>out=%edi
3951leal 640(%esp),%edi
3952
3953# qhasm: m = &tmp
3954# asm 1: leal <tmp=stack512#1,>m=int32#5
3955# asm 2: leal <tmp=640(%esp),>m=%esi
3956leal 640(%esp),%esi
3957# comment:fp stack unchanged by fallthrough
3958
3959# qhasm: nocopy:
3960._nocopy:
3961
3962# qhasm: bytes_stack = bytes
3963# asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
3964# asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
3965movl %eax,24(%esp)
3966
3967# qhasm: diag0 = x0
3968# asm 1: movdqa <x0=stack128#3,>diag0=int6464#1
3969# asm 2: movdqa <x0=64(%esp),>diag0=%xmm0
3970movdqa 64(%esp),%xmm0
3971
3972# qhasm: diag1 = x1
3973# asm 1: movdqa <x1=stack128#2,>diag1=int6464#2
3974# asm 2: movdqa <x1=48(%esp),>diag1=%xmm1
3975movdqa 48(%esp),%xmm1
3976
3977# qhasm: diag2 = x2
3978# asm 1: movdqa <x2=stack128#4,>diag2=int6464#3
3979# asm 2: movdqa <x2=80(%esp),>diag2=%xmm2
3980movdqa 80(%esp),%xmm2
3981
3982# qhasm: diag3 = x3
3983# asm 1: movdqa <x3=stack128#1,>diag3=int6464#4
3984# asm 2: movdqa <x3=32(%esp),>diag3=%xmm3
3985movdqa 32(%esp),%xmm3
3986
3987# qhasm: a0 = diag1
3988# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3989# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3990movdqa %xmm1,%xmm4
3991
3992# qhasm: i = 8
3993# asm 1: mov $8,>i=int32#1
3994# asm 2: mov $8,>i=%eax
3995mov $8,%eax
3996
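# Editor's note (summary comment, not generated by qhasm): the loop below
# handles the tail one 64-byte block at a time.  The state is held in
# diag0..diag3 in the shuffled diagonal layout; each pass applies
# Salsa20-style quarter-round steps (rotations by 7, 9, 13 and 18 built
# from paired pslld/psrld and pxor) and re-aligns the diagonals with
# pshufd between the two halves of each double round.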
3997# qhasm: mainloop2:
3998._mainloop2:
3999
4000# qhasm: uint32323232 a0 += diag0
4001# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4002# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4003paddd %xmm0,%xmm4
4004
4005# qhasm: a1 = diag0
4006# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4007# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4008movdqa %xmm0,%xmm5
4009
4010# qhasm: b0 = a0
4011# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4012# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4013movdqa %xmm4,%xmm6
4014
4015# qhasm: uint32323232 a0 <<= 7
4016# asm 1: pslld $7,<a0=int6464#5
4017# asm 2: pslld $7,<a0=%xmm4
4018pslld $7,%xmm4
4019
4020# qhasm: uint32323232 b0 >>= 25
4021# asm 1: psrld $25,<b0=int6464#7
4022# asm 2: psrld $25,<b0=%xmm6
4023psrld $25,%xmm6
4024
4025# qhasm: diag3 ^= a0
4026# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4027# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4028pxor %xmm4,%xmm3
4029
4030# qhasm: diag3 ^= b0
4031# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4032# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4033pxor %xmm6,%xmm3
4034
4035# qhasm: uint32323232 a1 += diag3
4036# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4037# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4038paddd %xmm3,%xmm5
4039
4040# qhasm: a2 = diag3
4041# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4042# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4043movdqa %xmm3,%xmm4
4044
4045# qhasm: b1 = a1
4046# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4047# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4048movdqa %xmm5,%xmm6
4049
4050# qhasm: uint32323232 a1 <<= 9
4051# asm 1: pslld $9,<a1=int6464#6
4052# asm 2: pslld $9,<a1=%xmm5
4053pslld $9,%xmm5
4054
4055# qhasm: uint32323232 b1 >>= 23
4056# asm 1: psrld $23,<b1=int6464#7
4057# asm 2: psrld $23,<b1=%xmm6
4058psrld $23,%xmm6
4059
4060# qhasm: diag2 ^= a1
4061# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4062# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4063pxor %xmm5,%xmm2
4064
4065# qhasm: diag3 <<<= 32
4066# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4067# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4068pshufd $0x93,%xmm3,%xmm3
4069
4070# qhasm: diag2 ^= b1
4071# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4072# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4073pxor %xmm6,%xmm2
4074
4075# qhasm: uint32323232 a2 += diag2
4076# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4077# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4078paddd %xmm2,%xmm4
4079
4080# qhasm: a3 = diag2
4081# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4082# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4083movdqa %xmm2,%xmm5
4084
4085# qhasm: b2 = a2
4086# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4087# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4088movdqa %xmm4,%xmm6
4089
4090# qhasm: uint32323232 a2 <<= 13
4091# asm 1: pslld $13,<a2=int6464#5
4092# asm 2: pslld $13,<a2=%xmm4
4093pslld $13,%xmm4
4094
4095# qhasm: uint32323232 b2 >>= 19
4096# asm 1: psrld $19,<b2=int6464#7
4097# asm 2: psrld $19,<b2=%xmm6
4098psrld $19,%xmm6
4099
4100# qhasm: diag1 ^= a2
4101# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4102# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4103pxor %xmm4,%xmm1
4104
4105# qhasm: diag2 <<<= 64
4106# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4107# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4108pshufd $0x4e,%xmm2,%xmm2
4109
4110# qhasm: diag1 ^= b2
4111# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4112# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4113pxor %xmm6,%xmm1
4114
4115# qhasm: uint32323232 a3 += diag1
4116# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4117# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4118paddd %xmm1,%xmm5
4119
4120# qhasm: a4 = diag3
4121# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4122# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4123movdqa %xmm3,%xmm4
4124
4125# qhasm: b3 = a3
4126# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4127# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4128movdqa %xmm5,%xmm6
4129
4130# qhasm: uint32323232 a3 <<= 18
4131# asm 1: pslld $18,<a3=int6464#6
4132# asm 2: pslld $18,<a3=%xmm5
4133pslld $18,%xmm5
4134
4135# qhasm: uint32323232 b3 >>= 14
4136# asm 1: psrld $14,<b3=int6464#7
4137# asm 2: psrld $14,<b3=%xmm6
4138psrld $14,%xmm6
4139
4140# qhasm: diag0 ^= a3
4141# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4142# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4143pxor %xmm5,%xmm0
4144
4145# qhasm: diag1 <<<= 96
4146# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4147# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4148pshufd $0x39,%xmm1,%xmm1
4149
4150# qhasm: diag0 ^= b3
4151# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4152# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4153pxor %xmm6,%xmm0
4154
4155# qhasm: uint32323232 a4 += diag0
4156# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4157# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4158paddd %xmm0,%xmm4
4159
4160# qhasm: a5 = diag0
4161# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4162# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4163movdqa %xmm0,%xmm5
4164
4165# qhasm: b4 = a4
4166# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4167# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4168movdqa %xmm4,%xmm6
4169
4170# qhasm: uint32323232 a4 <<= 7
4171# asm 1: pslld $7,<a4=int6464#5
4172# asm 2: pslld $7,<a4=%xmm4
4173pslld $7,%xmm4
4174
4175# qhasm: uint32323232 b4 >>= 25
4176# asm 1: psrld $25,<b4=int6464#7
4177# asm 2: psrld $25,<b4=%xmm6
4178psrld $25,%xmm6
4179
4180# qhasm: diag1 ^= a4
4181# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4182# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4183pxor %xmm4,%xmm1
4184
4185# qhasm: diag1 ^= b4
4186# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4187# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4188pxor %xmm6,%xmm1
4189
4190# qhasm: uint32323232 a5 += diag1
4191# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4192# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4193paddd %xmm1,%xmm5
4194
4195# qhasm: a6 = diag1
4196# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4197# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4198movdqa %xmm1,%xmm4
4199
4200# qhasm: b5 = a5
4201# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4202# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4203movdqa %xmm5,%xmm6
4204
4205# qhasm: uint32323232 a5 <<= 9
4206# asm 1: pslld $9,<a5=int6464#6
4207# asm 2: pslld $9,<a5=%xmm5
4208pslld $9,%xmm5
4209
4210# qhasm: uint32323232 b5 >>= 23
4211# asm 1: psrld $23,<b5=int6464#7
4212# asm 2: psrld $23,<b5=%xmm6
4213psrld $23,%xmm6
4214
4215# qhasm: diag2 ^= a5
4216# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4217# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4218pxor %xmm5,%xmm2
4219
4220# qhasm: diag1 <<<= 32
4221# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4222# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4223pshufd $0x93,%xmm1,%xmm1
4224
4225# qhasm: diag2 ^= b5
4226# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4227# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4228pxor %xmm6,%xmm2
4229
4230# qhasm: uint32323232 a6 += diag2
4231# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4232# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4233paddd %xmm2,%xmm4
4234
4235# qhasm: a7 = diag2
4236# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4237# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4238movdqa %xmm2,%xmm5
4239
4240# qhasm: b6 = a6
4241# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4242# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4243movdqa %xmm4,%xmm6
4244
4245# qhasm: uint32323232 a6 <<= 13
4246# asm 1: pslld $13,<a6=int6464#5
4247# asm 2: pslld $13,<a6=%xmm4
4248pslld $13,%xmm4
4249
4250# qhasm: uint32323232 b6 >>= 19
4251# asm 1: psrld $19,<b6=int6464#7
4252# asm 2: psrld $19,<b6=%xmm6
4253psrld $19,%xmm6
4254
4255# qhasm: diag3 ^= a6
4256# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4257# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4258pxor %xmm4,%xmm3
4259
4260# qhasm: diag2 <<<= 64
4261# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4262# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4263pshufd $0x4e,%xmm2,%xmm2
4264
4265# qhasm: diag3 ^= b6
4266# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4267# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4268pxor %xmm6,%xmm3
4269
4270# qhasm: uint32323232 a7 += diag3
4271# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4272# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4273paddd %xmm3,%xmm5
4274
4275# qhasm: a0 = diag1
4276# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4277# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4278movdqa %xmm1,%xmm4
4279
4280# qhasm: b7 = a7
4281# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4282# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4283movdqa %xmm5,%xmm6
4284
4285# qhasm: uint32323232 a7 <<= 18
4286# asm 1: pslld $18,<a7=int6464#6
4287# asm 2: pslld $18,<a7=%xmm5
4288pslld $18,%xmm5
4289
4290# qhasm: uint32323232 b7 >>= 14
4291# asm 1: psrld $14,<b7=int6464#7
4292# asm 2: psrld $14,<b7=%xmm6
4293psrld $14,%xmm6
4294
4295# qhasm: diag0 ^= a7
4296# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4297# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4298pxor %xmm5,%xmm0
4299
4300# qhasm: diag3 <<<= 96
4301# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4302# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4303pshufd $0x39,%xmm3,%xmm3
4304
4305# qhasm: diag0 ^= b7
4306# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4307# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4308pxor %xmm6,%xmm0
4309
4310# qhasm: uint32323232 a0 += diag0
4311# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4312# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4313paddd %xmm0,%xmm4
4314
4315# qhasm: a1 = diag0
4316# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4317# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4318movdqa %xmm0,%xmm5
4319
4320# qhasm: b0 = a0
4321# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4322# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4323movdqa %xmm4,%xmm6
4324
4325# qhasm: uint32323232 a0 <<= 7
4326# asm 1: pslld $7,<a0=int6464#5
4327# asm 2: pslld $7,<a0=%xmm4
4328pslld $7,%xmm4
4329
4330# qhasm: uint32323232 b0 >>= 25
4331# asm 1: psrld $25,<b0=int6464#7
4332# asm 2: psrld $25,<b0=%xmm6
4333psrld $25,%xmm6
4334
4335# qhasm: diag3 ^= a0
4336# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4337# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4338pxor %xmm4,%xmm3
4339
4340# qhasm: diag3 ^= b0
4341# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4342# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4343pxor %xmm6,%xmm3
4344
4345# qhasm: uint32323232 a1 += diag3
4346# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4347# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4348paddd %xmm3,%xmm5
4349
4350# qhasm: a2 = diag3
4351# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4352# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4353movdqa %xmm3,%xmm4
4354
4355# qhasm: b1 = a1
4356# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4357# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4358movdqa %xmm5,%xmm6
4359
4360# qhasm: uint32323232 a1 <<= 9
4361# asm 1: pslld $9,<a1=int6464#6
4362# asm 2: pslld $9,<a1=%xmm5
4363pslld $9,%xmm5
4364
4365# qhasm: uint32323232 b1 >>= 23
4366# asm 1: psrld $23,<b1=int6464#7
4367# asm 2: psrld $23,<b1=%xmm6
4368psrld $23,%xmm6
4369
4370# qhasm: diag2 ^= a1
4371# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4372# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4373pxor %xmm5,%xmm2
4374
4375# qhasm: diag3 <<<= 32
4376# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4377# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4378pshufd $0x93,%xmm3,%xmm3
4379
4380# qhasm: diag2 ^= b1
4381# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4382# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4383pxor %xmm6,%xmm2
4384
4385# qhasm: uint32323232 a2 += diag2
4386# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4387# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4388paddd %xmm2,%xmm4
4389
4390# qhasm: a3 = diag2
4391# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4392# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4393movdqa %xmm2,%xmm5
4394
4395# qhasm: b2 = a2
4396# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4397# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4398movdqa %xmm4,%xmm6
4399
4400# qhasm: uint32323232 a2 <<= 13
4401# asm 1: pslld $13,<a2=int6464#5
4402# asm 2: pslld $13,<a2=%xmm4
4403pslld $13,%xmm4
4404
4405# qhasm: uint32323232 b2 >>= 19
4406# asm 1: psrld $19,<b2=int6464#7
4407# asm 2: psrld $19,<b2=%xmm6
4408psrld $19,%xmm6
4409
4410# qhasm: diag1 ^= a2
4411# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4412# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4413pxor %xmm4,%xmm1
4414
4415# qhasm: diag2 <<<= 64
4416# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4417# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4418pshufd $0x4e,%xmm2,%xmm2
4419
4420# qhasm: diag1 ^= b2
4421# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4422# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4423pxor %xmm6,%xmm1
4424
4425# qhasm: uint32323232 a3 += diag1
4426# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4427# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4428paddd %xmm1,%xmm5
4429
4430# qhasm: a4 = diag3
4431# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4432# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4433movdqa %xmm3,%xmm4
4434
4435# qhasm: b3 = a3
4436# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4437# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4438movdqa %xmm5,%xmm6
4439
4440# qhasm: uint32323232 a3 <<= 18
4441# asm 1: pslld $18,<a3=int6464#6
4442# asm 2: pslld $18,<a3=%xmm5
4443pslld $18,%xmm5
4444
4445# qhasm: uint32323232 b3 >>= 14
4446# asm 1: psrld $14,<b3=int6464#7
4447# asm 2: psrld $14,<b3=%xmm6
4448psrld $14,%xmm6
4449
4450# qhasm: diag0 ^= a3
4451# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4452# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4453pxor %xmm5,%xmm0
4454
4455# qhasm: diag1 <<<= 96
4456# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4457# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4458pshufd $0x39,%xmm1,%xmm1
4459
4460# qhasm: diag0 ^= b3
4461# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4462# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4463pxor %xmm6,%xmm0
4464
4465# qhasm: uint32323232 a4 += diag0
4466# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4467# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4468paddd %xmm0,%xmm4
4469
4470# qhasm: a5 = diag0
4471# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4472# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4473movdqa %xmm0,%xmm5
4474
4475# qhasm: b4 = a4
4476# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4477# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4478movdqa %xmm4,%xmm6
4479
4480# qhasm: uint32323232 a4 <<= 7
4481# asm 1: pslld $7,<a4=int6464#5
4482# asm 2: pslld $7,<a4=%xmm4
4483pslld $7,%xmm4
4484
4485# qhasm: uint32323232 b4 >>= 25
4486# asm 1: psrld $25,<b4=int6464#7
4487# asm 2: psrld $25,<b4=%xmm6
4488psrld $25,%xmm6
4489
4490# qhasm: diag1 ^= a4
4491# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4492# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4493pxor %xmm4,%xmm1
4494
4495# qhasm: diag1 ^= b4
4496# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4497# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4498pxor %xmm6,%xmm1
4499
4500# qhasm: uint32323232 a5 += diag1
4501# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4502# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4503paddd %xmm1,%xmm5
4504
4505# qhasm: a6 = diag1
4506# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4507# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4508movdqa %xmm1,%xmm4
4509
4510# qhasm: b5 = a5
4511# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4512# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4513movdqa %xmm5,%xmm6
4514
4515# qhasm: uint32323232 a5 <<= 9
4516# asm 1: pslld $9,<a5=int6464#6
4517# asm 2: pslld $9,<a5=%xmm5
4518pslld $9,%xmm5
4519
4520# qhasm: uint32323232 b5 >>= 23
4521# asm 1: psrld $23,<b5=int6464#7
4522# asm 2: psrld $23,<b5=%xmm6
4523psrld $23,%xmm6
4524
4525# qhasm: diag2 ^= a5
4526# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4527# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4528pxor %xmm5,%xmm2
4529
4530# qhasm: diag1 <<<= 32
4531# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4532# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4533pshufd $0x93,%xmm1,%xmm1
4534
4535# qhasm: diag2 ^= b5
4536# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4537# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4538pxor %xmm6,%xmm2
4539
4540# qhasm: uint32323232 a6 += diag2
4541# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4542# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4543paddd %xmm2,%xmm4
4544
4545# qhasm: a7 = diag2
4546# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4547# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4548movdqa %xmm2,%xmm5
4549
4550# qhasm: b6 = a6
4551# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4552# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4553movdqa %xmm4,%xmm6
4554
4555# qhasm: uint32323232 a6 <<= 13
4556# asm 1: pslld $13,<a6=int6464#5
4557# asm 2: pslld $13,<a6=%xmm4
4558pslld $13,%xmm4
4559
4560# qhasm: uint32323232 b6 >>= 19
4561# asm 1: psrld $19,<b6=int6464#7
4562# asm 2: psrld $19,<b6=%xmm6
4563psrld $19,%xmm6
4564
4565# qhasm: diag3 ^= a6
4566# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4567# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4568pxor %xmm4,%xmm3
4569
4570# qhasm: diag2 <<<= 64
4571# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4572# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4573pshufd $0x4e,%xmm2,%xmm2
4574
4575# qhasm: diag3 ^= b6
4576# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4577# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4578pxor %xmm6,%xmm3
4579
4580# qhasm: unsigned>? i -= 4
4581# asm 1: sub $4,<i=int32#1
4582# asm 2: sub $4,<i=%eax
4583sub $4,%eax
4584
4585# qhasm: uint32323232 a7 += diag3
4586# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4587# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4588paddd %xmm3,%xmm5
4589
4590# qhasm: a0 = diag1
4591# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4592# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4593movdqa %xmm1,%xmm4
4594
4595# qhasm: b7 = a7
4596# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4597# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4598movdqa %xmm5,%xmm6
4599
4600# qhasm: uint32323232 a7 <<= 18
4601# asm 1: pslld $18,<a7=int6464#6
4602# asm 2: pslld $18,<a7=%xmm5
4603pslld $18,%xmm5
4604
4605# qhasm: b0 = 0
4606# asm 1: pxor >b0=int6464#8,>b0=int6464#8
4607# asm 2: pxor >b0=%xmm7,>b0=%xmm7
4608pxor %xmm7,%xmm7
4609
4610# qhasm: uint32323232 b7 >>= 14
4611# asm 1: psrld $14,<b7=int6464#7
4612# asm 2: psrld $14,<b7=%xmm6
4613psrld $14,%xmm6
4614
4615# qhasm: diag0 ^= a7
4616# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4617# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4618pxor %xmm5,%xmm0
4619
4620# qhasm: diag3 <<<= 96
4621# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4622# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4623pshufd $0x39,%xmm3,%xmm3
4624
4625# qhasm: diag0 ^= b7
4626# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4627# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4628pxor %xmm6,%xmm0
4629# comment:fp stack unchanged by jump
4630
4631# qhasm: goto mainloop2 if unsigned>
4632ja ._mainloop2
4633
4634# qhasm: uint32323232 diag0 += x0
4635# asm 1: paddd <x0=stack128#3,<diag0=int6464#1
4636# asm 2: paddd <x0=64(%esp),<diag0=%xmm0
4637paddd 64(%esp),%xmm0
4638
4639# qhasm: uint32323232 diag1 += x1
4640# asm 1: paddd <x1=stack128#2,<diag1=int6464#2
4641# asm 2: paddd <x1=48(%esp),<diag1=%xmm1
4642paddd 48(%esp),%xmm1
4643
4644# qhasm: uint32323232 diag2 += x2
4645# asm 1: paddd <x2=stack128#4,<diag2=int6464#3
4646# asm 2: paddd <x2=80(%esp),<diag2=%xmm2
4647paddd 80(%esp),%xmm2
4648
4649# qhasm: uint32323232 diag3 += x3
4650# asm 1: paddd <x3=stack128#1,<diag3=int6464#4
4651# asm 2: paddd <x3=32(%esp),<diag3=%xmm3
4652paddd 32(%esp),%xmm3
4653
4654# qhasm: in0 = diag0
4655# asm 1: movd <diag0=int6464#1,>in0=int32#1
4656# asm 2: movd <diag0=%xmm0,>in0=%eax
4657movd %xmm0,%eax
4658
4659# qhasm: in12 = diag1
4660# asm 1: movd <diag1=int6464#2,>in12=int32#2
4661# asm 2: movd <diag1=%xmm1,>in12=%ecx
4662movd %xmm1,%ecx
4663
4664# qhasm: in8 = diag2
4665# asm 1: movd <diag2=int6464#3,>in8=int32#3
4666# asm 2: movd <diag2=%xmm2,>in8=%edx
4667movd %xmm2,%edx
4668
4669# qhasm: in4 = diag3
4670# asm 1: movd <diag3=int6464#4,>in4=int32#4
4671# asm 2: movd <diag3=%xmm3,>in4=%ebx
4672movd %xmm3,%ebx
4673
4674# qhasm: diag0 <<<= 96
4675# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4676# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4677pshufd $0x39,%xmm0,%xmm0
4678
4679# qhasm: diag1 <<<= 96
4680# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4681# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4682pshufd $0x39,%xmm1,%xmm1
4683
4684# qhasm: diag2 <<<= 96
4685# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4686# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4687pshufd $0x39,%xmm2,%xmm2
4688
4689# qhasm: diag3 <<<= 96
4690# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4691# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4692pshufd $0x39,%xmm3,%xmm3
4693
4694# qhasm: in0 ^= *(uint32 *) (m + 0)
4695# asm 1: xorl 0(<m=int32#5),<in0=int32#1
4696# asm 2: xorl 0(<m=%esi),<in0=%eax
4697xorl 0(%esi),%eax
4698
4699# qhasm: in12 ^= *(uint32 *) (m + 48)
4700# asm 1: xorl 48(<m=int32#5),<in12=int32#2
4701# asm 2: xorl 48(<m=%esi),<in12=%ecx
4702xorl 48(%esi),%ecx
4703
4704# qhasm: in8 ^= *(uint32 *) (m + 32)
4705# asm 1: xorl 32(<m=int32#5),<in8=int32#3
4706# asm 2: xorl 32(<m=%esi),<in8=%edx
4707xorl 32(%esi),%edx
4708
4709# qhasm: in4 ^= *(uint32 *) (m + 16)
4710# asm 1: xorl 16(<m=int32#5),<in4=int32#4
4711# asm 2: xorl 16(<m=%esi),<in4=%ebx
4712xorl 16(%esi),%ebx
4713
4714# qhasm: *(uint32 *) (out + 0) = in0
4715# asm 1: movl <in0=int32#1,0(<out=int32#6)
4716# asm 2: movl <in0=%eax,0(<out=%edi)
4717movl %eax,0(%edi)
4718
4719# qhasm: *(uint32 *) (out + 48) = in12
4720# asm 1: movl <in12=int32#2,48(<out=int32#6)
4721# asm 2: movl <in12=%ecx,48(<out=%edi)
4722movl %ecx,48(%edi)
4723
4724# qhasm: *(uint32 *) (out + 32) = in8
4725# asm 1: movl <in8=int32#3,32(<out=int32#6)
4726# asm 2: movl <in8=%edx,32(<out=%edi)
4727movl %edx,32(%edi)
4728
4729# qhasm: *(uint32 *) (out + 16) = in4
4730# asm 1: movl <in4=int32#4,16(<out=int32#6)
4731# asm 2: movl <in4=%ebx,16(<out=%edi)
4732movl %ebx,16(%edi)
4733
4734# qhasm: in5 = diag0
4735# asm 1: movd <diag0=int6464#1,>in5=int32#1
4736# asm 2: movd <diag0=%xmm0,>in5=%eax
4737movd %xmm0,%eax
4738
4739# qhasm: in1 = diag1
4740# asm 1: movd <diag1=int6464#2,>in1=int32#2
4741# asm 2: movd <diag1=%xmm1,>in1=%ecx
4742movd %xmm1,%ecx
4743
4744# qhasm: in13 = diag2
4745# asm 1: movd <diag2=int6464#3,>in13=int32#3
4746# asm 2: movd <diag2=%xmm2,>in13=%edx
4747movd %xmm2,%edx
4748
4749# qhasm: in9 = diag3
4750# asm 1: movd <diag3=int6464#4,>in9=int32#4
4751# asm 2: movd <diag3=%xmm3,>in9=%ebx
4752movd %xmm3,%ebx
4753
4754# qhasm: diag0 <<<= 96
4755# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4756# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4757pshufd $0x39,%xmm0,%xmm0
4758
4759# qhasm: diag1 <<<= 96
4760# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4761# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4762pshufd $0x39,%xmm1,%xmm1
4763
4764# qhasm: diag2 <<<= 96
4765# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4766# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4767pshufd $0x39,%xmm2,%xmm2
4768
4769# qhasm: diag3 <<<= 96
4770# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4771# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4772pshufd $0x39,%xmm3,%xmm3
4773
4774# qhasm: in5 ^= *(uint32 *) (m + 20)
4775# asm 1: xorl 20(<m=int32#5),<in5=int32#1
4776# asm 2: xorl 20(<m=%esi),<in5=%eax
4777xorl 20(%esi),%eax
4778
4779# qhasm: in1 ^= *(uint32 *) (m + 4)
4780# asm 1: xorl 4(<m=int32#5),<in1=int32#2
4781# asm 2: xorl 4(<m=%esi),<in1=%ecx
4782xorl 4(%esi),%ecx
4783
4784# qhasm: in13 ^= *(uint32 *) (m + 52)
4785# asm 1: xorl 52(<m=int32#5),<in13=int32#3
4786# asm 2: xorl 52(<m=%esi),<in13=%edx
4787xorl 52(%esi),%edx
4788
4789# qhasm: in9 ^= *(uint32 *) (m + 36)
4790# asm 1: xorl 36(<m=int32#5),<in9=int32#4
4791# asm 2: xorl 36(<m=%esi),<in9=%ebx
4792xorl 36(%esi),%ebx
4793
4794# qhasm: *(uint32 *) (out + 20) = in5
4795# asm 1: movl <in5=int32#1,20(<out=int32#6)
4796# asm 2: movl <in5=%eax,20(<out=%edi)
4797movl %eax,20(%edi)
4798
4799# qhasm: *(uint32 *) (out + 4) = in1
4800# asm 1: movl <in1=int32#2,4(<out=int32#6)
4801# asm 2: movl <in1=%ecx,4(<out=%edi)
4802movl %ecx,4(%edi)
4803
4804# qhasm: *(uint32 *) (out + 52) = in13
4805# asm 1: movl <in13=int32#3,52(<out=int32#6)
4806# asm 2: movl <in13=%edx,52(<out=%edi)
4807movl %edx,52(%edi)
4808
4809# qhasm: *(uint32 *) (out + 36) = in9
4810# asm 1: movl <in9=int32#4,36(<out=int32#6)
4811# asm 2: movl <in9=%ebx,36(<out=%edi)
4812movl %ebx,36(%edi)
4813
4814# qhasm: in10 = diag0
4815# asm 1: movd <diag0=int6464#1,>in10=int32#1
4816# asm 2: movd <diag0=%xmm0,>in10=%eax
4817movd %xmm0,%eax
4818
4819# qhasm: in6 = diag1
4820# asm 1: movd <diag1=int6464#2,>in6=int32#2
4821# asm 2: movd <diag1=%xmm1,>in6=%ecx
4822movd %xmm1,%ecx
4823
4824# qhasm: in2 = diag2
4825# asm 1: movd <diag2=int6464#3,>in2=int32#3
4826# asm 2: movd <diag2=%xmm2,>in2=%edx
4827movd %xmm2,%edx
4828
4829# qhasm: in14 = diag3
4830# asm 1: movd <diag3=int6464#4,>in14=int32#4
4831# asm 2: movd <diag3=%xmm3,>in14=%ebx
4832movd %xmm3,%ebx
4833
4834# qhasm: diag0 <<<= 96
4835# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
4836# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
4837pshufd $0x39,%xmm0,%xmm0
4838
4839# qhasm: diag1 <<<= 96
4840# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4841# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4842pshufd $0x39,%xmm1,%xmm1
4843
4844# qhasm: diag2 <<<= 96
4845# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
4846# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
4847pshufd $0x39,%xmm2,%xmm2
4848
4849# qhasm: diag3 <<<= 96
4850# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4851# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4852pshufd $0x39,%xmm3,%xmm3
4853
4854# qhasm: in10 ^= *(uint32 *) (m + 40)
4855# asm 1: xorl 40(<m=int32#5),<in10=int32#1
4856# asm 2: xorl 40(<m=%esi),<in10=%eax
4857xorl 40(%esi),%eax
4858
4859# qhasm: in6 ^= *(uint32 *) (m + 24)
4860# asm 1: xorl 24(<m=int32#5),<in6=int32#2
4861# asm 2: xorl 24(<m=%esi),<in6=%ecx
4862xorl 24(%esi),%ecx
4863
4864# qhasm: in2 ^= *(uint32 *) (m + 8)
4865# asm 1: xorl 8(<m=int32#5),<in2=int32#3
4866# asm 2: xorl 8(<m=%esi),<in2=%edx
4867xorl 8(%esi),%edx
4868
4869# qhasm: in14 ^= *(uint32 *) (m + 56)
4870# asm 1: xorl 56(<m=int32#5),<in14=int32#4
4871# asm 2: xorl 56(<m=%esi),<in14=%ebx
4872xorl 56(%esi),%ebx
4873
4874# qhasm: *(uint32 *) (out + 40) = in10
4875# asm 1: movl <in10=int32#1,40(<out=int32#6)
4876# asm 2: movl <in10=%eax,40(<out=%edi)
4877movl %eax,40(%edi)
4878
4879# qhasm: *(uint32 *) (out + 24) = in6
4880# asm 1: movl <in6=int32#2,24(<out=int32#6)
4881# asm 2: movl <in6=%ecx,24(<out=%edi)
4882movl %ecx,24(%edi)
4883
4884# qhasm: *(uint32 *) (out + 8) = in2
4885# asm 1: movl <in2=int32#3,8(<out=int32#6)
4886# asm 2: movl <in2=%edx,8(<out=%edi)
4887movl %edx,8(%edi)
4888
4889# qhasm: *(uint32 *) (out + 56) = in14
4890# asm 1: movl <in14=int32#4,56(<out=int32#6)
4891# asm 2: movl <in14=%ebx,56(<out=%edi)
4892movl %ebx,56(%edi)
4893
4894# qhasm: in15 = diag0
4895# asm 1: movd <diag0=int6464#1,>in15=int32#1
4896# asm 2: movd <diag0=%xmm0,>in15=%eax
4897movd %xmm0,%eax
4898
4899# qhasm: in11 = diag1
4900# asm 1: movd <diag1=int6464#2,>in11=int32#2
4901# asm 2: movd <diag1=%xmm1,>in11=%ecx
4902movd %xmm1,%ecx
4903
4904# qhasm: in7 = diag2
4905# asm 1: movd <diag2=int6464#3,>in7=int32#3
4906# asm 2: movd <diag2=%xmm2,>in7=%edx
4907movd %xmm2,%edx
4908
4909# qhasm: in3 = diag3
4910# asm 1: movd <diag3=int6464#4,>in3=int32#4
4911# asm 2: movd <diag3=%xmm3,>in3=%ebx
4912movd %xmm3,%ebx
4913
4914# qhasm: in15 ^= *(uint32 *) (m + 60)
4915# asm 1: xorl 60(<m=int32#5),<in15=int32#1
4916# asm 2: xorl 60(<m=%esi),<in15=%eax
4917xorl 60(%esi),%eax
4918
4919# qhasm: in11 ^= *(uint32 *) (m + 44)
4920# asm 1: xorl 44(<m=int32#5),<in11=int32#2
4921# asm 2: xorl 44(<m=%esi),<in11=%ecx
4922xorl 44(%esi),%ecx
4923
4924# qhasm: in7 ^= *(uint32 *) (m + 28)
4925# asm 1: xorl 28(<m=int32#5),<in7=int32#3
4926# asm 2: xorl 28(<m=%esi),<in7=%edx
4927xorl 28(%esi),%edx
4928
4929# qhasm: in3 ^= *(uint32 *) (m + 12)
4930# asm 1: xorl 12(<m=int32#5),<in3=int32#4
4931# asm 2: xorl 12(<m=%esi),<in3=%ebx
4932xorl 12(%esi),%ebx
4933
4934# qhasm: *(uint32 *) (out + 60) = in15
4935# asm 1: movl <in15=int32#1,60(<out=int32#6)
4936# asm 2: movl <in15=%eax,60(<out=%edi)
4937movl %eax,60(%edi)
4938
4939# qhasm: *(uint32 *) (out + 44) = in11
4940# asm 1: movl <in11=int32#2,44(<out=int32#6)
4941# asm 2: movl <in11=%ecx,44(<out=%edi)
4942movl %ecx,44(%edi)
4943
4944# qhasm: *(uint32 *) (out + 28) = in7
4945# asm 1: movl <in7=int32#3,28(<out=int32#6)
4946# asm 2: movl <in7=%edx,28(<out=%edi)
4947movl %edx,28(%edi)
4948
4949# qhasm: *(uint32 *) (out + 12) = in3
4950# asm 1: movl <in3=int32#4,12(<out=int32#6)
4951# asm 2: movl <in3=%ebx,12(<out=%edi)
4952movl %ebx,12(%edi)
4953
4954# qhasm: bytes = bytes_stack
4955# asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
4956# asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
4957movl 24(%esp),%eax
4958
4959# qhasm: in8 = ((uint32 *)&x2)[0]
4960# asm 1: movl <x2=stack128#4,>in8=int32#2
4961# asm 2: movl <x2=80(%esp),>in8=%ecx
4962movl 80(%esp),%ecx
4963
4964# qhasm: in9 = ((uint32 *)&x3)[1]
4965# asm 1: movl 4+<x3=stack128#1,>in9=int32#3
4966# asm 2: movl 4+<x3=32(%esp),>in9=%edx
4967movl 4+32(%esp),%edx
4968
4969# qhasm: carry? in8 += 1
4970# asm 1: add $1,<in8=int32#2
4971# asm 2: add $1,<in8=%ecx
4972add $1,%ecx
4973
4974# qhasm: in9 += 0 + carry
4975# asm 1: adc $0,<in9=int32#3
4976# asm 2: adc $0,<in9=%edx
4977adc $0,%edx
4978
4979# qhasm: ((uint32 *)&x2)[0] = in8
4980# asm 1: movl <in8=int32#2,>x2=stack128#4
4981# asm 2: movl <in8=%ecx,>x2=80(%esp)
4982movl %ecx,80(%esp)
4983
4984# qhasm: ((uint32 *)&x3)[1] = in9
4985# asm 1: movl <in9=int32#3,4+<x3=stack128#1
4986# asm 2: movl <in9=%edx,4+<x3=32(%esp)
4987movl %edx,4+32(%esp)
4988
4989# qhasm: unsigned>? unsigned<? bytes - 64
4990# asm 1: cmp $64,<bytes=int32#1
4991# asm 2: cmp $64,<bytes=%eax
4992cmp $64,%eax
4993# comment:fp stack unchanged by jump
4994
4995# qhasm: goto bytesatleast65 if unsigned>
4996ja ._bytesatleast65
4997# comment:fp stack unchanged by jump
4998
4999# qhasm: goto bytesatleast64 if !unsigned<
5000jae ._bytesatleast64
5001
5002# qhasm: m = out
5003# asm 1: mov <out=int32#6,>m=int32#5
5004# asm 2: mov <out=%edi,>m=%esi
5005mov %edi,%esi
5006
5007# qhasm: out = ctarget
5008# asm 1: movl <ctarget=stack32#6,>out=int32#6
5009# asm 2: movl <ctarget=20(%esp),>out=%edi
5010movl 20(%esp),%edi
5011
5012# qhasm: i = bytes
5013# asm 1: mov <bytes=int32#1,>i=int32#2
5014# asm 2: mov <bytes=%eax,>i=%ecx
5015mov %eax,%ecx
5016
5017# qhasm: while (i) { *out++ = *m++; --i }
5018rep movsb
5019# comment:fp stack unchanged by fallthrough
5020
5021# qhasm: bytesatleast64:
5022._bytesatleast64:
5023# comment:fp stack unchanged by fallthrough
5024
5025# qhasm: done:
5026._done:
5027
5028# qhasm: eax = eax_stack
5029# asm 1: movl <eax_stack=stack32#1,>eax=int32#1
5030# asm 2: movl <eax_stack=0(%esp),>eax=%eax
5031movl 0(%esp),%eax
5032
5033# qhasm: ebx = ebx_stack
5034# asm 1: movl <ebx_stack=stack32#2,>ebx=int32#4
5035# asm 2: movl <ebx_stack=4(%esp),>ebx=%ebx
5036movl 4(%esp),%ebx
5037
5038# qhasm: esi = esi_stack
5039# asm 1: movl <esi_stack=stack32#3,>esi=int32#5
5040# asm 2: movl <esi_stack=8(%esp),>esi=%esi
5041movl 8(%esp),%esi
5042
5043# qhasm: edi = edi_stack
5044# asm 1: movl <edi_stack=stack32#4,>edi=int32#6
5045# asm 2: movl <edi_stack=12(%esp),>edi=%edi
5046movl 12(%esp),%edi
5047
5048# qhasm: ebp = ebp_stack
5049# asm 1: movl <ebp_stack=stack32#5,>ebp=int32#7
5050# asm 2: movl <ebp_stack=16(%esp),>ebp=%ebp
5051movl 16(%esp),%ebp
5052
5053# qhasm: leave
5054add %eax,%esp
5055xor %eax,%eax
5056ret
5057
5058# qhasm: bytesatleast65:
5059._bytesatleast65:
5060
5061# qhasm: bytes -= 64
5062# asm 1: sub $64,<bytes=int32#1
5063# asm 2: sub $64,<bytes=%eax
5064sub $64,%eax
5065
5066# qhasm: out += 64
5067# asm 1: add $64,<out=int32#6
5068# asm 2: add $64,<out=%edi
5069add $64,%edi
5070
5071# qhasm: m += 64
5072# asm 1: add $64,<m=int32#5
5073# asm 2: add $64,<m=%esi
5074add $64,%esi
5075# comment:fp stack unchanged by jump
5076
5077# qhasm: goto bytesbetween1and255
5078jmp ._bytesbetween1and255
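
The hand-scheduled SSE code above vectorizes Salsa20's double round: each 32-bit rotation is done as a pslld/psrld pair whose shift counts sum to 32 (7/25, 9/23, 13/19, 18/14), with both halves folded in by pxor, while the pshufd $0x39/$0x93/$0x4e shuffles realign the diagonal registers between quarter-rounds. For reference, a minimal scalar C sketch of one double round using the same rotation constants — an illustration assuming only <stdint.h>, not code from this commit:

#include <stdint.h>

#define ROTL32(v, c) (((v) << (c)) | ((v) >> (32 - (c))))

/* One Salsa20 double round (column round then row round) on the
   4x4 little-endian state; the SSE code above performs the same
   add/rotate/xor pattern on four blocks per 32-bit lane. */
static void salsa20_doubleround(uint32_t x[16])
{
  /* column round */
  x[ 4] ^= ROTL32(x[ 0] + x[12], 7);
  x[ 8] ^= ROTL32(x[ 4] + x[ 0], 9);
  x[12] ^= ROTL32(x[ 8] + x[ 4], 13);
  x[ 0] ^= ROTL32(x[12] + x[ 8], 18);
  x[ 9] ^= ROTL32(x[ 5] + x[ 1], 7);
  x[13] ^= ROTL32(x[ 9] + x[ 5], 9);
  x[ 1] ^= ROTL32(x[13] + x[ 9], 13);
  x[ 5] ^= ROTL32(x[ 1] + x[13], 18);
  x[14] ^= ROTL32(x[10] + x[ 6], 7);
  x[ 2] ^= ROTL32(x[14] + x[10], 9);
  x[ 6] ^= ROTL32(x[ 2] + x[14], 13);
  x[10] ^= ROTL32(x[ 6] + x[ 2], 18);
  x[ 3] ^= ROTL32(x[15] + x[11], 7);
  x[ 7] ^= ROTL32(x[ 3] + x[15], 9);
  x[11] ^= ROTL32(x[ 7] + x[ 3], 13);
  x[15] ^= ROTL32(x[11] + x[ 7], 18);
  /* row round */
  x[ 1] ^= ROTL32(x[ 0] + x[ 3], 7);
  x[ 2] ^= ROTL32(x[ 1] + x[ 0], 9);
  x[ 3] ^= ROTL32(x[ 2] + x[ 1], 13);
  x[ 0] ^= ROTL32(x[ 3] + x[ 2], 18);
  x[ 6] ^= ROTL32(x[ 5] + x[ 4], 7);
  x[ 7] ^= ROTL32(x[ 6] + x[ 5], 9);
  x[ 4] ^= ROTL32(x[ 7] + x[ 6], 13);
  x[ 5] ^= ROTL32(x[ 4] + x[ 7], 18);
  x[11] ^= ROTL32(x[10] + x[ 9], 7);
  x[ 8] ^= ROTL32(x[11] + x[10], 9);
  x[ 9] ^= ROTL32(x[ 8] + x[11], 13);
  x[10] ^= ROTL32(x[ 9] + x[ 8], 18);
  x[12] ^= ROTL32(x[15] + x[14], 7);
  x[13] ^= ROTL32(x[12] + x[15], 9);
  x[14] ^= ROTL32(x[13] + x[12], 13);
  x[15] ^= ROTL32(x[14] + x[13], 18);
}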
diff --git a/nacl/crypto_stream/try.c b/nacl/crypto_stream/try.c
new file mode 100644
index 00000000..9a36d760
--- /dev/null
+++ b/nacl/crypto_stream/try.c
@@ -0,0 +1,124 @@
1/*
2 * crypto_stream/try.c version 20090118
3 * D. J. Bernstein
4 * Public domain.
5 */
6
7#include <stdlib.h>
8#include "crypto_stream.h"
9
10extern unsigned char *alignedcalloc(unsigned long long);
11
12const char *primitiveimplementation = crypto_stream_IMPLEMENTATION;
13
14#define MAXTEST_BYTES 10000
15#define CHECKSUM_BYTES 4096
16#define TUNE_BYTES 1536
17
18static unsigned char *k;
19static unsigned char *n;
20static unsigned char *m;
21static unsigned char *c;
22static unsigned char *s;
23static unsigned char *k2;
24static unsigned char *n2;
25static unsigned char *m2;
26static unsigned char *c2;
27static unsigned char *s2;
28
29void preallocate(void)
30{
31}
32
33void allocate(void)
34{
35 k = alignedcalloc(crypto_stream_KEYBYTES);
36 n = alignedcalloc(crypto_stream_NONCEBYTES);
37 m = alignedcalloc(MAXTEST_BYTES);
38 c = alignedcalloc(MAXTEST_BYTES);
39 s = alignedcalloc(MAXTEST_BYTES);
40 k2 = alignedcalloc(crypto_stream_KEYBYTES);
41 n2 = alignedcalloc(crypto_stream_NONCEBYTES);
42 m2 = alignedcalloc(MAXTEST_BYTES);
43 c2 = alignedcalloc(MAXTEST_BYTES);
44 s2 = alignedcalloc(MAXTEST_BYTES);
45}
46
47void predoit(void)
48{
49}
50
51void doit(void)
52{
53 crypto_stream_xor(c,m,TUNE_BYTES,n,k);
54}
55
56char checksum[crypto_stream_KEYBYTES * 2 + 1];
57
58const char *checksum_compute(void)
59{
60 long long i;
61 long long j;
62
63 for (i = 0;i < CHECKSUM_BYTES;++i) {
64 long long mlen = i;
65 long long clen = i;
66 long long slen = i;
67 long long klen = crypto_stream_KEYBYTES;
68 long long nlen = crypto_stream_NONCEBYTES;
69 for (j = -16;j < 0;++j) m[j] = random();
70 for (j = -16;j < 0;++j) c[j] = random();
71 for (j = -16;j < 0;++j) s[j] = random();
72 for (j = -16;j < 0;++j) n[j] = random();
73 for (j = -16;j < 0;++j) k[j] = random();
74 for (j = mlen;j < mlen + 16;++j) m[j] = random();
75 for (j = clen;j < clen + 16;++j) c[j] = random();
76 for (j = slen;j < slen + 16;++j) s[j] = random();
77 for (j = nlen;j < nlen + 16;++j) n[j] = random();
78 for (j = klen;j < klen + 16;++j) k[j] = random();
79 for (j = -16;j < mlen + 16;++j) m2[j] = m[j];
80 for (j = -16;j < clen + 16;++j) c2[j] = c[j];
81 for (j = -16;j < slen + 16;++j) s2[j] = s[j];
82 for (j = -16;j < nlen + 16;++j) n2[j] = n[j];
83 for (j = -16;j < klen + 16;++j) k2[j] = k[j];
84
85 crypto_stream_xor(c,m,mlen,n,k);
86
87 for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_stream_xor overwrites m";
88 for (j = -16;j < slen + 16;++j) if (s[j] != s2[j]) return "crypto_stream_xor overwrites s";
89 for (j = -16;j < nlen + 16;++j) if (n[j] != n2[j]) return "crypto_stream_xor overwrites n";
90 for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_stream_xor overwrites k";
91 for (j = -16;j < 0;++j) if (c[j] != c2[j]) return "crypto_stream_xor writes before output";
92 for (j = clen;j < clen + 16;++j) if (c[j] != c2[j]) return "crypto_stream_xor writes after output";
93
94 for (j = -16;j < clen + 16;++j) c2[j] = c[j];
95
96 crypto_stream(s,slen,n,k);
97
98 for (j = -16;j < mlen + 16;++j) if (m[j] != m2[j]) return "crypto_stream overwrites m";
99 for (j = -16;j < clen + 16;++j) if (c[j] != c2[j]) return "crypto_stream overwrites c";
100 for (j = -16;j < nlen + 16;++j) if (n[j] != n2[j]) return "crypto_stream overwrites n";
101 for (j = -16;j < klen + 16;++j) if (k[j] != k2[j]) return "crypto_stream overwrites k";
102 for (j = -16;j < 0;++j) if (s[j] != s2[j]) return "crypto_stream writes before output";
103 for (j = slen;j < slen + 16;++j) if (s[j] != s2[j]) return "crypto_stream writes after output";
104
105 for (j = 0;j < mlen;++j)
106 if ((s[j] ^ m[j]) != c[j]) return "crypto_stream_xor does not match crypto_stream";
107
108 for (j = 0;j < clen;++j) k[j % klen] ^= c[j];
109 crypto_stream_xor(m,c,clen,n,k);
110 crypto_stream(s,slen,n,k);
111 for (j = 0;j < mlen;++j)
112 if ((s[j] ^ m[j]) != c[j]) return "crypto_stream_xor does not match crypto_stream";
113 for (j = 0;j < mlen;++j) n[j % nlen] ^= m[j];
114 m[mlen] = 0;
115 }
116
117 for (i = 0;i < crypto_stream_KEYBYTES;++i) {
118 checksum[2 * i] = "0123456789abcdef"[15 & (k[i] >> 4)];
119 checksum[2 * i + 1] = "0123456789abcdef"[15 & k[i]];
120 }
121 checksum[2 * i] = 0;
122
123 return 0;
124}
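
try.c above exercises each crypto_stream implementation: it checks that crypto_stream and crypto_stream_xor neither read nor write outside their buffers, and that the XOR variant equals the raw keystream XORed with the message. A minimal standalone sketch of that last consistency check, assuming a NaCl build that provides crypto_stream.h (illustrative, not part of this commit):

#include <stdio.h>
#include <string.h>
#include "crypto_stream.h"

int main(void)
{
  unsigned char k[crypto_stream_KEYBYTES] = {0};
  unsigned char n[crypto_stream_NONCEBYTES] = {0};
  unsigned char m[64], c[64], s[64];
  memset(m, 0xab, sizeof m);

  crypto_stream_xor(c, m, sizeof m, n, k);  /* ciphertext     */
  crypto_stream(s, sizeof s, n, k);         /* raw keystream  */

  for (unsigned i = 0; i < sizeof m; ++i)
    if ((unsigned char)(s[i] ^ m[i]) != c[i]) {
      puts("crypto_stream_xor does not match crypto_stream");
      return 1;
    }
  puts("crypto_stream_xor matches crypto_stream");
  return 0;
}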
diff --git a/nacl/crypto_stream/wrapper-stream.cpp b/nacl/crypto_stream/wrapper-stream.cpp
new file mode 100644
index 00000000..dd10c2f6
--- /dev/null
+++ b/nacl/crypto_stream/wrapper-stream.cpp
@@ -0,0 +1,12 @@
1#include <string>
2using std::string;
3#include "crypto_stream.h"
4
5string crypto_stream(size_t clen,const string &n,const string &k)
6{
7 if (n.size() != crypto_stream_NONCEBYTES) throw "incorrect nonce length";
8 if (k.size() != crypto_stream_KEYBYTES) throw "incorrect key length";
9 unsigned char c[clen];
10 crypto_stream(c,clen,(const unsigned char *) n.c_str(),(const unsigned char *) k.c_str());
11 return string((char *) c,clen);
12}
diff --git a/nacl/crypto_stream/wrapper-xor.cpp b/nacl/crypto_stream/wrapper-xor.cpp
new file mode 100644
index 00000000..8d770d1e
--- /dev/null
+++ b/nacl/crypto_stream/wrapper-xor.cpp
@@ -0,0 +1,17 @@
1#include <string>
2using std::string;
3#include "crypto_stream.h"
4
5string crypto_stream_xor(const string &m,const string &n,const string &k)
6{
7 if (n.size() != crypto_stream_NONCEBYTES) throw "incorrect nonce length";
8 if (k.size() != crypto_stream_KEYBYTES) throw "incorrect key length";
9 size_t mlen = m.size();
10 unsigned char c[mlen];
11 crypto_stream_xor(c,
12 (const unsigned char *) m.c_str(),mlen,
13 (const unsigned char *) n.c_str(),
14 (const unsigned char *) k.c_str()
15 );
16 return string((char *) c,mlen);
17}
diff --git a/nacl/crypto_stream/xsalsa20/checksum b/nacl/crypto_stream/xsalsa20/checksum
new file mode 100644
index 00000000..cae64c0d
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/checksum
@@ -0,0 +1 @@
201bc58a96adcb6ed339ca33c188af8ca04a4ce68be1e0953309ee09a0cf8e7a
diff --git a/nacl/crypto_stream/xsalsa20/ref/api.h b/nacl/crypto_stream/xsalsa20/ref/api.h
new file mode 100644
index 00000000..6910a7dc
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/ref/api.h
@@ -0,0 +1,2 @@
1#define CRYPTO_KEYBYTES 32
2#define CRYPTO_NONCEBYTES 24
diff --git a/nacl/crypto_stream/xsalsa20/ref/implementors b/nacl/crypto_stream/xsalsa20/ref/implementors
new file mode 100644
index 00000000..f6fb3c73
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/ref/implementors
@@ -0,0 +1 @@
Daniel J. Bernstein
diff --git a/nacl/crypto_stream/xsalsa20/ref/stream.c b/nacl/crypto_stream/xsalsa20/ref/stream.c
new file mode 100644
index 00000000..2d710709
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/ref/stream.c
@@ -0,0 +1,22 @@
1/*
2version 20080914
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_hsalsa20.h"
8#include "crypto_stream_salsa20.h"
9#include "crypto_stream.h"
10
11static const unsigned char sigma[16] = "expand 32-byte k";
12
13int crypto_stream(
14 unsigned char *c,unsigned long long clen,
15 const unsigned char *n,
16 const unsigned char *k
17)
18{
19 unsigned char subkey[32];
20 crypto_core_hsalsa20(subkey,n,k,sigma);
21 return crypto_stream_salsa20(c,clen,n + 16,subkey);
22}
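
stream.c above builds XSalsa20 from two existing primitives: HSalsa20 maps the 32-byte key and the first 16 bytes of the 24-byte nonce to a fresh subkey, and Salsa20 is then run with that subkey and the remaining 8 nonce bytes. A small sketch comparing the two-step derivation with the packaged one-shot call — assuming a NaCl build that provides the crypto_core_hsalsa20, crypto_stream_salsa20, and crypto_stream_xsalsa20 headers:

#include <stdio.h>
#include <string.h>
#include "crypto_core_hsalsa20.h"
#include "crypto_stream_salsa20.h"
#include "crypto_stream_xsalsa20.h"

static const unsigned char sigma[16] = "expand 32-byte k";

int main(void)
{
  unsigned char k[32] = {1}, n[24] = {2}, subkey[32];
  unsigned char a[64], b[64];

  crypto_stream_xsalsa20(a, sizeof a, n, k);           /* one-shot call   */

  crypto_core_hsalsa20(subkey, n, k, sigma);           /* derive subkey   */
  crypto_stream_salsa20(b, sizeof b, n + 16, subkey);  /* last 8 n bytes  */

  puts(memcmp(a, b, sizeof a) == 0 ? "match" : "mismatch");
  return 0;
}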
diff --git a/nacl/crypto_stream/xsalsa20/ref/xor.c b/nacl/crypto_stream/xsalsa20/ref/xor.c
new file mode 100644
index 00000000..13f3134a
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/ref/xor.c
@@ -0,0 +1,23 @@
1/*
2version 20080913
3D. J. Bernstein
4Public domain.
5*/
6
7#include "crypto_core_hsalsa20.h"
8#include "crypto_stream_salsa20.h"
9#include "crypto_stream.h"
10
11static const unsigned char sigma[16] = "expand 32-byte k";
12
13int crypto_stream_xor(
14 unsigned char *c,
15 const unsigned char *m,unsigned long long mlen,
16 const unsigned char *n,
17 const unsigned char *k
18)
19{
20 unsigned char subkey[32];
21 crypto_core_hsalsa20(subkey,n,k,sigma);
22 return crypto_stream_salsa20_xor(c,m,mlen,n + 16,subkey);
23}
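
xor.c above derives the subkey the same way and defers to crypto_stream_salsa20_xor, so encryption and decryption are the same operation: XORing the keystream in twice with the same key and nonce restores the plaintext. A short round-trip sketch, assuming a NaCl build that provides crypto_stream.h (illustrative, not part of this commit):

#include <stdio.h>
#include <string.h>
#include "crypto_stream.h"

int main(void)
{
  unsigned char k[crypto_stream_KEYBYTES] = {3};
  unsigned char n[crypto_stream_NONCEBYTES] = {7};
  unsigned char m[32] = "attack at dawn", c[32], p[32];

  crypto_stream_xor(c, m, sizeof m, n, k);  /* encrypt */
  crypto_stream_xor(p, c, sizeof c, n, k);  /* decrypt */

  puts(memcmp(m, p, sizeof m) == 0 ? "round trip ok" : "round trip failed");
  return 0;
}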
diff --git a/nacl/crypto_stream/xsalsa20/selected b/nacl/crypto_stream/xsalsa20/selected
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/selected
diff --git a/nacl/crypto_stream/xsalsa20/used b/nacl/crypto_stream/xsalsa20/used
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/nacl/crypto_stream/xsalsa20/used