path: root/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s
Diffstat (limited to 'nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s')
-rw-r--r--  nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s  4823
1 files changed, 4823 insertions, 0 deletions
diff --git a/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s b/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s
new file mode 100644
index 00000000..0e26dc9f
--- /dev/null
+++ b/nacl/crypto_stream/salsa2012/amd64_xmm6/stream.s
@@ -0,0 +1,4823 @@
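# note: qhasm-generated SSE2 implementation of crypto_stream_salsa2012
# (Salsa20 reduced to 12 rounds). For inputs of at least 256 bytes the main
# loop below computes four 64-byte blocks in parallel: each XMM register
# holds one state word, with one block per 32-bit lane.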

# qhasm: int64 r11_caller

# qhasm: int64 r12_caller

# qhasm: int64 r13_caller

# qhasm: int64 r14_caller

# qhasm: int64 r15_caller

# qhasm: int64 rbx_caller

# qhasm: int64 rbp_caller

# qhasm: caller r11_caller

# qhasm: caller r12_caller

# qhasm: caller r13_caller

# qhasm: caller r14_caller

# qhasm: caller r15_caller

# qhasm: caller rbx_caller

# qhasm: caller rbp_caller

# qhasm: stack64 r11_stack

# qhasm: stack64 r12_stack

# qhasm: stack64 r13_stack

# qhasm: stack64 r14_stack

# qhasm: stack64 r15_stack

# qhasm: stack64 rbx_stack

# qhasm: stack64 rbp_stack

# qhasm: int64 a

# qhasm: int64 arg1

# qhasm: int64 arg2

# qhasm: int64 arg3

# qhasm: int64 arg4

# qhasm: int64 arg5

# qhasm: input arg1

# qhasm: input arg2

# qhasm: input arg3

# qhasm: input arg4

# qhasm: input arg5

# qhasm: int64 k

# qhasm: int64 kbits

# qhasm: int64 iv

# qhasm: int64 i

# qhasm: stack128 x0

# qhasm: stack128 x1

# qhasm: stack128 x2

# qhasm: stack128 x3

# qhasm: int64 m

# qhasm: int64 out

# qhasm: int64 bytes

# qhasm: stack32 eax_stack

# qhasm: stack32 ebx_stack

# qhasm: stack32 esi_stack

# qhasm: stack32 edi_stack

# qhasm: stack32 ebp_stack

# qhasm: int6464 diag0

# qhasm: int6464 diag1

# qhasm: int6464 diag2

# qhasm: int6464 diag3

# qhasm: int6464 a0

# qhasm: int6464 a1

# qhasm: int6464 a2

# qhasm: int6464 a3

# qhasm: int6464 a4

# qhasm: int6464 a5

# qhasm: int6464 a6

# qhasm: int6464 a7

# qhasm: int6464 b0

# qhasm: int6464 b1

# qhasm: int6464 b2

# qhasm: int6464 b3

# qhasm: int6464 b4

# qhasm: int6464 b5

# qhasm: int6464 b6

# qhasm: int6464 b7

# qhasm: int6464 z0

# qhasm: int6464 z1

# qhasm: int6464 z2

# qhasm: int6464 z3

# qhasm: int6464 z4

# qhasm: int6464 z5

# qhasm: int6464 z6

# qhasm: int6464 z7

# qhasm: int6464 z8

# qhasm: int6464 z9

# qhasm: int6464 z10

# qhasm: int6464 z11

# qhasm: int6464 z12

# qhasm: int6464 z13

# qhasm: int6464 z14

# qhasm: int6464 z15

# qhasm: stack128 z0_stack

# qhasm: stack128 z1_stack

# qhasm: stack128 z2_stack

# qhasm: stack128 z3_stack

# qhasm: stack128 z4_stack

# qhasm: stack128 z5_stack

# qhasm: stack128 z6_stack

# qhasm: stack128 z7_stack

# qhasm: stack128 z8_stack

# qhasm: stack128 z9_stack

# qhasm: stack128 z10_stack

# qhasm: stack128 z11_stack

# qhasm: stack128 z12_stack

# qhasm: stack128 z13_stack

# qhasm: stack128 z14_stack

# qhasm: stack128 z15_stack

# qhasm: int6464 y0

# qhasm: int6464 y1

# qhasm: int6464 y2

# qhasm: int6464 y3

# qhasm: int6464 y4

# qhasm: int6464 y5

# qhasm: int6464 y6

# qhasm: int6464 y7

# qhasm: int6464 y8

# qhasm: int6464 y9

# qhasm: int6464 y10

# qhasm: int6464 y11

# qhasm: int6464 y12

# qhasm: int6464 y13

# qhasm: int6464 y14

# qhasm: int6464 y15

# qhasm: int6464 r0

# qhasm: int6464 r1

# qhasm: int6464 r2

# qhasm: int6464 r3

# qhasm: int6464 r4

# qhasm: int6464 r5

# qhasm: int6464 r6

# qhasm: int6464 r7

# qhasm: int6464 r8

# qhasm: int6464 r9

# qhasm: int6464 r10

# qhasm: int6464 r11

# qhasm: int6464 r12

# qhasm: int6464 r13

# qhasm: int6464 r14

# qhasm: int6464 r15

# qhasm: stack128 orig0

# qhasm: stack128 orig1

# qhasm: stack128 orig2

# qhasm: stack128 orig3

# qhasm: stack128 orig4

# qhasm: stack128 orig5

# qhasm: stack128 orig6

# qhasm: stack128 orig7

# qhasm: stack128 orig8

# qhasm: stack128 orig9

# qhasm: stack128 orig10

# qhasm: stack128 orig11

# qhasm: stack128 orig12

# qhasm: stack128 orig13

# qhasm: stack128 orig14

# qhasm: stack128 orig15

# qhasm: int64 in0

# qhasm: int64 in1

# qhasm: int64 in2

# qhasm: int64 in3

# qhasm: int64 in4

# qhasm: int64 in5

# qhasm: int64 in6

# qhasm: int64 in7

# qhasm: int64 in8

# qhasm: int64 in9

# qhasm: int64 in10

# qhasm: int64 in11

# qhasm: int64 in12

# qhasm: int64 in13

# qhasm: int64 in14

# qhasm: int64 in15

# qhasm: stack512 tmp

# qhasm: int64 ctarget

# qhasm: stack64 bytes_backup

# qhasm: enter crypto_stream_salsa2012_amd64_xmm6
.text
.p2align 5
.globl _crypto_stream_salsa2012_amd64_xmm6
.globl crypto_stream_salsa2012_amd64_xmm6
_crypto_stream_salsa2012_amd64_xmm6:
crypto_stream_salsa2012_amd64_xmm6:
mov %rsp,%r11
and $31,%r11
add $480,%r11
sub %r11,%rsp

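# note: the prologue above reserves 480 bytes of scratch space and rounds
# %rsp down to a 32-byte boundary; the subtracted amount stays in %r11 and
# is saved below (r11_stack) so it can be undone on exit.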
# qhasm: r11_stack = r11_caller
# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
movq %r11,352(%rsp)

# qhasm: r12_stack = r12_caller
# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
movq %r12,360(%rsp)

# qhasm: r13_stack = r13_caller
# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
movq %r13,368(%rsp)

# qhasm: r14_stack = r14_caller
# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
movq %r14,376(%rsp)

# qhasm: r15_stack = r15_caller
# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
movq %r15,384(%rsp)

# qhasm: rbx_stack = rbx_caller
# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
movq %rbx,392(%rsp)

# qhasm: rbp_stack = rbp_caller
# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
movq %rbp,400(%rsp)

# qhasm: bytes = arg2
# asm 1: mov <arg2=int64#2,>bytes=int64#6
# asm 2: mov <arg2=%rsi,>bytes=%r9
mov %rsi,%r9

# qhasm: out = arg1
# asm 1: mov <arg1=int64#1,>out=int64#1
# asm 2: mov <arg1=%rdi,>out=%rdi
mov %rdi,%rdi

# qhasm: m = out
# asm 1: mov <out=int64#1,>m=int64#2
# asm 2: mov <out=%rdi,>m=%rsi
mov %rdi,%rsi

# qhasm: iv = arg3
# asm 1: mov <arg3=int64#3,>iv=int64#3
# asm 2: mov <arg3=%rdx,>iv=%rdx
mov %rdx,%rdx

# qhasm: k = arg4
# asm 1: mov <arg4=int64#4,>k=int64#8
# asm 2: mov <arg4=%rcx,>k=%r10
mov %rcx,%r10

# qhasm: unsigned>? bytes - 0
# asm 1: cmp $0,<bytes=int64#6
# asm 2: cmp $0,<bytes=%r9
cmp $0,%r9
# comment:fp stack unchanged by jump

# qhasm: goto done if !unsigned>
jbe ._done

# qhasm: a = 0
# asm 1: mov $0,>a=int64#7
# asm 2: mov $0,>a=%rax
mov $0,%rax

# qhasm: i = bytes
# asm 1: mov <bytes=int64#6,>i=int64#4
# asm 2: mov <bytes=%r9,>i=%rcx
mov %r9,%rcx

# qhasm: while (i) { *out++ = a; --i }
rep stosb

# qhasm: out -= bytes
# asm 1: sub <bytes=int64#6,<out=int64#1
# asm 2: sub <bytes=%r9,<out=%rdi
sub %r9,%rdi
# comment:fp stack unchanged by jump

# qhasm: goto start
jmp ._start

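# note: crypto_stream produces raw keystream by zeroing the output buffer
# (rep stosb with a = 0), pointing m at that same buffer, and falling into
# the _xor path at ._start: keystream XOR zeros = keystream.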
# qhasm: enter crypto_stream_salsa2012_amd64_xmm6_xor
.text
.p2align 5
.globl _crypto_stream_salsa2012_amd64_xmm6_xor
.globl crypto_stream_salsa2012_amd64_xmm6_xor
_crypto_stream_salsa2012_amd64_xmm6_xor:
crypto_stream_salsa2012_amd64_xmm6_xor:
mov %rsp,%r11
and $31,%r11
add $480,%r11
sub %r11,%rsp

# qhasm: r11_stack = r11_caller
# asm 1: movq <r11_caller=int64#9,>r11_stack=stack64#1
# asm 2: movq <r11_caller=%r11,>r11_stack=352(%rsp)
movq %r11,352(%rsp)

# qhasm: r12_stack = r12_caller
# asm 1: movq <r12_caller=int64#10,>r12_stack=stack64#2
# asm 2: movq <r12_caller=%r12,>r12_stack=360(%rsp)
movq %r12,360(%rsp)

# qhasm: r13_stack = r13_caller
# asm 1: movq <r13_caller=int64#11,>r13_stack=stack64#3
# asm 2: movq <r13_caller=%r13,>r13_stack=368(%rsp)
movq %r13,368(%rsp)

# qhasm: r14_stack = r14_caller
# asm 1: movq <r14_caller=int64#12,>r14_stack=stack64#4
# asm 2: movq <r14_caller=%r14,>r14_stack=376(%rsp)
movq %r14,376(%rsp)

# qhasm: r15_stack = r15_caller
# asm 1: movq <r15_caller=int64#13,>r15_stack=stack64#5
# asm 2: movq <r15_caller=%r15,>r15_stack=384(%rsp)
movq %r15,384(%rsp)

# qhasm: rbx_stack = rbx_caller
# asm 1: movq <rbx_caller=int64#14,>rbx_stack=stack64#6
# asm 2: movq <rbx_caller=%rbx,>rbx_stack=392(%rsp)
movq %rbx,392(%rsp)

# qhasm: rbp_stack = rbp_caller
# asm 1: movq <rbp_caller=int64#15,>rbp_stack=stack64#7
# asm 2: movq <rbp_caller=%rbp,>rbp_stack=400(%rsp)
movq %rbp,400(%rsp)

# qhasm: out = arg1
# asm 1: mov <arg1=int64#1,>out=int64#1
# asm 2: mov <arg1=%rdi,>out=%rdi
mov %rdi,%rdi

# qhasm: m = arg2
# asm 1: mov <arg2=int64#2,>m=int64#2
# asm 2: mov <arg2=%rsi,>m=%rsi
mov %rsi,%rsi

# qhasm: bytes = arg3
# asm 1: mov <arg3=int64#3,>bytes=int64#6
# asm 2: mov <arg3=%rdx,>bytes=%r9
mov %rdx,%r9

# qhasm: iv = arg4
# asm 1: mov <arg4=int64#4,>iv=int64#3
# asm 2: mov <arg4=%rcx,>iv=%rdx
mov %rcx,%rdx

# qhasm: k = arg5
# asm 1: mov <arg5=int64#5,>k=int64#8
# asm 2: mov <arg5=%r8,>k=%r10
mov %r8,%r10

# qhasm: unsigned>? bytes - 0
# asm 1: cmp $0,<bytes=int64#6
# asm 2: cmp $0,<bytes=%r9
cmp $0,%r9
# comment:fp stack unchanged by jump

# qhasm: goto done if !unsigned>
jbe ._done
# comment:fp stack unchanged by fallthrough

# qhasm: start:
._start:

# qhasm: in12 = *(uint32 *) (k + 20)
# asm 1: movl 20(<k=int64#8),>in12=int64#4d
# asm 2: movl 20(<k=%r10),>in12=%ecx
movl 20(%r10),%ecx

# qhasm: in1 = *(uint32 *) (k + 0)
# asm 1: movl 0(<k=int64#8),>in1=int64#5d
# asm 2: movl 0(<k=%r10),>in1=%r8d
movl 0(%r10),%r8d

# qhasm: in6 = *(uint32 *) (iv + 0)
# asm 1: movl 0(<iv=int64#3),>in6=int64#7d
# asm 2: movl 0(<iv=%rdx),>in6=%eax
movl 0(%rdx),%eax

# qhasm: in11 = *(uint32 *) (k + 16)
# asm 1: movl 16(<k=int64#8),>in11=int64#9d
# asm 2: movl 16(<k=%r10),>in11=%r11d
movl 16(%r10),%r11d

# qhasm: ((uint32 *)&x1)[0] = in12
# asm 1: movl <in12=int64#4d,>x1=stack128#1
# asm 2: movl <in12=%ecx,>x1=0(%rsp)
movl %ecx,0(%rsp)

# qhasm: ((uint32 *)&x1)[1] = in1
# asm 1: movl <in1=int64#5d,4+<x1=stack128#1
# asm 2: movl <in1=%r8d,4+<x1=0(%rsp)
movl %r8d,4+0(%rsp)

# qhasm: ((uint32 *)&x1)[2] = in6
# asm 1: movl <in6=int64#7d,8+<x1=stack128#1
# asm 2: movl <in6=%eax,8+<x1=0(%rsp)
movl %eax,8+0(%rsp)

# qhasm: ((uint32 *)&x1)[3] = in11
# asm 1: movl <in11=int64#9d,12+<x1=stack128#1
# asm 2: movl <in11=%r11d,12+<x1=0(%rsp)
movl %r11d,12+0(%rsp)

# qhasm: in8 = 0
# asm 1: mov $0,>in8=int64#4
# asm 2: mov $0,>in8=%rcx
mov $0,%rcx

# qhasm: in13 = *(uint32 *) (k + 24)
# asm 1: movl 24(<k=int64#8),>in13=int64#5d
# asm 2: movl 24(<k=%r10),>in13=%r8d
movl 24(%r10),%r8d

# qhasm: in2 = *(uint32 *) (k + 4)
# asm 1: movl 4(<k=int64#8),>in2=int64#7d
# asm 2: movl 4(<k=%r10),>in2=%eax
movl 4(%r10),%eax

# qhasm: in7 = *(uint32 *) (iv + 4)
# asm 1: movl 4(<iv=int64#3),>in7=int64#3d
# asm 2: movl 4(<iv=%rdx),>in7=%edx
movl 4(%rdx),%edx

# qhasm: ((uint32 *)&x2)[0] = in8
# asm 1: movl <in8=int64#4d,>x2=stack128#2
# asm 2: movl <in8=%ecx,>x2=16(%rsp)
movl %ecx,16(%rsp)

# qhasm: ((uint32 *)&x2)[1] = in13
# asm 1: movl <in13=int64#5d,4+<x2=stack128#2
# asm 2: movl <in13=%r8d,4+<x2=16(%rsp)
movl %r8d,4+16(%rsp)

# qhasm: ((uint32 *)&x2)[2] = in2
# asm 1: movl <in2=int64#7d,8+<x2=stack128#2
# asm 2: movl <in2=%eax,8+<x2=16(%rsp)
movl %eax,8+16(%rsp)

# qhasm: ((uint32 *)&x2)[3] = in7
# asm 1: movl <in7=int64#3d,12+<x2=stack128#2
# asm 2: movl <in7=%edx,12+<x2=16(%rsp)
movl %edx,12+16(%rsp)

# qhasm: in4 = *(uint32 *) (k + 12)
# asm 1: movl 12(<k=int64#8),>in4=int64#3d
# asm 2: movl 12(<k=%r10),>in4=%edx
movl 12(%r10),%edx

# qhasm: in9 = 0
# asm 1: mov $0,>in9=int64#4
# asm 2: mov $0,>in9=%rcx
mov $0,%rcx

# qhasm: in14 = *(uint32 *) (k + 28)
# asm 1: movl 28(<k=int64#8),>in14=int64#5d
# asm 2: movl 28(<k=%r10),>in14=%r8d
movl 28(%r10),%r8d

# qhasm: in3 = *(uint32 *) (k + 8)
# asm 1: movl 8(<k=int64#8),>in3=int64#7d
# asm 2: movl 8(<k=%r10),>in3=%eax
movl 8(%r10),%eax

# qhasm: ((uint32 *)&x3)[0] = in4
# asm 1: movl <in4=int64#3d,>x3=stack128#3
# asm 2: movl <in4=%edx,>x3=32(%rsp)
movl %edx,32(%rsp)

# qhasm: ((uint32 *)&x3)[1] = in9
# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
movl %ecx,4+32(%rsp)

# qhasm: ((uint32 *)&x3)[2] = in14
# asm 1: movl <in14=int64#5d,8+<x3=stack128#3
# asm 2: movl <in14=%r8d,8+<x3=32(%rsp)
movl %r8d,8+32(%rsp)

# qhasm: ((uint32 *)&x3)[3] = in3
# asm 1: movl <in3=int64#7d,12+<x3=stack128#3
# asm 2: movl <in3=%eax,12+<x3=32(%rsp)
movl %eax,12+32(%rsp)

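# note: 1634760805, 857760878, 2036477234 and 1797285236 are the Salsa20
# "sigma" constants, the four little-endian words of "expand 32-byte k";
# x0..x3 now hold the 4x4 state rearranged along its diagonals
# (x0 = words 0,5,10,15; x1 = 12,1,6,11; x2 = 8,13,2,7; x3 = 4,9,14,3).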
# qhasm: in0 = 1634760805
# asm 1: mov $1634760805,>in0=int64#3
# asm 2: mov $1634760805,>in0=%rdx
mov $1634760805,%rdx

# qhasm: in5 = 857760878
# asm 1: mov $857760878,>in5=int64#4
# asm 2: mov $857760878,>in5=%rcx
mov $857760878,%rcx

# qhasm: in10 = 2036477234
# asm 1: mov $2036477234,>in10=int64#5
# asm 2: mov $2036477234,>in10=%r8
mov $2036477234,%r8

# qhasm: in15 = 1797285236
# asm 1: mov $1797285236,>in15=int64#7
# asm 2: mov $1797285236,>in15=%rax
mov $1797285236,%rax

# qhasm: ((uint32 *)&x0)[0] = in0
# asm 1: movl <in0=int64#3d,>x0=stack128#4
# asm 2: movl <in0=%edx,>x0=48(%rsp)
movl %edx,48(%rsp)

# qhasm: ((uint32 *)&x0)[1] = in5
# asm 1: movl <in5=int64#4d,4+<x0=stack128#4
# asm 2: movl <in5=%ecx,4+<x0=48(%rsp)
movl %ecx,4+48(%rsp)

# qhasm: ((uint32 *)&x0)[2] = in10
# asm 1: movl <in10=int64#5d,8+<x0=stack128#4
# asm 2: movl <in10=%r8d,8+<x0=48(%rsp)
movl %r8d,8+48(%rsp)

# qhasm: ((uint32 *)&x0)[3] = in15
# asm 1: movl <in15=int64#7d,12+<x0=stack128#4
# asm 2: movl <in15=%eax,12+<x0=48(%rsp)
movl %eax,12+48(%rsp)

# qhasm: unsigned<? bytes - 256
# asm 1: cmp $256,<bytes=int64#6
# asm 2: cmp $256,<bytes=%r9
cmp $256,%r9
# comment:fp stack unchanged by jump

# qhasm: goto bytesbetween1and255 if unsigned<
jb ._bytesbetween1and255

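# note: each pshufd below broadcasts one 32-bit state word into all four
# lanes of an XMM register (imm 0x00/0x55/0xaa/0xff selects the lane); the
# orig0..orig15 slots hold these splatted words so the main loop can run
# four independent blocks side by side.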
# qhasm: z0 = x0
# asm 1: movdqa <x0=stack128#4,>z0=int6464#1
# asm 2: movdqa <x0=48(%rsp),>z0=%xmm0
movdqa 48(%rsp),%xmm0

# qhasm: z5 = z0[1,1,1,1]
# asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
# asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
pshufd $0x55,%xmm0,%xmm1

# qhasm: z10 = z0[2,2,2,2]
# asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
# asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
pshufd $0xaa,%xmm0,%xmm2

# qhasm: z15 = z0[3,3,3,3]
# asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
# asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
pshufd $0xff,%xmm0,%xmm3

# qhasm: z0 = z0[0,0,0,0]
# asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
# asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
pshufd $0x00,%xmm0,%xmm0

# qhasm: orig5 = z5
# asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
# asm 2: movdqa <z5=%xmm1,>orig5=64(%rsp)
movdqa %xmm1,64(%rsp)

# qhasm: orig10 = z10
# asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
# asm 2: movdqa <z10=%xmm2,>orig10=80(%rsp)
movdqa %xmm2,80(%rsp)

# qhasm: orig15 = z15
# asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
# asm 2: movdqa <z15=%xmm3,>orig15=96(%rsp)
movdqa %xmm3,96(%rsp)

# qhasm: orig0 = z0
# asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
# asm 2: movdqa <z0=%xmm0,>orig0=112(%rsp)
movdqa %xmm0,112(%rsp)

# qhasm: z1 = x1
# asm 1: movdqa <x1=stack128#1,>z1=int6464#1
# asm 2: movdqa <x1=0(%rsp),>z1=%xmm0
movdqa 0(%rsp),%xmm0

# qhasm: z6 = z1[2,2,2,2]
# asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
# asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
pshufd $0xaa,%xmm0,%xmm1

# qhasm: z11 = z1[3,3,3,3]
# asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
# asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
pshufd $0xff,%xmm0,%xmm2

# qhasm: z12 = z1[0,0,0,0]
# asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
# asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
pshufd $0x00,%xmm0,%xmm3

# qhasm: z1 = z1[1,1,1,1]
# asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
# asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
pshufd $0x55,%xmm0,%xmm0

# qhasm: orig6 = z6
# asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
# asm 2: movdqa <z6=%xmm1,>orig6=128(%rsp)
movdqa %xmm1,128(%rsp)

# qhasm: orig11 = z11
# asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
# asm 2: movdqa <z11=%xmm2,>orig11=144(%rsp)
movdqa %xmm2,144(%rsp)

# qhasm: orig12 = z12
# asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
# asm 2: movdqa <z12=%xmm3,>orig12=160(%rsp)
movdqa %xmm3,160(%rsp)

# qhasm: orig1 = z1
# asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
# asm 2: movdqa <z1=%xmm0,>orig1=176(%rsp)
movdqa %xmm0,176(%rsp)

# qhasm: z2 = x2
# asm 1: movdqa <x2=stack128#2,>z2=int6464#1
# asm 2: movdqa <x2=16(%rsp),>z2=%xmm0
movdqa 16(%rsp),%xmm0

# qhasm: z7 = z2[3,3,3,3]
# asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
# asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
pshufd $0xff,%xmm0,%xmm1

# qhasm: z13 = z2[1,1,1,1]
# asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
# asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
pshufd $0x55,%xmm0,%xmm2

# qhasm: z2 = z2[2,2,2,2]
# asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
# asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
pshufd $0xaa,%xmm0,%xmm0

# qhasm: orig7 = z7
# asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
# asm 2: movdqa <z7=%xmm1,>orig7=192(%rsp)
movdqa %xmm1,192(%rsp)

# qhasm: orig13 = z13
# asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
# asm 2: movdqa <z13=%xmm2,>orig13=208(%rsp)
movdqa %xmm2,208(%rsp)

# qhasm: orig2 = z2
# asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
# asm 2: movdqa <z2=%xmm0,>orig2=224(%rsp)
movdqa %xmm0,224(%rsp)

# qhasm: z3 = x3
# asm 1: movdqa <x3=stack128#3,>z3=int6464#1
# asm 2: movdqa <x3=32(%rsp),>z3=%xmm0
movdqa 32(%rsp),%xmm0

# qhasm: z4 = z3[0,0,0,0]
# asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
# asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
pshufd $0x00,%xmm0,%xmm1

# qhasm: z14 = z3[2,2,2,2]
# asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
# asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
pshufd $0xaa,%xmm0,%xmm2

# qhasm: z3 = z3[3,3,3,3]
# asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
# asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
pshufd $0xff,%xmm0,%xmm0

# qhasm: orig4 = z4
# asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
# asm 2: movdqa <z4=%xmm1,>orig4=240(%rsp)
movdqa %xmm1,240(%rsp)

# qhasm: orig14 = z14
# asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
# asm 2: movdqa <z14=%xmm2,>orig14=256(%rsp)
movdqa %xmm2,256(%rsp)

# qhasm: orig3 = z3
# asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
# asm 2: movdqa <z3=%xmm0,>orig3=272(%rsp)
movdqa %xmm0,272(%rsp)

# qhasm: bytesatleast256:
._bytesatleast256:

# qhasm: in8 = ((uint32 *)&x2)[0]
# asm 1: movl <x2=stack128#2,>in8=int64#3d
# asm 2: movl <x2=16(%rsp),>in8=%edx
movl 16(%rsp),%edx

# qhasm: in9 = ((uint32 *)&x3)[1]
# asm 1: movl 4+<x3=stack128#3,>in9=int64#4d
# asm 2: movl 4+<x3=32(%rsp),>in9=%ecx
movl 4+32(%rsp),%ecx

# qhasm: ((uint32 *) &orig8)[0] = in8
# asm 1: movl <in8=int64#3d,>orig8=stack128#19
# asm 2: movl <in8=%edx,>orig8=288(%rsp)
movl %edx,288(%rsp)

# qhasm: ((uint32 *) &orig9)[0] = in9
# asm 1: movl <in9=int64#4d,>orig9=stack128#20
# asm 2: movl <in9=%ecx,>orig9=304(%rsp)
movl %ecx,304(%rsp)

# qhasm: in8 += 1
# asm 1: add $1,<in8=int64#3
# asm 2: add $1,<in8=%rdx
add $1,%rdx

# qhasm: in9 <<= 32
# asm 1: shl $32,<in9=int64#4
# asm 2: shl $32,<in9=%rcx
shl $32,%rcx

# qhasm: in8 += in9
# asm 1: add <in9=int64#4,<in8=int64#3
# asm 2: add <in9=%rcx,<in8=%rdx
add %rcx,%rdx

# qhasm: in9 = in8
# asm 1: mov <in8=int64#3,>in9=int64#4
# asm 2: mov <in8=%rdx,>in9=%rcx
mov %rdx,%rcx

# qhasm: (uint64) in9 >>= 32
# asm 1: shr $32,<in9=int64#4
# asm 2: shr $32,<in9=%rcx
shr $32,%rcx

# qhasm: ((uint32 *) &orig8)[1] = in8
# asm 1: movl <in8=int64#3d,4+<orig8=stack128#19
# asm 2: movl <in8=%edx,4+<orig8=288(%rsp)
movl %edx,4+288(%rsp)

# qhasm: ((uint32 *) &orig9)[1] = in9
# asm 1: movl <in9=int64#4d,4+<orig9=stack128#20
# asm 2: movl <in9=%ecx,4+<orig9=304(%rsp)
movl %ecx,4+304(%rsp)

# qhasm: in8 += 1
# asm 1: add $1,<in8=int64#3
# asm 2: add $1,<in8=%rdx
add $1,%rdx

# qhasm: in9 <<= 32
# asm 1: shl $32,<in9=int64#4
# asm 2: shl $32,<in9=%rcx
shl $32,%rcx

# qhasm: in8 += in9
# asm 1: add <in9=int64#4,<in8=int64#3
# asm 2: add <in9=%rcx,<in8=%rdx
add %rcx,%rdx

# qhasm: in9 = in8
# asm 1: mov <in8=int64#3,>in9=int64#4
# asm 2: mov <in8=%rdx,>in9=%rcx
mov %rdx,%rcx

# qhasm: (uint64) in9 >>= 32
# asm 1: shr $32,<in9=int64#4
# asm 2: shr $32,<in9=%rcx
shr $32,%rcx

# qhasm: ((uint32 *) &orig8)[2] = in8
# asm 1: movl <in8=int64#3d,8+<orig8=stack128#19
# asm 2: movl <in8=%edx,8+<orig8=288(%rsp)
movl %edx,8+288(%rsp)

# qhasm: ((uint32 *) &orig9)[2] = in9
# asm 1: movl <in9=int64#4d,8+<orig9=stack128#20
# asm 2: movl <in9=%ecx,8+<orig9=304(%rsp)
movl %ecx,8+304(%rsp)

# qhasm: in8 += 1
# asm 1: add $1,<in8=int64#3
# asm 2: add $1,<in8=%rdx
add $1,%rdx

# qhasm: in9 <<= 32
# asm 1: shl $32,<in9=int64#4
# asm 2: shl $32,<in9=%rcx
shl $32,%rcx

# qhasm: in8 += in9
# asm 1: add <in9=int64#4,<in8=int64#3
# asm 2: add <in9=%rcx,<in8=%rdx
add %rcx,%rdx

# qhasm: in9 = in8
# asm 1: mov <in8=int64#3,>in9=int64#4
# asm 2: mov <in8=%rdx,>in9=%rcx
mov %rdx,%rcx

# qhasm: (uint64) in9 >>= 32
# asm 1: shr $32,<in9=int64#4
# asm 2: shr $32,<in9=%rcx
shr $32,%rcx

# qhasm: ((uint32 *) &orig8)[3] = in8
# asm 1: movl <in8=int64#3d,12+<orig8=stack128#19
# asm 2: movl <in8=%edx,12+<orig8=288(%rsp)
movl %edx,12+288(%rsp)

# qhasm: ((uint32 *) &orig9)[3] = in9
# asm 1: movl <in9=int64#4d,12+<orig9=stack128#20
# asm 2: movl <in9=%ecx,12+<orig9=304(%rsp)
movl %ecx,12+304(%rsp)

# qhasm: in8 += 1
# asm 1: add $1,<in8=int64#3
# asm 2: add $1,<in8=%rdx
add $1,%rdx

# qhasm: in9 <<= 32
# asm 1: shl $32,<in9=int64#4
# asm 2: shl $32,<in9=%rcx
shl $32,%rcx

# qhasm: in8 += in9
# asm 1: add <in9=int64#4,<in8=int64#3
# asm 2: add <in9=%rcx,<in8=%rdx
add %rcx,%rdx

# qhasm: in9 = in8
# asm 1: mov <in8=int64#3,>in9=int64#4
# asm 2: mov <in8=%rdx,>in9=%rcx
mov %rdx,%rcx

# qhasm: (uint64) in9 >>= 32
# asm 1: shr $32,<in9=int64#4
# asm 2: shr $32,<in9=%rcx
shr $32,%rcx

# qhasm: ((uint32 *)&x2)[0] = in8
# asm 1: movl <in8=int64#3d,>x2=stack128#2
# asm 2: movl <in8=%edx,>x2=16(%rsp)
movl %edx,16(%rsp)

# qhasm: ((uint32 *)&x3)[1] = in9
# asm 1: movl <in9=int64#4d,4+<x3=stack128#3
# asm 2: movl <in9=%ecx,4+<x3=32(%rsp)
movl %ecx,4+32(%rsp)

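# note: state words 8 and 9 form the 64-bit block counter. The lanes of
# orig8/orig9 have just been filled with counters n, n+1, n+2, n+3 for the
# four parallel blocks, and n+4 was written back into x2[0]/x3[1].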
# qhasm: bytes_backup = bytes
# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
movq %r9,408(%rsp)

# qhasm: i = 12
# asm 1: mov $12,>i=int64#3
# asm 2: mov $12,>i=%rdx
mov $12,%rdx

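# note: i counts the remaining rounds; 12 rounds is what distinguishes
# Salsa20/12 from the 20-round original.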
# qhasm: z5 = orig5
# asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
# asm 2: movdqa <orig5=64(%rsp),>z5=%xmm0
movdqa 64(%rsp),%xmm0

# qhasm: z10 = orig10
# asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
# asm 2: movdqa <orig10=80(%rsp),>z10=%xmm1
movdqa 80(%rsp),%xmm1

# qhasm: z15 = orig15
# asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
# asm 2: movdqa <orig15=96(%rsp),>z15=%xmm2
movdqa 96(%rsp),%xmm2

# qhasm: z14 = orig14
# asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
# asm 2: movdqa <orig14=256(%rsp),>z14=%xmm3
movdqa 256(%rsp),%xmm3

# qhasm: z3 = orig3
# asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
# asm 2: movdqa <orig3=272(%rsp),>z3=%xmm4
movdqa 272(%rsp),%xmm4

# qhasm: z6 = orig6
# asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
# asm 2: movdqa <orig6=128(%rsp),>z6=%xmm5
movdqa 128(%rsp),%xmm5

# qhasm: z11 = orig11
# asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
# asm 2: movdqa <orig11=144(%rsp),>z11=%xmm6
movdqa 144(%rsp),%xmm6

# qhasm: z1 = orig1
# asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
# asm 2: movdqa <orig1=176(%rsp),>z1=%xmm7
movdqa 176(%rsp),%xmm7

# qhasm: z7 = orig7
# asm 1: movdqa <orig7=stack128#13,>z7=int6464#9
# asm 2: movdqa <orig7=192(%rsp),>z7=%xmm8
movdqa 192(%rsp),%xmm8

# qhasm: z13 = orig13
# asm 1: movdqa <orig13=stack128#14,>z13=int6464#10
# asm 2: movdqa <orig13=208(%rsp),>z13=%xmm9
movdqa 208(%rsp),%xmm9

# qhasm: z2 = orig2
# asm 1: movdqa <orig2=stack128#15,>z2=int6464#11
# asm 2: movdqa <orig2=224(%rsp),>z2=%xmm10
movdqa 224(%rsp),%xmm10

# qhasm: z9 = orig9
# asm 1: movdqa <orig9=stack128#20,>z9=int6464#12
# asm 2: movdqa <orig9=304(%rsp),>z9=%xmm11
movdqa 304(%rsp),%xmm11

# qhasm: z0 = orig0
# asm 1: movdqa <orig0=stack128#8,>z0=int6464#13
# asm 2: movdqa <orig0=112(%rsp),>z0=%xmm12
movdqa 112(%rsp),%xmm12

# qhasm: z12 = orig12
# asm 1: movdqa <orig12=stack128#11,>z12=int6464#14
# asm 2: movdqa <orig12=160(%rsp),>z12=%xmm13
movdqa 160(%rsp),%xmm13

# qhasm: z4 = orig4
# asm 1: movdqa <orig4=stack128#16,>z4=int6464#15
# asm 2: movdqa <orig4=240(%rsp),>z4=%xmm14
movdqa 240(%rsp),%xmm14

# qhasm: z8 = orig8
# asm 1: movdqa <orig8=stack128#19,>z8=int6464#16
# asm 2: movdqa <orig8=288(%rsp),>z8=%xmm15
movdqa 288(%rsp),%xmm15

# qhasm: mainloop1:
._mainloop1:

# qhasm: z10_stack = z10
# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
movdqa %xmm1,320(%rsp)

# qhasm: z15_stack = z15
# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
movdqa %xmm2,336(%rsp)

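# note: sixteen state vectors plus temporaries do not fit in the sixteen
# XMM registers, so the loop rotates two of them (z10/z15, later z0/z5)
# through the stack slots z10_stack, z15_stack, z0_stack and z5_stack.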
# qhasm: y4 = z12
# asm 1: movdqa <z12=int6464#14,>y4=int6464#2
# asm 2: movdqa <z12=%xmm13,>y4=%xmm1
movdqa %xmm13,%xmm1

# qhasm: uint32323232 y4 += z0
# asm 1: paddd <z0=int6464#13,<y4=int6464#2
# asm 2: paddd <z0=%xmm12,<y4=%xmm1
paddd %xmm12,%xmm1

# qhasm: r4 = y4
# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y4 <<= 7
# asm 1: pslld $7,<y4=int6464#2
# asm 2: pslld $7,<y4=%xmm1
pslld $7,%xmm1

# qhasm: z4 ^= y4
# asm 1: pxor <y4=int6464#2,<z4=int6464#15
# asm 2: pxor <y4=%xmm1,<z4=%xmm14
pxor %xmm1,%xmm14

# qhasm: uint32323232 r4 >>= 25
# asm 1: psrld $25,<r4=int6464#3
# asm 2: psrld $25,<r4=%xmm2
psrld $25,%xmm2

# qhasm: z4 ^= r4
# asm 1: pxor <r4=int6464#3,<z4=int6464#15
# asm 2: pxor <r4=%xmm2,<z4=%xmm14
pxor %xmm2,%xmm14

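# note: the instruction pattern above (paddd, copy, pslld 7, pxor,
# psrld 25, pxor) is SSE2's rotate-free form of z4 ^= rotl32(z0 + z12, 7);
# every quarter-round step below repeats it with the Salsa20 rotation
# amounts 7, 9, 13 and 18.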
# qhasm: y9 = z1
# asm 1: movdqa <z1=int6464#8,>y9=int6464#2
# asm 2: movdqa <z1=%xmm7,>y9=%xmm1
movdqa %xmm7,%xmm1

# qhasm: uint32323232 y9 += z5
# asm 1: paddd <z5=int6464#1,<y9=int6464#2
# asm 2: paddd <z5=%xmm0,<y9=%xmm1
paddd %xmm0,%xmm1

# qhasm: r9 = y9
# asm 1: movdqa <y9=int6464#2,>r9=int6464#3
# asm 2: movdqa <y9=%xmm1,>r9=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y9 <<= 7
# asm 1: pslld $7,<y9=int6464#2
# asm 2: pslld $7,<y9=%xmm1
pslld $7,%xmm1

# qhasm: z9 ^= y9
# asm 1: pxor <y9=int6464#2,<z9=int6464#12
# asm 2: pxor <y9=%xmm1,<z9=%xmm11
pxor %xmm1,%xmm11

# qhasm: uint32323232 r9 >>= 25
# asm 1: psrld $25,<r9=int6464#3
# asm 2: psrld $25,<r9=%xmm2
psrld $25,%xmm2

# qhasm: z9 ^= r9
# asm 1: pxor <r9=int6464#3,<z9=int6464#12
# asm 2: pxor <r9=%xmm2,<z9=%xmm11
pxor %xmm2,%xmm11

# qhasm: y8 = z0
# asm 1: movdqa <z0=int6464#13,>y8=int6464#2
# asm 2: movdqa <z0=%xmm12,>y8=%xmm1
movdqa %xmm12,%xmm1

# qhasm: uint32323232 y8 += z4
# asm 1: paddd <z4=int6464#15,<y8=int6464#2
# asm 2: paddd <z4=%xmm14,<y8=%xmm1
paddd %xmm14,%xmm1

# qhasm: r8 = y8
# asm 1: movdqa <y8=int6464#2,>r8=int6464#3
# asm 2: movdqa <y8=%xmm1,>r8=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y8 <<= 9
# asm 1: pslld $9,<y8=int6464#2
# asm 2: pslld $9,<y8=%xmm1
pslld $9,%xmm1

# qhasm: z8 ^= y8
# asm 1: pxor <y8=int6464#2,<z8=int6464#16
# asm 2: pxor <y8=%xmm1,<z8=%xmm15
pxor %xmm1,%xmm15

# qhasm: uint32323232 r8 >>= 23
# asm 1: psrld $23,<r8=int6464#3
# asm 2: psrld $23,<r8=%xmm2
psrld $23,%xmm2

# qhasm: z8 ^= r8
# asm 1: pxor <r8=int6464#3,<z8=int6464#16
# asm 2: pxor <r8=%xmm2,<z8=%xmm15
pxor %xmm2,%xmm15

# qhasm: y13 = z5
# asm 1: movdqa <z5=int6464#1,>y13=int6464#2
# asm 2: movdqa <z5=%xmm0,>y13=%xmm1
movdqa %xmm0,%xmm1

# qhasm: uint32323232 y13 += z9
# asm 1: paddd <z9=int6464#12,<y13=int6464#2
# asm 2: paddd <z9=%xmm11,<y13=%xmm1
paddd %xmm11,%xmm1

# qhasm: r13 = y13
# asm 1: movdqa <y13=int6464#2,>r13=int6464#3
# asm 2: movdqa <y13=%xmm1,>r13=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y13 <<= 9
# asm 1: pslld $9,<y13=int6464#2
# asm 2: pslld $9,<y13=%xmm1
pslld $9,%xmm1

# qhasm: z13 ^= y13
# asm 1: pxor <y13=int6464#2,<z13=int6464#10
# asm 2: pxor <y13=%xmm1,<z13=%xmm9
pxor %xmm1,%xmm9

# qhasm: uint32323232 r13 >>= 23
# asm 1: psrld $23,<r13=int6464#3
# asm 2: psrld $23,<r13=%xmm2
psrld $23,%xmm2

# qhasm: z13 ^= r13
# asm 1: pxor <r13=int6464#3,<z13=int6464#10
# asm 2: pxor <r13=%xmm2,<z13=%xmm9
pxor %xmm2,%xmm9

# qhasm: y12 = z4
# asm 1: movdqa <z4=int6464#15,>y12=int6464#2
# asm 2: movdqa <z4=%xmm14,>y12=%xmm1
movdqa %xmm14,%xmm1

# qhasm: uint32323232 y12 += z8
# asm 1: paddd <z8=int6464#16,<y12=int6464#2
# asm 2: paddd <z8=%xmm15,<y12=%xmm1
paddd %xmm15,%xmm1

# qhasm: r12 = y12
# asm 1: movdqa <y12=int6464#2,>r12=int6464#3
# asm 2: movdqa <y12=%xmm1,>r12=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y12 <<= 13
# asm 1: pslld $13,<y12=int6464#2
# asm 2: pslld $13,<y12=%xmm1
pslld $13,%xmm1

# qhasm: z12 ^= y12
# asm 1: pxor <y12=int6464#2,<z12=int6464#14
# asm 2: pxor <y12=%xmm1,<z12=%xmm13
pxor %xmm1,%xmm13

# qhasm: uint32323232 r12 >>= 19
# asm 1: psrld $19,<r12=int6464#3
# asm 2: psrld $19,<r12=%xmm2
psrld $19,%xmm2

# qhasm: z12 ^= r12
# asm 1: pxor <r12=int6464#3,<z12=int6464#14
# asm 2: pxor <r12=%xmm2,<z12=%xmm13
pxor %xmm2,%xmm13

# qhasm: y1 = z9
# asm 1: movdqa <z9=int6464#12,>y1=int6464#2
# asm 2: movdqa <z9=%xmm11,>y1=%xmm1
movdqa %xmm11,%xmm1

# qhasm: uint32323232 y1 += z13
# asm 1: paddd <z13=int6464#10,<y1=int6464#2
# asm 2: paddd <z13=%xmm9,<y1=%xmm1
paddd %xmm9,%xmm1

# qhasm: r1 = y1
# asm 1: movdqa <y1=int6464#2,>r1=int6464#3
# asm 2: movdqa <y1=%xmm1,>r1=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y1 <<= 13
# asm 1: pslld $13,<y1=int6464#2
# asm 2: pslld $13,<y1=%xmm1
pslld $13,%xmm1

# qhasm: z1 ^= y1
# asm 1: pxor <y1=int6464#2,<z1=int6464#8
# asm 2: pxor <y1=%xmm1,<z1=%xmm7
pxor %xmm1,%xmm7

# qhasm: uint32323232 r1 >>= 19
# asm 1: psrld $19,<r1=int6464#3
# asm 2: psrld $19,<r1=%xmm2
psrld $19,%xmm2

# qhasm: z1 ^= r1
# asm 1: pxor <r1=int6464#3,<z1=int6464#8
# asm 2: pxor <r1=%xmm2,<z1=%xmm7
pxor %xmm2,%xmm7

# qhasm: y0 = z8
# asm 1: movdqa <z8=int6464#16,>y0=int6464#2
# asm 2: movdqa <z8=%xmm15,>y0=%xmm1
movdqa %xmm15,%xmm1

# qhasm: uint32323232 y0 += z12
# asm 1: paddd <z12=int6464#14,<y0=int6464#2
# asm 2: paddd <z12=%xmm13,<y0=%xmm1
paddd %xmm13,%xmm1

# qhasm: r0 = y0
# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y0 <<= 18
# asm 1: pslld $18,<y0=int6464#2
# asm 2: pslld $18,<y0=%xmm1
pslld $18,%xmm1

# qhasm: z0 ^= y0
# asm 1: pxor <y0=int6464#2,<z0=int6464#13
# asm 2: pxor <y0=%xmm1,<z0=%xmm12
pxor %xmm1,%xmm12

# qhasm: uint32323232 r0 >>= 14
# asm 1: psrld $14,<r0=int6464#3
# asm 2: psrld $14,<r0=%xmm2
psrld $14,%xmm2

# qhasm: z0 ^= r0
# asm 1: pxor <r0=int6464#3,<z0=int6464#13
# asm 2: pxor <r0=%xmm2,<z0=%xmm12
pxor %xmm2,%xmm12

# qhasm: z10 = z10_stack
# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
movdqa 320(%rsp),%xmm1

# qhasm: z0_stack = z0
# asm 1: movdqa <z0=int6464#13,>z0_stack=stack128#21
# asm 2: movdqa <z0=%xmm12,>z0_stack=320(%rsp)
movdqa %xmm12,320(%rsp)

# qhasm: y5 = z13
# asm 1: movdqa <z13=int6464#10,>y5=int6464#3
# asm 2: movdqa <z13=%xmm9,>y5=%xmm2
movdqa %xmm9,%xmm2

# qhasm: uint32323232 y5 += z1
# asm 1: paddd <z1=int6464#8,<y5=int6464#3
# asm 2: paddd <z1=%xmm7,<y5=%xmm2
paddd %xmm7,%xmm2

# qhasm: r5 = y5
# asm 1: movdqa <y5=int6464#3,>r5=int6464#13
# asm 2: movdqa <y5=%xmm2,>r5=%xmm12
movdqa %xmm2,%xmm12

# qhasm: uint32323232 y5 <<= 18
# asm 1: pslld $18,<y5=int6464#3
# asm 2: pslld $18,<y5=%xmm2
pslld $18,%xmm2

# qhasm: z5 ^= y5
# asm 1: pxor <y5=int6464#3,<z5=int6464#1
# asm 2: pxor <y5=%xmm2,<z5=%xmm0
pxor %xmm2,%xmm0

# qhasm: uint32323232 r5 >>= 14
# asm 1: psrld $14,<r5=int6464#13
# asm 2: psrld $14,<r5=%xmm12
psrld $14,%xmm12

# qhasm: z5 ^= r5
# asm 1: pxor <r5=int6464#13,<z5=int6464#1
# asm 2: pxor <r5=%xmm12,<z5=%xmm0
pxor %xmm12,%xmm0

# qhasm: y14 = z6
# asm 1: movdqa <z6=int6464#6,>y14=int6464#3
# asm 2: movdqa <z6=%xmm5,>y14=%xmm2
movdqa %xmm5,%xmm2

# qhasm: uint32323232 y14 += z10
# asm 1: paddd <z10=int6464#2,<y14=int6464#3
# asm 2: paddd <z10=%xmm1,<y14=%xmm2
paddd %xmm1,%xmm2

# qhasm: r14 = y14
# asm 1: movdqa <y14=int6464#3,>r14=int6464#13
# asm 2: movdqa <y14=%xmm2,>r14=%xmm12
movdqa %xmm2,%xmm12

# qhasm: uint32323232 y14 <<= 7
# asm 1: pslld $7,<y14=int6464#3
# asm 2: pslld $7,<y14=%xmm2
pslld $7,%xmm2

# qhasm: z14 ^= y14
# asm 1: pxor <y14=int6464#3,<z14=int6464#4
# asm 2: pxor <y14=%xmm2,<z14=%xmm3
pxor %xmm2,%xmm3

# qhasm: uint32323232 r14 >>= 25
# asm 1: psrld $25,<r14=int6464#13
# asm 2: psrld $25,<r14=%xmm12
psrld $25,%xmm12

# qhasm: z14 ^= r14
# asm 1: pxor <r14=int6464#13,<z14=int6464#4
# asm 2: pxor <r14=%xmm12,<z14=%xmm3
pxor %xmm12,%xmm3

# qhasm: z15 = z15_stack
# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
movdqa 336(%rsp),%xmm2

# qhasm: z5_stack = z5
# asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#22
# asm 2: movdqa <z5=%xmm0,>z5_stack=336(%rsp)
movdqa %xmm0,336(%rsp)

# qhasm: y3 = z11
# asm 1: movdqa <z11=int6464#7,>y3=int6464#1
# asm 2: movdqa <z11=%xmm6,>y3=%xmm0
movdqa %xmm6,%xmm0

# qhasm: uint32323232 y3 += z15
# asm 1: paddd <z15=int6464#3,<y3=int6464#1
# asm 2: paddd <z15=%xmm2,<y3=%xmm0
paddd %xmm2,%xmm0

# qhasm: r3 = y3
# asm 1: movdqa <y3=int6464#1,>r3=int6464#13
# asm 2: movdqa <y3=%xmm0,>r3=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y3 <<= 7
# asm 1: pslld $7,<y3=int6464#1
# asm 2: pslld $7,<y3=%xmm0
pslld $7,%xmm0

# qhasm: z3 ^= y3
# asm 1: pxor <y3=int6464#1,<z3=int6464#5
# asm 2: pxor <y3=%xmm0,<z3=%xmm4
pxor %xmm0,%xmm4

# qhasm: uint32323232 r3 >>= 25
# asm 1: psrld $25,<r3=int6464#13
# asm 2: psrld $25,<r3=%xmm12
psrld $25,%xmm12

# qhasm: z3 ^= r3
# asm 1: pxor <r3=int6464#13,<z3=int6464#5
# asm 2: pxor <r3=%xmm12,<z3=%xmm4
pxor %xmm12,%xmm4

# qhasm: y2 = z10
# asm 1: movdqa <z10=int6464#2,>y2=int6464#1
# asm 2: movdqa <z10=%xmm1,>y2=%xmm0
movdqa %xmm1,%xmm0

# qhasm: uint32323232 y2 += z14
# asm 1: paddd <z14=int6464#4,<y2=int6464#1
# asm 2: paddd <z14=%xmm3,<y2=%xmm0
paddd %xmm3,%xmm0

# qhasm: r2 = y2
# asm 1: movdqa <y2=int6464#1,>r2=int6464#13
# asm 2: movdqa <y2=%xmm0,>r2=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y2 <<= 9
# asm 1: pslld $9,<y2=int6464#1
# asm 2: pslld $9,<y2=%xmm0
pslld $9,%xmm0

# qhasm: z2 ^= y2
# asm 1: pxor <y2=int6464#1,<z2=int6464#11
# asm 2: pxor <y2=%xmm0,<z2=%xmm10
pxor %xmm0,%xmm10

# qhasm: uint32323232 r2 >>= 23
# asm 1: psrld $23,<r2=int6464#13
# asm 2: psrld $23,<r2=%xmm12
psrld $23,%xmm12

# qhasm: z2 ^= r2
# asm 1: pxor <r2=int6464#13,<z2=int6464#11
# asm 2: pxor <r2=%xmm12,<z2=%xmm10
pxor %xmm12,%xmm10

# qhasm: y7 = z15
# asm 1: movdqa <z15=int6464#3,>y7=int6464#1
# asm 2: movdqa <z15=%xmm2,>y7=%xmm0
movdqa %xmm2,%xmm0

# qhasm: uint32323232 y7 += z3
# asm 1: paddd <z3=int6464#5,<y7=int6464#1
# asm 2: paddd <z3=%xmm4,<y7=%xmm0
paddd %xmm4,%xmm0

# qhasm: r7 = y7
# asm 1: movdqa <y7=int6464#1,>r7=int6464#13
# asm 2: movdqa <y7=%xmm0,>r7=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y7 <<= 9
# asm 1: pslld $9,<y7=int6464#1
# asm 2: pslld $9,<y7=%xmm0
pslld $9,%xmm0

# qhasm: z7 ^= y7
# asm 1: pxor <y7=int6464#1,<z7=int6464#9
# asm 2: pxor <y7=%xmm0,<z7=%xmm8
pxor %xmm0,%xmm8

# qhasm: uint32323232 r7 >>= 23
# asm 1: psrld $23,<r7=int6464#13
# asm 2: psrld $23,<r7=%xmm12
psrld $23,%xmm12

# qhasm: z7 ^= r7
# asm 1: pxor <r7=int6464#13,<z7=int6464#9
# asm 2: pxor <r7=%xmm12,<z7=%xmm8
pxor %xmm12,%xmm8

# qhasm: y6 = z14
# asm 1: movdqa <z14=int6464#4,>y6=int6464#1
# asm 2: movdqa <z14=%xmm3,>y6=%xmm0
movdqa %xmm3,%xmm0

# qhasm: uint32323232 y6 += z2
# asm 1: paddd <z2=int6464#11,<y6=int6464#1
# asm 2: paddd <z2=%xmm10,<y6=%xmm0
paddd %xmm10,%xmm0

# qhasm: r6 = y6
# asm 1: movdqa <y6=int6464#1,>r6=int6464#13
# asm 2: movdqa <y6=%xmm0,>r6=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y6 <<= 13
# asm 1: pslld $13,<y6=int6464#1
# asm 2: pslld $13,<y6=%xmm0
pslld $13,%xmm0

# qhasm: z6 ^= y6
# asm 1: pxor <y6=int6464#1,<z6=int6464#6
# asm 2: pxor <y6=%xmm0,<z6=%xmm5
pxor %xmm0,%xmm5

# qhasm: uint32323232 r6 >>= 19
# asm 1: psrld $19,<r6=int6464#13
# asm 2: psrld $19,<r6=%xmm12
psrld $19,%xmm12

# qhasm: z6 ^= r6
# asm 1: pxor <r6=int6464#13,<z6=int6464#6
# asm 2: pxor <r6=%xmm12,<z6=%xmm5
pxor %xmm12,%xmm5

# qhasm: y11 = z3
# asm 1: movdqa <z3=int6464#5,>y11=int6464#1
# asm 2: movdqa <z3=%xmm4,>y11=%xmm0
movdqa %xmm4,%xmm0

# qhasm: uint32323232 y11 += z7
# asm 1: paddd <z7=int6464#9,<y11=int6464#1
# asm 2: paddd <z7=%xmm8,<y11=%xmm0
paddd %xmm8,%xmm0

# qhasm: r11 = y11
# asm 1: movdqa <y11=int6464#1,>r11=int6464#13
# asm 2: movdqa <y11=%xmm0,>r11=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y11 <<= 13
# asm 1: pslld $13,<y11=int6464#1
# asm 2: pslld $13,<y11=%xmm0
pslld $13,%xmm0

# qhasm: z11 ^= y11
# asm 1: pxor <y11=int6464#1,<z11=int6464#7
# asm 2: pxor <y11=%xmm0,<z11=%xmm6
pxor %xmm0,%xmm6

# qhasm: uint32323232 r11 >>= 19
# asm 1: psrld $19,<r11=int6464#13
# asm 2: psrld $19,<r11=%xmm12
psrld $19,%xmm12

# qhasm: z11 ^= r11
# asm 1: pxor <r11=int6464#13,<z11=int6464#7
# asm 2: pxor <r11=%xmm12,<z11=%xmm6
pxor %xmm12,%xmm6

# qhasm: y10 = z2
# asm 1: movdqa <z2=int6464#11,>y10=int6464#1
# asm 2: movdqa <z2=%xmm10,>y10=%xmm0
movdqa %xmm10,%xmm0

# qhasm: uint32323232 y10 += z6
# asm 1: paddd <z6=int6464#6,<y10=int6464#1
# asm 2: paddd <z6=%xmm5,<y10=%xmm0
paddd %xmm5,%xmm0

# qhasm: r10 = y10
# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y10 <<= 18
# asm 1: pslld $18,<y10=int6464#1
# asm 2: pslld $18,<y10=%xmm0
pslld $18,%xmm0

# qhasm: z10 ^= y10
# asm 1: pxor <y10=int6464#1,<z10=int6464#2
# asm 2: pxor <y10=%xmm0,<z10=%xmm1
pxor %xmm0,%xmm1

# qhasm: uint32323232 r10 >>= 14
# asm 1: psrld $14,<r10=int6464#13
# asm 2: psrld $14,<r10=%xmm12
psrld $14,%xmm12

# qhasm: z10 ^= r10
# asm 1: pxor <r10=int6464#13,<z10=int6464#2
# asm 2: pxor <r10=%xmm12,<z10=%xmm1
pxor %xmm12,%xmm1

# qhasm: z0 = z0_stack
# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#1
# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm0
movdqa 320(%rsp),%xmm0

# qhasm: z10_stack = z10
# asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#21
# asm 2: movdqa <z10=%xmm1,>z10_stack=320(%rsp)
movdqa %xmm1,320(%rsp)

# qhasm: y1 = z3
# asm 1: movdqa <z3=int6464#5,>y1=int6464#2
# asm 2: movdqa <z3=%xmm4,>y1=%xmm1
movdqa %xmm4,%xmm1

# qhasm: uint32323232 y1 += z0
# asm 1: paddd <z0=int6464#1,<y1=int6464#2
# asm 2: paddd <z0=%xmm0,<y1=%xmm1
paddd %xmm0,%xmm1

# qhasm: r1 = y1
# asm 1: movdqa <y1=int6464#2,>r1=int6464#13
# asm 2: movdqa <y1=%xmm1,>r1=%xmm12
movdqa %xmm1,%xmm12

# qhasm: uint32323232 y1 <<= 7
# asm 1: pslld $7,<y1=int6464#2
# asm 2: pslld $7,<y1=%xmm1
pslld $7,%xmm1

# qhasm: z1 ^= y1
# asm 1: pxor <y1=int6464#2,<z1=int6464#8
# asm 2: pxor <y1=%xmm1,<z1=%xmm7
pxor %xmm1,%xmm7

# qhasm: uint32323232 r1 >>= 25
# asm 1: psrld $25,<r1=int6464#13
# asm 2: psrld $25,<r1=%xmm12
psrld $25,%xmm12

# qhasm: z1 ^= r1
# asm 1: pxor <r1=int6464#13,<z1=int6464#8
# asm 2: pxor <r1=%xmm12,<z1=%xmm7
pxor %xmm12,%xmm7

# qhasm: y15 = z7
# asm 1: movdqa <z7=int6464#9,>y15=int6464#2
# asm 2: movdqa <z7=%xmm8,>y15=%xmm1
movdqa %xmm8,%xmm1

# qhasm: uint32323232 y15 += z11
# asm 1: paddd <z11=int6464#7,<y15=int6464#2
# asm 2: paddd <z11=%xmm6,<y15=%xmm1
paddd %xmm6,%xmm1

# qhasm: r15 = y15
# asm 1: movdqa <y15=int6464#2,>r15=int6464#13
# asm 2: movdqa <y15=%xmm1,>r15=%xmm12
movdqa %xmm1,%xmm12

# qhasm: uint32323232 y15 <<= 18
# asm 1: pslld $18,<y15=int6464#2
# asm 2: pslld $18,<y15=%xmm1
pslld $18,%xmm1

# qhasm: z15 ^= y15
# asm 1: pxor <y15=int6464#2,<z15=int6464#3
# asm 2: pxor <y15=%xmm1,<z15=%xmm2
pxor %xmm1,%xmm2

# qhasm: uint32323232 r15 >>= 14
# asm 1: psrld $14,<r15=int6464#13
# asm 2: psrld $14,<r15=%xmm12
psrld $14,%xmm12

# qhasm: z15 ^= r15
# asm 1: pxor <r15=int6464#13,<z15=int6464#3
# asm 2: pxor <r15=%xmm12,<z15=%xmm2
pxor %xmm12,%xmm2

# qhasm: z5 = z5_stack
# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#13
# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm12
movdqa 336(%rsp),%xmm12

# qhasm: z15_stack = z15
# asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#22
# asm 2: movdqa <z15=%xmm2,>z15_stack=336(%rsp)
movdqa %xmm2,336(%rsp)

# qhasm: y6 = z4
# asm 1: movdqa <z4=int6464#15,>y6=int6464#2
# asm 2: movdqa <z4=%xmm14,>y6=%xmm1
movdqa %xmm14,%xmm1

# qhasm: uint32323232 y6 += z5
# asm 1: paddd <z5=int6464#13,<y6=int6464#2
# asm 2: paddd <z5=%xmm12,<y6=%xmm1
paddd %xmm12,%xmm1

# qhasm: r6 = y6
# asm 1: movdqa <y6=int6464#2,>r6=int6464#3
# asm 2: movdqa <y6=%xmm1,>r6=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y6 <<= 7
# asm 1: pslld $7,<y6=int6464#2
# asm 2: pslld $7,<y6=%xmm1
pslld $7,%xmm1

# qhasm: z6 ^= y6
# asm 1: pxor <y6=int6464#2,<z6=int6464#6
# asm 2: pxor <y6=%xmm1,<z6=%xmm5
pxor %xmm1,%xmm5

# qhasm: uint32323232 r6 >>= 25
# asm 1: psrld $25,<r6=int6464#3
# asm 2: psrld $25,<r6=%xmm2
psrld $25,%xmm2

# qhasm: z6 ^= r6
# asm 1: pxor <r6=int6464#3,<z6=int6464#6
# asm 2: pxor <r6=%xmm2,<z6=%xmm5
pxor %xmm2,%xmm5

# qhasm: y2 = z0
# asm 1: movdqa <z0=int6464#1,>y2=int6464#2
# asm 2: movdqa <z0=%xmm0,>y2=%xmm1
movdqa %xmm0,%xmm1

# qhasm: uint32323232 y2 += z1
# asm 1: paddd <z1=int6464#8,<y2=int6464#2
# asm 2: paddd <z1=%xmm7,<y2=%xmm1
paddd %xmm7,%xmm1

# qhasm: r2 = y2
# asm 1: movdqa <y2=int6464#2,>r2=int6464#3
# asm 2: movdqa <y2=%xmm1,>r2=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y2 <<= 9
# asm 1: pslld $9,<y2=int6464#2
# asm 2: pslld $9,<y2=%xmm1
pslld $9,%xmm1

# qhasm: z2 ^= y2
# asm 1: pxor <y2=int6464#2,<z2=int6464#11
# asm 2: pxor <y2=%xmm1,<z2=%xmm10
pxor %xmm1,%xmm10

# qhasm: uint32323232 r2 >>= 23
# asm 1: psrld $23,<r2=int6464#3
# asm 2: psrld $23,<r2=%xmm2
psrld $23,%xmm2

# qhasm: z2 ^= r2
# asm 1: pxor <r2=int6464#3,<z2=int6464#11
# asm 2: pxor <r2=%xmm2,<z2=%xmm10
pxor %xmm2,%xmm10

# qhasm: y7 = z5
# asm 1: movdqa <z5=int6464#13,>y7=int6464#2
# asm 2: movdqa <z5=%xmm12,>y7=%xmm1
movdqa %xmm12,%xmm1

# qhasm: uint32323232 y7 += z6
# asm 1: paddd <z6=int6464#6,<y7=int6464#2
# asm 2: paddd <z6=%xmm5,<y7=%xmm1
paddd %xmm5,%xmm1

# qhasm: r7 = y7
# asm 1: movdqa <y7=int6464#2,>r7=int6464#3
# asm 2: movdqa <y7=%xmm1,>r7=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y7 <<= 9
# asm 1: pslld $9,<y7=int6464#2
# asm 2: pslld $9,<y7=%xmm1
pslld $9,%xmm1

# qhasm: z7 ^= y7
# asm 1: pxor <y7=int6464#2,<z7=int6464#9
# asm 2: pxor <y7=%xmm1,<z7=%xmm8
pxor %xmm1,%xmm8

# qhasm: uint32323232 r7 >>= 23
# asm 1: psrld $23,<r7=int6464#3
# asm 2: psrld $23,<r7=%xmm2
psrld $23,%xmm2

# qhasm: z7 ^= r7
# asm 1: pxor <r7=int6464#3,<z7=int6464#9
# asm 2: pxor <r7=%xmm2,<z7=%xmm8
pxor %xmm2,%xmm8

# qhasm: y3 = z1
# asm 1: movdqa <z1=int6464#8,>y3=int6464#2
# asm 2: movdqa <z1=%xmm7,>y3=%xmm1
movdqa %xmm7,%xmm1

# qhasm: uint32323232 y3 += z2
# asm 1: paddd <z2=int6464#11,<y3=int6464#2
# asm 2: paddd <z2=%xmm10,<y3=%xmm1
paddd %xmm10,%xmm1

# qhasm: r3 = y3
# asm 1: movdqa <y3=int6464#2,>r3=int6464#3
# asm 2: movdqa <y3=%xmm1,>r3=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y3 <<= 13
# asm 1: pslld $13,<y3=int6464#2
# asm 2: pslld $13,<y3=%xmm1
pslld $13,%xmm1

# qhasm: z3 ^= y3
# asm 1: pxor <y3=int6464#2,<z3=int6464#5
# asm 2: pxor <y3=%xmm1,<z3=%xmm4
pxor %xmm1,%xmm4

# qhasm: uint32323232 r3 >>= 19
# asm 1: psrld $19,<r3=int6464#3
# asm 2: psrld $19,<r3=%xmm2
psrld $19,%xmm2

# qhasm: z3 ^= r3
# asm 1: pxor <r3=int6464#3,<z3=int6464#5
# asm 2: pxor <r3=%xmm2,<z3=%xmm4
pxor %xmm2,%xmm4

# qhasm: y4 = z6
# asm 1: movdqa <z6=int6464#6,>y4=int6464#2
# asm 2: movdqa <z6=%xmm5,>y4=%xmm1
movdqa %xmm5,%xmm1

# qhasm: uint32323232 y4 += z7
# asm 1: paddd <z7=int6464#9,<y4=int6464#2
# asm 2: paddd <z7=%xmm8,<y4=%xmm1
paddd %xmm8,%xmm1

# qhasm: r4 = y4
# asm 1: movdqa <y4=int6464#2,>r4=int6464#3
# asm 2: movdqa <y4=%xmm1,>r4=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y4 <<= 13
# asm 1: pslld $13,<y4=int6464#2
# asm 2: pslld $13,<y4=%xmm1
pslld $13,%xmm1

# qhasm: z4 ^= y4
# asm 1: pxor <y4=int6464#2,<z4=int6464#15
# asm 2: pxor <y4=%xmm1,<z4=%xmm14
pxor %xmm1,%xmm14

# qhasm: uint32323232 r4 >>= 19
# asm 1: psrld $19,<r4=int6464#3
# asm 2: psrld $19,<r4=%xmm2
psrld $19,%xmm2

# qhasm: z4 ^= r4
# asm 1: pxor <r4=int6464#3,<z4=int6464#15
# asm 2: pxor <r4=%xmm2,<z4=%xmm14
pxor %xmm2,%xmm14

# qhasm: y0 = z2
# asm 1: movdqa <z2=int6464#11,>y0=int6464#2
# asm 2: movdqa <z2=%xmm10,>y0=%xmm1
movdqa %xmm10,%xmm1

# qhasm: uint32323232 y0 += z3
# asm 1: paddd <z3=int6464#5,<y0=int6464#2
# asm 2: paddd <z3=%xmm4,<y0=%xmm1
paddd %xmm4,%xmm1

# qhasm: r0 = y0
# asm 1: movdqa <y0=int6464#2,>r0=int6464#3
# asm 2: movdqa <y0=%xmm1,>r0=%xmm2
movdqa %xmm1,%xmm2

# qhasm: uint32323232 y0 <<= 18
# asm 1: pslld $18,<y0=int6464#2
# asm 2: pslld $18,<y0=%xmm1
pslld $18,%xmm1

# qhasm: z0 ^= y0
# asm 1: pxor <y0=int6464#2,<z0=int6464#1
# asm 2: pxor <y0=%xmm1,<z0=%xmm0
pxor %xmm1,%xmm0

# qhasm: uint32323232 r0 >>= 14
# asm 1: psrld $14,<r0=int6464#3
# asm 2: psrld $14,<r0=%xmm2
psrld $14,%xmm2

# qhasm: z0 ^= r0
# asm 1: pxor <r0=int6464#3,<z0=int6464#1
# asm 2: pxor <r0=%xmm2,<z0=%xmm0
pxor %xmm2,%xmm0

# qhasm: z10 = z10_stack
# asm 1: movdqa <z10_stack=stack128#21,>z10=int6464#2
# asm 2: movdqa <z10_stack=320(%rsp),>z10=%xmm1
movdqa 320(%rsp),%xmm1

# qhasm: z0_stack = z0
# asm 1: movdqa <z0=int6464#1,>z0_stack=stack128#21
# asm 2: movdqa <z0=%xmm0,>z0_stack=320(%rsp)
movdqa %xmm0,320(%rsp)

# qhasm: y5 = z7
# asm 1: movdqa <z7=int6464#9,>y5=int6464#1
# asm 2: movdqa <z7=%xmm8,>y5=%xmm0
movdqa %xmm8,%xmm0

# qhasm: uint32323232 y5 += z4
# asm 1: paddd <z4=int6464#15,<y5=int6464#1
# asm 2: paddd <z4=%xmm14,<y5=%xmm0
paddd %xmm14,%xmm0

# qhasm: r5 = y5
# asm 1: movdqa <y5=int6464#1,>r5=int6464#3
# asm 2: movdqa <y5=%xmm0,>r5=%xmm2
movdqa %xmm0,%xmm2

# qhasm: uint32323232 y5 <<= 18
# asm 1: pslld $18,<y5=int6464#1
# asm 2: pslld $18,<y5=%xmm0
pslld $18,%xmm0

# qhasm: z5 ^= y5
# asm 1: pxor <y5=int6464#1,<z5=int6464#13
# asm 2: pxor <y5=%xmm0,<z5=%xmm12
pxor %xmm0,%xmm12

# qhasm: uint32323232 r5 >>= 14
# asm 1: psrld $14,<r5=int6464#3
# asm 2: psrld $14,<r5=%xmm2
psrld $14,%xmm2

# qhasm: z5 ^= r5
# asm 1: pxor <r5=int6464#3,<z5=int6464#13
# asm 2: pxor <r5=%xmm2,<z5=%xmm12
pxor %xmm2,%xmm12

# qhasm: y11 = z9
# asm 1: movdqa <z9=int6464#12,>y11=int6464#1
# asm 2: movdqa <z9=%xmm11,>y11=%xmm0
movdqa %xmm11,%xmm0

# qhasm: uint32323232 y11 += z10
# asm 1: paddd <z10=int6464#2,<y11=int6464#1
# asm 2: paddd <z10=%xmm1,<y11=%xmm0
paddd %xmm1,%xmm0

# qhasm: r11 = y11
# asm 1: movdqa <y11=int6464#1,>r11=int6464#3
# asm 2: movdqa <y11=%xmm0,>r11=%xmm2
movdqa %xmm0,%xmm2

# qhasm: uint32323232 y11 <<= 7
# asm 1: pslld $7,<y11=int6464#1
# asm 2: pslld $7,<y11=%xmm0
pslld $7,%xmm0

# qhasm: z11 ^= y11
# asm 1: pxor <y11=int6464#1,<z11=int6464#7
# asm 2: pxor <y11=%xmm0,<z11=%xmm6
pxor %xmm0,%xmm6

# qhasm: uint32323232 r11 >>= 25
# asm 1: psrld $25,<r11=int6464#3
# asm 2: psrld $25,<r11=%xmm2
psrld $25,%xmm2

# qhasm: z11 ^= r11
# asm 1: pxor <r11=int6464#3,<z11=int6464#7
# asm 2: pxor <r11=%xmm2,<z11=%xmm6
pxor %xmm2,%xmm6

# qhasm: z15 = z15_stack
# asm 1: movdqa <z15_stack=stack128#22,>z15=int6464#3
# asm 2: movdqa <z15_stack=336(%rsp),>z15=%xmm2
movdqa 336(%rsp),%xmm2

# qhasm: z5_stack = z5
# asm 1: movdqa <z5=int6464#13,>z5_stack=stack128#22
# asm 2: movdqa <z5=%xmm12,>z5_stack=336(%rsp)
movdqa %xmm12,336(%rsp)

# qhasm: y12 = z14
# asm 1: movdqa <z14=int6464#4,>y12=int6464#1
# asm 2: movdqa <z14=%xmm3,>y12=%xmm0
movdqa %xmm3,%xmm0

# qhasm: uint32323232 y12 += z15
# asm 1: paddd <z15=int6464#3,<y12=int6464#1
# asm 2: paddd <z15=%xmm2,<y12=%xmm0
paddd %xmm2,%xmm0

# qhasm: r12 = y12
# asm 1: movdqa <y12=int6464#1,>r12=int6464#13
# asm 2: movdqa <y12=%xmm0,>r12=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y12 <<= 7
# asm 1: pslld $7,<y12=int6464#1
# asm 2: pslld $7,<y12=%xmm0
pslld $7,%xmm0

# qhasm: z12 ^= y12
# asm 1: pxor <y12=int6464#1,<z12=int6464#14
# asm 2: pxor <y12=%xmm0,<z12=%xmm13
pxor %xmm0,%xmm13

# qhasm: uint32323232 r12 >>= 25
# asm 1: psrld $25,<r12=int6464#13
# asm 2: psrld $25,<r12=%xmm12
psrld $25,%xmm12

# qhasm: z12 ^= r12
# asm 1: pxor <r12=int6464#13,<z12=int6464#14
# asm 2: pxor <r12=%xmm12,<z12=%xmm13
pxor %xmm12,%xmm13

# qhasm: y8 = z10
# asm 1: movdqa <z10=int6464#2,>y8=int6464#1
# asm 2: movdqa <z10=%xmm1,>y8=%xmm0
movdqa %xmm1,%xmm0

# qhasm: uint32323232 y8 += z11
# asm 1: paddd <z11=int6464#7,<y8=int6464#1
# asm 2: paddd <z11=%xmm6,<y8=%xmm0
paddd %xmm6,%xmm0

# qhasm: r8 = y8
# asm 1: movdqa <y8=int6464#1,>r8=int6464#13
# asm 2: movdqa <y8=%xmm0,>r8=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y8 <<= 9
# asm 1: pslld $9,<y8=int6464#1
# asm 2: pslld $9,<y8=%xmm0
pslld $9,%xmm0

# qhasm: z8 ^= y8
# asm 1: pxor <y8=int6464#1,<z8=int6464#16
# asm 2: pxor <y8=%xmm0,<z8=%xmm15
pxor %xmm0,%xmm15

# qhasm: uint32323232 r8 >>= 23
# asm 1: psrld $23,<r8=int6464#13
# asm 2: psrld $23,<r8=%xmm12
psrld $23,%xmm12

# qhasm: z8 ^= r8
# asm 1: pxor <r8=int6464#13,<z8=int6464#16
# asm 2: pxor <r8=%xmm12,<z8=%xmm15
pxor %xmm12,%xmm15

# qhasm: y13 = z15
# asm 1: movdqa <z15=int6464#3,>y13=int6464#1
# asm 2: movdqa <z15=%xmm2,>y13=%xmm0
movdqa %xmm2,%xmm0

# qhasm: uint32323232 y13 += z12
# asm 1: paddd <z12=int6464#14,<y13=int6464#1
# asm 2: paddd <z12=%xmm13,<y13=%xmm0
paddd %xmm13,%xmm0

# qhasm: r13 = y13
# asm 1: movdqa <y13=int6464#1,>r13=int6464#13
# asm 2: movdqa <y13=%xmm0,>r13=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y13 <<= 9
# asm 1: pslld $9,<y13=int6464#1
# asm 2: pslld $9,<y13=%xmm0
pslld $9,%xmm0

# qhasm: z13 ^= y13
# asm 1: pxor <y13=int6464#1,<z13=int6464#10
# asm 2: pxor <y13=%xmm0,<z13=%xmm9
pxor %xmm0,%xmm9

# qhasm: uint32323232 r13 >>= 23
# asm 1: psrld $23,<r13=int6464#13
# asm 2: psrld $23,<r13=%xmm12
psrld $23,%xmm12

# qhasm: z13 ^= r13
# asm 1: pxor <r13=int6464#13,<z13=int6464#10
# asm 2: pxor <r13=%xmm12,<z13=%xmm9
pxor %xmm12,%xmm9

# qhasm: y9 = z11
# asm 1: movdqa <z11=int6464#7,>y9=int6464#1
# asm 2: movdqa <z11=%xmm6,>y9=%xmm0
movdqa %xmm6,%xmm0

# qhasm: uint32323232 y9 += z8
# asm 1: paddd <z8=int6464#16,<y9=int6464#1
# asm 2: paddd <z8=%xmm15,<y9=%xmm0
paddd %xmm15,%xmm0

# qhasm: r9 = y9
# asm 1: movdqa <y9=int6464#1,>r9=int6464#13
# asm 2: movdqa <y9=%xmm0,>r9=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y9 <<= 13
# asm 1: pslld $13,<y9=int6464#1
# asm 2: pslld $13,<y9=%xmm0
pslld $13,%xmm0

# qhasm: z9 ^= y9
# asm 1: pxor <y9=int6464#1,<z9=int6464#12
# asm 2: pxor <y9=%xmm0,<z9=%xmm11
pxor %xmm0,%xmm11

# qhasm: uint32323232 r9 >>= 19
# asm 1: psrld $19,<r9=int6464#13
# asm 2: psrld $19,<r9=%xmm12
psrld $19,%xmm12

# qhasm: z9 ^= r9
# asm 1: pxor <r9=int6464#13,<z9=int6464#12
# asm 2: pxor <r9=%xmm12,<z9=%xmm11
pxor %xmm12,%xmm11

# qhasm: y14 = z12
# asm 1: movdqa <z12=int6464#14,>y14=int6464#1
# asm 2: movdqa <z12=%xmm13,>y14=%xmm0
movdqa %xmm13,%xmm0

# qhasm: uint32323232 y14 += z13
# asm 1: paddd <z13=int6464#10,<y14=int6464#1
# asm 2: paddd <z13=%xmm9,<y14=%xmm0
paddd %xmm9,%xmm0

# qhasm: r14 = y14
# asm 1: movdqa <y14=int6464#1,>r14=int6464#13
# asm 2: movdqa <y14=%xmm0,>r14=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y14 <<= 13
# asm 1: pslld $13,<y14=int6464#1
# asm 2: pslld $13,<y14=%xmm0
pslld $13,%xmm0

# qhasm: z14 ^= y14
# asm 1: pxor <y14=int6464#1,<z14=int6464#4
# asm 2: pxor <y14=%xmm0,<z14=%xmm3
pxor %xmm0,%xmm3

# qhasm: uint32323232 r14 >>= 19
# asm 1: psrld $19,<r14=int6464#13
# asm 2: psrld $19,<r14=%xmm12
psrld $19,%xmm12

# qhasm: z14 ^= r14
# asm 1: pxor <r14=int6464#13,<z14=int6464#4
# asm 2: pxor <r14=%xmm12,<z14=%xmm3
pxor %xmm12,%xmm3

# qhasm: y10 = z8
# asm 1: movdqa <z8=int6464#16,>y10=int6464#1
# asm 2: movdqa <z8=%xmm15,>y10=%xmm0
movdqa %xmm15,%xmm0

# qhasm: uint32323232 y10 += z9
# asm 1: paddd <z9=int6464#12,<y10=int6464#1
# asm 2: paddd <z9=%xmm11,<y10=%xmm0
paddd %xmm11,%xmm0

# qhasm: r10 = y10
# asm 1: movdqa <y10=int6464#1,>r10=int6464#13
# asm 2: movdqa <y10=%xmm0,>r10=%xmm12
movdqa %xmm0,%xmm12

# qhasm: uint32323232 y10 <<= 18
# asm 1: pslld $18,<y10=int6464#1
# asm 2: pslld $18,<y10=%xmm0
pslld $18,%xmm0

# qhasm: z10 ^= y10
# asm 1: pxor <y10=int6464#1,<z10=int6464#2
# asm 2: pxor <y10=%xmm0,<z10=%xmm1
pxor %xmm0,%xmm1

# qhasm: uint32323232 r10 >>= 14
# asm 1: psrld $14,<r10=int6464#13
# asm 2: psrld $14,<r10=%xmm12
psrld $14,%xmm12

# qhasm: z10 ^= r10
# asm 1: pxor <r10=int6464#13,<z10=int6464#2
# asm 2: pxor <r10=%xmm12,<z10=%xmm1
pxor %xmm12,%xmm1

# qhasm: y15 = z13
# asm 1: movdqa <z13=int6464#10,>y15=int6464#1
# asm 2: movdqa <z13=%xmm9,>y15=%xmm0
movdqa %xmm9,%xmm0

# qhasm: uint32323232 y15 += z14
# asm 1: paddd <z14=int6464#4,<y15=int6464#1
# asm 2: paddd <z14=%xmm3,<y15=%xmm0
paddd %xmm3,%xmm0

# qhasm: r15 = y15
# asm 1: movdqa <y15=int6464#1,>r15=int6464#13
# asm 2: movdqa <y15=%xmm0,>r15=%xmm12
2277movdqa %xmm0,%xmm12
2278
2279# qhasm: uint32323232 y15 <<= 18
2280# asm 1: pslld $18,<y15=int6464#1
2281# asm 2: pslld $18,<y15=%xmm0
2282pslld $18,%xmm0
2283
2284# qhasm: z15 ^= y15
2285# asm 1: pxor <y15=int6464#1,<z15=int6464#3
2286# asm 2: pxor <y15=%xmm0,<z15=%xmm2
2287pxor %xmm0,%xmm2
2288
2289# qhasm: uint32323232 r15 >>= 14
2290# asm 1: psrld $14,<r15=int6464#13
2291# asm 2: psrld $14,<r15=%xmm12
2292psrld $14,%xmm12
2293
2294# qhasm: z15 ^= r15
2295# asm 1: pxor <r15=int6464#13,<z15=int6464#3
2296# asm 2: pxor <r15=%xmm12,<z15=%xmm2
2297pxor %xmm12,%xmm2
2298
2299# qhasm: z0 = z0_stack
2300# asm 1: movdqa <z0_stack=stack128#21,>z0=int6464#13
2301# asm 2: movdqa <z0_stack=320(%rsp),>z0=%xmm12
2302movdqa 320(%rsp),%xmm12
2303
2304# qhasm: z5 = z5_stack
2305# asm 1: movdqa <z5_stack=stack128#22,>z5=int6464#1
2306# asm 2: movdqa <z5_stack=336(%rsp),>z5=%xmm0
2307movdqa 336(%rsp),%xmm0
2308
2309# qhasm: unsigned>? i -= 2
2310# asm 1: sub $2,<i=int64#3
2311# asm 2: sub $2,<i=%rdx
2312sub $2,%rdx
2313# comment:fp stack unchanged by jump
2314
2315# qhasm: goto mainloop1 if unsigned>
2316ja ._mainloop1
2317
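# The ja above closes one double round (a column round plus a row round);
# i drops by 2 per pass, so a round count of 12 -- as set explicitly for
# the single-block loop further down -- gives Salsa20/12's six double
# rounds in ._mainloop1.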
2318# qhasm: uint32323232 z0 += orig0
2319# asm 1: paddd <orig0=stack128#8,<z0=int6464#13
2320# asm 2: paddd <orig0=112(%rsp),<z0=%xmm12
2321paddd 112(%rsp),%xmm12
2322
2323# qhasm: uint32323232 z1 += orig1
2324# asm 1: paddd <orig1=stack128#12,<z1=int6464#8
2325# asm 2: paddd <orig1=176(%rsp),<z1=%xmm7
2326paddd 176(%rsp),%xmm7
2327
2328# qhasm: uint32323232 z2 += orig2
2329# asm 1: paddd <orig2=stack128#15,<z2=int6464#11
2330# asm 2: paddd <orig2=224(%rsp),<z2=%xmm10
2331paddd 224(%rsp),%xmm10
2332
2333# qhasm: uint32323232 z3 += orig3
2334# asm 1: paddd <orig3=stack128#18,<z3=int6464#5
2335# asm 2: paddd <orig3=272(%rsp),<z3=%xmm4
2336paddd 272(%rsp),%xmm4
2337
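# The rounds are complete. Add back the saved input words (the Salsa20
# feed-forward) and serialize: movd extracts the low 32-bit lane, pshufd
# $0x39 rotates the next lane into position, and each keystream word is
# XORed with the message before being stored.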
2338# qhasm: in0 = z0
2339# asm 1: movd <z0=int6464#13,>in0=int64#3
2340# asm 2: movd <z0=%xmm12,>in0=%rdx
2341movd %xmm12,%rdx
2342
2343# qhasm: in1 = z1
2344# asm 1: movd <z1=int6464#8,>in1=int64#4
2345# asm 2: movd <z1=%xmm7,>in1=%rcx
2346movd %xmm7,%rcx
2347
2348# qhasm: in2 = z2
2349# asm 1: movd <z2=int6464#11,>in2=int64#5
2350# asm 2: movd <z2=%xmm10,>in2=%r8
2351movd %xmm10,%r8
2352
2353# qhasm: in3 = z3
2354# asm 1: movd <z3=int6464#5,>in3=int64#6
2355# asm 2: movd <z3=%xmm4,>in3=%r9
2356movd %xmm4,%r9
2357
2358# qhasm: z0 <<<= 96
2359# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2360# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2361pshufd $0x39,%xmm12,%xmm12
2362
2363# qhasm: z1 <<<= 96
2364# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2365# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2366pshufd $0x39,%xmm7,%xmm7
2367
2368# qhasm: z2 <<<= 96
2369# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2370# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2371pshufd $0x39,%xmm10,%xmm10
2372
2373# qhasm: z3 <<<= 96
2374# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2375# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2376pshufd $0x39,%xmm4,%xmm4
2377
2378# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
2379# asm 1: xorl 0(<m=int64#2),<in0=int64#3d
2380# asm 2: xorl 0(<m=%rsi),<in0=%edx
2381xorl 0(%rsi),%edx
2382
2383# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
2384# asm 1: xorl 4(<m=int64#2),<in1=int64#4d
2385# asm 2: xorl 4(<m=%rsi),<in1=%ecx
2386xorl 4(%rsi),%ecx
2387
2388# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
2389# asm 1: xorl 8(<m=int64#2),<in2=int64#5d
2390# asm 2: xorl 8(<m=%rsi),<in2=%r8d
2391xorl 8(%rsi),%r8d
2392
2393# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
2394# asm 1: xorl 12(<m=int64#2),<in3=int64#6d
2395# asm 2: xorl 12(<m=%rsi),<in3=%r9d
2396xorl 12(%rsi),%r9d
2397
2398# qhasm: *(uint32 *) (out + 0) = in0
2399# asm 1: movl <in0=int64#3d,0(<out=int64#1)
2400# asm 2: movl <in0=%edx,0(<out=%rdi)
2401movl %edx,0(%rdi)
2402
2403# qhasm: *(uint32 *) (out + 4) = in1
2404# asm 1: movl <in1=int64#4d,4(<out=int64#1)
2405# asm 2: movl <in1=%ecx,4(<out=%rdi)
2406movl %ecx,4(%rdi)
2407
2408# qhasm: *(uint32 *) (out + 8) = in2
2409# asm 1: movl <in2=int64#5d,8(<out=int64#1)
2410# asm 2: movl <in2=%r8d,8(<out=%rdi)
2411movl %r8d,8(%rdi)
2412
2413# qhasm: *(uint32 *) (out + 12) = in3
2414# asm 1: movl <in3=int64#6d,12(<out=int64#1)
2415# asm 2: movl <in3=%r9d,12(<out=%rdi)
2416movl %r9d,12(%rdi)
2417
2418# qhasm: in0 = z0
2419# asm 1: movd <z0=int6464#13,>in0=int64#3
2420# asm 2: movd <z0=%xmm12,>in0=%rdx
2421movd %xmm12,%rdx
2422
2423# qhasm: in1 = z1
2424# asm 1: movd <z1=int6464#8,>in1=int64#4
2425# asm 2: movd <z1=%xmm7,>in1=%rcx
2426movd %xmm7,%rcx
2427
2428# qhasm: in2 = z2
2429# asm 1: movd <z2=int6464#11,>in2=int64#5
2430# asm 2: movd <z2=%xmm10,>in2=%r8
2431movd %xmm10,%r8
2432
2433# qhasm: in3 = z3
2434# asm 1: movd <z3=int6464#5,>in3=int64#6
2435# asm 2: movd <z3=%xmm4,>in3=%r9
2436movd %xmm4,%r9
2437
2438# qhasm: z0 <<<= 96
2439# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2440# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2441pshufd $0x39,%xmm12,%xmm12
2442
2443# qhasm: z1 <<<= 96
2444# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2445# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2446pshufd $0x39,%xmm7,%xmm7
2447
2448# qhasm: z2 <<<= 96
2449# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2450# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2451pshufd $0x39,%xmm10,%xmm10
2452
2453# qhasm: z3 <<<= 96
2454# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2455# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2456pshufd $0x39,%xmm4,%xmm4
2457
2458# qhasm: (uint32) in0 ^= *(uint32 *) (m + 64)
2459# asm 1: xorl 64(<m=int64#2),<in0=int64#3d
2460# asm 2: xorl 64(<m=%rsi),<in0=%edx
2461xorl 64(%rsi),%edx
2462
2463# qhasm: (uint32) in1 ^= *(uint32 *) (m + 68)
2464# asm 1: xorl 68(<m=int64#2),<in1=int64#4d
2465# asm 2: xorl 68(<m=%rsi),<in1=%ecx
2466xorl 68(%rsi),%ecx
2467
2468# qhasm: (uint32) in2 ^= *(uint32 *) (m + 72)
2469# asm 1: xorl 72(<m=int64#2),<in2=int64#5d
2470# asm 2: xorl 72(<m=%rsi),<in2=%r8d
2471xorl 72(%rsi),%r8d
2472
2473# qhasm: (uint32) in3 ^= *(uint32 *) (m + 76)
2474# asm 1: xorl 76(<m=int64#2),<in3=int64#6d
2475# asm 2: xorl 76(<m=%rsi),<in3=%r9d
2476xorl 76(%rsi),%r9d
2477
2478# qhasm: *(uint32 *) (out + 64) = in0
2479# asm 1: movl <in0=int64#3d,64(<out=int64#1)
2480# asm 2: movl <in0=%edx,64(<out=%rdi)
2481movl %edx,64(%rdi)
2482
2483# qhasm: *(uint32 *) (out + 68) = in1
2484# asm 1: movl <in1=int64#4d,68(<out=int64#1)
2485# asm 2: movl <in1=%ecx,68(<out=%rdi)
2486movl %ecx,68(%rdi)
2487
2488# qhasm: *(uint32 *) (out + 72) = in2
2489# asm 1: movl <in2=int64#5d,72(<out=int64#1)
2490# asm 2: movl <in2=%r8d,72(<out=%rdi)
2491movl %r8d,72(%rdi)
2492
2493# qhasm: *(uint32 *) (out + 76) = in3
2494# asm 1: movl <in3=int64#6d,76(<out=int64#1)
2495# asm 2: movl <in3=%r9d,76(<out=%rdi)
2496movl %r9d,76(%rdi)
2497
2498# qhasm: in0 = z0
2499# asm 1: movd <z0=int6464#13,>in0=int64#3
2500# asm 2: movd <z0=%xmm12,>in0=%rdx
2501movd %xmm12,%rdx
2502
2503# qhasm: in1 = z1
2504# asm 1: movd <z1=int6464#8,>in1=int64#4
2505# asm 2: movd <z1=%xmm7,>in1=%rcx
2506movd %xmm7,%rcx
2507
2508# qhasm: in2 = z2
2509# asm 1: movd <z2=int6464#11,>in2=int64#5
2510# asm 2: movd <z2=%xmm10,>in2=%r8
2511movd %xmm10,%r8
2512
2513# qhasm: in3 = z3
2514# asm 1: movd <z3=int6464#5,>in3=int64#6
2515# asm 2: movd <z3=%xmm4,>in3=%r9
2516movd %xmm4,%r9
2517
2518# qhasm: z0 <<<= 96
2519# asm 1: pshufd $0x39,<z0=int6464#13,<z0=int6464#13
2520# asm 2: pshufd $0x39,<z0=%xmm12,<z0=%xmm12
2521pshufd $0x39,%xmm12,%xmm12
2522
2523# qhasm: z1 <<<= 96
2524# asm 1: pshufd $0x39,<z1=int6464#8,<z1=int6464#8
2525# asm 2: pshufd $0x39,<z1=%xmm7,<z1=%xmm7
2526pshufd $0x39,%xmm7,%xmm7
2527
2528# qhasm: z2 <<<= 96
2529# asm 1: pshufd $0x39,<z2=int6464#11,<z2=int6464#11
2530# asm 2: pshufd $0x39,<z2=%xmm10,<z2=%xmm10
2531pshufd $0x39,%xmm10,%xmm10
2532
2533# qhasm: z3 <<<= 96
2534# asm 1: pshufd $0x39,<z3=int6464#5,<z3=int6464#5
2535# asm 2: pshufd $0x39,<z3=%xmm4,<z3=%xmm4
2536pshufd $0x39,%xmm4,%xmm4
2537
2538# qhasm: (uint32) in0 ^= *(uint32 *) (m + 128)
2539# asm 1: xorl 128(<m=int64#2),<in0=int64#3d
2540# asm 2: xorl 128(<m=%rsi),<in0=%edx
2541xorl 128(%rsi),%edx
2542
2543# qhasm: (uint32) in1 ^= *(uint32 *) (m + 132)
2544# asm 1: xorl 132(<m=int64#2),<in1=int64#4d
2545# asm 2: xorl 132(<m=%rsi),<in1=%ecx
2546xorl 132(%rsi),%ecx
2547
2548# qhasm: (uint32) in2 ^= *(uint32 *) (m + 136)
2549# asm 1: xorl 136(<m=int64#2),<in2=int64#5d
2550# asm 2: xorl 136(<m=%rsi),<in2=%r8d
2551xorl 136(%rsi),%r8d
2552
2553# qhasm: (uint32) in3 ^= *(uint32 *) (m + 140)
2554# asm 1: xorl 140(<m=int64#2),<in3=int64#6d
2555# asm 2: xorl 140(<m=%rsi),<in3=%r9d
2556xorl 140(%rsi),%r9d
2557
2558# qhasm: *(uint32 *) (out + 128) = in0
2559# asm 1: movl <in0=int64#3d,128(<out=int64#1)
2560# asm 2: movl <in0=%edx,128(<out=%rdi)
2561movl %edx,128(%rdi)
2562
2563# qhasm: *(uint32 *) (out + 132) = in1
2564# asm 1: movl <in1=int64#4d,132(<out=int64#1)
2565# asm 2: movl <in1=%ecx,132(<out=%rdi)
2566movl %ecx,132(%rdi)
2567
2568# qhasm: *(uint32 *) (out + 136) = in2
2569# asm 1: movl <in2=int64#5d,136(<out=int64#1)
2570# asm 2: movl <in2=%r8d,136(<out=%rdi)
2571movl %r8d,136(%rdi)
2572
2573# qhasm: *(uint32 *) (out + 140) = in3
2574# asm 1: movl <in3=int64#6d,140(<out=int64#1)
2575# asm 2: movl <in3=%r9d,140(<out=%rdi)
2576movl %r9d,140(%rdi)
2577
2578# qhasm: in0 = z0
2579# asm 1: movd <z0=int6464#13,>in0=int64#3
2580# asm 2: movd <z0=%xmm12,>in0=%rdx
2581movd %xmm12,%rdx
2582
2583# qhasm: in1 = z1
2584# asm 1: movd <z1=int6464#8,>in1=int64#4
2585# asm 2: movd <z1=%xmm7,>in1=%rcx
2586movd %xmm7,%rcx
2587
2588# qhasm: in2 = z2
2589# asm 1: movd <z2=int6464#11,>in2=int64#5
2590# asm 2: movd <z2=%xmm10,>in2=%r8
2591movd %xmm10,%r8
2592
2593# qhasm: in3 = z3
2594# asm 1: movd <z3=int6464#5,>in3=int64#6
2595# asm 2: movd <z3=%xmm4,>in3=%r9
2596movd %xmm4,%r9
2597
2598# qhasm: (uint32) in0 ^= *(uint32 *) (m + 192)
2599# asm 1: xorl 192(<m=int64#2),<in0=int64#3d
2600# asm 2: xorl 192(<m=%rsi),<in0=%edx
2601xorl 192(%rsi),%edx
2602
2603# qhasm: (uint32) in1 ^= *(uint32 *) (m + 196)
2604# asm 1: xorl 196(<m=int64#2),<in1=int64#4d
2605# asm 2: xorl 196(<m=%rsi),<in1=%ecx
2606xorl 196(%rsi),%ecx
2607
2608# qhasm: (uint32) in2 ^= *(uint32 *) (m + 200)
2609# asm 1: xorl 200(<m=int64#2),<in2=int64#5d
2610# asm 2: xorl 200(<m=%rsi),<in2=%r8d
2611xorl 200(%rsi),%r8d
2612
2613# qhasm: (uint32) in3 ^= *(uint32 *) (m + 204)
2614# asm 1: xorl 204(<m=int64#2),<in3=int64#6d
2615# asm 2: xorl 204(<m=%rsi),<in3=%r9d
2616xorl 204(%rsi),%r9d
2617
2618# qhasm: *(uint32 *) (out + 192) = in0
2619# asm 1: movl <in0=int64#3d,192(<out=int64#1)
2620# asm 2: movl <in0=%edx,192(<out=%rdi)
2621movl %edx,192(%rdi)
2622
2623# qhasm: *(uint32 *) (out + 196) = in1
2624# asm 1: movl <in1=int64#4d,196(<out=int64#1)
2625# asm 2: movl <in1=%ecx,196(<out=%rdi)
2626movl %ecx,196(%rdi)
2627
2628# qhasm: *(uint32 *) (out + 200) = in2
2629# asm 1: movl <in2=int64#5d,200(<out=int64#1)
2630# asm 2: movl <in2=%r8d,200(<out=%rdi)
2631movl %r8d,200(%rdi)
2632
2633# qhasm: *(uint32 *) (out + 204) = in3
2634# asm 1: movl <in3=int64#6d,204(<out=int64#1)
2635# asm 2: movl <in3=%r9d,204(<out=%rdi)
2636movl %r9d,204(%rdi)
2637
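# Words 0..3 are done: each z register carried one state word for four
# independent 64-byte blocks, so its four lanes land 64 bytes apart, at
# output offsets 0, 64, 128 and 192. The same peel/rotate/xor/store
# pattern repeats below for words 4..7, 8..11 and 12..15.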
2638# qhasm: uint32323232 z4 += orig4
2639# asm 1: paddd <orig4=stack128#16,<z4=int6464#15
2640# asm 2: paddd <orig4=240(%rsp),<z4=%xmm14
2641paddd 240(%rsp),%xmm14
2642
2643# qhasm: uint32323232 z5 += orig5
2644# asm 1: paddd <orig5=stack128#5,<z5=int6464#1
2645# asm 2: paddd <orig5=64(%rsp),<z5=%xmm0
2646paddd 64(%rsp),%xmm0
2647
2648# qhasm: uint32323232 z6 += orig6
2649# asm 1: paddd <orig6=stack128#9,<z6=int6464#6
2650# asm 2: paddd <orig6=128(%rsp),<z6=%xmm5
2651paddd 128(%rsp),%xmm5
2652
2653# qhasm: uint32323232 z7 += orig7
2654# asm 1: paddd <orig7=stack128#13,<z7=int6464#9
2655# asm 2: paddd <orig7=192(%rsp),<z7=%xmm8
2656paddd 192(%rsp),%xmm8
2657
2658# qhasm: in4 = z4
2659# asm 1: movd <z4=int6464#15,>in4=int64#3
2660# asm 2: movd <z4=%xmm14,>in4=%rdx
2661movd %xmm14,%rdx
2662
2663# qhasm: in5 = z5
2664# asm 1: movd <z5=int6464#1,>in5=int64#4
2665# asm 2: movd <z5=%xmm0,>in5=%rcx
2666movd %xmm0,%rcx
2667
2668# qhasm: in6 = z6
2669# asm 1: movd <z6=int6464#6,>in6=int64#5
2670# asm 2: movd <z6=%xmm5,>in6=%r8
2671movd %xmm5,%r8
2672
2673# qhasm: in7 = z7
2674# asm 1: movd <z7=int6464#9,>in7=int64#6
2675# asm 2: movd <z7=%xmm8,>in7=%r9
2676movd %xmm8,%r9
2677
2678# qhasm: z4 <<<= 96
2679# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2680# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2681pshufd $0x39,%xmm14,%xmm14
2682
2683# qhasm: z5 <<<= 96
2684# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2685# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2686pshufd $0x39,%xmm0,%xmm0
2687
2688# qhasm: z6 <<<= 96
2689# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2690# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2691pshufd $0x39,%xmm5,%xmm5
2692
2693# qhasm: z7 <<<= 96
2694# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2695# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2696pshufd $0x39,%xmm8,%xmm8
2697
2698# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
2699# asm 1: xorl 16(<m=int64#2),<in4=int64#3d
2700# asm 2: xorl 16(<m=%rsi),<in4=%edx
2701xorl 16(%rsi),%edx
2702
2703# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
2704# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
2705# asm 2: xorl 20(<m=%rsi),<in5=%ecx
2706xorl 20(%rsi),%ecx
2707
2708# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
2709# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
2710# asm 2: xorl 24(<m=%rsi),<in6=%r8d
2711xorl 24(%rsi),%r8d
2712
2713# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
2714# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
2715# asm 2: xorl 28(<m=%rsi),<in7=%r9d
2716xorl 28(%rsi),%r9d
2717
2718# qhasm: *(uint32 *) (out + 16) = in4
2719# asm 1: movl <in4=int64#3d,16(<out=int64#1)
2720# asm 2: movl <in4=%edx,16(<out=%rdi)
2721movl %edx,16(%rdi)
2722
2723# qhasm: *(uint32 *) (out + 20) = in5
2724# asm 1: movl <in5=int64#4d,20(<out=int64#1)
2725# asm 2: movl <in5=%ecx,20(<out=%rdi)
2726movl %ecx,20(%rdi)
2727
2728# qhasm: *(uint32 *) (out + 24) = in6
2729# asm 1: movl <in6=int64#5d,24(<out=int64#1)
2730# asm 2: movl <in6=%r8d,24(<out=%rdi)
2731movl %r8d,24(%rdi)
2732
2733# qhasm: *(uint32 *) (out + 28) = in7
2734# asm 1: movl <in7=int64#6d,28(<out=int64#1)
2735# asm 2: movl <in7=%r9d,28(<out=%rdi)
2736movl %r9d,28(%rdi)
2737
2738# qhasm: in4 = z4
2739# asm 1: movd <z4=int6464#15,>in4=int64#3
2740# asm 2: movd <z4=%xmm14,>in4=%rdx
2741movd %xmm14,%rdx
2742
2743# qhasm: in5 = z5
2744# asm 1: movd <z5=int6464#1,>in5=int64#4
2745# asm 2: movd <z5=%xmm0,>in5=%rcx
2746movd %xmm0,%rcx
2747
2748# qhasm: in6 = z6
2749# asm 1: movd <z6=int6464#6,>in6=int64#5
2750# asm 2: movd <z6=%xmm5,>in6=%r8
2751movd %xmm5,%r8
2752
2753# qhasm: in7 = z7
2754# asm 1: movd <z7=int6464#9,>in7=int64#6
2755# asm 2: movd <z7=%xmm8,>in7=%r9
2756movd %xmm8,%r9
2757
2758# qhasm: z4 <<<= 96
2759# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2760# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2761pshufd $0x39,%xmm14,%xmm14
2762
2763# qhasm: z5 <<<= 96
2764# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2765# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2766pshufd $0x39,%xmm0,%xmm0
2767
2768# qhasm: z6 <<<= 96
2769# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2770# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2771pshufd $0x39,%xmm5,%xmm5
2772
2773# qhasm: z7 <<<= 96
2774# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2775# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2776pshufd $0x39,%xmm8,%xmm8
2777
2778# qhasm: (uint32) in4 ^= *(uint32 *) (m + 80)
2779# asm 1: xorl 80(<m=int64#2),<in4=int64#3d
2780# asm 2: xorl 80(<m=%rsi),<in4=%edx
2781xorl 80(%rsi),%edx
2782
2783# qhasm: (uint32) in5 ^= *(uint32 *) (m + 84)
2784# asm 1: xorl 84(<m=int64#2),<in5=int64#4d
2785# asm 2: xorl 84(<m=%rsi),<in5=%ecx
2786xorl 84(%rsi),%ecx
2787
2788# qhasm: (uint32) in6 ^= *(uint32 *) (m + 88)
2789# asm 1: xorl 88(<m=int64#2),<in6=int64#5d
2790# asm 2: xorl 88(<m=%rsi),<in6=%r8d
2791xorl 88(%rsi),%r8d
2792
2793# qhasm: (uint32) in7 ^= *(uint32 *) (m + 92)
2794# asm 1: xorl 92(<m=int64#2),<in7=int64#6d
2795# asm 2: xorl 92(<m=%rsi),<in7=%r9d
2796xorl 92(%rsi),%r9d
2797
2798# qhasm: *(uint32 *) (out + 80) = in4
2799# asm 1: movl <in4=int64#3d,80(<out=int64#1)
2800# asm 2: movl <in4=%edx,80(<out=%rdi)
2801movl %edx,80(%rdi)
2802
2803# qhasm: *(uint32 *) (out + 84) = in5
2804# asm 1: movl <in5=int64#4d,84(<out=int64#1)
2805# asm 2: movl <in5=%ecx,84(<out=%rdi)
2806movl %ecx,84(%rdi)
2807
2808# qhasm: *(uint32 *) (out + 88) = in6
2809# asm 1: movl <in6=int64#5d,88(<out=int64#1)
2810# asm 2: movl <in6=%r8d,88(<out=%rdi)
2811movl %r8d,88(%rdi)
2812
2813# qhasm: *(uint32 *) (out + 92) = in7
2814# asm 1: movl <in7=int64#6d,92(<out=int64#1)
2815# asm 2: movl <in7=%r9d,92(<out=%rdi)
2816movl %r9d,92(%rdi)
2817
2818# qhasm: in4 = z4
2819# asm 1: movd <z4=int6464#15,>in4=int64#3
2820# asm 2: movd <z4=%xmm14,>in4=%rdx
2821movd %xmm14,%rdx
2822
2823# qhasm: in5 = z5
2824# asm 1: movd <z5=int6464#1,>in5=int64#4
2825# asm 2: movd <z5=%xmm0,>in5=%rcx
2826movd %xmm0,%rcx
2827
2828# qhasm: in6 = z6
2829# asm 1: movd <z6=int6464#6,>in6=int64#5
2830# asm 2: movd <z6=%xmm5,>in6=%r8
2831movd %xmm5,%r8
2832
2833# qhasm: in7 = z7
2834# asm 1: movd <z7=int6464#9,>in7=int64#6
2835# asm 2: movd <z7=%xmm8,>in7=%r9
2836movd %xmm8,%r9
2837
2838# qhasm: z4 <<<= 96
2839# asm 1: pshufd $0x39,<z4=int6464#15,<z4=int6464#15
2840# asm 2: pshufd $0x39,<z4=%xmm14,<z4=%xmm14
2841pshufd $0x39,%xmm14,%xmm14
2842
2843# qhasm: z5 <<<= 96
2844# asm 1: pshufd $0x39,<z5=int6464#1,<z5=int6464#1
2845# asm 2: pshufd $0x39,<z5=%xmm0,<z5=%xmm0
2846pshufd $0x39,%xmm0,%xmm0
2847
2848# qhasm: z6 <<<= 96
2849# asm 1: pshufd $0x39,<z6=int6464#6,<z6=int6464#6
2850# asm 2: pshufd $0x39,<z6=%xmm5,<z6=%xmm5
2851pshufd $0x39,%xmm5,%xmm5
2852
2853# qhasm: z7 <<<= 96
2854# asm 1: pshufd $0x39,<z7=int6464#9,<z7=int6464#9
2855# asm 2: pshufd $0x39,<z7=%xmm8,<z7=%xmm8
2856pshufd $0x39,%xmm8,%xmm8
2857
2858# qhasm: (uint32) in4 ^= *(uint32 *) (m + 144)
2859# asm 1: xorl 144(<m=int64#2),<in4=int64#3d
2860# asm 2: xorl 144(<m=%rsi),<in4=%edx
2861xorl 144(%rsi),%edx
2862
2863# qhasm: (uint32) in5 ^= *(uint32 *) (m + 148)
2864# asm 1: xorl 148(<m=int64#2),<in5=int64#4d
2865# asm 2: xorl 148(<m=%rsi),<in5=%ecx
2866xorl 148(%rsi),%ecx
2867
2868# qhasm: (uint32) in6 ^= *(uint32 *) (m + 152)
2869# asm 1: xorl 152(<m=int64#2),<in6=int64#5d
2870# asm 2: xorl 152(<m=%rsi),<in6=%r8d
2871xorl 152(%rsi),%r8d
2872
2873# qhasm: (uint32) in7 ^= *(uint32 *) (m + 156)
2874# asm 1: xorl 156(<m=int64#2),<in7=int64#6d
2875# asm 2: xorl 156(<m=%rsi),<in7=%r9d
2876xorl 156(%rsi),%r9d
2877
2878# qhasm: *(uint32 *) (out + 144) = in4
2879# asm 1: movl <in4=int64#3d,144(<out=int64#1)
2880# asm 2: movl <in4=%edx,144(<out=%rdi)
2881movl %edx,144(%rdi)
2882
2883# qhasm: *(uint32 *) (out + 148) = in5
2884# asm 1: movl <in5=int64#4d,148(<out=int64#1)
2885# asm 2: movl <in5=%ecx,148(<out=%rdi)
2886movl %ecx,148(%rdi)
2887
2888# qhasm: *(uint32 *) (out + 152) = in6
2889# asm 1: movl <in6=int64#5d,152(<out=int64#1)
2890# asm 2: movl <in6=%r8d,152(<out=%rdi)
2891movl %r8d,152(%rdi)
2892
2893# qhasm: *(uint32 *) (out + 156) = in7
2894# asm 1: movl <in7=int64#6d,156(<out=int64#1)
2895# asm 2: movl <in7=%r9d,156(<out=%rdi)
2896movl %r9d,156(%rdi)
2897
2898# qhasm: in4 = z4
2899# asm 1: movd <z4=int6464#15,>in4=int64#3
2900# asm 2: movd <z4=%xmm14,>in4=%rdx
2901movd %xmm14,%rdx
2902
2903# qhasm: in5 = z5
2904# asm 1: movd <z5=int6464#1,>in5=int64#4
2905# asm 2: movd <z5=%xmm0,>in5=%rcx
2906movd %xmm0,%rcx
2907
2908# qhasm: in6 = z6
2909# asm 1: movd <z6=int6464#6,>in6=int64#5
2910# asm 2: movd <z6=%xmm5,>in6=%r8
2911movd %xmm5,%r8
2912
2913# qhasm: in7 = z7
2914# asm 1: movd <z7=int6464#9,>in7=int64#6
2915# asm 2: movd <z7=%xmm8,>in7=%r9
2916movd %xmm8,%r9
2917
2918# qhasm: (uint32) in4 ^= *(uint32 *) (m + 208)
2919# asm 1: xorl 208(<m=int64#2),<in4=int64#3d
2920# asm 2: xorl 208(<m=%rsi),<in4=%edx
2921xorl 208(%rsi),%edx
2922
2923# qhasm: (uint32) in5 ^= *(uint32 *) (m + 212)
2924# asm 1: xorl 212(<m=int64#2),<in5=int64#4d
2925# asm 2: xorl 212(<m=%rsi),<in5=%ecx
2926xorl 212(%rsi),%ecx
2927
2928# qhasm: (uint32) in6 ^= *(uint32 *) (m + 216)
2929# asm 1: xorl 216(<m=int64#2),<in6=int64#5d
2930# asm 2: xorl 216(<m=%rsi),<in6=%r8d
2931xorl 216(%rsi),%r8d
2932
2933# qhasm: (uint32) in7 ^= *(uint32 *) (m + 220)
2934# asm 1: xorl 220(<m=int64#2),<in7=int64#6d
2935# asm 2: xorl 220(<m=%rsi),<in7=%r9d
2936xorl 220(%rsi),%r9d
2937
2938# qhasm: *(uint32 *) (out + 208) = in4
2939# asm 1: movl <in4=int64#3d,208(<out=int64#1)
2940# asm 2: movl <in4=%edx,208(<out=%rdi)
2941movl %edx,208(%rdi)
2942
2943# qhasm: *(uint32 *) (out + 212) = in5
2944# asm 1: movl <in5=int64#4d,212(<out=int64#1)
2945# asm 2: movl <in5=%ecx,212(<out=%rdi)
2946movl %ecx,212(%rdi)
2947
2948# qhasm: *(uint32 *) (out + 216) = in6
2949# asm 1: movl <in6=int64#5d,216(<out=int64#1)
2950# asm 2: movl <in6=%r8d,216(<out=%rdi)
2951movl %r8d,216(%rdi)
2952
2953# qhasm: *(uint32 *) (out + 220) = in7
2954# asm 1: movl <in7=int64#6d,220(<out=int64#1)
2955# asm 2: movl <in7=%r9d,220(<out=%rdi)
2956movl %r9d,220(%rdi)
2957
2958# qhasm: uint32323232 z8 += orig8
2959# asm 1: paddd <orig8=stack128#19,<z8=int6464#16
2960# asm 2: paddd <orig8=288(%rsp),<z8=%xmm15
2961paddd 288(%rsp),%xmm15
2962
2963# qhasm: uint32323232 z9 += orig9
2964# asm 1: paddd <orig9=stack128#20,<z9=int6464#12
2965# asm 2: paddd <orig9=304(%rsp),<z9=%xmm11
2966paddd 304(%rsp),%xmm11
2967
2968# qhasm: uint32323232 z10 += orig10
2969# asm 1: paddd <orig10=stack128#6,<z10=int6464#2
2970# asm 2: paddd <orig10=80(%rsp),<z10=%xmm1
2971paddd 80(%rsp),%xmm1
2972
2973# qhasm: uint32323232 z11 += orig11
2974# asm 1: paddd <orig11=stack128#10,<z11=int6464#7
2975# asm 2: paddd <orig11=144(%rsp),<z11=%xmm6
2976paddd 144(%rsp),%xmm6
2977
2978# qhasm: in8 = z8
2979# asm 1: movd <z8=int6464#16,>in8=int64#3
2980# asm 2: movd <z8=%xmm15,>in8=%rdx
2981movd %xmm15,%rdx
2982
2983# qhasm: in9 = z9
2984# asm 1: movd <z9=int6464#12,>in9=int64#4
2985# asm 2: movd <z9=%xmm11,>in9=%rcx
2986movd %xmm11,%rcx
2987
2988# qhasm: in10 = z10
2989# asm 1: movd <z10=int6464#2,>in10=int64#5
2990# asm 2: movd <z10=%xmm1,>in10=%r8
2991movd %xmm1,%r8
2992
2993# qhasm: in11 = z11
2994# asm 1: movd <z11=int6464#7,>in11=int64#6
2995# asm 2: movd <z11=%xmm6,>in11=%r9
2996movd %xmm6,%r9
2997
2998# qhasm: z8 <<<= 96
2999# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3000# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3001pshufd $0x39,%xmm15,%xmm15
3002
3003# qhasm: z9 <<<= 96
3004# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3005# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3006pshufd $0x39,%xmm11,%xmm11
3007
3008# qhasm: z10 <<<= 96
3009# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3010# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3011pshufd $0x39,%xmm1,%xmm1
3012
3013# qhasm: z11 <<<= 96
3014# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3015# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3016pshufd $0x39,%xmm6,%xmm6
3017
3018# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
3019# asm 1: xorl 32(<m=int64#2),<in8=int64#3d
3020# asm 2: xorl 32(<m=%rsi),<in8=%edx
3021xorl 32(%rsi),%edx
3022
3023# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
3024# asm 1: xorl 36(<m=int64#2),<in9=int64#4d
3025# asm 2: xorl 36(<m=%rsi),<in9=%ecx
3026xorl 36(%rsi),%ecx
3027
3028# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
3029# asm 1: xorl 40(<m=int64#2),<in10=int64#5d
3030# asm 2: xorl 40(<m=%rsi),<in10=%r8d
3031xorl 40(%rsi),%r8d
3032
3033# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
3034# asm 1: xorl 44(<m=int64#2),<in11=int64#6d
3035# asm 2: xorl 44(<m=%rsi),<in11=%r9d
3036xorl 44(%rsi),%r9d
3037
3038# qhasm: *(uint32 *) (out + 32) = in8
3039# asm 1: movl <in8=int64#3d,32(<out=int64#1)
3040# asm 2: movl <in8=%edx,32(<out=%rdi)
3041movl %edx,32(%rdi)
3042
3043# qhasm: *(uint32 *) (out + 36) = in9
3044# asm 1: movl <in9=int64#4d,36(<out=int64#1)
3045# asm 2: movl <in9=%ecx,36(<out=%rdi)
3046movl %ecx,36(%rdi)
3047
3048# qhasm: *(uint32 *) (out + 40) = in10
3049# asm 1: movl <in10=int64#5d,40(<out=int64#1)
3050# asm 2: movl <in10=%r8d,40(<out=%rdi)
3051movl %r8d,40(%rdi)
3052
3053# qhasm: *(uint32 *) (out + 44) = in11
3054# asm 1: movl <in11=int64#6d,44(<out=int64#1)
3055# asm 2: movl <in11=%r9d,44(<out=%rdi)
3056movl %r9d,44(%rdi)
3057
3058# qhasm: in8 = z8
3059# asm 1: movd <z8=int6464#16,>in8=int64#3
3060# asm 2: movd <z8=%xmm15,>in8=%rdx
3061movd %xmm15,%rdx
3062
3063# qhasm: in9 = z9
3064# asm 1: movd <z9=int6464#12,>in9=int64#4
3065# asm 2: movd <z9=%xmm11,>in9=%rcx
3066movd %xmm11,%rcx
3067
3068# qhasm: in10 = z10
3069# asm 1: movd <z10=int6464#2,>in10=int64#5
3070# asm 2: movd <z10=%xmm1,>in10=%r8
3071movd %xmm1,%r8
3072
3073# qhasm: in11 = z11
3074# asm 1: movd <z11=int6464#7,>in11=int64#6
3075# asm 2: movd <z11=%xmm6,>in11=%r9
3076movd %xmm6,%r9
3077
3078# qhasm: z8 <<<= 96
3079# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3080# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3081pshufd $0x39,%xmm15,%xmm15
3082
3083# qhasm: z9 <<<= 96
3084# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3085# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3086pshufd $0x39,%xmm11,%xmm11
3087
3088# qhasm: z10 <<<= 96
3089# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3090# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3091pshufd $0x39,%xmm1,%xmm1
3092
3093# qhasm: z11 <<<= 96
3094# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3095# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3096pshufd $0x39,%xmm6,%xmm6
3097
3098# qhasm: (uint32) in8 ^= *(uint32 *) (m + 96)
3099# asm 1: xorl 96(<m=int64#2),<in8=int64#3d
3100# asm 2: xorl 96(<m=%rsi),<in8=%edx
3101xorl 96(%rsi),%edx
3102
3103# qhasm: (uint32) in9 ^= *(uint32 *) (m + 100)
3104# asm 1: xorl 100(<m=int64#2),<in9=int64#4d
3105# asm 2: xorl 100(<m=%rsi),<in9=%ecx
3106xorl 100(%rsi),%ecx
3107
3108# qhasm: (uint32) in10 ^= *(uint32 *) (m + 104)
3109# asm 1: xorl 104(<m=int64#2),<in10=int64#5d
3110# asm 2: xorl 104(<m=%rsi),<in10=%r8d
3111xorl 104(%rsi),%r8d
3112
3113# qhasm: (uint32) in11 ^= *(uint32 *) (m + 108)
3114# asm 1: xorl 108(<m=int64#2),<in11=int64#6d
3115# asm 2: xorl 108(<m=%rsi),<in11=%r9d
3116xorl 108(%rsi),%r9d
3117
3118# qhasm: *(uint32 *) (out + 96) = in8
3119# asm 1: movl <in8=int64#3d,96(<out=int64#1)
3120# asm 2: movl <in8=%edx,96(<out=%rdi)
3121movl %edx,96(%rdi)
3122
3123# qhasm: *(uint32 *) (out + 100) = in9
3124# asm 1: movl <in9=int64#4d,100(<out=int64#1)
3125# asm 2: movl <in9=%ecx,100(<out=%rdi)
3126movl %ecx,100(%rdi)
3127
3128# qhasm: *(uint32 *) (out + 104) = in10
3129# asm 1: movl <in10=int64#5d,104(<out=int64#1)
3130# asm 2: movl <in10=%r8d,104(<out=%rdi)
3131movl %r8d,104(%rdi)
3132
3133# qhasm: *(uint32 *) (out + 108) = in11
3134# asm 1: movl <in11=int64#6d,108(<out=int64#1)
3135# asm 2: movl <in11=%r9d,108(<out=%rdi)
3136movl %r9d,108(%rdi)
3137
3138# qhasm: in8 = z8
3139# asm 1: movd <z8=int6464#16,>in8=int64#3
3140# asm 2: movd <z8=%xmm15,>in8=%rdx
3141movd %xmm15,%rdx
3142
3143# qhasm: in9 = z9
3144# asm 1: movd <z9=int6464#12,>in9=int64#4
3145# asm 2: movd <z9=%xmm11,>in9=%rcx
3146movd %xmm11,%rcx
3147
3148# qhasm: in10 = z10
3149# asm 1: movd <z10=int6464#2,>in10=int64#5
3150# asm 2: movd <z10=%xmm1,>in10=%r8
3151movd %xmm1,%r8
3152
3153# qhasm: in11 = z11
3154# asm 1: movd <z11=int6464#7,>in11=int64#6
3155# asm 2: movd <z11=%xmm6,>in11=%r9
3156movd %xmm6,%r9
3157
3158# qhasm: z8 <<<= 96
3159# asm 1: pshufd $0x39,<z8=int6464#16,<z8=int6464#16
3160# asm 2: pshufd $0x39,<z8=%xmm15,<z8=%xmm15
3161pshufd $0x39,%xmm15,%xmm15
3162
3163# qhasm: z9 <<<= 96
3164# asm 1: pshufd $0x39,<z9=int6464#12,<z9=int6464#12
3165# asm 2: pshufd $0x39,<z9=%xmm11,<z9=%xmm11
3166pshufd $0x39,%xmm11,%xmm11
3167
3168# qhasm: z10 <<<= 96
3169# asm 1: pshufd $0x39,<z10=int6464#2,<z10=int6464#2
3170# asm 2: pshufd $0x39,<z10=%xmm1,<z10=%xmm1
3171pshufd $0x39,%xmm1,%xmm1
3172
3173# qhasm: z11 <<<= 96
3174# asm 1: pshufd $0x39,<z11=int6464#7,<z11=int6464#7
3175# asm 2: pshufd $0x39,<z11=%xmm6,<z11=%xmm6
3176pshufd $0x39,%xmm6,%xmm6
3177
3178# qhasm: (uint32) in8 ^= *(uint32 *) (m + 160)
3179# asm 1: xorl 160(<m=int64#2),<in8=int64#3d
3180# asm 2: xorl 160(<m=%rsi),<in8=%edx
3181xorl 160(%rsi),%edx
3182
3183# qhasm: (uint32) in9 ^= *(uint32 *) (m + 164)
3184# asm 1: xorl 164(<m=int64#2),<in9=int64#4d
3185# asm 2: xorl 164(<m=%rsi),<in9=%ecx
3186xorl 164(%rsi),%ecx
3187
3188# qhasm: (uint32) in10 ^= *(uint32 *) (m + 168)
3189# asm 1: xorl 168(<m=int64#2),<in10=int64#5d
3190# asm 2: xorl 168(<m=%rsi),<in10=%r8d
3191xorl 168(%rsi),%r8d
3192
3193# qhasm: (uint32) in11 ^= *(uint32 *) (m + 172)
3194# asm 1: xorl 172(<m=int64#2),<in11=int64#6d
3195# asm 2: xorl 172(<m=%rsi),<in11=%r9d
3196xorl 172(%rsi),%r9d
3197
3198# qhasm: *(uint32 *) (out + 160) = in8
3199# asm 1: movl <in8=int64#3d,160(<out=int64#1)
3200# asm 2: movl <in8=%edx,160(<out=%rdi)
3201movl %edx,160(%rdi)
3202
3203# qhasm: *(uint32 *) (out + 164) = in9
3204# asm 1: movl <in9=int64#4d,164(<out=int64#1)
3205# asm 2: movl <in9=%ecx,164(<out=%rdi)
3206movl %ecx,164(%rdi)
3207
3208# qhasm: *(uint32 *) (out + 168) = in10
3209# asm 1: movl <in10=int64#5d,168(<out=int64#1)
3210# asm 2: movl <in10=%r8d,168(<out=%rdi)
3211movl %r8d,168(%rdi)
3212
3213# qhasm: *(uint32 *) (out + 172) = in11
3214# asm 1: movl <in11=int64#6d,172(<out=int64#1)
3215# asm 2: movl <in11=%r9d,172(<out=%rdi)
3216movl %r9d,172(%rdi)
3217
3218# qhasm: in8 = z8
3219# asm 1: movd <z8=int6464#16,>in8=int64#3
3220# asm 2: movd <z8=%xmm15,>in8=%rdx
3221movd %xmm15,%rdx
3222
3223# qhasm: in9 = z9
3224# asm 1: movd <z9=int6464#12,>in9=int64#4
3225# asm 2: movd <z9=%xmm11,>in9=%rcx
3226movd %xmm11,%rcx
3227
3228# qhasm: in10 = z10
3229# asm 1: movd <z10=int6464#2,>in10=int64#5
3230# asm 2: movd <z10=%xmm1,>in10=%r8
3231movd %xmm1,%r8
3232
3233# qhasm: in11 = z11
3234# asm 1: movd <z11=int6464#7,>in11=int64#6
3235# asm 2: movd <z11=%xmm6,>in11=%r9
3236movd %xmm6,%r9
3237
3238# qhasm: (uint32) in8 ^= *(uint32 *) (m + 224)
3239# asm 1: xorl 224(<m=int64#2),<in8=int64#3d
3240# asm 2: xorl 224(<m=%rsi),<in8=%edx
3241xorl 224(%rsi),%edx
3242
3243# qhasm: (uint32) in9 ^= *(uint32 *) (m + 228)
3244# asm 1: xorl 228(<m=int64#2),<in9=int64#4d
3245# asm 2: xorl 228(<m=%rsi),<in9=%ecx
3246xorl 228(%rsi),%ecx
3247
3248# qhasm: (uint32) in10 ^= *(uint32 *) (m + 232)
3249# asm 1: xorl 232(<m=int64#2),<in10=int64#5d
3250# asm 2: xorl 232(<m=%rsi),<in10=%r8d
3251xorl 232(%rsi),%r8d
3252
3253# qhasm: (uint32) in11 ^= *(uint32 *) (m + 236)
3254# asm 1: xorl 236(<m=int64#2),<in11=int64#6d
3255# asm 2: xorl 236(<m=%rsi),<in11=%r9d
3256xorl 236(%rsi),%r9d
3257
3258# qhasm: *(uint32 *) (out + 224) = in8
3259# asm 1: movl <in8=int64#3d,224(<out=int64#1)
3260# asm 2: movl <in8=%edx,224(<out=%rdi)
3261movl %edx,224(%rdi)
3262
3263# qhasm: *(uint32 *) (out + 228) = in9
3264# asm 1: movl <in9=int64#4d,228(<out=int64#1)
3265# asm 2: movl <in9=%ecx,228(<out=%rdi)
3266movl %ecx,228(%rdi)
3267
3268# qhasm: *(uint32 *) (out + 232) = in10
3269# asm 1: movl <in10=int64#5d,232(<out=int64#1)
3270# asm 2: movl <in10=%r8d,232(<out=%rdi)
3271movl %r8d,232(%rdi)
3272
3273# qhasm: *(uint32 *) (out + 236) = in11
3274# asm 1: movl <in11=int64#6d,236(<out=int64#1)
3275# asm 2: movl <in11=%r9d,236(<out=%rdi)
3276movl %r9d,236(%rdi)
3277
3278# qhasm: uint32323232 z12 += orig12
3279# asm 1: paddd <orig12=stack128#11,<z12=int6464#14
3280# asm 2: paddd <orig12=160(%rsp),<z12=%xmm13
3281paddd 160(%rsp),%xmm13
3282
3283# qhasm: uint32323232 z13 += orig13
3284# asm 1: paddd <orig13=stack128#14,<z13=int6464#10
3285# asm 2: paddd <orig13=208(%rsp),<z13=%xmm9
3286paddd 208(%rsp),%xmm9
3287
3288# qhasm: uint32323232 z14 += orig14
3289# asm 1: paddd <orig14=stack128#17,<z14=int6464#4
3290# asm 2: paddd <orig14=256(%rsp),<z14=%xmm3
3291paddd 256(%rsp),%xmm3
3292
3293# qhasm: uint32323232 z15 += orig15
3294# asm 1: paddd <orig15=stack128#7,<z15=int6464#3
3295# asm 2: paddd <orig15=96(%rsp),<z15=%xmm2
3296paddd 96(%rsp),%xmm2
3297
3298# qhasm: in12 = z12
3299# asm 1: movd <z12=int6464#14,>in12=int64#3
3300# asm 2: movd <z12=%xmm13,>in12=%rdx
3301movd %xmm13,%rdx
3302
3303# qhasm: in13 = z13
3304# asm 1: movd <z13=int6464#10,>in13=int64#4
3305# asm 2: movd <z13=%xmm9,>in13=%rcx
3306movd %xmm9,%rcx
3307
3308# qhasm: in14 = z14
3309# asm 1: movd <z14=int6464#4,>in14=int64#5
3310# asm 2: movd <z14=%xmm3,>in14=%r8
3311movd %xmm3,%r8
3312
3313# qhasm: in15 = z15
3314# asm 1: movd <z15=int6464#3,>in15=int64#6
3315# asm 2: movd <z15=%xmm2,>in15=%r9
3316movd %xmm2,%r9
3317
3318# qhasm: z12 <<<= 96
3319# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3320# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3321pshufd $0x39,%xmm13,%xmm13
3322
3323# qhasm: z13 <<<= 96
3324# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3325# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3326pshufd $0x39,%xmm9,%xmm9
3327
3328# qhasm: z14 <<<= 96
3329# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3330# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3331pshufd $0x39,%xmm3,%xmm3
3332
3333# qhasm: z15 <<<= 96
3334# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3335# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3336pshufd $0x39,%xmm2,%xmm2
3337
3338# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
3339# asm 1: xorl 48(<m=int64#2),<in12=int64#3d
3340# asm 2: xorl 48(<m=%rsi),<in12=%edx
3341xorl 48(%rsi),%edx
3342
3343# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
3344# asm 1: xorl 52(<m=int64#2),<in13=int64#4d
3345# asm 2: xorl 52(<m=%rsi),<in13=%ecx
3346xorl 52(%rsi),%ecx
3347
3348# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
3349# asm 1: xorl 56(<m=int64#2),<in14=int64#5d
3350# asm 2: xorl 56(<m=%rsi),<in14=%r8d
3351xorl 56(%rsi),%r8d
3352
3353# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
3354# asm 1: xorl 60(<m=int64#2),<in15=int64#6d
3355# asm 2: xorl 60(<m=%rsi),<in15=%r9d
3356xorl 60(%rsi),%r9d
3357
3358# qhasm: *(uint32 *) (out + 48) = in12
3359# asm 1: movl <in12=int64#3d,48(<out=int64#1)
3360# asm 2: movl <in12=%edx,48(<out=%rdi)
3361movl %edx,48(%rdi)
3362
3363# qhasm: *(uint32 *) (out + 52) = in13
3364# asm 1: movl <in13=int64#4d,52(<out=int64#1)
3365# asm 2: movl <in13=%ecx,52(<out=%rdi)
3366movl %ecx,52(%rdi)
3367
3368# qhasm: *(uint32 *) (out + 56) = in14
3369# asm 1: movl <in14=int64#5d,56(<out=int64#1)
3370# asm 2: movl <in14=%r8d,56(<out=%rdi)
3371movl %r8d,56(%rdi)
3372
3373# qhasm: *(uint32 *) (out + 60) = in15
3374# asm 1: movl <in15=int64#6d,60(<out=int64#1)
3375# asm 2: movl <in15=%r9d,60(<out=%rdi)
3376movl %r9d,60(%rdi)
3377
3378# qhasm: in12 = z12
3379# asm 1: movd <z12=int6464#14,>in12=int64#3
3380# asm 2: movd <z12=%xmm13,>in12=%rdx
3381movd %xmm13,%rdx
3382
3383# qhasm: in13 = z13
3384# asm 1: movd <z13=int6464#10,>in13=int64#4
3385# asm 2: movd <z13=%xmm9,>in13=%rcx
3386movd %xmm9,%rcx
3387
3388# qhasm: in14 = z14
3389# asm 1: movd <z14=int6464#4,>in14=int64#5
3390# asm 2: movd <z14=%xmm3,>in14=%r8
3391movd %xmm3,%r8
3392
3393# qhasm: in15 = z15
3394# asm 1: movd <z15=int6464#3,>in15=int64#6
3395# asm 2: movd <z15=%xmm2,>in15=%r9
3396movd %xmm2,%r9
3397
3398# qhasm: z12 <<<= 96
3399# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3400# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3401pshufd $0x39,%xmm13,%xmm13
3402
3403# qhasm: z13 <<<= 96
3404# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3405# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3406pshufd $0x39,%xmm9,%xmm9
3407
3408# qhasm: z14 <<<= 96
3409# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3410# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3411pshufd $0x39,%xmm3,%xmm3
3412
3413# qhasm: z15 <<<= 96
3414# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3415# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3416pshufd $0x39,%xmm2,%xmm2
3417
3418# qhasm: (uint32) in12 ^= *(uint32 *) (m + 112)
3419# asm 1: xorl 112(<m=int64#2),<in12=int64#3d
3420# asm 2: xorl 112(<m=%rsi),<in12=%edx
3421xorl 112(%rsi),%edx
3422
3423# qhasm: (uint32) in13 ^= *(uint32 *) (m + 116)
3424# asm 1: xorl 116(<m=int64#2),<in13=int64#4d
3425# asm 2: xorl 116(<m=%rsi),<in13=%ecx
3426xorl 116(%rsi),%ecx
3427
3428# qhasm: (uint32) in14 ^= *(uint32 *) (m + 120)
3429# asm 1: xorl 120(<m=int64#2),<in14=int64#5d
3430# asm 2: xorl 120(<m=%rsi),<in14=%r8d
3431xorl 120(%rsi),%r8d
3432
3433# qhasm: (uint32) in15 ^= *(uint32 *) (m + 124)
3434# asm 1: xorl 124(<m=int64#2),<in15=int64#6d
3435# asm 2: xorl 124(<m=%rsi),<in15=%r9d
3436xorl 124(%rsi),%r9d
3437
3438# qhasm: *(uint32 *) (out + 112) = in12
3439# asm 1: movl <in12=int64#3d,112(<out=int64#1)
3440# asm 2: movl <in12=%edx,112(<out=%rdi)
3441movl %edx,112(%rdi)
3442
3443# qhasm: *(uint32 *) (out + 116) = in13
3444# asm 1: movl <in13=int64#4d,116(<out=int64#1)
3445# asm 2: movl <in13=%ecx,116(<out=%rdi)
3446movl %ecx,116(%rdi)
3447
3448# qhasm: *(uint32 *) (out + 120) = in14
3449# asm 1: movl <in14=int64#5d,120(<out=int64#1)
3450# asm 2: movl <in14=%r8d,120(<out=%rdi)
3451movl %r8d,120(%rdi)
3452
3453# qhasm: *(uint32 *) (out + 124) = in15
3454# asm 1: movl <in15=int64#6d,124(<out=int64#1)
3455# asm 2: movl <in15=%r9d,124(<out=%rdi)
3456movl %r9d,124(%rdi)
3457
3458# qhasm: in12 = z12
3459# asm 1: movd <z12=int6464#14,>in12=int64#3
3460# asm 2: movd <z12=%xmm13,>in12=%rdx
3461movd %xmm13,%rdx
3462
3463# qhasm: in13 = z13
3464# asm 1: movd <z13=int6464#10,>in13=int64#4
3465# asm 2: movd <z13=%xmm9,>in13=%rcx
3466movd %xmm9,%rcx
3467
3468# qhasm: in14 = z14
3469# asm 1: movd <z14=int6464#4,>in14=int64#5
3470# asm 2: movd <z14=%xmm3,>in14=%r8
3471movd %xmm3,%r8
3472
3473# qhasm: in15 = z15
3474# asm 1: movd <z15=int6464#3,>in15=int64#6
3475# asm 2: movd <z15=%xmm2,>in15=%r9
3476movd %xmm2,%r9
3477
3478# qhasm: z12 <<<= 96
3479# asm 1: pshufd $0x39,<z12=int6464#14,<z12=int6464#14
3480# asm 2: pshufd $0x39,<z12=%xmm13,<z12=%xmm13
3481pshufd $0x39,%xmm13,%xmm13
3482
3483# qhasm: z13 <<<= 96
3484# asm 1: pshufd $0x39,<z13=int6464#10,<z13=int6464#10
3485# asm 2: pshufd $0x39,<z13=%xmm9,<z13=%xmm9
3486pshufd $0x39,%xmm9,%xmm9
3487
3488# qhasm: z14 <<<= 96
3489# asm 1: pshufd $0x39,<z14=int6464#4,<z14=int6464#4
3490# asm 2: pshufd $0x39,<z14=%xmm3,<z14=%xmm3
3491pshufd $0x39,%xmm3,%xmm3
3492
3493# qhasm: z15 <<<= 96
3494# asm 1: pshufd $0x39,<z15=int6464#3,<z15=int6464#3
3495# asm 2: pshufd $0x39,<z15=%xmm2,<z15=%xmm2
3496pshufd $0x39,%xmm2,%xmm2
3497
3498# qhasm: (uint32) in12 ^= *(uint32 *) (m + 176)
3499# asm 1: xorl 176(<m=int64#2),<in12=int64#3d
3500# asm 2: xorl 176(<m=%rsi),<in12=%edx
3501xorl 176(%rsi),%edx
3502
3503# qhasm: (uint32) in13 ^= *(uint32 *) (m + 180)
3504# asm 1: xorl 180(<m=int64#2),<in13=int64#4d
3505# asm 2: xorl 180(<m=%rsi),<in13=%ecx
3506xorl 180(%rsi),%ecx
3507
3508# qhasm: (uint32) in14 ^= *(uint32 *) (m + 184)
3509# asm 1: xorl 184(<m=int64#2),<in14=int64#5d
3510# asm 2: xorl 184(<m=%rsi),<in14=%r8d
3511xorl 184(%rsi),%r8d
3512
3513# qhasm: (uint32) in15 ^= *(uint32 *) (m + 188)
3514# asm 1: xorl 188(<m=int64#2),<in15=int64#6d
3515# asm 2: xorl 188(<m=%rsi),<in15=%r9d
3516xorl 188(%rsi),%r9d
3517
3518# qhasm: *(uint32 *) (out + 176) = in12
3519# asm 1: movl <in12=int64#3d,176(<out=int64#1)
3520# asm 2: movl <in12=%edx,176(<out=%rdi)
3521movl %edx,176(%rdi)
3522
3523# qhasm: *(uint32 *) (out + 180) = in13
3524# asm 1: movl <in13=int64#4d,180(<out=int64#1)
3525# asm 2: movl <in13=%ecx,180(<out=%rdi)
3526movl %ecx,180(%rdi)
3527
3528# qhasm: *(uint32 *) (out + 184) = in14
3529# asm 1: movl <in14=int64#5d,184(<out=int64#1)
3530# asm 2: movl <in14=%r8d,184(<out=%rdi)
3531movl %r8d,184(%rdi)
3532
3533# qhasm: *(uint32 *) (out + 188) = in15
3534# asm 1: movl <in15=int64#6d,188(<out=int64#1)
3535# asm 2: movl <in15=%r9d,188(<out=%rdi)
3536movl %r9d,188(%rdi)
3537
3538# qhasm: in12 = z12
3539# asm 1: movd <z12=int6464#14,>in12=int64#3
3540# asm 2: movd <z12=%xmm13,>in12=%rdx
3541movd %xmm13,%rdx
3542
3543# qhasm: in13 = z13
3544# asm 1: movd <z13=int6464#10,>in13=int64#4
3545# asm 2: movd <z13=%xmm9,>in13=%rcx
3546movd %xmm9,%rcx
3547
3548# qhasm: in14 = z14
3549# asm 1: movd <z14=int6464#4,>in14=int64#5
3550# asm 2: movd <z14=%xmm3,>in14=%r8
3551movd %xmm3,%r8
3552
3553# qhasm: in15 = z15
3554# asm 1: movd <z15=int6464#3,>in15=int64#6
3555# asm 2: movd <z15=%xmm2,>in15=%r9
3556movd %xmm2,%r9
3557
3558# qhasm: (uint32) in12 ^= *(uint32 *) (m + 240)
3559# asm 1: xorl 240(<m=int64#2),<in12=int64#3d
3560# asm 2: xorl 240(<m=%rsi),<in12=%edx
3561xorl 240(%rsi),%edx
3562
3563# qhasm: (uint32) in13 ^= *(uint32 *) (m + 244)
3564# asm 1: xorl 244(<m=int64#2),<in13=int64#4d
3565# asm 2: xorl 244(<m=%rsi),<in13=%ecx
3566xorl 244(%rsi),%ecx
3567
3568# qhasm: (uint32) in14 ^= *(uint32 *) (m + 248)
3569# asm 1: xorl 248(<m=int64#2),<in14=int64#5d
3570# asm 2: xorl 248(<m=%rsi),<in14=%r8d
3571xorl 248(%rsi),%r8d
3572
3573# qhasm: (uint32) in15 ^= *(uint32 *) (m + 252)
3574# asm 1: xorl 252(<m=int64#2),<in15=int64#6d
3575# asm 2: xorl 252(<m=%rsi),<in15=%r9d
3576xorl 252(%rsi),%r9d
3577
3578# qhasm: *(uint32 *) (out + 240) = in12
3579# asm 1: movl <in12=int64#3d,240(<out=int64#1)
3580# asm 2: movl <in12=%edx,240(<out=%rdi)
3581movl %edx,240(%rdi)
3582
3583# qhasm: *(uint32 *) (out + 244) = in13
3584# asm 1: movl <in13=int64#4d,244(<out=int64#1)
3585# asm 2: movl <in13=%ecx,244(<out=%rdi)
3586movl %ecx,244(%rdi)
3587
3588# qhasm: *(uint32 *) (out + 248) = in14
3589# asm 1: movl <in14=int64#5d,248(<out=int64#1)
3590# asm 2: movl <in14=%r8d,248(<out=%rdi)
3591movl %r8d,248(%rdi)
3592
3593# qhasm: *(uint32 *) (out + 252) = in15
3594# asm 1: movl <in15=int64#6d,252(<out=int64#1)
3595# asm 2: movl <in15=%r9d,252(<out=%rdi)
3596movl %r9d,252(%rdi)
3597
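# One 256-byte batch (four 64-byte blocks) is complete: reload the byte
# count, advance the message and output pointers, and loop back while at
# least 256 bytes remain.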
3598# qhasm: bytes = bytes_backup
3599# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
3600# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
3601movq 408(%rsp),%r9
3602
3603# qhasm: bytes -= 256
3604# asm 1: sub $256,<bytes=int64#6
3605# asm 2: sub $256,<bytes=%r9
3606sub $256,%r9
3607
3608# qhasm: m += 256
3609# asm 1: add $256,<m=int64#2
3610# asm 2: add $256,<m=%rsi
3611add $256,%rsi
3612
3613# qhasm: out += 256
3614# asm 1: add $256,<out=int64#1
3615# asm 2: add $256,<out=%rdi
3616add $256,%rdi
3617
3618# qhasm: unsigned<? bytes - 256
3619# asm 1: cmp $256,<bytes=int64#6
3620# asm 2: cmp $256,<bytes=%r9
3621cmp $256,%r9
3622# comment:fp stack unchanged by jump
3623
3624# qhasm: goto bytesatleast256 if !unsigned<
3625jae ._bytesatleast256
3626
3627# qhasm: unsigned>? bytes - 0
3628# asm 1: cmp $0,<bytes=int64#6
3629# asm 2: cmp $0,<bytes=%r9
3630cmp $0,%r9
3631# comment:fp stack unchanged by jump
3632
3633# qhasm: goto done if !unsigned>
3634jbe ._done
3635# comment:fp stack unchanged by fallthrough
3636
3637# qhasm: bytesbetween1and255:
3638._bytesbetween1and255:
3639
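# Tail path: between 1 and 255 bytes remain, handled one 64-byte block at
# a time by the single-block core below.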
3640# qhasm: unsigned<? bytes - 64
3641# asm 1: cmp $64,<bytes=int64#6
3642# asm 2: cmp $64,<bytes=%r9
3643cmp $64,%r9
3644# comment:fp stack unchanged by jump
3645
3646# qhasm: goto nocopy if !unsigned<
3647jae ._nocopy
3648
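# Fewer than 64 bytes left: the block core always reads and writes a full
# 64 bytes, so save the real output pointer in ctarget, copy the message
# tail into the 64-byte stack buffer tmp with rep movsb, and point both m
# and out at tmp, so that the real destination can be filled in from
# ctarget afterwards.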
3649# qhasm: ctarget = out
3650# asm 1: mov <out=int64#1,>ctarget=int64#3
3651# asm 2: mov <out=%rdi,>ctarget=%rdx
3652mov %rdi,%rdx
3653
3654# qhasm: out = &tmp
3655# asm 1: leaq <tmp=stack512#1,>out=int64#1
3656# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3657leaq 416(%rsp),%rdi
3658
3659# qhasm: i = bytes
3660# asm 1: mov <bytes=int64#6,>i=int64#4
3661# asm 2: mov <bytes=%r9,>i=%rcx
3662mov %r9,%rcx
3663
3664# qhasm: while (i) { *out++ = *m++; --i }
3665rep movsb
3666
3667# qhasm: out = &tmp
3668# asm 1: leaq <tmp=stack512#1,>out=int64#1
3669# asm 2: leaq <tmp=416(%rsp),>out=%rdi
3670leaq 416(%rsp),%rdi
3671
3672# qhasm: m = &tmp
3673# asm 1: leaq <tmp=stack512#1,>m=int64#2
3674# asm 2: leaq <tmp=416(%rsp),>m=%rsi
3675leaq 416(%rsp),%rsi
3676# comment:fp stack unchanged by fallthrough
3677
3678# qhasm: nocopy:
3679._nocopy:
3680
3681# qhasm: bytes_backup = bytes
3682# asm 1: movq <bytes=int64#6,>bytes_backup=stack64#8
3683# asm 2: movq <bytes=%r9,>bytes_backup=408(%rsp)
3684movq %r9,408(%rsp)
3685
3686# qhasm: diag0 = x0
3687# asm 1: movdqa <x0=stack128#4,>diag0=int6464#1
3688# asm 2: movdqa <x0=48(%rsp),>diag0=%xmm0
3689movdqa 48(%rsp),%xmm0
3690
3691# qhasm: diag1 = x1
3692# asm 1: movdqa <x1=stack128#1,>diag1=int6464#2
3693# asm 2: movdqa <x1=0(%rsp),>diag1=%xmm1
3694movdqa 0(%rsp),%xmm1
3695
3696# qhasm: diag2 = x2
3697# asm 1: movdqa <x2=stack128#2,>diag2=int6464#3
3698# asm 2: movdqa <x2=16(%rsp),>diag2=%xmm2
3699movdqa 16(%rsp),%xmm2
3700
3701# qhasm: diag3 = x3
3702# asm 1: movdqa <x3=stack128#3,>diag3=int6464#4
3703# asm 2: movdqa <x3=32(%rsp),>diag3=%xmm3
3704movdqa 32(%rsp),%xmm3
3705
3706# qhasm: a0 = diag1
3707# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3708# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3709movdqa %xmm1,%xmm4
3710
3711# qhasm: i = 12
3712# asm 1: mov $12,>i=int64#4
3713# asm 2: mov $12,>i=%rcx
3714mov $12,%rcx
3715
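# Single-block core. x0..x3 hold the state in the diagonal layout usual
# for SSE Salsa20 implementations (one xmm register per diagonal of the
# 4x4 state matrix), so a quarter-round becomes whole-register adds,
# shifts and XORs. i counts 12 rounds, two per pass. Illustrative sketch
# of one half of a double round (the other half follows the interleaved
# reshuffles):
#   diag3 ^= ROTL32(diag1 + diag0,  7)
#   diag2 ^= ROTL32(diag0 + diag3,  9)
#   diag1 ^= ROTL32(diag3 + diag2, 13)
#   diag0 ^= ROTL32(diag2 + diag1, 18)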
3716# qhasm: mainloop2:
3717._mainloop2:
3718
3719# qhasm: uint32323232 a0 += diag0
3720# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
3721# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
3722paddd %xmm0,%xmm4
3723
3724# qhasm: a1 = diag0
3725# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
3726# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
3727movdqa %xmm0,%xmm5
3728
3729# qhasm: b0 = a0
3730# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
3731# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
3732movdqa %xmm4,%xmm6
3733
3734# qhasm: uint32323232 a0 <<= 7
3735# asm 1: pslld $7,<a0=int6464#5
3736# asm 2: pslld $7,<a0=%xmm4
3737pslld $7,%xmm4
3738
3739# qhasm: uint32323232 b0 >>= 25
3740# asm 1: psrld $25,<b0=int6464#7
3741# asm 2: psrld $25,<b0=%xmm6
3742psrld $25,%xmm6
3743
3744# qhasm: diag3 ^= a0
3745# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
3746# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
3747pxor %xmm4,%xmm3
3748
3749# qhasm: diag3 ^= b0
3750# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
3751# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
3752pxor %xmm6,%xmm3
3753
3754# qhasm: uint32323232 a1 += diag3
3755# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
3756# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
3757paddd %xmm3,%xmm5
3758
3759# qhasm: a2 = diag3
3760# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
3761# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
3762movdqa %xmm3,%xmm4
3763
3764# qhasm: b1 = a1
3765# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
3766# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
3767movdqa %xmm5,%xmm6
3768
3769# qhasm: uint32323232 a1 <<= 9
3770# asm 1: pslld $9,<a1=int6464#6
3771# asm 2: pslld $9,<a1=%xmm5
3772pslld $9,%xmm5
3773
3774# qhasm: uint32323232 b1 >>= 23
3775# asm 1: psrld $23,<b1=int6464#7
3776# asm 2: psrld $23,<b1=%xmm6
3777psrld $23,%xmm6
3778
3779# qhasm: diag2 ^= a1
3780# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
3781# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
3782pxor %xmm5,%xmm2
3783
3784# qhasm: diag3 <<<= 32
3785# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
3786# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
3787pshufd $0x93,%xmm3,%xmm3
3788
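# The pshufd immediates 0x93, 0x4e and 0x39 rotate a register's four
# 32-bit lanes by one, two and three positions (qhasm writes them as
# 128-bit rotates <<<= 32/64/96); they realign the diagonals between
# quarter-round steps so the plain paddd/pxor above keep pairing the
# correct state words.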
3789# qhasm: diag2 ^= b1
3790# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
3791# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
3792pxor %xmm6,%xmm2
3793
3794# qhasm: uint32323232 a2 += diag2
3795# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
3796# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
3797paddd %xmm2,%xmm4
3798
3799# qhasm: a3 = diag2
3800# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
3801# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
3802movdqa %xmm2,%xmm5
3803
3804# qhasm: b2 = a2
3805# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
3806# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
3807movdqa %xmm4,%xmm6
3808
3809# qhasm: uint32323232 a2 <<= 13
3810# asm 1: pslld $13,<a2=int6464#5
3811# asm 2: pslld $13,<a2=%xmm4
3812pslld $13,%xmm4
3813
3814# qhasm: uint32323232 b2 >>= 19
3815# asm 1: psrld $19,<b2=int6464#7
3816# asm 2: psrld $19,<b2=%xmm6
3817psrld $19,%xmm6
3818
3819# qhasm: diag1 ^= a2
3820# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
3821# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
3822pxor %xmm4,%xmm1
3823
3824# qhasm: diag2 <<<= 64
3825# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
3826# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
3827pshufd $0x4e,%xmm2,%xmm2
3828
3829# qhasm: diag1 ^= b2
3830# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
3831# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
3832pxor %xmm6,%xmm1
3833
3834# qhasm: uint32323232 a3 += diag1
3835# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
3836# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
3837paddd %xmm1,%xmm5
3838
3839# qhasm: a4 = diag3
3840# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
3841# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
3842movdqa %xmm3,%xmm4
3843
3844# qhasm: b3 = a3
3845# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
3846# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
3847movdqa %xmm5,%xmm6
3848
3849# qhasm: uint32323232 a3 <<= 18
3850# asm 1: pslld $18,<a3=int6464#6
3851# asm 2: pslld $18,<a3=%xmm5
3852pslld $18,%xmm5
3853
3854# qhasm: uint32323232 b3 >>= 14
3855# asm 1: psrld $14,<b3=int6464#7
3856# asm 2: psrld $14,<b3=%xmm6
3857psrld $14,%xmm6
3858
3859# qhasm: diag0 ^= a3
3860# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
3861# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
3862pxor %xmm5,%xmm0
3863
3864# qhasm: diag1 <<<= 96
3865# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
3866# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
3867pshufd $0x39,%xmm1,%xmm1
3868
3869# qhasm: diag0 ^= b3
3870# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
3871# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
3872pxor %xmm6,%xmm0
3873
3874# qhasm: uint32323232 a4 += diag0
3875# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
3876# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
3877paddd %xmm0,%xmm4
3878
3879# qhasm: a5 = diag0
3880# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
3881# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
3882movdqa %xmm0,%xmm5
3883
3884# qhasm: b4 = a4
3885# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
3886# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
3887movdqa %xmm4,%xmm6
3888
3889# qhasm: uint32323232 a4 <<= 7
3890# asm 1: pslld $7,<a4=int6464#5
3891# asm 2: pslld $7,<a4=%xmm4
3892pslld $7,%xmm4
3893
3894# qhasm: uint32323232 b4 >>= 25
3895# asm 1: psrld $25,<b4=int6464#7
3896# asm 2: psrld $25,<b4=%xmm6
3897psrld $25,%xmm6
3898
3899# qhasm: diag1 ^= a4
3900# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
3901# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
3902pxor %xmm4,%xmm1
3903
3904# qhasm: diag1 ^= b4
3905# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
3906# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
3907pxor %xmm6,%xmm1
3908
3909# qhasm: uint32323232 a5 += diag1
3910# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
3911# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
3912paddd %xmm1,%xmm5
3913
3914# qhasm: a6 = diag1
3915# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
3916# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
3917movdqa %xmm1,%xmm4
3918
3919# qhasm: b5 = a5
3920# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
3921# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
3922movdqa %xmm5,%xmm6
3923
3924# qhasm: uint32323232 a5 <<= 9
3925# asm 1: pslld $9,<a5=int6464#6
3926# asm 2: pslld $9,<a5=%xmm5
3927pslld $9,%xmm5
3928
3929# qhasm: uint32323232 b5 >>= 23
3930# asm 1: psrld $23,<b5=int6464#7
3931# asm 2: psrld $23,<b5=%xmm6
3932psrld $23,%xmm6
3933
3934# qhasm: diag2 ^= a5
3935# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
3936# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
3937pxor %xmm5,%xmm2
3938
3939# qhasm: diag1 <<<= 32
3940# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
3941# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
3942pshufd $0x93,%xmm1,%xmm1
3943
# qhasm: diag2 ^= b5
# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
pxor %xmm6,%xmm2

# qhasm: uint32323232 a6 += diag2
# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
paddd %xmm2,%xmm4

# qhasm: a7 = diag2
# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
movdqa %xmm2,%xmm5

# qhasm: b6 = a6
# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232 a6 <<= 13
# asm 1: pslld $13,<a6=int6464#5
# asm 2: pslld $13,<a6=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232 b6 >>= 19
# asm 1: psrld $19,<b6=int6464#7
# asm 2: psrld $19,<b6=%xmm6
psrld $19,%xmm6

# qhasm: diag3 ^= a6
# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
pxor %xmm4,%xmm3

# qhasm: diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: diag3 ^= b6
# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
pxor %xmm6,%xmm3

# qhasm: uint32323232 a7 += diag3
# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
paddd %xmm3,%xmm5

# qhasm: a0 = diag1
# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
movdqa %xmm1,%xmm4

# qhasm: b7 = a7
# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232 a7 <<= 18
# asm 1: pslld $18,<a7=int6464#6
# asm 2: pslld $18,<a7=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232 b7 >>= 14
# asm 1: psrld $14,<b7=int6464#7
# asm 2: psrld $14,<b7=%xmm6
psrld $14,%xmm6

# qhasm: diag0 ^= a7
# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
pxor %xmm5,%xmm0

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: diag0 ^= b7
# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
pxor %xmm6,%xmm0

# qhasm: uint32323232 a0 += diag0
# asm 1: paddd <diag0=int6464#1,<a0=int6464#5
# asm 2: paddd <diag0=%xmm0,<a0=%xmm4
paddd %xmm0,%xmm4

# qhasm: a1 = diag0
# asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
movdqa %xmm0,%xmm5

# qhasm: b0 = a0
# asm 1: movdqa <a0=int6464#5,>b0=int6464#7
# asm 2: movdqa <a0=%xmm4,>b0=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232 a0 <<= 7
# asm 1: pslld $7,<a0=int6464#5
# asm 2: pslld $7,<a0=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232 b0 >>= 25
# asm 1: psrld $25,<b0=int6464#7
# asm 2: psrld $25,<b0=%xmm6
psrld $25,%xmm6

# qhasm: diag3 ^= a0
# asm 1: pxor <a0=int6464#5,<diag3=int6464#4
# asm 2: pxor <a0=%xmm4,<diag3=%xmm3
pxor %xmm4,%xmm3

# qhasm: diag3 ^= b0
# asm 1: pxor <b0=int6464#7,<diag3=int6464#4
# asm 2: pxor <b0=%xmm6,<diag3=%xmm3
pxor %xmm6,%xmm3

# qhasm: uint32323232 a1 += diag3
# asm 1: paddd <diag3=int6464#4,<a1=int6464#6
# asm 2: paddd <diag3=%xmm3,<a1=%xmm5
paddd %xmm3,%xmm5

# qhasm: a2 = diag3
# asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
movdqa %xmm3,%xmm4

# qhasm: b1 = a1
# asm 1: movdqa <a1=int6464#6,>b1=int6464#7
# asm 2: movdqa <a1=%xmm5,>b1=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232 a1 <<= 9
# asm 1: pslld $9,<a1=int6464#6
# asm 2: pslld $9,<a1=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232 b1 >>= 23
# asm 1: psrld $23,<b1=int6464#7
# asm 2: psrld $23,<b1=%xmm6
psrld $23,%xmm6

# qhasm: diag2 ^= a1
# asm 1: pxor <a1=int6464#6,<diag2=int6464#3
# asm 2: pxor <a1=%xmm5,<diag2=%xmm2
pxor %xmm5,%xmm2

# qhasm: diag3 <<<= 32
# asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x93,%xmm3,%xmm3

# qhasm: diag2 ^= b1
# asm 1: pxor <b1=int6464#7,<diag2=int6464#3
# asm 2: pxor <b1=%xmm6,<diag2=%xmm2
pxor %xmm6,%xmm2

# qhasm: uint32323232 a2 += diag2
# asm 1: paddd <diag2=int6464#3,<a2=int6464#5
# asm 2: paddd <diag2=%xmm2,<a2=%xmm4
paddd %xmm2,%xmm4

# qhasm: a3 = diag2
# asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
movdqa %xmm2,%xmm5

# qhasm: b2 = a2
# asm 1: movdqa <a2=int6464#5,>b2=int6464#7
# asm 2: movdqa <a2=%xmm4,>b2=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232 a2 <<= 13
# asm 1: pslld $13,<a2=int6464#5
# asm 2: pslld $13,<a2=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232 b2 >>= 19
# asm 1: psrld $19,<b2=int6464#7
# asm 2: psrld $19,<b2=%xmm6
psrld $19,%xmm6

# qhasm: diag1 ^= a2
# asm 1: pxor <a2=int6464#5,<diag1=int6464#2
# asm 2: pxor <a2=%xmm4,<diag1=%xmm1
pxor %xmm4,%xmm1

# qhasm: diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: diag1 ^= b2
# asm 1: pxor <b2=int6464#7,<diag1=int6464#2
# asm 2: pxor <b2=%xmm6,<diag1=%xmm1
pxor %xmm6,%xmm1

# qhasm: uint32323232 a3 += diag1
# asm 1: paddd <diag1=int6464#2,<a3=int6464#6
# asm 2: paddd <diag1=%xmm1,<a3=%xmm5
paddd %xmm1,%xmm5

# qhasm: a4 = diag3
# asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
# asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
movdqa %xmm3,%xmm4

# qhasm: b3 = a3
# asm 1: movdqa <a3=int6464#6,>b3=int6464#7
# asm 2: movdqa <a3=%xmm5,>b3=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232 a3 <<= 18
# asm 1: pslld $18,<a3=int6464#6
# asm 2: pslld $18,<a3=%xmm5
pslld $18,%xmm5

# qhasm: uint32323232 b3 >>= 14
# asm 1: psrld $14,<b3=int6464#7
# asm 2: psrld $14,<b3=%xmm6
psrld $14,%xmm6

# qhasm: diag0 ^= a3
# asm 1: pxor <a3=int6464#6,<diag0=int6464#1
# asm 2: pxor <a3=%xmm5,<diag0=%xmm0
pxor %xmm5,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag0 ^= b3
# asm 1: pxor <b3=int6464#7,<diag0=int6464#1
# asm 2: pxor <b3=%xmm6,<diag0=%xmm0
pxor %xmm6,%xmm0

# qhasm: uint32323232 a4 += diag0
# asm 1: paddd <diag0=int6464#1,<a4=int6464#5
# asm 2: paddd <diag0=%xmm0,<a4=%xmm4
paddd %xmm0,%xmm4

# qhasm: a5 = diag0
# asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
# asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
movdqa %xmm0,%xmm5

# qhasm: b4 = a4
# asm 1: movdqa <a4=int6464#5,>b4=int6464#7
# asm 2: movdqa <a4=%xmm4,>b4=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232 a4 <<= 7
# asm 1: pslld $7,<a4=int6464#5
# asm 2: pslld $7,<a4=%xmm4
pslld $7,%xmm4

# qhasm: uint32323232 b4 >>= 25
# asm 1: psrld $25,<b4=int6464#7
# asm 2: psrld $25,<b4=%xmm6
psrld $25,%xmm6

# qhasm: diag1 ^= a4
# asm 1: pxor <a4=int6464#5,<diag1=int6464#2
# asm 2: pxor <a4=%xmm4,<diag1=%xmm1
pxor %xmm4,%xmm1

# qhasm: diag1 ^= b4
# asm 1: pxor <b4=int6464#7,<diag1=int6464#2
# asm 2: pxor <b4=%xmm6,<diag1=%xmm1
pxor %xmm6,%xmm1

# qhasm: uint32323232 a5 += diag1
# asm 1: paddd <diag1=int6464#2,<a5=int6464#6
# asm 2: paddd <diag1=%xmm1,<a5=%xmm5
paddd %xmm1,%xmm5

# qhasm: a6 = diag1
# asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
movdqa %xmm1,%xmm4

# qhasm: b5 = a5
# asm 1: movdqa <a5=int6464#6,>b5=int6464#7
# asm 2: movdqa <a5=%xmm5,>b5=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232 a5 <<= 9
# asm 1: pslld $9,<a5=int6464#6
# asm 2: pslld $9,<a5=%xmm5
pslld $9,%xmm5

# qhasm: uint32323232 b5 >>= 23
# asm 1: psrld $23,<b5=int6464#7
# asm 2: psrld $23,<b5=%xmm6
psrld $23,%xmm6

# qhasm: diag2 ^= a5
# asm 1: pxor <a5=int6464#6,<diag2=int6464#3
# asm 2: pxor <a5=%xmm5,<diag2=%xmm2
pxor %xmm5,%xmm2

# qhasm: diag1 <<<= 32
# asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x93,%xmm1,%xmm1

# qhasm: diag2 ^= b5
# asm 1: pxor <b5=int6464#7,<diag2=int6464#3
# asm 2: pxor <b5=%xmm6,<diag2=%xmm2
pxor %xmm6,%xmm2

# qhasm: uint32323232 a6 += diag2
# asm 1: paddd <diag2=int6464#3,<a6=int6464#5
# asm 2: paddd <diag2=%xmm2,<a6=%xmm4
paddd %xmm2,%xmm4

# qhasm: a7 = diag2
# asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
# asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
movdqa %xmm2,%xmm5

# qhasm: b6 = a6
# asm 1: movdqa <a6=int6464#5,>b6=int6464#7
# asm 2: movdqa <a6=%xmm4,>b6=%xmm6
movdqa %xmm4,%xmm6

# qhasm: uint32323232 a6 <<= 13
# asm 1: pslld $13,<a6=int6464#5
# asm 2: pslld $13,<a6=%xmm4
pslld $13,%xmm4

# qhasm: uint32323232 b6 >>= 19
# asm 1: psrld $19,<b6=int6464#7
# asm 2: psrld $19,<b6=%xmm6
psrld $19,%xmm6

# qhasm: diag3 ^= a6
# asm 1: pxor <a6=int6464#5,<diag3=int6464#4
# asm 2: pxor <a6=%xmm4,<diag3=%xmm3
pxor %xmm4,%xmm3

# qhasm: diag2 <<<= 64
# asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x4e,%xmm2,%xmm2

# qhasm: diag3 ^= b6
# asm 1: pxor <b6=int6464#7,<diag3=int6464#4
# asm 2: pxor <b6=%xmm6,<diag3=%xmm3
pxor %xmm6,%xmm3

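# The round counter is decremented here, well before the ja ._mainloop2
# that consumes its flags, so the sub overlaps with the remaining SIMD
# work. Each pass through mainloop2 performs four Salsa20 rounds (two
# double rounds), hence the step of 4; for Salsa20/12 the counter starts
# at 12.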
# qhasm: unsigned>? i -= 4
# asm 1: sub $4,<i=int64#4
# asm 2: sub $4,<i=%rcx
sub $4,%rcx

# qhasm: uint32323232 a7 += diag3
# asm 1: paddd <diag3=int6464#4,<a7=int6464#6
# asm 2: paddd <diag3=%xmm3,<a7=%xmm5
paddd %xmm3,%xmm5

# qhasm: a0 = diag1
# asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
# asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
movdqa %xmm1,%xmm4

# qhasm: b7 = a7
# asm 1: movdqa <a7=int6464#6,>b7=int6464#7
# asm 2: movdqa <a7=%xmm5,>b7=%xmm6
movdqa %xmm5,%xmm6

# qhasm: uint32323232 a7 <<= 18
# asm 1: pslld $18,<a7=int6464#6
# asm 2: pslld $18,<a7=%xmm5
pslld $18,%xmm5

# qhasm: b0 = 0
# asm 1: pxor >b0=int6464#8,>b0=int6464#8
# asm 2: pxor >b0=%xmm7,>b0=%xmm7
pxor %xmm7,%xmm7

# qhasm: uint32323232 b7 >>= 14
# asm 1: psrld $14,<b7=int6464#7
# asm 2: psrld $14,<b7=%xmm6
psrld $14,%xmm6

# qhasm: diag0 ^= a7
# asm 1: pxor <a7=int6464#6,<diag0=int6464#1
# asm 2: pxor <a7=%xmm5,<diag0=%xmm0
pxor %xmm5,%xmm0

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: diag0 ^= b7
# asm 1: pxor <b7=int6464#7,<diag0=int6464#1
# asm 2: pxor <b7=%xmm6,<diag0=%xmm0
pxor %xmm6,%xmm0
# comment:fp stack unchanged by jump

# qhasm: goto mainloop2 if unsigned>
ja ._mainloop2

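# All rounds are done. Per the Salsa20 specification, the original input
# block (saved in x0..x3 on the stack) is added word-wise to the permuted
# state to form the keystream block.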
# qhasm: uint32323232 diag0 += x0
# asm 1: paddd <x0=stack128#4,<diag0=int6464#1
# asm 2: paddd <x0=48(%rsp),<diag0=%xmm0
paddd 48(%rsp),%xmm0

# qhasm: uint32323232 diag1 += x1
# asm 1: paddd <x1=stack128#1,<diag1=int6464#2
# asm 2: paddd <x1=0(%rsp),<diag1=%xmm1
paddd 0(%rsp),%xmm1

# qhasm: uint32323232 diag2 += x2
# asm 1: paddd <x2=stack128#2,<diag2=int6464#3
# asm 2: paddd <x2=16(%rsp),<diag2=%xmm2
paddd 16(%rsp),%xmm2

# qhasm: uint32323232 diag3 += x3
# asm 1: paddd <x3=stack128#3,<diag3=int6464#4
# asm 2: paddd <x3=32(%rsp),<diag3=%xmm3
paddd 32(%rsp),%xmm3

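# The 16 state words sit on the diagonals of the four xmm registers, so
# they are extracted four at a time: movd takes lane 0 of each register,
# then pshufd $0x39 rotates the next word into lane 0. Lane 0 of
# diag0..diag3 holds state words 0, 12, 8 and 4, which is why this first
# group is XORed with the message and stored at offsets 0, 48, 32 and 16.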
# qhasm: in0 = diag0
# asm 1: movd <diag0=int6464#1,>in0=int64#4
# asm 2: movd <diag0=%xmm0,>in0=%rcx
movd %xmm0,%rcx

# qhasm: in12 = diag1
# asm 1: movd <diag1=int6464#2,>in12=int64#5
# asm 2: movd <diag1=%xmm1,>in12=%r8
movd %xmm1,%r8

# qhasm: in8 = diag2
# asm 1: movd <diag2=int6464#3,>in8=int64#6
# asm 2: movd <diag2=%xmm2,>in8=%r9
movd %xmm2,%r9

# qhasm: in4 = diag3
# asm 1: movd <diag3=int6464#4,>in4=int64#7
# asm 2: movd <diag3=%xmm3,>in4=%rax
movd %xmm3,%rax

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in0 ^= *(uint32 *) (m + 0)
# asm 1: xorl 0(<m=int64#2),<in0=int64#4d
# asm 2: xorl 0(<m=%rsi),<in0=%ecx
xorl 0(%rsi),%ecx

# qhasm: (uint32) in12 ^= *(uint32 *) (m + 48)
# asm 1: xorl 48(<m=int64#2),<in12=int64#5d
# asm 2: xorl 48(<m=%rsi),<in12=%r8d
xorl 48(%rsi),%r8d

# qhasm: (uint32) in8 ^= *(uint32 *) (m + 32)
# asm 1: xorl 32(<m=int64#2),<in8=int64#6d
# asm 2: xorl 32(<m=%rsi),<in8=%r9d
xorl 32(%rsi),%r9d

# qhasm: (uint32) in4 ^= *(uint32 *) (m + 16)
# asm 1: xorl 16(<m=int64#2),<in4=int64#7d
# asm 2: xorl 16(<m=%rsi),<in4=%eax
xorl 16(%rsi),%eax

# qhasm: *(uint32 *) (out + 0) = in0
# asm 1: movl <in0=int64#4d,0(<out=int64#1)
# asm 2: movl <in0=%ecx,0(<out=%rdi)
movl %ecx,0(%rdi)

# qhasm: *(uint32 *) (out + 48) = in12
# asm 1: movl <in12=int64#5d,48(<out=int64#1)
# asm 2: movl <in12=%r8d,48(<out=%rdi)
movl %r8d,48(%rdi)

# qhasm: *(uint32 *) (out + 32) = in8
# asm 1: movl <in8=int64#6d,32(<out=int64#1)
# asm 2: movl <in8=%r9d,32(<out=%rdi)
movl %r9d,32(%rdi)

# qhasm: *(uint32 *) (out + 16) = in4
# asm 1: movl <in4=int64#7d,16(<out=int64#1)
# asm 2: movl <in4=%eax,16(<out=%rdi)
movl %eax,16(%rdi)

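# Second extraction pass: lane 0 now holds state words 5, 1, 13 and 9,
# stored at offsets 20, 4, 52 and 36 after XORing with the message.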
# qhasm: in5 = diag0
# asm 1: movd <diag0=int6464#1,>in5=int64#4
# asm 2: movd <diag0=%xmm0,>in5=%rcx
movd %xmm0,%rcx

# qhasm: in1 = diag1
# asm 1: movd <diag1=int6464#2,>in1=int64#5
# asm 2: movd <diag1=%xmm1,>in1=%r8
movd %xmm1,%r8

# qhasm: in13 = diag2
# asm 1: movd <diag2=int6464#3,>in13=int64#6
# asm 2: movd <diag2=%xmm2,>in13=%r9
movd %xmm2,%r9

# qhasm: in9 = diag3
# asm 1: movd <diag3=int6464#4,>in9=int64#7
# asm 2: movd <diag3=%xmm3,>in9=%rax
movd %xmm3,%rax

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in5 ^= *(uint32 *) (m + 20)
# asm 1: xorl 20(<m=int64#2),<in5=int64#4d
# asm 2: xorl 20(<m=%rsi),<in5=%ecx
xorl 20(%rsi),%ecx

# qhasm: (uint32) in1 ^= *(uint32 *) (m + 4)
# asm 1: xorl 4(<m=int64#2),<in1=int64#5d
# asm 2: xorl 4(<m=%rsi),<in1=%r8d
xorl 4(%rsi),%r8d

# qhasm: (uint32) in13 ^= *(uint32 *) (m + 52)
# asm 1: xorl 52(<m=int64#2),<in13=int64#6d
# asm 2: xorl 52(<m=%rsi),<in13=%r9d
xorl 52(%rsi),%r9d

# qhasm: (uint32) in9 ^= *(uint32 *) (m + 36)
# asm 1: xorl 36(<m=int64#2),<in9=int64#7d
# asm 2: xorl 36(<m=%rsi),<in9=%eax
xorl 36(%rsi),%eax

# qhasm: *(uint32 *) (out + 20) = in5
# asm 1: movl <in5=int64#4d,20(<out=int64#1)
# asm 2: movl <in5=%ecx,20(<out=%rdi)
movl %ecx,20(%rdi)

# qhasm: *(uint32 *) (out + 4) = in1
# asm 1: movl <in1=int64#5d,4(<out=int64#1)
# asm 2: movl <in1=%r8d,4(<out=%rdi)
movl %r8d,4(%rdi)

# qhasm: *(uint32 *) (out + 52) = in13
# asm 1: movl <in13=int64#6d,52(<out=int64#1)
# asm 2: movl <in13=%r9d,52(<out=%rdi)
movl %r9d,52(%rdi)

# qhasm: *(uint32 *) (out + 36) = in9
# asm 1: movl <in9=int64#7d,36(<out=int64#1)
# asm 2: movl <in9=%eax,36(<out=%rdi)
movl %eax,36(%rdi)

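# Third extraction pass: state words 10, 6, 2 and 14, at offsets 40, 24,
# 8 and 56.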
# qhasm: in10 = diag0
# asm 1: movd <diag0=int6464#1,>in10=int64#4
# asm 2: movd <diag0=%xmm0,>in10=%rcx
movd %xmm0,%rcx

# qhasm: in6 = diag1
# asm 1: movd <diag1=int6464#2,>in6=int64#5
# asm 2: movd <diag1=%xmm1,>in6=%r8
movd %xmm1,%r8

# qhasm: in2 = diag2
# asm 1: movd <diag2=int6464#3,>in2=int64#6
# asm 2: movd <diag2=%xmm2,>in2=%r9
movd %xmm2,%r9

# qhasm: in14 = diag3
# asm 1: movd <diag3=int6464#4,>in14=int64#7
# asm 2: movd <diag3=%xmm3,>in14=%rax
movd %xmm3,%rax

# qhasm: diag0 <<<= 96
# asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
# asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
pshufd $0x39,%xmm0,%xmm0

# qhasm: diag1 <<<= 96
# asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
# asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
pshufd $0x39,%xmm1,%xmm1

# qhasm: diag2 <<<= 96
# asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
# asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
pshufd $0x39,%xmm2,%xmm2

# qhasm: diag3 <<<= 96
# asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
# asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
pshufd $0x39,%xmm3,%xmm3

# qhasm: (uint32) in10 ^= *(uint32 *) (m + 40)
# asm 1: xorl 40(<m=int64#2),<in10=int64#4d
# asm 2: xorl 40(<m=%rsi),<in10=%ecx
xorl 40(%rsi),%ecx

# qhasm: (uint32) in6 ^= *(uint32 *) (m + 24)
# asm 1: xorl 24(<m=int64#2),<in6=int64#5d
# asm 2: xorl 24(<m=%rsi),<in6=%r8d
xorl 24(%rsi),%r8d

# qhasm: (uint32) in2 ^= *(uint32 *) (m + 8)
# asm 1: xorl 8(<m=int64#2),<in2=int64#6d
# asm 2: xorl 8(<m=%rsi),<in2=%r9d
xorl 8(%rsi),%r9d

# qhasm: (uint32) in14 ^= *(uint32 *) (m + 56)
# asm 1: xorl 56(<m=int64#2),<in14=int64#7d
# asm 2: xorl 56(<m=%rsi),<in14=%eax
xorl 56(%rsi),%eax

# qhasm: *(uint32 *) (out + 40) = in10
# asm 1: movl <in10=int64#4d,40(<out=int64#1)
# asm 2: movl <in10=%ecx,40(<out=%rdi)
movl %ecx,40(%rdi)

# qhasm: *(uint32 *) (out + 24) = in6
# asm 1: movl <in6=int64#5d,24(<out=int64#1)
# asm 2: movl <in6=%r8d,24(<out=%rdi)
movl %r8d,24(%rdi)

# qhasm: *(uint32 *) (out + 8) = in2
# asm 1: movl <in2=int64#6d,8(<out=int64#1)
# asm 2: movl <in2=%r9d,8(<out=%rdi)
movl %r9d,8(%rdi)

# qhasm: *(uint32 *) (out + 56) = in14
# asm 1: movl <in14=int64#7d,56(<out=int64#1)
# asm 2: movl <in14=%eax,56(<out=%rdi)
movl %eax,56(%rdi)

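# Final extraction pass: state words 15, 11, 7 and 3, at offsets 60, 44,
# 28 and 12. No further pshufd is needed once the last lane is read.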
# qhasm: in15 = diag0
# asm 1: movd <diag0=int6464#1,>in15=int64#4
# asm 2: movd <diag0=%xmm0,>in15=%rcx
movd %xmm0,%rcx

# qhasm: in11 = diag1
# asm 1: movd <diag1=int6464#2,>in11=int64#5
# asm 2: movd <diag1=%xmm1,>in11=%r8
movd %xmm1,%r8

# qhasm: in7 = diag2
# asm 1: movd <diag2=int6464#3,>in7=int64#6
# asm 2: movd <diag2=%xmm2,>in7=%r9
movd %xmm2,%r9

# qhasm: in3 = diag3
# asm 1: movd <diag3=int6464#4,>in3=int64#7
# asm 2: movd <diag3=%xmm3,>in3=%rax
movd %xmm3,%rax

# qhasm: (uint32) in15 ^= *(uint32 *) (m + 60)
# asm 1: xorl 60(<m=int64#2),<in15=int64#4d
# asm 2: xorl 60(<m=%rsi),<in15=%ecx
xorl 60(%rsi),%ecx

# qhasm: (uint32) in11 ^= *(uint32 *) (m + 44)
# asm 1: xorl 44(<m=int64#2),<in11=int64#5d
# asm 2: xorl 44(<m=%rsi),<in11=%r8d
xorl 44(%rsi),%r8d

# qhasm: (uint32) in7 ^= *(uint32 *) (m + 28)
# asm 1: xorl 28(<m=int64#2),<in7=int64#6d
# asm 2: xorl 28(<m=%rsi),<in7=%r9d
xorl 28(%rsi),%r9d

# qhasm: (uint32) in3 ^= *(uint32 *) (m + 12)
# asm 1: xorl 12(<m=int64#2),<in3=int64#7d
# asm 2: xorl 12(<m=%rsi),<in3=%eax
xorl 12(%rsi),%eax

# qhasm: *(uint32 *) (out + 60) = in15
# asm 1: movl <in15=int64#4d,60(<out=int64#1)
# asm 2: movl <in15=%ecx,60(<out=%rdi)
movl %ecx,60(%rdi)

# qhasm: *(uint32 *) (out + 44) = in11
# asm 1: movl <in11=int64#5d,44(<out=int64#1)
# asm 2: movl <in11=%r8d,44(<out=%rdi)
movl %r8d,44(%rdi)

# qhasm: *(uint32 *) (out + 28) = in7
# asm 1: movl <in7=int64#6d,28(<out=int64#1)
# asm 2: movl <in7=%r9d,28(<out=%rdi)
movl %r9d,28(%rdi)

# qhasm: *(uint32 *) (out + 12) = in3
# asm 1: movl <in3=int64#7d,12(<out=int64#1)
# asm 2: movl <in3=%eax,12(<out=%rdi)
movl %eax,12(%rdi)

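# One 64-byte block is complete. Reload the remaining byte count and
# increment the 64-bit block counter, whose low word (state word 8)
# lives in x2[0] and whose high word (state word 9) lives in x3[1]: the
# two halves are spliced into a single 64-bit add so that the carry out
# of the low word propagates, then split and stored back.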
# qhasm: bytes = bytes_backup
# asm 1: movq <bytes_backup=stack64#8,>bytes=int64#6
# asm 2: movq <bytes_backup=408(%rsp),>bytes=%r9
movq 408(%rsp),%r9

# qhasm: in8 = ((uint32 *)&x2)[0]
# asm 1: movl <x2=stack128#2,>in8=int64#4d
# asm 2: movl <x2=16(%rsp),>in8=%ecx
movl 16(%rsp),%ecx

# qhasm: in9 = ((uint32 *)&x3)[1]
# asm 1: movl 4+<x3=stack128#3,>in9=int64#5d
# asm 2: movl 4+<x3=32(%rsp),>in9=%r8d
movl 4+32(%rsp),%r8d

# qhasm: in8 += 1
# asm 1: add $1,<in8=int64#4
# asm 2: add $1,<in8=%rcx
add $1,%rcx

# qhasm: in9 <<= 32
# asm 1: shl $32,<in9=int64#5
# asm 2: shl $32,<in9=%r8
shl $32,%r8

# qhasm: in8 += in9
# asm 1: add <in9=int64#5,<in8=int64#4
# asm 2: add <in9=%r8,<in8=%rcx
add %r8,%rcx

# qhasm: in9 = in8
# asm 1: mov <in8=int64#4,>in9=int64#5
# asm 2: mov <in8=%rcx,>in9=%r8
mov %rcx,%r8

# qhasm: (uint64) in9 >>= 32
# asm 1: shr $32,<in9=int64#5
# asm 2: shr $32,<in9=%r8
shr $32,%r8

# qhasm: ((uint32 *)&x2)[0] = in8
# asm 1: movl <in8=int64#4d,>x2=stack128#2
# asm 2: movl <in8=%ecx,>x2=16(%rsp)
movl %ecx,16(%rsp)

# qhasm: ((uint32 *)&x3)[1] = in9
# asm 1: movl <in9=int64#5d,4+<x3=stack128#3
# asm 2: movl <in9=%r8d,4+<x3=32(%rsp)
movl %r8d,4+32(%rsp)

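# Dispatch on the remaining length: more than 64 bytes means another
# full block, exactly 64 means this block finishes the job, and fewer
# than 64 falls through to the partial-block copy below.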
# qhasm: unsigned>? unsigned<? bytes - 64
# asm 1: cmp $64,<bytes=int64#6
# asm 2: cmp $64,<bytes=%r9
cmp $64,%r9
# comment:fp stack unchanged by jump

# qhasm: goto bytesatleast65 if unsigned>
ja ._bytesatleast65
# comment:fp stack unchanged by jump

# qhasm: goto bytesatleast64 if !unsigned<
jae ._bytesatleast64

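# Partial final block: for a short request, out was pointed earlier at a
# temporary area and the caller's destination was saved in ctarget, so
# copy just the low `bytes` bytes of the block to the real destination.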
# qhasm: m = out
# asm 1: mov <out=int64#1,>m=int64#2
# asm 2: mov <out=%rdi,>m=%rsi
mov %rdi,%rsi

# qhasm: out = ctarget
# asm 1: mov <ctarget=int64#3,>out=int64#1
# asm 2: mov <ctarget=%rdx,>out=%rdi
mov %rdx,%rdi

# qhasm: i = bytes
# asm 1: mov <bytes=int64#6,>i=int64#4
# asm 2: mov <bytes=%r9,>i=%rcx
mov %r9,%rcx

# qhasm: while (i) { *out++ = *m++; --i }
rep movsb
# comment:fp stack unchanged by fallthrough

# qhasm: bytesatleast64:
._bytesatleast64:
# comment:fp stack unchanged by fallthrough

# qhasm: done:
._done:

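# Epilogue: restore the callee-preserved registers that were spilled to
# the stack on entry.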
# qhasm: r11_caller = r11_stack
# asm 1: movq <r11_stack=stack64#1,>r11_caller=int64#9
# asm 2: movq <r11_stack=352(%rsp),>r11_caller=%r11
movq 352(%rsp),%r11

# qhasm: r12_caller = r12_stack
# asm 1: movq <r12_stack=stack64#2,>r12_caller=int64#10
# asm 2: movq <r12_stack=360(%rsp),>r12_caller=%r12
movq 360(%rsp),%r12

# qhasm: r13_caller = r13_stack
# asm 1: movq <r13_stack=stack64#3,>r13_caller=int64#11
# asm 2: movq <r13_stack=368(%rsp),>r13_caller=%r13
movq 368(%rsp),%r13

# qhasm: r14_caller = r14_stack
# asm 1: movq <r14_stack=stack64#4,>r14_caller=int64#12
# asm 2: movq <r14_stack=376(%rsp),>r14_caller=%r14
movq 376(%rsp),%r14

# qhasm: r15_caller = r15_stack
# asm 1: movq <r15_stack=stack64#5,>r15_caller=int64#13
# asm 2: movq <r15_stack=384(%rsp),>r15_caller=%r15
movq 384(%rsp),%r15

# qhasm: rbx_caller = rbx_stack
# asm 1: movq <rbx_stack=stack64#6,>rbx_caller=int64#14
# asm 2: movq <rbx_stack=392(%rsp),>rbx_caller=%rbx
movq 392(%rsp),%rbx

# qhasm: rbp_caller = rbp_stack
# asm 1: movq <rbp_stack=stack64#7,>rbp_caller=int64#15
# asm 2: movq <rbp_stack=400(%rsp),>rbp_caller=%rbp
movq 400(%rsp),%rbp

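# Undo the stack-frame adjustment (the amount is reloaded into r11
# above) and return 0, with rax and rdx cleared, matching the
# crypto_stream success convention.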
# qhasm: leave
add %r11,%rsp
xor %rax,%rax
xor %rdx,%rdx
ret

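# More than 64 bytes remain: account for the block just written, advance
# both pointers past it, and loop back for the next block.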
# qhasm: bytesatleast65:
._bytesatleast65:

# qhasm: bytes -= 64
# asm 1: sub $64,<bytes=int64#6
# asm 2: sub $64,<bytes=%r9
sub $64,%r9

# qhasm: out += 64
# asm 1: add $64,<out=int64#1
# asm 2: add $64,<out=%rdi
add $64,%rdi

# qhasm: m += 64
# asm 1: add $64,<m=int64#2
# asm 2: add $64,<m=%rsi
add $64,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto bytesbetween1and255
jmp ._bytesbetween1and255