# ----------------------------------------------------------------------
# crypto_onetimeauth_poly1305_amd64 -- qhasm-generated x86-64 assembly
# (AT&T/GAS syntax): Poly1305 one-time authenticator, x87 floating-point
# implementation.
#
# SysV AMD64 ABI, per the "# qhasm: input out / m / l / k" declarations:
#   rdi = out  (16-byte authenticator tag, written at the end)
#   rsi = m    (message pointer)
#   rdx = l    (message length in bytes)
#   rcx = k    (32-byte key: r = k[0..15], s = k[16..31])
#
# NOTE(review): this copy appears to have lost its original line breaks
# during extraction.  The "# asm 1:" operand text is truncated
# throughout, several instruction sequences (the r-clamping "and"s, the
# integer->double conversions, and the x87 multiplication main loop) are
# garbled or missing, and lines beginning mid-token below will not
# assemble.  Only comments have been added in this review; restore the
# instruction stream from the canonical qhasm output before building.
# ----------------------------------------------------------------------
# qhasm: int64 r11_caller # qhasm: int64 r12_caller # qhasm: int64 r13_caller # qhasm: int64 r14_caller # qhasm: int64 r15_caller # qhasm: int64 rbx_caller # qhasm: int64 rbp_caller # qhasm: caller r11_caller # qhasm: caller r12_caller # qhasm: caller r13_caller # qhasm: caller r14_caller # qhasm: caller r15_caller # qhasm: caller rbx_caller # qhasm: caller rbp_caller # qhasm: stack64 r11_stack # qhasm: stack64 r12_stack # qhasm: stack64 r13_stack # qhasm: stack64 r14_stack # qhasm: stack64 r15_stack # qhasm: stack64 rbx_stack # qhasm: stack64 rbp_stack # qhasm: int64 out # qhasm: stack64 out_stack # qhasm: int64 m # qhasm: int64 l # qhasm: int64 k # qhasm: stack64 k_stack # qhasm: int64 m0 # qhasm: int64 m1 # qhasm: int64 m2 # qhasm: int64 m3 # qhasm: float80 a0 # qhasm: float80 a1 # qhasm: float80 a2 # qhasm: float80 a3 # qhasm: float80 h0 # qhasm: float80 h1 # qhasm: float80 h2 # qhasm: float80 h3 # qhasm: float80 x0 # qhasm: float80 x1 # qhasm: float80 x2 # qhasm: float80 x3 # qhasm: float80 y0 # qhasm: float80 y1 # qhasm: float80 y2 # qhasm: float80 y3 # qhasm: float80 r0x0 # qhasm: float80 r1x0 # qhasm: float80 r2x0 # qhasm: float80 r3x0 # qhasm: float80 r0x1 # qhasm: float80 r1x1 # qhasm: float80 r2x1 # qhasm: float80 sr3x1 # qhasm: float80 r0x2 # qhasm: float80 r1x2 # qhasm: float80 sr2x2 # qhasm: float80 sr3x2 # qhasm: float80 r0x3 # qhasm: float80 sr1x3 # qhasm: float80 sr2x3 # qhasm: float80 sr3x3 # qhasm: stack64 d0 # qhasm: stack64 d1 # qhasm: stack64 d2 # qhasm: stack64 d3 # qhasm: stack64 r0 # qhasm: stack64 r1 # qhasm: stack64 r2 # qhasm: stack64 r3 # qhasm: stack64 sr1 # qhasm: stack64 sr2 # qhasm: stack64 sr3 # qhasm: enter crypto_onetimeauth_poly1305_amd64 .text .p2align 5 .globl _crypto_onetimeauth_poly1305_amd64 .globl crypto_onetimeauth_poly1305_amd64 _crypto_onetimeauth_poly1305_amd64: crypto_onetimeauth_poly1305_amd64: mov %rsp,%r11 and $31,%r11 add $192,%r11 sub %r11,%rsp # qhasm: input out # qhasm: input m # qhasm: input l # qhasm: input k 
# -- Prologue (above): r11 = (rsp & 31) + 192, then rsp -= r11.  This
#    reserves >=192 bytes of scratch and leaves rsp 32-byte aligned; r11
#    holds the adjustment and is restored at "leave" (add %r11,%rsp).
# -- Next: spill the SysV callee-saved registers (r12-r15, rbx, rbp,
#    plus the frame adjustment in r11) to 32(%rsp)..80(%rsp); switch the
#    x87 rounding mode with fldcw from the external constant
#    crypto_onetimeauth_poly1305_amd64_rounding (RIP-relative); load the
#    first 16 key bytes (r) from (%rcx) into r8d/r9d/eax/r10d; stash
#    out (%rdi) and k (%rcx) on the stack; and store the high words
#    0x43300000/0x45300000/0x47300000/0x49300000 into the d0..d3 stack
#    doubles -- presumably the exponent words of 2^52/2^84/2^116/2^148
#    used for the integer->double conversion trick; confirm against the
#    original qhasm source.
# qhasm: r11_stack = r11_caller # asm 1: movq r11_stack=stack64#1 # asm 2: movq r11_stack=32(%rsp) movq %r11,32(%rsp) # qhasm: r12_stack = r12_caller # asm 1: movq r12_stack=stack64#2 # asm 2: movq r12_stack=40(%rsp) movq %r12,40(%rsp) # qhasm: r13_stack = r13_caller # asm 1: movq r13_stack=stack64#3 # asm 2: movq r13_stack=48(%rsp) movq %r13,48(%rsp) # qhasm: r14_stack = r14_caller # asm 1: movq r14_stack=stack64#4 # asm 2: movq r14_stack=56(%rsp) movq %r14,56(%rsp) # qhasm: r15_stack = r15_caller # asm 1: movq r15_stack=stack64#5 # asm 2: movq r15_stack=64(%rsp) movq %r15,64(%rsp) # qhasm: rbx_stack = rbx_caller # asm 1: movq rbx_stack=stack64#6 # asm 2: movq rbx_stack=72(%rsp) movq %rbx,72(%rsp) # qhasm: rbp_stack = rbp_caller # asm 1: movq rbp_stack=stack64#7 # asm 2: movq rbp_stack=80(%rsp) movq %rbp,80(%rsp) # qhasm: round *(uint16 *) &crypto_onetimeauth_poly1305_amd64_rounding fldcw crypto_onetimeauth_poly1305_amd64_rounding(%rip) # qhasm: m0 = *(uint32 *) (k + 0) # asm 1: movl 0(m0=int64#5d # asm 2: movl 0(m0=%r8d movl 0(%rcx),%r8d # qhasm: m1 = *(uint32 *) (k + 4) # asm 1: movl 4(m1=int64#6d # asm 2: movl 4(m1=%r9d movl 4(%rcx),%r9d # qhasm: m2 = *(uint32 *) (k + 8) # asm 1: movl 8(m2=int64#7d # asm 2: movl 8(m2=%eax movl 8(%rcx),%eax # qhasm: m3 = *(uint32 *) (k + 12) # asm 1: movl 12(m3=int64#8d # asm 2: movl 12(m3=%r10d movl 12(%rcx),%r10d # qhasm: out_stack = out # asm 1: movq out_stack=stack64#8 # asm 2: movq out_stack=88(%rsp) movq %rdi,88(%rsp) # qhasm: k_stack = k # asm 1: movq k_stack=stack64#9 # asm 2: movq k_stack=96(%rsp) movq %rcx,96(%rsp) # qhasm: d0 top = 0x43300000 # asm 1: movl $0x43300000,>d0=stack64#10 # asm 2: movl $0x43300000,>d0=108(%rsp) movl $0x43300000,108(%rsp) # qhasm: d1 top = 0x45300000 # asm 1: movl $0x45300000,>d1=stack64#11 # asm 2: movl $0x45300000,>d1=116(%rsp) movl $0x45300000,116(%rsp) # qhasm: d2 top = 0x47300000 # asm 1: movl $0x47300000,>d2=stack64#12 # asm 2: movl $0x47300000,>d2=124(%rsp) movl $0x47300000,124(%rsp) 
# -- Next: clamp r per Poly1305 (the "(uint32) m0 &= 0x0fffffff" qhasm
#    line is visible, though the emitted "and" instructions are lost in
#    this copy), convert the clamped r limbs to double precision, and
#    spill the precomputed multiplier table r0/r1/sr1/r2/sr2/r3/sr3 to
#    136(%rsp)..184(%rsp) via fstl/fstpl.  "sr*" presumably are the
#    r-limbs pre-scaled by 5/2^130 for the modular reduction -- TODO
#    confirm against the original source.  "fldz" initializes the
#    accumulator h to 0 on the x87 stack.  The repeated 16-byte loads
#    from (%rsi) below belong to the message-absorption loop; the x87
#    multiply/reduce body itself is missing from this garbled copy.
#    The tail handling is visible: a 16-byte stack buffer ("lastchunk"
#    at 0(%rsp)) is zeroed, %rdi is pointed at it (leaq 0(%rsp),%rdi)
#    and the remaining byte count moved to %rcx, so the final partial
#    block is copied there before being processed like a full block.
# qhasm: d3 top = 0x49300000 # asm 1: movl $0x49300000,>d3=stack64#13 # asm 2: movl $0x49300000,>d3=132(%rsp) movl $0x49300000,132(%rsp) # qhasm: (uint32) m0 &= 0x0fffffff # asm 1: and $0x0fffffff,r0=stack64#14 # asm 2: fstpl >r0=136(%rsp) fstpl 136(%rsp) # comment:fpstackfrombottom:r1=stack64#15 # asm 2: fstl >r1=144(%rsp) fstl 144(%rsp) # comment:fpstackfrombottom:sr1=stack64#16 # asm 2: fstpl >sr1=152(%rsp) fstpl 152(%rsp) # comment:fpstackfrombottom:r2=stack64#17 # asm 2: fstl >r2=160(%rsp) fstl 160(%rsp) # comment:fpstackfrombottom:sr2=stack64#18 # asm 2: fstpl >sr2=168(%rsp) fstpl 168(%rsp) # comment:fpstackfrombottom:r3=stack64#19 # asm 2: fstl >r3=176(%rsp) fstl 176(%rsp) # comment:fpstackfrombottom:sr3=stack64#20 # asm 2: fstpl >sr3=184(%rsp) fstpl 184(%rsp) # comment:fpstackfrombottom: # qhasm: h3 = 0 fldz # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12(m3=%edi movl 12(%rsi),%edi # comment:fpstackfrombottom:m2=int64#4d # asm 2: movl 8(m2=%ecx movl 8(%rsi),%ecx # comment:fpstackfrombottom:m1=int64#5d # asm 2: movl 4(m1=%r8d movl 4(%rsi),%r8d # comment:fpstackfrombottom:m0=int64#6d # asm 2: movl 0(m0=%r9d movl 0(%rsi),%r9d # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12(m3=%edi movl 12(%rsi),%edi # comment:fpstackfrombottom:m2=int64#4d # asm 2: movl 8(m2=%ecx movl 8(%rsi),%ecx # comment:fpstackfrombottom:m1=int64#5d # asm 2: movl 4(m1=%r8d movl 4(%rsi),%r8d # comment:fpstackfrombottom:m0=int64#6d # asm 2: movl 0(m0=%r9d movl 0(%rsi),%r9d # comment:fpstackfrombottom:lastchunk=stack128#1 # asm 2: movl $0,>lastchunk=0(%rsp) movl $0,0(%rsp) # comment:fpstackfrombottom:destination=int64#1 # asm 2: leaq destination=%rdi leaq 0(%rsp),%rdi # comment:fpstackfrombottom:numbytes=int64#4 # asm 2: mov numbytes=%rcx mov %rdx,%rcx # comment:fpstackfrombottom:m3=int64#1d # asm 2: movl 12+m3=%edi movl 12+0(%rsp),%edi # comment:fpstackfrombottom:m2=int64#2d # asm 2: movl 8+m2=%esi movl 8+0(%rsp),%esi # comment:fpstackfrombottom:m1=int64#3d # asm 2: movl 
# -- NOTE(review): the line below begins mid-instruction ("4+m1=%edx");
#    extraction damage.  From here: the final accumulator h0..h3 is
#    stored from the x87 stack into the d0..d3 stack doubles (fstpl to
#    104/112/120/128(%rsp)), then integer registers take over for the
#    "freeze" (final reduction mod 2^130-5).  g0..g3 extract the top
#    words of d0..d3 and mask with 63; f1..f4 accumulate the low words
#    with a 32-bit add/adc carry chain, reassembling h as four 32-bit
#    limbs plus a carry limb.
4+m1=%edx movl 4+0(%rsp),%edx # comment:fpstackfrombottom:m0=int64#4d # asm 2: movl m0=%ecx movl 0(%rsp),%ecx # comment:fpstackfrombottom:d0=stack64#10 # asm 2: fstpl >d0=104(%rsp) fstpl 104(%rsp) # comment:fpstackfrombottom:d1=stack64#11 # asm 2: fstpl >d1=112(%rsp) fstpl 112(%rsp) # comment:fpstackfrombottom:d2=stack64#12 # asm 2: fstpl >d2=120(%rsp) fstpl 120(%rsp) # comment:fpstackfrombottom:d3=stack64#13 # asm 2: fstpl >d3=128(%rsp) fstpl 128(%rsp) # comment:fpstackfrombottom: # qhasm: int64 f0 # qhasm: int64 f1 # qhasm: int64 f2 # qhasm: int64 f3 # qhasm: int64 f4 # qhasm: int64 g0 # qhasm: int64 g1 # qhasm: int64 g2 # qhasm: int64 g3 # qhasm: int64 f # qhasm: int64 notf # qhasm: stack64 f1_stack # qhasm: stack64 f2_stack # qhasm: stack64 f3_stack # qhasm: stack64 f4_stack # qhasm: stack64 g0_stack # qhasm: stack64 g1_stack # qhasm: stack64 g2_stack # qhasm: stack64 g3_stack # qhasm: g0 = top d0 # asm 1: movl g0=int64#1d # asm 2: movl g0=%edi movl 108(%rsp),%edi # qhasm: (uint32) g0 &= 63 # asm 1: and $63,g1=int64#2d # asm 2: movl g1=%esi movl 116(%rsp),%esi # qhasm: (uint32) g1 &= 63 # asm 1: and $63,g2=int64#3d # asm 2: movl g2=%edx movl 124(%rsp),%edx # qhasm: (uint32) g2 &= 63 # asm 1: and $63,g3=int64#4d # asm 2: movl g3=%ecx movl 132(%rsp),%ecx # qhasm: (uint32) g3 &= 63 # asm 1: and $63,f1=int64#5d # asm 2: movl f1=%r8d movl 112(%rsp),%r8d # qhasm: carry? (uint32) f1 += g0 # asm 1: add f1_stack=stack64#11 # asm 2: movq f1_stack=112(%rsp) movq %r8,112(%rsp) # qhasm: f2 = bottom d2 # asm 1: movl f2=int64#1d # asm 2: movl f2=%edi movl 120(%rsp),%edi # qhasm: carry? (uint32) f2 += g1 + carry # asm 1: adc f2_stack=stack64#12 # asm 2: movq f2_stack=120(%rsp) movq %rdi,120(%rsp) # qhasm: f3 = bottom d3 # asm 1: movl f3=int64#1d # asm 2: movl f3=%edi movl 128(%rsp),%edi # qhasm: carry? 
# -- NOTE(review): line begins mid-statement; extraction damage.
#    Continuation of the freeze: finish the f1..f4 carry chain, then
#    compute g = h + 5 (g0 starts at 5, g1..g3 at 0, propagated with
#    adc) so that g overflows 2^130 exactly when h >= 2^130 - 5.
#    The constant f = 0xfffffffc feeds the overflow test on the next
#    line; intermediate limbs are parked in the g*_stack/f*_stack slots.
(uint32) f3 += g2 + carry # asm 1: adc f3_stack=stack64#13 # asm 2: movq f3_stack=128(%rsp) movq %rdi,128(%rsp) # qhasm: f4 = 0 # asm 1: mov $0,>f4=int64#1 # asm 2: mov $0,>f4=%rdi mov $0,%rdi # qhasm: carry? (uint32) f4 += g3 + carry # asm 1: adc f4_stack=stack64#14 # asm 2: movq f4_stack=136(%rsp) movq %rdi,136(%rsp) # qhasm: g0 = 5 # asm 1: mov $5,>g0=int64#1 # asm 2: mov $5,>g0=%rdi mov $5,%rdi # qhasm: f0 = bottom d0 # asm 1: movl f0=int64#2d # asm 2: movl f0=%esi movl 104(%rsp),%esi # qhasm: carry? (uint32) g0 += f0 # asm 1: add g0_stack=stack64#10 # asm 2: movq g0_stack=104(%rsp) movq %rdi,104(%rsp) # qhasm: g1 = 0 # asm 1: mov $0,>g1=int64#1 # asm 2: mov $0,>g1=%rdi mov $0,%rdi # qhasm: f1 = f1_stack # asm 1: movq f1=int64#3 # asm 2: movq f1=%rdx movq 112(%rsp),%rdx # qhasm: carry? (uint32) g1 += f1 + carry # asm 1: adc g1_stack=stack64#11 # asm 2: movq g1_stack=112(%rsp) movq %rdi,112(%rsp) # qhasm: g2 = 0 # asm 1: mov $0,>g2=int64#1 # asm 2: mov $0,>g2=%rdi mov $0,%rdi # qhasm: f2 = f2_stack # asm 1: movq f2=int64#4 # asm 2: movq f2=%rcx movq 120(%rsp),%rcx # qhasm: carry? (uint32) g2 += f2 + carry # asm 1: adc g2_stack=stack64#12 # asm 2: movq g2_stack=120(%rsp) movq %rdi,120(%rsp) # qhasm: g3 = 0 # asm 1: mov $0,>g3=int64#1 # asm 2: mov $0,>g3=%rdi mov $0,%rdi # qhasm: f3 = f3_stack # asm 1: movq f3=int64#5 # asm 2: movq f3=%r8 movq 128(%rsp),%r8 # qhasm: carry? (uint32) g3 += f3 + carry # asm 1: adc g3_stack=stack64#13 # asm 2: movq g3_stack=128(%rsp) movq %rdi,128(%rsp) # qhasm: f = 0xfffffffc # asm 1: mov $0xfffffffc,>f=int64#1 # asm 2: mov $0xfffffffc,>f=%rdi mov $0xfffffffc,%rdi # qhasm: f4 = f4_stack # asm 1: movq f4=int64#6 # asm 2: movq f4=%r9 movq 136(%rsp),%r9 # qhasm: carry? 
# -- NOTE(review): line begins mid-statement; extraction damage.
#    Final select + tag + epilogue: f is turned into an all-ones or
#    all-zeros mask ("sar $16" / "xor $0xffffffff" into notf), and each
#    limb is chosen branchlessly as (f_i & f) | (g_i & notf) -- i.e.
#    pick h or h+5-2^130 without a data-dependent branch.  The second
#    key half s = k[16..31] is then added with a 32-bit carry chain
#    (addl 16(%r9)...), the 16-byte tag is written through the saved
#    out pointer (%rdi from 88(%rsp); the stores are lost in this
#    copy), the callee-saved registers are restored from
#    32(%rsp)..80(%rsp), the frame adjustment in %r11 is undone, and
#    rax/rdx are zeroed (return 0, and scrub the last secret-bearing
#    scratch registers) before ret.
(uint32) f += f4 + carry # asm 1: adc >= 16 # asm 1: sar $16,notf=int64#6 # asm 2: mov notf=%r9 mov %rdi,%r9 # qhasm: (uint32) notf ^= 0xffffffff # asm 1: xor $0xffffffff,g0=int64#7 # asm 2: movq g0=%rax movq 104(%rsp),%rax # qhasm: g0 &= notf # asm 1: and g1=int64#7 # asm 2: movq g1=%rax movq 112(%rsp),%rax # qhasm: g1 &= notf # asm 1: and g2=int64#7 # asm 2: movq g2=%rax movq 120(%rsp),%rax # qhasm: g2 &= notf # asm 1: and g3=int64#1 # asm 2: movq g3=%rdi movq 128(%rsp),%rdi # qhasm: g3 &= notf # asm 1: and out=int64#1 # asm 2: movq out=%rdi movq 88(%rsp),%rdi # qhasm: k = k_stack # asm 1: movq k=int64#6 # asm 2: movq k=%r9 movq 96(%rsp),%r9 # qhasm: carry? (uint32) f0 += *(uint32 *) (k + 16) # asm 1: addl 16(r11_caller=int64#9 # asm 2: movq r11_caller=%r11 movq 32(%rsp),%r11 # qhasm: r12_caller = r12_stack # asm 1: movq r12_caller=int64#10 # asm 2: movq r12_caller=%r12 movq 40(%rsp),%r12 # qhasm: r13_caller = r13_stack # asm 1: movq r13_caller=int64#11 # asm 2: movq r13_caller=%r13 movq 48(%rsp),%r13 # qhasm: r14_caller = r14_stack # asm 1: movq r14_caller=int64#12 # asm 2: movq r14_caller=%r14 movq 56(%rsp),%r14 # qhasm: r15_caller = r15_stack # asm 1: movq r15_caller=int64#13 # asm 2: movq r15_caller=%r15 movq 64(%rsp),%r15 # qhasm: rbx_caller = rbx_stack # asm 1: movq rbx_caller=int64#14 # asm 2: movq rbx_caller=%rbx movq 72(%rsp),%rbx # qhasm: rbp_caller = rbp_stack # asm 1: movq rbp_caller=int64#15 # asm 2: movq rbp_caller=%rbp movq 80(%rsp),%rbp # qhasm: leave add %r11,%rsp xor %rax,%rax xor %rdx,%rdx ret