#! /usr/bin/env perl
# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
######################################################################

# September 2011.
#
# Interface to OpenSSL as "almost" drop-in replacement for
# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
# doesn't handle partial vectors (doesn't have to if called from
# EVP only). "Drop-in" implies that this module doesn't share key
# schedule structure with the original, nor does it make assumptions
# about its alignment...
#
# Performance summary. The aes-x86_64.pl column lists large-block CBC
# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
# byte processed with 128-bit key, and the vpaes-x86_64.pl column
# [also large-block CBC] encrypt/decrypt.
#
#               aes-x86_64.pl       vpaes-x86_64.pl
#
# Core 2(**)    29.6/41.1/14.3      21.9/25.2(***)
# Nehalem       29.6/40.3/14.6      10.0/11.8
# Atom          57.3/74.2/32.1      60.9/77.2(***)
# Silvermont    52.7/64.0/19.5      48.8/60.8(***)
# Goldmont      38.9/49.0/17.8      10.6/12.6
#
# (*)   "Hyper-threading" in this context refers to cache shared among
#       multiple cores rather than to Intel HTT specifically. As the
#       vast majority of contemporary cores share cache, the slower
#       code path is commonplace. In other words, the
#       "with-hyper-threading-off" results are presented mostly for
#       reference purposes.
#
# (**)  "Core 2" refers to the initial 65nm design, a.k.a. Conroe.
#
# (***) The less impressive improvement on Core 2 and Atom is due to
#       slow pshufb; it is nevertheless a respectable +36%/62%
#       improvement on Core 2 (as implied, over the
#       "hyper-threading-safe" code path).
#
#                                               <appro@openssl.org>

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
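
# Editor's sketch (hypothetical helper, not called by code generation;
# added for exposition only): the recurring trick in this file is that
# any GF(2)-linear byte transform T can be evaluated with two 16-entry
# lookups, one per nibble, which is what pshufb against the .Lk_*
# tables implements.
sub _nibble_lut_demo {
    my ($x, $lo, $hi) = @_;     # $lo/$hi: refs to 16-entry tables
    # T(x) = lo[x & 0xF] ^ hi[x >> 4], since x = (x&0xF) ^ (x&0xF0)
    # and T is linear over GF(2); pshufb performs the 16-way lookups.
    return $lo->[$x & 0xF] ^ $hi->[($x >> 4) & 0xF];
}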

$PREFIX="vpaes";

$code.=<<___;
.text

##
##  _vpaes_encrypt_core
##
##  AES-encrypt %xmm0.
##
##  Inputs:
##     %xmm0 = input
##     %xmm9-%xmm15 as in _vpaes_preheat
##    (%rdx) = scheduled keys
##
##  Output in %xmm0
##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
##  Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type _vpaes_encrypt_core,\@abi-omnipotent
.align 16
_vpaes_encrypt_core:
    mov %rdx, %r9
    mov \$16, %r11
    mov 240(%rdx),%eax
    movdqa %xmm9, %xmm1
    movdqa .Lk_ipt(%rip), %xmm2 # iptlo
    pandn %xmm0, %xmm1
    movdqu (%r9), %xmm5         # round0 key
    psrld \$4, %xmm1
    pand %xmm9, %xmm0
    pshufb %xmm0, %xmm2
    movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
    pshufb %xmm1, %xmm0
    pxor %xmm5, %xmm2
    add \$16, %r9
    pxor %xmm2, %xmm0
    lea .Lk_mc_backward(%rip),%r10
    jmp .Lenc_entry

.align 16
.Lenc_loop:
    # middle of middle round
    movdqa %xmm13, %xmm4        # 4 : sb1u
    movdqa %xmm12, %xmm0        # 0 : sb1t
    pshufb %xmm2, %xmm4         # 4 = sb1u
    pshufb %xmm3, %xmm0         # 0 = sb1t
    pxor %xmm5, %xmm4           # 4 = sb1u + k
    movdqa %xmm15, %xmm5        # 4 : sb2u
    pxor %xmm4, %xmm0           # 0 = A
    movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    pshufb %xmm2, %xmm5         # 4 = sb2u
    movdqa (%r11,%r10), %xmm4   # .Lk_mc_backward[]
    movdqa %xmm14, %xmm2        # 2 : sb2t
    pshufb %xmm3, %xmm2         # 2 = sb2t
    movdqa %xmm0, %xmm3         # 3 = A
    pxor %xmm5, %xmm2           # 2 = 2A
    pshufb %xmm1, %xmm0         # 0 = B
    add \$16, %r9               # next key
    pxor %xmm2, %xmm0           # 0 = 2A+B
    pshufb %xmm4, %xmm3         # 3 = D
    add \$16, %r11              # next mc
    pxor %xmm0, %xmm3           # 3 = 2A+B+D
    pshufb %xmm1, %xmm0         # 0 = 2B+C
    and \$0x30, %r11            # ... mod 4
    sub \$1,%rax                # nr--
    pxor %xmm3, %xmm0           # 0 = 2A+3B+C+D

.Lenc_entry:
    # top of round
    movdqa %xmm9, %xmm1         # 1 : i
    movdqa %xmm11, %xmm5        # 2 : a/k
    pandn %xmm0, %xmm1          # 1 = i<<4
    psrld \$4, %xmm1            # 1 = i
    pand %xmm9, %xmm0           # 0 = k
    pshufb %xmm0, %xmm5         # 2 = a/k
    movdqa %xmm10, %xmm3        # 3 : 1/i
    pxor %xmm1, %xmm0           # 0 = j
    pshufb %xmm1, %xmm3         # 3 = 1/i
    movdqa %xmm10, %xmm4        # 4 : 1/j
    pxor %xmm5, %xmm3           # 3 = iak = 1/i + a/k
    pshufb %xmm0, %xmm4         # 4 = 1/j
    movdqa %xmm10, %xmm2        # 2 : 1/iak
    pxor %xmm5, %xmm4           # 4 = jak = 1/j + a/k
    pshufb %xmm3, %xmm2         # 2 = 1/iak
    movdqa %xmm10, %xmm3        # 3 : 1/jak
    pxor %xmm0, %xmm2           # 2 = io
    pshufb %xmm4, %xmm3         # 3 = 1/jak
    movdqu (%r9), %xmm5
    pxor %xmm1, %xmm3           # 3 = jo
    jnz .Lenc_loop

    # middle of last round
    movdqa -0x60(%r10), %xmm4   # 3 : sbou .Lk_sbo
    movdqa -0x50(%r10), %xmm0   # 0 : sbot .Lk_sbo+16
    pshufb %xmm2, %xmm4         # 4 = sbou
    pxor %xmm5, %xmm4           # 4 = sb1u + k
    pshufb %xmm3, %xmm0         # 0 = sb1t
    movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    pxor %xmm4, %xmm0           # 0 = A
    pshufb %xmm1, %xmm0
    ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
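
##
##  Editor's note on the round structure above (a sketch based on the
##  vpaes paper, not authoritative): sb1 yields the AES S-box output s
##  and sb2 its xtime multiple 2s; with A denoting the keyed S-box
##  output, the pshufb rotations by .Lk_mc_forward/.Lk_mc_backward
##  accumulate 2A+3B+C+D across each column, i.e. the MixColumns
##  coefficients {2,3,1,1}, while ShiftRows is folded into the same
##  byte permutations (the .Lk_sr tables).
##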
##
##  Decryption core
##
##  Same API as encryption core.
##
.type _vpaes_decrypt_core,\@abi-omnipotent
.align 16
_vpaes_decrypt_core:
    mov %rdx, %r9               # load key
    mov 240(%rdx),%eax
    movdqa %xmm9, %xmm1
    movdqa .Lk_dipt(%rip), %xmm2 # iptlo
    pandn %xmm0, %xmm1
    mov %rax, %r11
    psrld \$4, %xmm1
    movdqu (%r9), %xmm5         # round0 key
    shl \$4, %r11
    pand %xmm9, %xmm0
    pshufb %xmm0, %xmm2
    movdqa .Lk_dipt+16(%rip), %xmm0 # ipthi
    xor \$0x30, %r11
    lea .Lk_dsbd(%rip),%r10
    pshufb %xmm1, %xmm0
    and \$0x30, %r11
    pxor %xmm5, %xmm2
    movdqa .Lk_mc_forward+48(%rip), %xmm5
    pxor %xmm2, %xmm0
    add \$16, %r9
    add %r10, %r11
    jmp .Ldec_entry

.align 16
.Ldec_loop:
##
##  Inverse mix columns
##
    movdqa -0x20(%r10),%xmm4    # 4 : sb9u
    movdqa -0x10(%r10),%xmm1    # 0 : sb9t
    pshufb %xmm2, %xmm4         # 4 = sb9u
    pshufb %xmm3, %xmm1         # 0 = sb9t
    pxor %xmm4, %xmm0
    movdqa 0x00(%r10),%xmm4     # 4 : sbdu
    pxor %xmm1, %xmm0           # 0 = ch
    movdqa 0x10(%r10),%xmm1     # 0 : sbdt
    pshufb %xmm2, %xmm4         # 4 = sbdu
    pshufb %xmm5, %xmm0         # MC ch
    pshufb %xmm3, %xmm1         # 0 = sbdt
    pxor %xmm4, %xmm0           # 4 = ch
    movdqa 0x20(%r10),%xmm4     # 4 : sbbu
    pxor %xmm1, %xmm0           # 0 = ch
    movdqa 0x30(%r10),%xmm1     # 0 : sbbt
    pshufb %xmm2, %xmm4         # 4 = sbbu
    pshufb %xmm5, %xmm0         # MC ch
    pshufb %xmm3, %xmm1         # 0 = sbbt
    pxor %xmm4, %xmm0           # 4 = ch
    movdqa 0x40(%r10),%xmm4     # 4 : sbeu
    pxor %xmm1, %xmm0           # 0 = ch
    movdqa 0x50(%r10),%xmm1     # 0 : sbet
    pshufb %xmm2, %xmm4         # 4 = sbeu
    pshufb %xmm5, %xmm0         # MC ch
    pshufb %xmm3, %xmm1         # 0 = sbet
    pxor %xmm4, %xmm0           # 4 = ch
    add \$16, %r9               # next round key
    palignr \$12, %xmm5, %xmm5
    pxor %xmm1, %xmm0           # 0 = ch
    sub \$1,%rax                # nr--

.Ldec_entry:
    # top of round
    movdqa %xmm9, %xmm1         # 1 : i
    pandn %xmm0, %xmm1          # 1 = i<<4
    movdqa %xmm11, %xmm2        # 2 : a/k
    psrld \$4, %xmm1            # 1 = i
    pand %xmm9, %xmm0           # 0 = k
    pshufb %xmm0, %xmm2         # 2 = a/k
    movdqa %xmm10, %xmm3        # 3 : 1/i
    pxor %xmm1, %xmm0           # 0 = j
    pshufb %xmm1, %xmm3         # 3 = 1/i
    movdqa %xmm10, %xmm4        # 4 : 1/j
    pxor %xmm2, %xmm3           # 3 = iak = 1/i + a/k
    pshufb %xmm0, %xmm4         # 4 = 1/j
    pxor %xmm2, %xmm4           # 4 = jak = 1/j + a/k
    movdqa %xmm10, %xmm2        # 2 : 1/iak
    pshufb %xmm3, %xmm2         # 2 = 1/iak
    movdqa %xmm10, %xmm3        # 3 : 1/jak
    pxor %xmm0, %xmm2           # 2 = io
    pshufb %xmm4, %xmm3         # 3 = 1/jak
    movdqu (%r9), %xmm0
    pxor %xmm1, %xmm3           # 3 = jo
    jnz .Ldec_loop

    # middle of last round
    movdqa 0x60(%r10), %xmm4    # 3 : sbou
    pshufb %xmm2, %xmm4         # 4 = sbou
    pxor %xmm0, %xmm4           # 4 = sb1u + k
    movdqa 0x70(%r10), %xmm0    # 0 : sbot
    movdqa -0x160(%r11), %xmm2  # .Lk_sr-.Lk_dsbd=-0x160
    pshufb %xmm3, %xmm0         # 0 = sb1t
    pxor %xmm4, %xmm0           # 0 = A
    pshufb %xmm2, %xmm0
    ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core

########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
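##
##  Layout note (editor's sketch, inferred from the set_*_key wrappers
##  below): round keys are 16 bytes each; for encryption they are
##  written forward from (%rdx), while for decryption the buffer
##  pointer is first advanced to the end and the schedule is written
##  backwards, so both cores can walk keys with "add \$16, %r9".
##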
.type _vpaes_schedule_core,\@abi-omnipotent
.align 16
_vpaes_schedule_core:
    # rdi = key
    # rsi = size in bits
    # rdx = buffer
    # rcx = direction. 0=encrypt, 1=decrypt
    call _vpaes_preheat         # load the tables
    movdqa .Lk_rcon(%rip), %xmm8 # load rcon
    movdqu (%rdi), %xmm0        # load key (unaligned)

    # input transform
    movdqa %xmm0, %xmm3
    lea .Lk_ipt(%rip), %r11
    call _vpaes_schedule_transform
    movdqa %xmm0, %xmm7

    lea .Lk_sr(%rip),%r10
    test %rcx, %rcx
    jnz .Lschedule_am_decrypting

    # encrypting, output zeroth round key after transform
    movdqu %xmm0, (%rdx)
    jmp .Lschedule_go

.Lschedule_am_decrypting:
    # decrypting, output zeroth round key after shiftrows
    movdqa (%r8,%r10),%xmm1
    pshufb %xmm1, %xmm3
    movdqu %xmm3, (%rdx)
    xor \$0x30, %r8

.Lschedule_go:
    cmp \$192, %esi
    ja .Lschedule_256
    je .Lschedule_192
    # 128: fall through

##
##  .schedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
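##  Editor's arithmetic note (sketch): with \$10 in %esi, the loop
##  below emits 9 mangled round keys plus the final key from
##  .Lschedule_mangle_last; together with the round-0 key stored
##  before .Lschedule_go, that is the 11 round keys AES-128 requires.
##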
.Lschedule_128:
    mov \$10, %esi

.Loop_schedule_128:
    call _vpaes_schedule_round
    dec %rsi
    jz .Lschedule_mangle_last
    call _vpaes_schedule_mangle # write output
    jmp .Loop_schedule_128

##
##  .aes_schedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing. The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
##
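##  Editor's arithmetic note (sketch): each trip through the loop
##  emits 3 round keys (two mangles plus one more after the smear),
##  and the final trip ends in .Lschedule_mangle_last after its second
##  key, so 4 iterations yield 3*3 + 2 + 1 = 12 keys; with the round-0
##  key, that is the 13 round keys AES-192 requires.
##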
.align 16
.Lschedule_192:
    movdqu 8(%rdi),%xmm0        # load key part 2 (very unaligned)
    call _vpaes_schedule_transform # input transform
    movdqa %xmm0, %xmm6         # save short part
    pxor %xmm4, %xmm4           # clear 4
    movhlps %xmm4, %xmm6        # clobber low side with zeros
    mov \$4, %esi

.Loop_schedule_192:
    call _vpaes_schedule_round
    palignr \$8,%xmm6,%xmm0
    call _vpaes_schedule_mangle # save key n
    call _vpaes_schedule_192_smear
    call _vpaes_schedule_mangle # save key n+1
    call _vpaes_schedule_round
    dec %rsi
    jz .Lschedule_mangle_last
    call _vpaes_schedule_mangle # save key n+2
    call _vpaes_schedule_192_smear
    jmp .Loop_schedule_192

##
##  .aes_schedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6. The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
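##  Editor's arithmetic note (sketch): each trip through the loop
##  emits two round keys (one low, one high); the 7th trip ends in
##  .Lschedule_mangle_last after its first key, so the loop yields
##  6*2 + 1 + 1 = 14 keys; with the round-0 key, that is the 15 round
##  keys AES-256 requires.
##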
.align 16
.Lschedule_256:
    movdqu 16(%rdi),%xmm0       # load key part 2 (unaligned)
    call _vpaes_schedule_transform # input transform
    mov \$7, %esi

.Loop_schedule_256:
    call _vpaes_schedule_mangle # output low result
    movdqa %xmm0, %xmm6         # save cur_lo in xmm6

    # high round
    call _vpaes_schedule_round
    dec %rsi
    jz .Lschedule_mangle_last
    call _vpaes_schedule_mangle

    # low round. swap xmm7 and xmm6
    pshufd \$0xFF, %xmm0, %xmm0
    movdqa %xmm7, %xmm5
    movdqa %xmm6, %xmm7
    call _vpaes_schedule_low_round
    movdqa %xmm5, %xmm7

    jmp .Loop_schedule_256

##
##  .aes_schedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##    when encrypting, outputs out(%xmm0) ^ 63
##    when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align 16
.Lschedule_mangle_last:
    # schedule last round key from xmm0
    lea .Lk_deskew(%rip),%r11   # prepare to deskew
    test %rcx, %rcx
    jnz .Lschedule_mangle_last_dec

    # encrypting
    movdqa (%r8,%r10),%xmm1
    pshufb %xmm1, %xmm0         # output permute
    lea .Lk_opt(%rip), %r11     # prepare to output transform
    add \$32, %rdx

.Lschedule_mangle_last_dec:
    add \$-16, %rdx
    pxor .Lk_s63(%rip), %xmm0
    call _vpaes_schedule_transform # output transform
    movdqu %xmm0, (%rdx)        # save last key

    # cleanup
    pxor %xmm0, %xmm0
    pxor %xmm1, %xmm1
    pxor %xmm2, %xmm2
    pxor %xmm3, %xmm3
    pxor %xmm4, %xmm4
    pxor %xmm5, %xmm5
    pxor %xmm6, %xmm6
    pxor %xmm7, %xmm7
    ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core

##
##  .aes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b a x y
##    %xmm6: low side, d c 0 0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d b+c 0 0
##    %xmm0: b+c+d b+c b a
##
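##  Editor's note on the pshufd immediates below (worked example): each
##  2-bit field of the immediate, low to high, selects the source dword
##  for result dwords 0..3. 0x80 = 0b10_00_00_00 picks dwords (0,0,0,2),
##  turning "d c 0 0" into "c 0 0 0"; 0xFE = 0b11_11_11_10 picks
##  (2,3,3,3), turning "b a _ _" into "b b b a".
##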
.type _vpaes_schedule_192_smear,\@abi-omnipotent
.align 16
_vpaes_schedule_192_smear:
    pshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
    pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
    pxor %xmm1, %xmm6           # -> c+d c 0 0
    pxor %xmm1, %xmm1
    pxor %xmm0, %xmm6           # -> b+c+d b+c b a
    movdqa %xmm6, %xmm0
    movhlps %xmm1, %xmm6        # clobber low side with zeros
    ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
##  .aes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
##
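##  Editor's note (sketch): with %xmm1 zeroed, the first palignr below
##  drops the current rcon byte of %xmm8 into the only nonzero lane of
##  %xmm1, ready to be xored into %xmm7, and the second palignr rotates
##  %xmm8 by one byte so the next round sees the next rcon.
##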
.type _vpaes_schedule_round,\@abi-omnipotent
.align 16
_vpaes_schedule_round:
    # extract rcon from xmm8
    pxor %xmm1, %xmm1
    palignr \$15, %xmm8, %xmm1
    palignr \$15, %xmm8, %xmm8
    pxor %xmm1, %xmm7

    # rotate
    pshufd \$0xFF, %xmm0, %xmm0
    palignr \$1, %xmm0, %xmm0

    # fall through...

    # low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
    # smear xmm7
    movdqa %xmm7, %xmm1
    pslldq \$4, %xmm7
    pxor %xmm1, %xmm7
    movdqa %xmm7, %xmm1
    pslldq \$8, %xmm7
    pxor %xmm1, %xmm7
    pxor .Lk_s63(%rip), %xmm7

    # subbytes
    movdqa %xmm9, %xmm1
    pandn %xmm0, %xmm1
    psrld \$4, %xmm1            # 1 = i
    pand %xmm9, %xmm0           # 0 = k
    movdqa %xmm11, %xmm2        # 2 : a/k
    pshufb %xmm0, %xmm2         # 2 = a/k
    pxor %xmm1, %xmm0           # 0 = j
    movdqa %xmm10, %xmm3        # 3 : 1/i
    pshufb %xmm1, %xmm3         # 3 = 1/i
    pxor %xmm2, %xmm3           # 3 = iak = 1/i + a/k
    movdqa %xmm10, %xmm4        # 4 : 1/j
    pshufb %xmm0, %xmm4         # 4 = 1/j
    pxor %xmm2, %xmm4           # 4 = jak = 1/j + a/k
    movdqa %xmm10, %xmm2        # 2 : 1/iak
    pshufb %xmm3, %xmm2         # 2 = 1/iak
    pxor %xmm0, %xmm2           # 2 = io
    movdqa %xmm10, %xmm3        # 3 : 1/jak
    pshufb %xmm4, %xmm3         # 3 = 1/jak
    pxor %xmm1, %xmm3           # 3 = jo
    movdqa %xmm13, %xmm4        # 4 : sbou
    pshufb %xmm2, %xmm4         # 4 = sbou
    movdqa %xmm12, %xmm0        # 0 : sbot
    pshufb %xmm3, %xmm0         # 0 = sb1t
    pxor %xmm4, %xmm0           # 0 = sbox output

    # add in smeared stuff
    pxor %xmm7, %xmm0
    movdqa %xmm0, %xmm7
    ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round

##
##  .aes_schedule_transform
##
##  Linear-transform %xmm0 according to tables at (%r11)
##
##  Requires that %xmm9 = 0x0F0F... as in preheat
##  Output in %xmm0
##  Clobbers %xmm1, %xmm2
##
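##  Editor's note (worked equation, sketch): for a GF(2)-linear T,
##  x = (x & 0x0F) ^ (x & 0xF0) implies
##      T(x) = lo[x & 0x0F] ^ hi[x >> 4],
##  which is exactly the pandn/psrld/pand nibble split plus two pshufb
##  lookups performed below.
##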
.type _vpaes_schedule_transform,\@abi-omnipotent
.align 16
_vpaes_schedule_transform:
    movdqa %xmm9, %xmm1
    pandn %xmm0, %xmm1
    psrld \$4, %xmm1
    pand %xmm9, %xmm0
    movdqa (%r11), %xmm2        # lo
    pshufb %xmm0, %xmm2
    movdqa 16(%r11), %xmm0      # hi
    pshufb %xmm1, %xmm0
    pxor %xmm2, %xmm0
    ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform

##
##  .aes_schedule_mangle
##
##  Mangle xmm0 from (basis-transformed) standard version
##  to our version.
##
##  On encrypt,
##    xor with 0x63
##    multiply by circulant 0,1,1,1
##    apply shiftrows transform
##
##  On decrypt,
##    xor with 0x63
##    multiply by "inverse mixcolumns" circulant E,B,D,9
##    deskew
##    apply shiftrows transform
##
##
##  Writes out to (%rdx), and increments or decrements it
##  Keeps track of round number mod 4 in %r8
##  Preserves xmm0
##  Clobbers xmm1-xmm5
##
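##  Editor's note (sketch): the encrypt path below xors in .Lk_s63 and
##  then folds in three successive rotations by .Lk_mc_forward, i.e.
##  out = rot1(x) ^ rot2(x) ^ rot3(x), which is the multiplication by
##  the circulant (0,1,1,1) mentioned above.
##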
.type _vpaes_schedule_mangle,\@abi-omnipotent
.align 16
_vpaes_schedule_mangle:
    movdqa %xmm0, %xmm4         # save xmm0 for later
    movdqa .Lk_mc_forward(%rip),%xmm5
    test %rcx, %rcx
    jnz .Lschedule_mangle_dec

    # encrypting
    add \$16, %rdx
    pxor .Lk_s63(%rip),%xmm4
    pshufb %xmm5, %xmm4
    movdqa %xmm4, %xmm3
    pshufb %xmm5, %xmm4
    pxor %xmm4, %xmm3
    pshufb %xmm5, %xmm4
    pxor %xmm4, %xmm3
    jmp .Lschedule_mangle_both

.align 16
.Lschedule_mangle_dec:
    # inverse mix columns
    lea .Lk_dksd(%rip),%r11
    movdqa %xmm9, %xmm1
    pandn %xmm4, %xmm1
    psrld \$4, %xmm1            # 1 = hi
    pand %xmm9, %xmm4           # 4 = lo

    movdqa 0x00(%r11), %xmm2
    pshufb %xmm4, %xmm2
    movdqa 0x10(%r11), %xmm3
    pshufb %xmm1, %xmm3
    pxor %xmm2, %xmm3
    pshufb %xmm5, %xmm3

    movdqa 0x20(%r11), %xmm2
    pshufb %xmm4, %xmm2
    pxor %xmm3, %xmm2
    movdqa 0x30(%r11), %xmm3
    pshufb %xmm1, %xmm3
    pxor %xmm2, %xmm3
    pshufb %xmm5, %xmm3

    movdqa 0x40(%r11), %xmm2
    pshufb %xmm4, %xmm2
    pxor %xmm3, %xmm2
    movdqa 0x50(%r11), %xmm3
    pshufb %xmm1, %xmm3
    pxor %xmm2, %xmm3
    pshufb %xmm5, %xmm3

    movdqa 0x60(%r11), %xmm2
    pshufb %xmm4, %xmm2
    pxor %xmm3, %xmm2
    movdqa 0x70(%r11), %xmm3
    pshufb %xmm1, %xmm3
    pxor %xmm2, %xmm3

    add \$-16, %rdx

.Lschedule_mangle_both:
    movdqa (%r8,%r10),%xmm1
    pshufb %xmm1,%xmm3
    add \$-16, %r8
    and \$0x30, %r8
    movdqu %xmm3, (%rdx)
    ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle

#
# Interface to OpenSSL
#
.globl ${PREFIX}_set_encrypt_key
.type ${PREFIX}_set_encrypt_key,\@function,3
.align 16
${PREFIX}_set_encrypt_key:
___

$code.=<<___ if ($win64);
    lea -0xb8(%rsp),%rsp
    movaps %xmm6,0x10(%rsp)
    movaps %xmm7,0x20(%rsp)
    movaps %xmm8,0x30(%rsp)
    movaps %xmm9,0x40(%rsp)
    movaps %xmm10,0x50(%rsp)
    movaps %xmm11,0x60(%rsp)
    movaps %xmm12,0x70(%rsp)
    movaps %xmm13,0x80(%rsp)
    movaps %xmm14,0x90(%rsp)
    movaps %xmm15,0xa0(%rsp)
.Lenc_key_body:
___

$code.=<<___;
    mov %esi,%eax
    shr \$5,%eax
    add \$5,%eax
    mov %eax,240(%rdx)          # AES_KEY->rounds = nbits/32+5;
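    # Editor's note (sketch): the stored value is nbits/32+5, i.e.
    # 9/11/13 for 128/192/256-bit keys, the number of "middle" rounds
    # the cores iterate; initial whitening and the final round are
    # handled separately, so this differs from the usual 10/12/14.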
    mov \$0,%ecx
    mov \$0x30,%r8d
    call _vpaes_schedule_core
___

$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm6
    movaps 0x20(%rsp),%xmm7
    movaps 0x30(%rsp),%xmm8
    movaps 0x40(%rsp),%xmm9
    movaps 0x50(%rsp),%xmm10
    movaps 0x60(%rsp),%xmm11
    movaps 0x70(%rsp),%xmm12
    movaps 0x80(%rsp),%xmm13
    movaps 0x90(%rsp),%xmm14
    movaps 0xa0(%rsp),%xmm15
    lea 0xb8(%rsp),%rsp
.Lenc_key_epilogue:
___

$code.=<<___;
    xor %eax,%eax
    ret
.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key

.globl ${PREFIX}_set_decrypt_key
.type ${PREFIX}_set_decrypt_key,\@function,3
.align 16
${PREFIX}_set_decrypt_key:
___

$code.=<<___ if ($win64);
    lea -0xb8(%rsp),%rsp
    movaps %xmm6,0x10(%rsp)
    movaps %xmm7,0x20(%rsp)
    movaps %xmm8,0x30(%rsp)
    movaps %xmm9,0x40(%rsp)
    movaps %xmm10,0x50(%rsp)
    movaps %xmm11,0x60(%rsp)
    movaps %xmm12,0x70(%rsp)
    movaps %xmm13,0x80(%rsp)
    movaps %xmm14,0x90(%rsp)
    movaps %xmm15,0xa0(%rsp)
.Ldec_key_body:
___

$code.=<<___;
    mov %esi,%eax
    shr \$5,%eax
    add \$5,%eax
    mov %eax,240(%rdx)          # AES_KEY->rounds = nbits/32+5;
    shl \$4,%eax
    lea 16(%rdx,%rax),%rdx
    mov \$1,%ecx
    mov %esi,%r8d
    shr \$1,%r8d
    and \$32,%r8d
    xor \$32,%r8d               # nbits==192?0:32
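    # Editor's worked example (sketch): %esi>>1 then and/xor \$32 gives
    # 32 for 128- and 256-bit keys and 0 for 192-bit; %r8d seeds the
    # .Lk_sr index (round mod 4, in multiples of 16) used by
    # _vpaes_schedule_mangle for the decryption-side ShiftRows:
    #   128: 64 & 32 = 0,   0 ^ 32 = 32
    #   192: 96 & 32 = 32, 32 ^ 32 = 0
    #   256: 128 & 32 = 0,  0 ^ 32 = 32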
    call _vpaes_schedule_core
___

$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm6
    movaps 0x20(%rsp),%xmm7
    movaps 0x30(%rsp),%xmm8
    movaps 0x40(%rsp),%xmm9
    movaps 0x50(%rsp),%xmm10
    movaps 0x60(%rsp),%xmm11
    movaps 0x70(%rsp),%xmm12
    movaps 0x80(%rsp),%xmm13
    movaps 0x90(%rsp),%xmm14
    movaps 0xa0(%rsp),%xmm15
    lea 0xb8(%rsp),%rsp
.Ldec_key_epilogue:
___

$code.=<<___;
    xor %eax,%eax
    ret
.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key

.globl ${PREFIX}_encrypt
.type ${PREFIX}_encrypt,\@function,3
.align 16
${PREFIX}_encrypt:
___

$code.=<<___ if ($win64);
    lea -0xb8(%rsp),%rsp
    movaps %xmm6,0x10(%rsp)
    movaps %xmm7,0x20(%rsp)
    movaps %xmm8,0x30(%rsp)
    movaps %xmm9,0x40(%rsp)
    movaps %xmm10,0x50(%rsp)
    movaps %xmm11,0x60(%rsp)
    movaps %xmm12,0x70(%rsp)
    movaps %xmm13,0x80(%rsp)
    movaps %xmm14,0x90(%rsp)
    movaps %xmm15,0xa0(%rsp)
.Lenc_body:
___

$code.=<<___;
    movdqu (%rdi),%xmm0
    call _vpaes_preheat
    call _vpaes_encrypt_core
    movdqu %xmm0,(%rsi)
___

$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm6
    movaps 0x20(%rsp),%xmm7
    movaps 0x30(%rsp),%xmm8
    movaps 0x40(%rsp),%xmm9
    movaps 0x50(%rsp),%xmm10
    movaps 0x60(%rsp),%xmm11
    movaps 0x70(%rsp),%xmm12
    movaps 0x80(%rsp),%xmm13
    movaps 0x90(%rsp),%xmm14
    movaps 0xa0(%rsp),%xmm15
    lea 0xb8(%rsp),%rsp
.Lenc_epilogue:
___

$code.=<<___;
    ret
.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt

.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@function,3
.align 16
${PREFIX}_decrypt:
___

$code.=<<___ if ($win64);
    lea -0xb8(%rsp),%rsp
    movaps %xmm6,0x10(%rsp)
    movaps %xmm7,0x20(%rsp)
    movaps %xmm8,0x30(%rsp)
    movaps %xmm9,0x40(%rsp)
    movaps %xmm10,0x50(%rsp)
    movaps %xmm11,0x60(%rsp)
    movaps %xmm12,0x70(%rsp)
    movaps %xmm13,0x80(%rsp)
    movaps %xmm14,0x90(%rsp)
    movaps %xmm15,0xa0(%rsp)
.Ldec_body:
___

$code.=<<___;
    movdqu (%rdi),%xmm0
    call _vpaes_preheat
    call _vpaes_decrypt_core
    movdqu %xmm0,(%rsi)
___

$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm6
    movaps 0x20(%rsp),%xmm7
    movaps 0x30(%rsp),%xmm8
    movaps 0x40(%rsp),%xmm9
    movaps 0x50(%rsp),%xmm10
    movaps 0x60(%rsp),%xmm11
    movaps 0x70(%rsp),%xmm12
    movaps 0x80(%rsp),%xmm13
    movaps 0x90(%rsp),%xmm14
    movaps 0xa0(%rsp),%xmm15
    lea 0xb8(%rsp),%rsp
.Ldec_epilogue:
___

$code.=<<___;
    ret
.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
{
my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
#                       size_t length, const AES_KEY *key,
#                       unsigned char *ivp,const int enc);
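# Editor's note (sketch): $out is converted to an offset with
# "sub $inp,$out" and stores go through ($out,$inp), so in-place
# operation (out == in) works; CBC decryption keeps a copy of each
# ciphertext block in %xmm7 before the input slot is overwritten, so
# chaining survives in-place use as well.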
$code.=<<___;
.globl ${PREFIX}_cbc_encrypt
.type ${PREFIX}_cbc_encrypt,\@function,6
.align 16
${PREFIX}_cbc_encrypt:
    xchg $key,$len
___
($len,$key)=($key,$len);
$code.=<<___;
    sub \$16,$len
    jc .Lcbc_abort
___

$code.=<<___ if ($win64);
    lea -0xb8(%rsp),%rsp
    movaps %xmm6,0x10(%rsp)
    movaps %xmm7,0x20(%rsp)
    movaps %xmm8,0x30(%rsp)
    movaps %xmm9,0x40(%rsp)
    movaps %xmm10,0x50(%rsp)
    movaps %xmm11,0x60(%rsp)
    movaps %xmm12,0x70(%rsp)
    movaps %xmm13,0x80(%rsp)
    movaps %xmm14,0x90(%rsp)
    movaps %xmm15,0xa0(%rsp)
.Lcbc_body:
___

$code.=<<___;
    movdqu ($ivp),%xmm6         # load IV
    sub $inp,$out
    call _vpaes_preheat
    cmp \$0,${enc}d
    je .Lcbc_dec_loop
    jmp .Lcbc_enc_loop

.align 16
.Lcbc_enc_loop:
    movdqu ($inp),%xmm0
    pxor %xmm6,%xmm0
    call _vpaes_encrypt_core
    movdqa %xmm0,%xmm6
    movdqu %xmm0,($out,$inp)
    lea 16($inp),$inp
    sub \$16,$len
    jnc .Lcbc_enc_loop
    jmp .Lcbc_done

.align 16
.Lcbc_dec_loop:
    movdqu ($inp),%xmm0
    movdqa %xmm0,%xmm7
    call _vpaes_decrypt_core
    pxor %xmm6,%xmm0
    movdqa %xmm7,%xmm6
    movdqu %xmm0,($out,$inp)
    lea 16($inp),$inp
    sub \$16,$len
    jnc .Lcbc_dec_loop

.Lcbc_done:
    movdqu %xmm6,($ivp)         # save IV
___

$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm6
    movaps 0x20(%rsp),%xmm7
    movaps 0x30(%rsp),%xmm8
    movaps 0x40(%rsp),%xmm9
    movaps 0x50(%rsp),%xmm10
    movaps 0x60(%rsp),%xmm11
    movaps 0x70(%rsp),%xmm12
    movaps 0x80(%rsp),%xmm13
    movaps 0x90(%rsp),%xmm14
    movaps 0xa0(%rsp),%xmm15
    lea 0xb8(%rsp),%rsp
.Lcbc_epilogue:
___

$code.=<<___;
.Lcbc_abort:
    ret
.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
___
}
$code.=<<___;
##
##  _vpaes_preheat
##
##  Fills %r10 with a RIP-relative pointer into the constant tables
##  (so the code remains position-independent) and %xmm9-%xmm15 as
##  specified below.
##
.type _vpaes_preheat,\@abi-omnipotent
.align 16
_vpaes_preheat:
    lea .Lk_s0F(%rip), %r10
    movdqa -0x20(%r10), %xmm10  # .Lk_inv
    movdqa -0x10(%r10), %xmm11  # .Lk_inv+16
    movdqa 0x00(%r10), %xmm9    # .Lk_s0F
    movdqa 0x30(%r10), %xmm13   # .Lk_sb1
    movdqa 0x40(%r10), %xmm12   # .Lk_sb1+16
    movdqa 0x50(%r10), %xmm15   # .Lk_sb2
    movdqa 0x60(%r10), %xmm14   # .Lk_sb2+16
    ret
.size _vpaes_preheat,.-_vpaes_preheat

########################################################
##                                                    ##
##                     Constants                      ##
##                                                    ##
########################################################
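##
##  Editor's note (sketch): every table below is one or more 16-byte
##  rows consumed by pshufb as 16-entry nibble LUTs; the lo/hi pairs
##  (e.g. .Lk_ipt, .Lk_opt) implement linear byte transforms via the
##  two-nibble decomposition described above.
##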
.type _vpaes_consts,\@object
.align 64
_vpaes_consts:
.Lk_inv:        # inv, inva
    .quad 0x0E05060F0D080180, 0x040703090A0B0C02
    .quad 0x01040A060F0B0780, 0x030D0E0C02050809

.Lk_s0F:        # s0F
    .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F

.Lk_ipt:        # input transform (lo, hi)
    .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
    .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81

.Lk_sb1:        # sb1u, sb1t
    .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
    .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.Lk_sb2:        # sb2u, sb2t
    .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
    .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.Lk_sbo:        # sbou, sbot
    .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
    .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA

.Lk_mc_forward: # mc_forward
    .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
    .quad 0x080B0A0904070605, 0x000302010C0F0E0D
    .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
    .quad 0x000302010C0F0E0D, 0x080B0A0904070605

.Lk_mc_backward:# mc_backward
    .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
    .quad 0x020100030E0D0C0F, 0x0A09080B06050407
    .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
    .quad 0x0A09080B06050407, 0x020100030E0D0C0F

.Lk_sr:         # sr
    .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
    .quad 0x030E09040F0A0500, 0x0B06010C07020D08
    .quad 0x0F060D040B020900, 0x070E050C030A0108
    .quad 0x0B0E0104070A0D00, 0x0306090C0F020508

.Lk_rcon:       # rcon
    .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_s63:        # s63: all equal to 0x63 transformed
    .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B

.Lk_opt:        # output transform
    .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
    .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0

.Lk_deskew:     # deskew tables: inverts the sbox's "skew"
    .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
    .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

##
##  Decryption stuff
##  Key schedule constants
##
.Lk_dksd:       # decryption key schedule: invskew x*D
    .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
    .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:       # decryption key schedule: invskew x*B
    .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
    .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:       # decryption key schedule: invskew x*E + 0x63
    .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
    .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:       # decryption key schedule: invskew x*9
    .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
    .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE

##
##  Decryption stuff
##  Round function constants
##
.Lk_dipt:       # decryption input transform
    .quad 0x0F505B040B545F00, 0x154A411E114E451A
    .quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsb9:       # decryption sbox output *9*u, *9*t
    .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
    .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:       # decryption sbox output *D*u, *D*t
    .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
    .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:       # decryption sbox output *B*u, *B*t
    .quad 0xD022649296B44200, 0x602646F6B0F2D404
    .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:       # decryption sbox output *E*u, *E*t
    .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
    .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.Lk_dsbo:       # decryption sbox final output
    .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
    .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
.align 64
.size _vpaes_consts,.-_vpaes_consts
___

if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
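#
# Editor's note (sketch): se_handler checks whether context->Rip lies
# between the HandlerData[] body/epilogue labels of the faulting
# function; if so, it copies the ten saved %xmm6-%xmm15 registers back
# out of the frame and unwinds the 0xb8-byte stack reservation.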
- $rec="%rcx";
- $frame="%rdx";
- $context="%r8";
- $disp="%r9";
- $code.=<<___;
- .extern __imp_RtlVirtualUnwind
- .type se_handler,\@abi-omnipotent
- .align 16
- se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lin_prologue
- mov 152($context),%rax # pull context->Rsp
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lin_prologue
- lea 16(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0xb8(%rax),%rax # adjust stack pointer
- .Lin_prologue:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$`1232/8`,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
- .size se_handler,.-se_handler
- .section .pdata
- .align 4
- .rva .LSEH_begin_${PREFIX}_set_encrypt_key
- .rva .LSEH_end_${PREFIX}_set_encrypt_key
- .rva .LSEH_info_${PREFIX}_set_encrypt_key
- .rva .LSEH_begin_${PREFIX}_set_decrypt_key
- .rva .LSEH_end_${PREFIX}_set_decrypt_key
- .rva .LSEH_info_${PREFIX}_set_decrypt_key
- .rva .LSEH_begin_${PREFIX}_encrypt
- .rva .LSEH_end_${PREFIX}_encrypt
- .rva .LSEH_info_${PREFIX}_encrypt
- .rva .LSEH_begin_${PREFIX}_decrypt
- .rva .LSEH_end_${PREFIX}_decrypt
- .rva .LSEH_info_${PREFIX}_decrypt
- .rva .LSEH_begin_${PREFIX}_cbc_encrypt
- .rva .LSEH_end_${PREFIX}_cbc_encrypt
- .rva .LSEH_info_${PREFIX}_cbc_encrypt
- .section .xdata
- .align 8
- .LSEH_info_${PREFIX}_set_encrypt_key:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lenc_key_body,.Lenc_key_epilogue # HandlerData[]
- .LSEH_info_${PREFIX}_set_decrypt_key:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Ldec_key_body,.Ldec_key_epilogue # HandlerData[]
- .LSEH_info_${PREFIX}_encrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lenc_body,.Lenc_epilogue # HandlerData[]
- .LSEH_info_${PREFIX}_decrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Ldec_body,.Ldec_epilogue # HandlerData[]
- .LSEH_info_${PREFIX}_cbc_encrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lcbc_body,.Lcbc_epilogue # HandlerData[]
- ___
- }
- $code =~ s/\`([^\`]*)\`/eval($1)/gem;
- print $code;
- close STDOUT;