
#! /usr/bin/env perl
# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
##
######################################################################
# ARMv8 NEON adaptation by <appro@openssl.org>
#
# The reason for undertaking this effort is that there is at least one
# popular SoC based on Cortex-A53 that doesn't have crypto extensions.
#
#                 CBC enc   ECB enc/dec(*)   [bit-sliced enc/dec]
# Cortex-A53      21.5      18.1/20.6        [17.5/19.8      ]
# Cortex-A57      36.0(**)  20.4/24.9(**)    [14.4/16.6      ]
# X-Gene          45.9(**)  45.8/57.7(**)    [33.1/37.6(**)  ]
# Denver(***)     16.6(**)  15.1/17.8(**)    [8.80/9.93      ]
# Apple A7(***)   22.7(**)  10.9/14.3        [8.45/10.0      ]
# Mongoose(***)   26.3(**)  21.0/25.0(**)    [13.3/16.8      ]
# ThunderX2(***)  39.4(**)  33.8/48.6(**)
#
# (*)   ECB denotes approximate result for parallelizable modes
#       such as CBC decrypt, CTR, etc.;
# (**)  these results are worse than scalar compiler-generated
#       code, but it's constant-time and therefore preferred;
# (***) presented for reference/comparison purposes;
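#
# (Numbers above are cycles per byte processed, as in OpenSSL's other
# AES perlasm modules.)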

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
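
# For example, the build system would typically invoke this script as
#
#   perl vpaes-armv8.pl linux64 vpaes-armv8.S
#
# where "linux64" is one of the flavours understood by arm-xlate.pl;
# the actual flavour and output path come from the build configuration.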

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;

$code.=<<___;
#include "arm_arch.h"

.text

.type _vpaes_consts,%object
.align 7 // totally strategic alignment
_vpaes_consts:
.Lk_mc_forward: // mc_forward
    .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
    .quad 0x080B0A0904070605, 0x000302010C0F0E0D
    .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
    .quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:// mc_backward
    .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
    .quad 0x020100030E0D0C0F, 0x0A09080B06050407
    .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
    .quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: // sr
    .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
    .quad 0x030E09040F0A0500, 0x0B06010C07020D08
    .quad 0x0F060D040B020900, 0x070E050C030A0108
    .quad 0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv: // inv, inva
    .quad 0x0E05060F0D080180, 0x040703090A0B0C02
    .quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt: // input transform (lo, hi)
    .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
    .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo: // sbou, sbot
    .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
    .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1: // sb1u, sb1t
    .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
    .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2: // sb2u, sb2t
    .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
    .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt: // decryption input transform
    .quad 0x0F505B040B545F00, 0x154A411E114E451A
    .quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo: // decryption sbox final output
    .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
    .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9: // decryption sbox output *9*u, *9*t
    .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
    .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd: // decryption sbox output *D*u, *D*t
    .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
    .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb: // decryption sbox output *B*u, *B*t
    .quad 0xD022649296B44200, 0x602646F6B0F2D404
    .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe: // decryption sbox output *E*u, *E*t
    .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
    .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd: // decryption key schedule: invskew x*D
    .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
    .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb: // decryption key schedule: invskew x*B
    .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
    .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse: // decryption key schedule: invskew x*E + 0x63
    .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
    .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9: // decryption key schedule: invskew x*9
    .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
    .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon: // rcon
    .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt: // output transform
    .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
    .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: // deskew tables: inverts the sbox's "skew"
    .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
    .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.asciz "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size _vpaes_consts,.-_vpaes_consts
.align 6
___

{
my ($inp,$out,$key) = map("x$_",(0..2));

my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));

$code.=<<___;
//
//  _aes_preheat
//
//  Fills register %r10 -> .aes_consts (so you can -fPIC)
//  and %xmm9-%xmm15 as specified below.
//
.type _vpaes_encrypt_preheat,%function
.align 4
_vpaes_encrypt_preheat:
    adr x10, .Lk_inv
    movi v17.16b, #0x0f
    ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
    ld1 {v20.2d-v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
    ld1 {v24.2d-v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
    ret
.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

//
//  _aes_encrypt_core
//
//  AES-encrypt %xmm0.
//
//  Inputs:
//     %xmm0 = input
//     %xmm9-%xmm15 as in _vpaes_preheat
//    (%rdx) = scheduled keys
//
//  Output in %xmm0
//  Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
//  Preserves %xmm6 - %xmm8 so you get some local vectors
//
//
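// In this ARMv8 port the input block arrives in v7, the scheduled-key
// pointer in x2, and the result is returned in v0 (see vpaes_encrypt
// below).
//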
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
    mov x9, $key
    ldr w8, [$key,#240] // pull rounds
    adr x11, .Lk_mc_forward+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    b .Lenc_entry
.align 4
.Lenc_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    sub w8, w8, #1 // nr--
.Lenc_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
    tbl v5.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, .Lenc_loop
    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core

.globl vpaes_encrypt
.type vpaes_encrypt,%function
.align 4
vpaes_encrypt:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    ld1 {v7.16b}, [$inp]
    bl _vpaes_encrypt_preheat
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [$out]
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_encrypt,.-vpaes_encrypt
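
// For reference, the C prototype this entry point implements (as
// declared by OpenSSL's AES glue code) is:
//
//    void vpaes_encrypt(const unsigned char *in, unsigned char *out,
//                       const AES_KEY *key);
//
// vpaes_decrypt below takes the same arguments.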
.type _vpaes_encrypt_2x,%function
.align 4
_vpaes_encrypt_2x:
    mov x9, $key
    ldr w8, [$key,#240] // pull rounds
    adr x11, .Lk_mc_forward+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    and v9.16b, v15.16b, v17.16b
    ushr v8.16b, v15.16b, #4
    tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    tbl v9.16b, {$iptlo}, v9.16b
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    tbl v10.16b, {$ipthi}, v8.16b
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v8.16b, v9.16b, v16.16b
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    eor v8.16b, v8.16b, v10.16b
    b .Lenc_2x_entry
.align 4
.Lenc_2x_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    tbl v12.16b, {$sb1t}, v10.16b
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    tbl v8.16b, {$sb1u}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    tbl v13.16b, {$sb2t}, v10.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    tbl v10.16b, {$sb2u}, v11.16b
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    tbl v11.16b, {v8.16b}, v1.16b
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    eor v10.16b, v10.16b, v13.16b
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    tbl v8.16b, {v8.16b}, v4.16b
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    eor v11.16b, v11.16b, v10.16b
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    tbl v12.16b, {v11.16b},v1.16b
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    eor v8.16b, v8.16b, v11.16b
    and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    eor v8.16b, v8.16b, v12.16b
    sub w8, w8, #1 // nr--
.Lenc_2x_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
    and v9.16b, v8.16b, v17.16b
    ushr v8.16b, v8.16b, #4
    tbl v5.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    tbl v13.16b, {$invhi},v9.16b
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    eor v9.16b, v9.16b, v8.16b
    tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v11.16b, {$invlo},v8.16b
    tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    tbl v12.16b, {$invlo},v9.16b
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v11.16b, v11.16b, v13.16b
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    eor v12.16b, v12.16b, v13.16b
    tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v10.16b, {$invlo},v11.16b
    tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    tbl v11.16b, {$invlo},v12.16b
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v10.16b, v10.16b, v9.16b
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    eor v11.16b, v11.16b, v8.16b
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, .Lenc_2x_loop
    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    tbl v12.16b, {$sbou}, v10.16b
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    tbl v8.16b, {$sbot}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    tbl v1.16b, {v8.16b},v1.16b
    ret
.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x

.type _vpaes_decrypt_preheat,%function
.align 4
_vpaes_decrypt_preheat:
    adr x10, .Lk_inv
    movi v17.16b, #0x0f
    adr x11, .Lk_dipt
    ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
    ld1 {v20.2d-v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo
    ld1 {v24.2d-v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd
    ld1 {v28.2d-v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe
    ret
.size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

//
//  Decryption core
//
//  Same API as encryption core.
//
.type _vpaes_decrypt_core,%function
.align 4
_vpaes_decrypt_core:
    mov x9, $key
    ldr w8, [$key,#240] // pull rounds
    // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
    lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
    eor x11, x11, #0x30 // xor \$0x30, %r11
    adr x10, .Lk_sr
    and x11, x11, #0x30 // and \$0x30, %r11
    add x11, x11, x10
    adr x10, .Lk_mc_forward+48
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
    and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
    // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
    tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    b .Ldec_entry
.align 4
.Ldec_loop:
    //
    // Inverse mix columns
    //
    // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
    // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
    tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
    tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
    eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
    // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
    tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
    tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
    tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    sub w8, w8, #1 // sub \$1,%rax # nr--
.Ldec_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
    tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
    cbnz w8, .Ldec_loop
    // middle of last round
    // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
    tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
    ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
    tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
    eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
    tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0
    ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core

.globl vpaes_decrypt
.type vpaes_decrypt,%function
.align 4
vpaes_decrypt:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    ld1 {v7.16b}, [$inp]
    bl _vpaes_decrypt_preheat
    bl _vpaes_decrypt_core
    st1 {v0.16b}, [$out]
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type _vpaes_decrypt_2x,%function
.align 4
_vpaes_decrypt_2x:
    mov x9, $key
    ldr w8, [$key,#240] // pull rounds
    // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
    lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
    eor x11, x11, #0x30 // xor \$0x30, %r11
    adr x10, .Lk_sr
    and x11, x11, #0x30 // and \$0x30, %r11
    add x11, x11, x10
    adr x10, .Lk_mc_forward+48
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
    and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    and v9.16b, v15.16b, v17.16b
    ushr v8.16b, v15.16b, #4
    tbl v2.16b, {$iptlo},v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    tbl v10.16b, {$iptlo},v9.16b
    ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
    // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
    tbl v0.16b, {$ipthi},v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    tbl v8.16b, {$ipthi},v8.16b
    eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
    eor v10.16b, v10.16b, v16.16b
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    eor v8.16b, v8.16b, v10.16b
    b .Ldec_2x_entry
.align 4
.Ldec_2x_loop:
    //
    // Inverse mix columns
    //
    // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
    // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
    tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
    tbl v12.16b, {$sb9u}, v10.16b
    tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
    tbl v9.16b, {$sb9t}, v11.16b
    eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
    eor v8.16b, v12.16b, v16.16b
    // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
    tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
    tbl v12.16b, {$sbdu}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
    tbl v9.16b, {$sbdt}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
    tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
    tbl v12.16b, {$sbbu}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
    tbl v9.16b, {$sbbt}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
    tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
    tbl v12.16b, {$sbeu}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
    tbl v9.16b, {$sbet}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    sub w8, w8, #1 // sub \$1,%rax # nr--
.Ldec_2x_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
    and v9.16b, v8.16b, v17.16b
    ushr v8.16b, v8.16b, #4
    tbl v2.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    tbl v10.16b, {$invhi},v9.16b
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    eor v9.16b, v9.16b, v8.16b
    tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v11.16b, {$invlo},v8.16b
    tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    tbl v12.16b, {$invlo},v9.16b
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v11.16b, v11.16b, v10.16b
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    eor v12.16b, v12.16b, v10.16b
    tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v10.16b, {$invlo},v11.16b
    tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    tbl v11.16b, {$invlo},v12.16b
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v10.16b, v10.16b, v9.16b
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    eor v11.16b, v11.16b, v8.16b
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
    cbnz w8, .Ldec_2x_loop
    // middle of last round
    // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
    tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    tbl v12.16b, {$sbou}, v10.16b
    // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
    tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
    tbl v9.16b, {$sbot}, v11.16b
    ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
    eor v8.16b, v9.16b, v12.16b
    tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0
    tbl v1.16b, {v8.16b},v2.16b
    ret
.size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x
___
}
{
my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));

$code.=<<___;
////////////////////////////////////////////////////////
//                                                    //
//                  AES key schedule                  //
//                                                    //
////////////////////////////////////////////////////////
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
    adr x10, .Lk_inv
    movi v16.16b, #0x5b // .Lk_s63
    adr x11, .Lk_sb1
    movi v17.16b, #0x0f // .Lk_s0F
    ld1 {v18.2d-v21.2d}, [x10] // .Lk_inv, .Lk_ipt
    adr x10, .Lk_dksd
    ld1 {v22.2d-v23.2d}, [x11] // .Lk_sb1
    adr x11, .Lk_mc_forward
    ld1 {v24.2d-v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
    ld1 {v28.2d-v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
    ld1 {v8.2d}, [x10] // .Lk_rcon
    ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
    ret
.size _vpaes_key_preheat,.-_vpaes_key_preheat

.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
    AARCH64_SIGN_LINK_REGISTER
    stp x29, x30, [sp,#-16]!
    add x29,sp,#0
    bl _vpaes_key_preheat // load the tables
    ld1 {v0.16b}, [$inp],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
    // input transform
    mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
    bl _vpaes_schedule_transform
    mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
    adr x10, .Lk_sr // lea .Lk_sr(%rip),%r10
    add x8, x8, x10
    cbnz $dir, .Lschedule_am_decrypting
    // encrypting, output zeroth round key after transform
    st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx)
    b .Lschedule_go
.Lschedule_am_decrypting:
    // decrypting, output zeroth round key after shiftrows
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
    eor x8, x8, #0x30 // xor \$0x30, %r8
.Lschedule_go:
    cmp $bits, #192 // cmp \$192, %esi
    b.hi .Lschedule_256
    b.eq .Lschedule_192
    // 128: fall through
//
// .schedule_128
//
// 128-bit specific part of key schedule.
//
// This schedule is really simple, because all its parts
// are accomplished by the subroutines.
//
.Lschedule_128:
    mov $inp, #10 // mov \$10, %esi
.Loop_schedule_128:
    sub $inp, $inp, #1 // dec %esi
    bl _vpaes_schedule_round
    cbz $inp, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle // write output
    b .Loop_schedule_128

//
// .aes_schedule_192
//
// 192-bit specific part of key schedule.
//
// The main body of this schedule is the same as the 128-bit
// schedule, but with more smearing. The long, high side is
// stored in %xmm7 as before, and the short, low side is in
// the high bits of %xmm6.
//
// This schedule is somewhat nastier, however, because each
// round produces 192 bits of key material, or 1.5 round keys.
// Therefore, on each cycle we do 2 rounds and produce 3 round
// keys.
//
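// (Key count check: the zeroth round key is stored before .Lschedule_go,
// and the four iterations of .Loop_schedule_192 below supply the
// remaining twelve, the last of them via .Lschedule_mangle_last, giving
// the 13 round keys AES-192 requires.)
//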
.align 4
.Lschedule_192:
    sub $inp, $inp, #8
    ld1 {v0.16b}, [$inp] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
    bl _vpaes_schedule_transform // input transform
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
    ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
    mov $inp, #4 // mov \$4, %esi
.Loop_schedule_192:
    sub $inp, $inp, #1 // dec %esi
    bl _vpaes_schedule_round
    ext v0.16b, v6.16b, v0.16b, #8 // vpalignr \$8,%xmm6,%xmm0,%xmm0
    bl _vpaes_schedule_mangle // save key n
    bl _vpaes_schedule_192_smear
    bl _vpaes_schedule_mangle // save key n+1
    bl _vpaes_schedule_round
    cbz $inp, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle // save key n+2
    bl _vpaes_schedule_192_smear
    b .Loop_schedule_192

//
// .aes_schedule_256
//
// 256-bit specific part of key schedule.
//
// The structure here is very similar to the 128-bit
// schedule, but with an additional "low side" in
// %xmm6. The low side's rounds are the same as the
// high side's, except no rcon and no rotation.
//
.align 4
.Lschedule_256:
    ld1 {v0.16b}, [$inp] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
    bl _vpaes_schedule_transform // input transform
    mov $inp, #7 // mov \$7, %esi
.Loop_schedule_256:
    sub $inp, $inp, #1 // dec %esi
    bl _vpaes_schedule_mangle // output low result
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
    // high round
    bl _vpaes_schedule_round
    cbz $inp, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle
    // low round. swap xmm7 and xmm6
    dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
    movi v4.16b, #0
    mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
    mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
    bl _vpaes_schedule_low_round
    mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
    b .Loop_schedule_256

//
// .aes_schedule_mangle_last
//
// Mangler for last round of key schedule
// Mangles %xmm0
//   when encrypting, outputs out(%xmm0) ^ 63
//   when decrypting, outputs unskew(%xmm0)
//
// Always called right before return... jumps to cleanup and exits
//
.align 4
.Lschedule_mangle_last:
    // schedule last round key from xmm0
    adr x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
    cbnz $dir, .Lschedule_mangle_last_dec
    // encrypting
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
    adr x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
    add $out, $out, #32 // add \$32, %rdx
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
.Lschedule_mangle_last_dec:
    ld1 {v20.2d-v21.2d}, [x11] // reload constants
    sub $out, $out, #16 // add \$-16, %rdx
    eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
    bl _vpaes_schedule_transform // output transform
    st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx) # save last key
    // cleanup
    eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
    eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
    eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
    eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
    eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
    ldp x29, x30, [sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core

//
// .aes_schedule_192_smear
//
// Smear the short, low side in the 192-bit key schedule.
//
// Inputs:
//   %xmm7: high side, b a x y
//   %xmm6: low side, d c 0 0
//   %xmm13: 0
//
// Outputs:
//   %xmm6: b+c+d b+c 0 0
//   %xmm0: b+c+d b+c b a
//
.type _vpaes_schedule_192_smear,%function
.align 4
_vpaes_schedule_192_smear:
    movi v1.16b, #0
    dup v0.4s, v7.s[3]
    ins v1.s[3], v6.s[2] // vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
    ins v0.s[0], v7.s[2] // vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
    eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
    mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
    ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
    ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

//
// .aes_schedule_round
//
// Runs one main round of the key schedule on %xmm0, %xmm7
//
// Specifically, runs subbytes on the high dword of %xmm0
// then rotates it by one byte and xors into the low dword of
// %xmm7.
//
// Adds rcon from low byte of %xmm8, then rotates %xmm8 for
// next rcon.
//
// Smears the dwords of %xmm7 by xoring the low into the
// second low, result into third, result into highest.
//
// Returns results in %xmm7 = %xmm0.
// Clobbers %xmm1-%xmm4, %r11.
//
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
    // extract rcon from xmm8
    movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
    ext v1.16b, $rcon, v4.16b, #15 // vpalignr \$15, %xmm8, %xmm4, %xmm1
    ext $rcon, $rcon, $rcon, #15 // vpalignr \$15, %xmm8, %xmm8, %xmm8
    eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
    // rotate
    dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
    ext v0.16b, v0.16b, v0.16b, #1 // vpalignr \$1, %xmm0, %xmm0, %xmm0
    // fall through...
    // low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
    // smear xmm7
    ext v1.16b, v4.16b, v7.16b, #12 // vpslldq \$4, %xmm7, %xmm1
    eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
    ext v4.16b, v4.16b, v7.16b, #8 // vpslldq \$8, %xmm7, %xmm4
    // subbytes
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
    eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
    tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
    tbl v3.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
    eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
    eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
    tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
    tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
    eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
    // add in smeared stuff
    eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
    eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
    ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round

//
// .aes_schedule_transform
//
// Linear-transform %xmm0 according to tables at (%r11)
//
// Requires that %xmm9 = 0x0F0F... as in preheat
// Output in %xmm0
// Clobbers %xmm1, %xmm2
//
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
    // vmovdqa (%r11), %xmm2 # lo
    tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    // vmovdqa 16(%r11), %xmm1 # hi
    tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform

//
// .aes_schedule_mangle
//
// Mangle xmm0 from (basis-transformed) standard version
// to our version.
//
// On encrypt,
//   xor with 0x63
//   multiply by circulant 0,1,1,1
//   apply shiftrows transform
//
// On decrypt,
//   xor with 0x63
//   multiply by "inverse mixcolumns" circulant E,B,D,9
//   deskew
//   apply shiftrows transform
//
//
// Writes out to (%rdx), and increments or decrements it
// Keeps track of round number mod 4 in %r8
// Preserves xmm0
// Clobbers xmm1-xmm5
//
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
    mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
    // vmovdqa .Lk_mc_forward(%rip),%xmm5
    cbnz $dir, .Lschedule_mangle_dec
    // encrypting
    eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
    add $out, $out, #16 // add \$16, %rdx
    tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
    tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
    tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
    eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
    b .Lschedule_mangle_both
.align 4
.Lschedule_mangle_dec:
    // inverse mix columns
    // lea .Lk_dksd(%rip),%r11
    ushr v1.16b, v4.16b, #4 // vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
    and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo
    // vmovdqa 0x00(%r11), %xmm2
    tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    // vmovdqa 0x10(%r11), %xmm3
    tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
    // vmovdqa 0x20(%r11), %xmm2
    tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    // vmovdqa 0x30(%r11), %xmm3
    tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
    // vmovdqa 0x40(%r11), %xmm2
    tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    // vmovdqa 0x50(%r11), %xmm3
    tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
    // vmovdqa 0x60(%r11), %xmm2
    tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
    // vmovdqa 0x70(%r11), %xmm4
    tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3
    sub $out, $out, #16 // add \$-16, %rdx
.Lschedule_mangle_both:
    tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    add x8, x8, #64-16 // add \$-16, %r8
    and x8, x8, #~(1<<6) // and \$0x30, %r8
    st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
    ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    lsr w9, $bits, #5 // shr \$5,%eax
    add w9, w9, #5 // \$5,%eax
    str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
    mov $dir, #0 // mov \$0,%ecx
    mov x8, #0x30 // mov \$0x30,%r8d
    bl _vpaes_schedule_core
    eor x0, x0, x0
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl vpaes_set_decrypt_key
.type vpaes_set_decrypt_key,%function
.align 4
vpaes_set_decrypt_key:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    lsr w9, $bits, #5 // shr \$5,%eax
    add w9, w9, #5 // \$5,%eax
    str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
    lsl w9, w9, #4 // shl \$4,%eax
    add $out, $out, #16 // lea 16(%rdx,%rax),%rdx
    add $out, $out, x9
    mov $dir, #1 // mov \$1,%ecx
    lsr w8, $bits, #1 // shr \$1,%r8d
    and x8, x8, #32 // and \$32,%r8d
    eor x8, x8, #32 // xor \$32,%r8d # nbits==192?0:32
    bl _vpaes_schedule_core
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
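
// For reference, the key-setup prototypes (as declared by OpenSSL's
// AES glue code):
//
//    int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
//                              AES_KEY *key);
//    int vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
//                              AES_KEY *key);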
___
}
{
my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));

$code.=<<___;
.globl vpaes_cbc_encrypt
.type vpaes_cbc_encrypt,%function
.align 4
vpaes_cbc_encrypt:
    AARCH64_SIGN_LINK_REGISTER
    cbz $len, .Lcbc_abort
    cmp w5, #0 // check direction
    b.eq vpaes_cbc_decrypt
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    mov x17, $len // reassign
    mov x2, $key // reassign
    ld1 {v0.16b}, [$ivec] // load ivec
    bl _vpaes_encrypt_preheat
    b .Lcbc_enc_loop
.align 4
.Lcbc_enc_loop:
    ld1 {v7.16b}, [$inp],#16 // load input
    eor v7.16b, v7.16b, v0.16b // xor with ivec
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [$out],#16 // save output
    subs x17, x17, #16
    b.hi .Lcbc_enc_loop
    st1 {v0.16b}, [$ivec] // write ivec
    ldp x29,x30,[sp],#16
.Lcbc_abort:
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
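
// For reference, the CBC entry point follows the AES_cbc_encrypt
// argument order:
//
//    void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
//                           size_t length, const AES_KEY *key,
//                           unsigned char *ivec, const int enc);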
.type vpaes_cbc_decrypt,%function
.align 4
vpaes_cbc_decrypt:
    // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to
    // only from vpaes_cbc_encrypt which has already signed the return address.
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    stp d10,d11,[sp,#-16]!
    stp d12,d13,[sp,#-16]!
    stp d14,d15,[sp,#-16]!
    mov x17, $len // reassign
    mov x2, $key // reassign
    ld1 {v6.16b}, [$ivec] // load ivec
    bl _vpaes_decrypt_preheat
    tst x17, #16
    b.eq .Lcbc_dec_loop2x
    ld1 {v7.16b}, [$inp], #16 // load input
    bl _vpaes_decrypt_core
    eor v0.16b, v0.16b, v6.16b // xor with ivec
    orr v6.16b, v7.16b, v7.16b // next ivec value
    st1 {v0.16b}, [$out], #16
    subs x17, x17, #16
    b.ls .Lcbc_dec_done
.align 4
.Lcbc_dec_loop2x:
    ld1 {v14.16b,v15.16b}, [$inp], #32
    bl _vpaes_decrypt_2x
    eor v0.16b, v0.16b, v6.16b // xor with ivec
    eor v1.16b, v1.16b, v14.16b
    orr v6.16b, v15.16b, v15.16b
    st1 {v0.16b,v1.16b}, [$out], #32
    subs x17, x17, #32
    b.hi .Lcbc_dec_loop2x
.Lcbc_dec_done:
    st1 {v6.16b}, [$ivec]
    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
___
if (1) {
$code.=<<___;
.globl vpaes_ecb_encrypt
.type vpaes_ecb_encrypt,%function
.align 4
vpaes_ecb_encrypt:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    stp d10,d11,[sp,#-16]!
    stp d12,d13,[sp,#-16]!
    stp d14,d15,[sp,#-16]!
    mov x17, $len
    mov x2, $key
    bl _vpaes_encrypt_preheat
    tst x17, #16
    b.eq .Lecb_enc_loop
    ld1 {v7.16b}, [$inp],#16
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [$out],#16
    subs x17, x17, #16
    b.ls .Lecb_enc_done
.align 4
.Lecb_enc_loop:
    ld1 {v14.16b,v15.16b}, [$inp], #32
    bl _vpaes_encrypt_2x
    st1 {v0.16b,v1.16b}, [$out], #32
    subs x17, x17, #32
    b.hi .Lecb_enc_loop
.Lecb_enc_done:
    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl vpaes_ecb_decrypt
.type vpaes_ecb_decrypt,%function
.align 4
vpaes_ecb_decrypt:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    stp d10,d11,[sp,#-16]!
    stp d12,d13,[sp,#-16]!
    stp d14,d15,[sp,#-16]!
    mov x17, $len
    mov x2, $key
    bl _vpaes_decrypt_preheat
    tst x17, #16
    b.eq .Lecb_dec_loop
    ld1 {v7.16b}, [$inp],#16
    bl _vpaes_decrypt_core
    st1 {v0.16b}, [$out],#16
    subs x17, x17, #16
    b.ls .Lecb_dec_done
.align 4
.Lecb_dec_loop:
    ld1 {v14.16b,v15.16b}, [$inp], #32
    bl _vpaes_decrypt_2x
    st1 {v0.16b,v1.16b}, [$out], #32
    subs x17, x17, #32
    b.hi .Lecb_dec_loop
.Lecb_dec_done:
    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret
.size vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
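
// The ECB helpers above take (in, out, length, key) in x0-x3; length
// is in bytes and is assumed to be a multiple of the 16-byte block
// size.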
___
} }

print $code;

close STDOUT or die "error closing STDOUT: $!";