  1. #! /usr/bin/env perl
  2. # Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License 2.0 (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. ######################################################################
  9. ## Constant-time SSSE3 AES core implementation.
  10. ## version 0.1
  11. ##
  12. ## By Mike Hamburg (Stanford University), 2009
  13. ## Public domain.
  14. ##
  15. ## For details see http://shiftleft.org/papers/vector_aes/ and
  16. ## http://crypto.stanford.edu/vpaes/.
  17. ##
  18. ######################################################################
  19. # ARMv8 NEON adaptation by <appro@openssl.org>
  20. #
  21. # The reason for undertaking this effort is that there is at least one popular
  22. # SoC based on Cortex-A53 that doesn't have crypto extensions.
  23. #
  24. #                   CBC enc     ECB enc/dec(*)  [bit-sliced enc/dec]
  25. # Cortex-A53        21.5        18.1/20.6       [17.5/19.8      ]
  26. # Cortex-A57        36.0(**)    20.4/24.9(**)   [14.4/16.6      ]
  27. # X-Gene            45.9(**)    45.8/57.7(**)   [33.1/37.6(**)  ]
  28. # Denver(***)       16.6(**)    15.1/17.8(**)   [8.80/9.93      ]
  29. # Apple A7(***)     22.7(**)    10.9/14.3       [8.45/10.0      ]
  30. # Mongoose(***)     26.3(**)    21.0/25.0(**)   [13.3/16.8      ]
  31. # ThunderX2(***)    39.4(**)    33.8/48.6(**)
  32. #
  33. # (*) ECB denotes approximate result for parallelizable modes
  34. # such as CBC decrypt, CTR, etc.;
  35. # (**) these results are worse than scalar compiler-generated
  36. # code, but it's constant-time and therefore preferred;
  37. # (***) presented for reference/comparison purposes;
  38. # $output is the last argument if it looks like a file (it has an extension)
  39. # $flavour is the first argument if it doesn't look like a file
  40. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  41. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
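# A minimal usage sketch (the flavour and output file names below are
# illustrative assumptions; the real values are supplied by the OpenSSL
# build system):
#
#   perl vpaes-armv8.pl linux64 vpaes-armv8.S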
  42. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  43. ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
  44. ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
  45. die "can't locate arm-xlate.pl";
  46. open OUT,"| \"$^X\" $xlate $flavour \"$output\""
  47. or die "can't call $xlate: $!";
  48. *STDOUT=*OUT;
  49. $code.=<<___;
  50. .text
  51. .type _vpaes_consts,%object
  52. .align 7 // totally strategic alignment
  53. _vpaes_consts:
  54. .Lk_mc_forward: // mc_forward
  55. .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
  56. .quad 0x080B0A0904070605, 0x000302010C0F0E0D
  57. .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
  58. .quad 0x000302010C0F0E0D, 0x080B0A0904070605
  59. .Lk_mc_backward:// mc_backward
  60. .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
  61. .quad 0x020100030E0D0C0F, 0x0A09080B06050407
  62. .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
  63. .quad 0x0A09080B06050407, 0x020100030E0D0C0F
  64. .Lk_sr: // sr
  65. .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
  66. .quad 0x030E09040F0A0500, 0x0B06010C07020D08
  67. .quad 0x0F060D040B020900, 0x070E050C030A0108
  68. .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
  69. //
  70. // "Hot" constants
  71. //
  72. .Lk_inv: // inv, inva
  73. .quad 0x0E05060F0D080180, 0x040703090A0B0C02
  74. .quad 0x01040A060F0B0780, 0x030D0E0C02050809
  75. .Lk_ipt: // input transform (lo, hi)
  76. .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
  77. .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
  78. .Lk_sbo: // sbou, sbot
  79. .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
  80. .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
  81. .Lk_sb1: // sb1u, sb1t
  82. .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
  83. .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
  84. .Lk_sb2: // sb2u, sb2t
  85. .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
  86. .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
  87. //
  88. // Decryption stuff
  89. //
  90. .Lk_dipt: // decryption input transform
  91. .quad 0x0F505B040B545F00, 0x154A411E114E451A
  92. .quad 0x86E383E660056500, 0x12771772F491F194
  93. .Lk_dsbo: // decryption sbox final output
  94. .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
  95. .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
  96. .Lk_dsb9: // decryption sbox output *9*u, *9*t
  97. .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
  98. .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
  99. .Lk_dsbd: // decryption sbox output *D*u, *D*t
  100. .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
  101. .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
  102. .Lk_dsbb: // decryption sbox output *B*u, *B*t
  103. .quad 0xD022649296B44200, 0x602646F6B0F2D404
  104. .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
  105. .Lk_dsbe: // decryption sbox output *E*u, *E*t
  106. .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
  107. .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
  108. //
  109. // Key schedule constants
  110. //
  111. .Lk_dksd: // decryption key schedule: invskew x*D
  112. .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
  113. .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
  114. .Lk_dksb: // decryption key schedule: invskew x*B
  115. .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
  116. .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
  117. .Lk_dkse: // decryption key schedule: invskew x*E + 0x63
  118. .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
  119. .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
  120. .Lk_dks9: // decryption key schedule: invskew x*9
  121. .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
  122. .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
  123. .Lk_rcon: // rcon
  124. .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
  125. .Lk_opt: // output transform
  126. .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
  127. .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
  128. .Lk_deskew: // deskew tables: inverts the sbox's "skew"
  129. .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
  130. .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
  131. .asciz "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
  132. .size _vpaes_consts,.-_vpaes_consts
  133. .align 6
  134. ___
  135. {
  136. my ($inp,$out,$key) = map("x$_",(0..2));
  137. my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
  138. my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
  139. my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));
  140. $code.=<<___;
  141. //
  142. // _aes_preheat
  143. //
  144. // Fills register %r10 -> .aes_consts (so you can -fPIC)
  145. // and %xmm9-%xmm15 as specified below.
  146. //
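// In this ARMv8 translation the same job is done with x10 pointing at the
// constant pool and the 0x0F nibble mask in v17, .Lk_inv in v18-v19,
// .Lk_ipt/.Lk_sbo in v20-v23 and .Lk_sb1/.Lk_sb2 in v24-v27 (see the loads
// below).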
  147. .type _vpaes_encrypt_preheat,%function
  148. .align 4
  149. _vpaes_encrypt_preheat:
  150. adr x10, .Lk_inv
  151. movi v17.16b, #0x0f
  152. ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
  153. ld1 {v20.2d-v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
  154. ld1 {v24.2d-v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
  155. ret
  156. .size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
  157. //
  158. // _aes_encrypt_core
  159. //
  160. // AES-encrypt %xmm0.
  161. //
  162. // Inputs:
  163. // %xmm0 = input
  164. // %xmm9-%xmm15 as in _vpaes_preheat
  165. // (%rdx) = scheduled keys
  166. //
  167. // Output in %xmm0
  168. // Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
  169. // Preserves %xmm6 - %xmm8 so you get some local vectors
  170. //
  171. //
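// In this ARMv8 translation the block travels in v7 (input) and v0 (output),
// the key schedule pointer is x9 (copied from x2), the round count is w8,
// and v1-v5 plus x10/x11 are clobbered; the constants are expected in
// v17-v27 as set up by _vpaes_encrypt_preheat, while v16 holds the current
// round key.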
  172. .type _vpaes_encrypt_core,%function
  173. .align 4
  174. _vpaes_encrypt_core:
  175. mov x9, $key
  176. ldr w8, [$key,#240] // pull rounds
  177. adr x11, .Lk_mc_forward+16
  178. // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
  179. ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
  180. and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
  181. ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
  182. tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
  183. // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
  184. tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
  185. eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
  186. eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
  187. b .Lenc_entry
  188. .align 4
  189. .Lenc_loop:
  190. // middle of middle round
  191. add x10, x11, #0x40
  192. tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
  193. ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
  194. tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
  195. eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
  196. tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
  197. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
  198. tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
  199. ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
  200. tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
  201. eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
  202. tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
  203. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
  204. tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
  205. eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
  206. and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
  207. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
  208. sub w8, w8, #1 // nr--
  209. .Lenc_entry:
  210. // top of round
  211. and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
  212. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
  213. tbl v5.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
  214. eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
  215. tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
  216. tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
  217. eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
  218. eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
  219. tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
  220. tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
  221. eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
  222. eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
  223. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
  224. cbnz w8, .Lenc_loop
  225. // middle of last round
  226. add x10, x11, #0x80
  227. // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
  228. // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
  229. tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
  230. ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
  231. tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
  232. eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
  233. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
  234. tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
  235. ret
  236. .size _vpaes_encrypt_core,.-_vpaes_encrypt_core
  237. .globl vpaes_encrypt
  238. .type vpaes_encrypt,%function
  239. .align 4
  240. vpaes_encrypt:
  241. .inst 0xd503233f // paciasp
  242. stp x29,x30,[sp,#-16]!
  243. add x29,sp,#0
  244. ld1 {v7.16b}, [$inp]
  245. bl _vpaes_encrypt_preheat
  246. bl _vpaes_encrypt_core
  247. st1 {v0.16b}, [$out]
  248. ldp x29,x30,[sp],#16
  249. .inst 0xd50323bf // autiasp
  250. ret
  251. .size vpaes_encrypt,.-vpaes_encrypt
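// A hedged sketch of the C-level prototype this entry point is written
// against (an assumption based on the register usage above, mirroring
// AES_encrypt; it is not stated in this file):
//
//   void vpaes_encrypt(const unsigned char *in, unsigned char *out,
//                      const AES_KEY *key);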
  252. .type _vpaes_encrypt_2x,%function
  253. .align 4
  254. _vpaes_encrypt_2x:
  255. mov x9, $key
  256. ldr w8, [$key,#240] // pull rounds
  257. adr x11, .Lk_mc_forward+16
  258. // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
  259. ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
  260. and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
  261. ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
  262. and v9.16b, v15.16b, v17.16b
  263. ushr v8.16b, v15.16b, #4
  264. tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
  265. tbl v9.16b, {$iptlo}, v9.16b
  266. // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
  267. tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
  268. tbl v10.16b, {$ipthi}, v8.16b
  269. eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
  270. eor v8.16b, v9.16b, v16.16b
  271. eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
  272. eor v8.16b, v8.16b, v10.16b
  273. b .Lenc_2x_entry
  274. .align 4
  275. .Lenc_2x_loop:
  276. // middle of middle round
  277. add x10, x11, #0x40
  278. tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
  279. tbl v12.16b, {$sb1t}, v10.16b
  280. ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
  281. tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
  282. tbl v8.16b, {$sb1u}, v11.16b
  283. eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
  284. eor v12.16b, v12.16b, v16.16b
  285. tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
  286. tbl v13.16b, {$sb2t}, v10.16b
  287. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
  288. eor v8.16b, v8.16b, v12.16b
  289. tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
  290. tbl v10.16b, {$sb2u}, v11.16b
  291. ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
  292. tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
  293. tbl v11.16b, {v8.16b}, v1.16b
  294. eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
  295. eor v10.16b, v10.16b, v13.16b
  296. tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
  297. tbl v8.16b, {v8.16b}, v4.16b
  298. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
  299. eor v11.16b, v11.16b, v10.16b
  300. tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
  301. tbl v12.16b, {v11.16b},v1.16b
  302. eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
  303. eor v8.16b, v8.16b, v11.16b
  304. and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
  305. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
  306. eor v8.16b, v8.16b, v12.16b
  307. sub w8, w8, #1 // nr--
  308. .Lenc_2x_entry:
  309. // top of round
  310. and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
  311. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
  312. and v9.16b, v8.16b, v17.16b
  313. ushr v8.16b, v8.16b, #4
  314. tbl v5.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
  315. tbl v13.16b, {$invhi},v9.16b
  316. eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
  317. eor v9.16b, v9.16b, v8.16b
  318. tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
  319. tbl v11.16b, {$invlo},v8.16b
  320. tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
  321. tbl v12.16b, {$invlo},v9.16b
  322. eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
  323. eor v11.16b, v11.16b, v13.16b
  324. eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
  325. eor v12.16b, v12.16b, v13.16b
  326. tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
  327. tbl v10.16b, {$invlo},v11.16b
  328. tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
  329. tbl v11.16b, {$invlo},v12.16b
  330. eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
  331. eor v10.16b, v10.16b, v9.16b
  332. eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
  333. eor v11.16b, v11.16b, v8.16b
  334. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
  335. cbnz w8, .Lenc_2x_loop
  336. // middle of last round
  337. add x10, x11, #0x80
  338. // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
  339. // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
  340. tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
  341. tbl v12.16b, {$sbou}, v10.16b
  342. ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
  343. tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
  344. tbl v8.16b, {$sbot}, v11.16b
  345. eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
  346. eor v12.16b, v12.16b, v16.16b
  347. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
  348. eor v8.16b, v8.16b, v12.16b
  349. tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
  350. tbl v1.16b, {v8.16b},v1.16b
  351. ret
  352. .size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x
  353. .type _vpaes_decrypt_preheat,%function
  354. .align 4
  355. _vpaes_decrypt_preheat:
  356. adr x10, .Lk_inv
  357. movi v17.16b, #0x0f
  358. adr x11, .Lk_dipt
  359. ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
  360. ld1 {v20.2d-v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo
  361. ld1 {v24.2d-v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd
  362. ld1 {v28.2d-v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe
  363. ret
  364. .size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat
  365. //
  366. // Decryption core
  367. //
  368. // Same API as encryption core.
  369. //
  370. .type _vpaes_decrypt_core,%function
  371. .align 4
  372. _vpaes_decrypt_core:
  373. mov x9, $key
  374. ldr w8, [$key,#240] // pull rounds
  375. // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
  376. lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
  377. eor x11, x11, #0x30 // xor \$0x30, %r11
  378. adr x10, .Lk_sr
  379. and x11, x11, #0x30 // and \$0x30, %r11
  380. add x11, x11, x10
  381. adr x10, .Lk_mc_forward+48
  382. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
  383. and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
  384. ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
  385. tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
  386. ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
  387. // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
  388. tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
  389. eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
  390. eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
  391. b .Ldec_entry
  392. .align 4
  393. .Ldec_loop:
  394. //
  395. // Inverse mix columns
  396. //
  397. // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
  398. // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
  399. tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
  400. tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
  401. eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
  402. // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
  403. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  404. // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
  405. tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
  406. tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  407. tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
  408. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  409. // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
  410. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  411. // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
  412. tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
  413. tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  414. tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
  415. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  416. // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
  417. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  418. // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
  419. tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
  420. tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  421. tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
  422. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  423. ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
  424. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  425. sub w8, w8, #1 // sub \$1,%rax # nr--
  426. .Ldec_entry:
  427. // top of round
  428. and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
  429. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
  430. tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
  431. eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
  432. tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
  433. tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
  434. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
  435. eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
  436. tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
  437. tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
  438. eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
  439. eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
  440. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
  441. cbnz w8, .Ldec_loop
  442. // middle of last round
  443. // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
  444. tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
  445. // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
  446. ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
  447. tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
  448. eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
  449. eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
  450. tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0
  451. ret
  452. .size _vpaes_decrypt_core,.-_vpaes_decrypt_core
  453. .globl vpaes_decrypt
  454. .type vpaes_decrypt,%function
  455. .align 4
  456. vpaes_decrypt:
  457. .inst 0xd503233f // paciasp
  458. stp x29,x30,[sp,#-16]!
  459. add x29,sp,#0
  460. ld1 {v7.16b}, [$inp]
  461. bl _vpaes_decrypt_preheat
  462. bl _vpaes_decrypt_core
  463. st1 {v0.16b}, [$out]
  464. ldp x29,x30,[sp],#16
  465. .inst 0xd50323bf // autiasp
  466. ret
  467. .size vpaes_decrypt,.-vpaes_decrypt
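// As for vpaes_encrypt above, the assumed C-level prototype is:
//
//   void vpaes_decrypt(const unsigned char *in, unsigned char *out,
//                      const AES_KEY *key);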
  468. // v14-v15 input, v0-v1 output
  469. .type _vpaes_decrypt_2x,%function
  470. .align 4
  471. _vpaes_decrypt_2x:
  472. mov x9, $key
  473. ldr w8, [$key,#240] // pull rounds
  474. // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
  475. lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
  476. eor x11, x11, #0x30 // xor \$0x30, %r11
  477. adr x10, .Lk_sr
  478. and x11, x11, #0x30 // and \$0x30, %r11
  479. add x11, x11, x10
  480. adr x10, .Lk_mc_forward+48
  481. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
  482. and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
  483. ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
  484. and v9.16b, v15.16b, v17.16b
  485. ushr v8.16b, v15.16b, #4
  486. tbl v2.16b, {$iptlo},v1.16b // vpshufb %xmm1, %xmm2, %xmm2
  487. tbl v10.16b, {$iptlo},v9.16b
  488. ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
  489. // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
  490. tbl v0.16b, {$ipthi},v0.16b // vpshufb %xmm0, %xmm1, %xmm0
  491. tbl v8.16b, {$ipthi},v8.16b
  492. eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
  493. eor v10.16b, v10.16b, v16.16b
  494. eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
  495. eor v8.16b, v8.16b, v10.16b
  496. b .Ldec_2x_entry
  497. .align 4
  498. .Ldec_2x_loop:
  499. //
  500. // Inverse mix columns
  501. //
  502. // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
  503. // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
  504. tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
  505. tbl v12.16b, {$sb9u}, v10.16b
  506. tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
  507. tbl v9.16b, {$sb9t}, v11.16b
  508. eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
  509. eor v8.16b, v12.16b, v16.16b
  510. // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
  511. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  512. eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  513. // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
  514. tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
  515. tbl v12.16b, {$sbdu}, v10.16b
  516. tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  517. tbl v8.16b, {v8.16b},v5.16b
  518. tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
  519. tbl v9.16b, {$sbdt}, v11.16b
  520. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  521. eor v8.16b, v8.16b, v12.16b
  522. // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
  523. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  524. eor v8.16b, v8.16b, v9.16b
  525. // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
  526. tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
  527. tbl v12.16b, {$sbbu}, v10.16b
  528. tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  529. tbl v8.16b, {v8.16b},v5.16b
  530. tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
  531. tbl v9.16b, {$sbbt}, v11.16b
  532. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  533. eor v8.16b, v8.16b, v12.16b
  534. // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
  535. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  536. eor v8.16b, v8.16b, v9.16b
  537. // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
  538. tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
  539. tbl v12.16b, {$sbeu}, v10.16b
  540. tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
  541. tbl v8.16b, {v8.16b},v5.16b
  542. tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
  543. tbl v9.16b, {$sbet}, v11.16b
  544. eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
  545. eor v8.16b, v8.16b, v12.16b
  546. ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
  547. eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
  548. eor v8.16b, v8.16b, v9.16b
  549. sub w8, w8, #1 // sub \$1,%rax # nr--
  550. .Ldec_2x_entry:
  551. // top of round
  552. and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
  553. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
  554. and v9.16b, v8.16b, v17.16b
  555. ushr v8.16b, v8.16b, #4
  556. tbl v2.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
  557. tbl v10.16b, {$invhi},v9.16b
  558. eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
  559. eor v9.16b, v9.16b, v8.16b
  560. tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
  561. tbl v11.16b, {$invlo},v8.16b
  562. tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
  563. tbl v12.16b, {$invlo},v9.16b
  564. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
  565. eor v11.16b, v11.16b, v10.16b
  566. eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
  567. eor v12.16b, v12.16b, v10.16b
  568. tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
  569. tbl v10.16b, {$invlo},v11.16b
  570. tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
  571. tbl v11.16b, {$invlo},v12.16b
  572. eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
  573. eor v10.16b, v10.16b, v9.16b
  574. eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
  575. eor v11.16b, v11.16b, v8.16b
  576. ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
  577. cbnz w8, .Ldec_2x_loop
  578. // middle of last round
  579. // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
  580. tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
  581. tbl v12.16b, {$sbou}, v10.16b
  582. // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
  583. tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
  584. tbl v9.16b, {$sbot}, v11.16b
  585. ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
  586. eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
  587. eor v12.16b, v12.16b, v16.16b
  588. eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
  589. eor v8.16b, v9.16b, v12.16b
  590. tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0
  591. tbl v1.16b, {v8.16b},v2.16b
  592. ret
  593. .size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x
  594. ___
  595. }
  596. {
  597. my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
  598. my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));
  599. $code.=<<___;
  600. ////////////////////////////////////////////////////////
  601. // //
  602. // AES key schedule //
  603. // //
  604. ////////////////////////////////////////////////////////
  605. .type _vpaes_key_preheat,%function
  606. .align 4
  607. _vpaes_key_preheat:
  608. adr x10, .Lk_inv
  609. movi v16.16b, #0x5b // .Lk_s63
  610. adr x11, .Lk_sb1
  611. movi v17.16b, #0x0f // .Lk_s0F
  612. ld1 {v18.2d-v21.2d}, [x10] // .Lk_inv, .Lk_ipt
  613. adr x10, .Lk_dksd
  614. ld1 {v22.2d-v23.2d}, [x11] // .Lk_sb1
  615. adr x11, .Lk_mc_forward
  616. ld1 {v24.2d-v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
  617. ld1 {v28.2d-v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
  618. ld1 {v8.2d}, [x10] // .Lk_rcon
  619. ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
  620. ret
  621. .size _vpaes_key_preheat,.-_vpaes_key_preheat
  622. .type _vpaes_schedule_core,%function
  623. .align 4
  624. _vpaes_schedule_core:
  625. .inst 0xd503233f // paciasp
  626. stp x29, x30, [sp,#-16]!
  627. add x29,sp,#0
  628. bl _vpaes_key_preheat // load the tables
  629. ld1 {v0.16b}, [$inp],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
  630. // input transform
  631. mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
  632. bl _vpaes_schedule_transform
  633. mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
  634. adr x10, .Lk_sr // lea .Lk_sr(%rip),%r10
  635. add x8, x8, x10
  636. cbnz $dir, .Lschedule_am_decrypting
  637. // encrypting, output zeroth round key after transform
  638. st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx)
  639. b .Lschedule_go
  640. .Lschedule_am_decrypting:
  641. // decrypting, output zeroth round key after shiftrows
  642. ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
  643. tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
  644. st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
  645. eor x8, x8, #0x30 // xor \$0x30, %r8
  646. .Lschedule_go:
  647. cmp $bits, #192 // cmp \$192, %esi
  648. b.hi .Lschedule_256
  649. b.eq .Lschedule_192
  650. // 128: fall through
  651. //
  652. // .schedule_128
  653. //
  654. // 128-bit specific part of key schedule.
  655. //
  656. // This schedule is really simple, because all its parts
  657. // are accomplished by the subroutines.
  658. //
  659. .Lschedule_128:
  660. mov $inp, #10 // mov \$10, %esi
  661. .Loop_schedule_128:
  662. sub $inp, $inp, #1 // dec %esi
  663. bl _vpaes_schedule_round
  664. cbz $inp, .Lschedule_mangle_last
  665. bl _vpaes_schedule_mangle // write output
  666. b .Loop_schedule_128
  667. //
  668. // .aes_schedule_192
  669. //
  670. // 192-bit specific part of key schedule.
  671. //
  672. // The main body of this schedule is the same as the 128-bit
  673. // schedule, but with more smearing. The long, high side is
  674. // stored in %xmm7 as before, and the short, low side is in
  675. // the high bits of %xmm6.
  676. //
  677. // This schedule is somewhat nastier, however, because each
  678. // round produces 192 bits of key material, or 1.5 round keys.
  679. // Therefore, on each cycle we do 2 rounds and produce 3 round
  680. // keys.
  681. //
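// Concretely: AES-192 needs 13 round keys.  The zeroth was stored before
// .Lschedule_go, and the four passes of .Loop_schedule_192 below produce the
// remaining twelve (the last of them is written by .Lschedule_mangle_last).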
  682. .align 4
  683. .Lschedule_192:
  684. sub $inp, $inp, #8
  685. ld1 {v0.16b}, [$inp] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
  686. bl _vpaes_schedule_transform // input transform
  687. mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
  688. eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
  689. ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
  690. mov $inp, #4 // mov \$4, %esi
  691. .Loop_schedule_192:
  692. sub $inp, $inp, #1 // dec %esi
  693. bl _vpaes_schedule_round
  694. ext v0.16b, v6.16b, v0.16b, #8 // vpalignr \$8,%xmm6,%xmm0,%xmm0
  695. bl _vpaes_schedule_mangle // save key n
  696. bl _vpaes_schedule_192_smear
  697. bl _vpaes_schedule_mangle // save key n+1
  698. bl _vpaes_schedule_round
  699. cbz $inp, .Lschedule_mangle_last
  700. bl _vpaes_schedule_mangle // save key n+2
  701. bl _vpaes_schedule_192_smear
  702. b .Loop_schedule_192
  703. //
  704. // .aes_schedule_256
  705. //
  706. // 256-bit specific part of key schedule.
  707. //
  708. // The structure here is very similar to the 128-bit
  709. // schedule, but with an additional "low side" in
  710. // %xmm6. The low side's rounds are the same as the
  711. // high side's, except no rcon and no rotation.
  712. //
  713. .align 4
  714. .Lschedule_256:
  715. ld1 {v0.16b}, [$inp] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
  716. bl _vpaes_schedule_transform // input transform
  717. mov $inp, #7 // mov \$7, %esi
  718. .Loop_schedule_256:
  719. sub $inp, $inp, #1 // dec %esi
  720. bl _vpaes_schedule_mangle // output low result
  721. mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
  722. // high round
  723. bl _vpaes_schedule_round
  724. cbz $inp, .Lschedule_mangle_last
  725. bl _vpaes_schedule_mangle
  726. // low round. swap xmm7 and xmm6
  727. dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
  728. movi v4.16b, #0
  729. mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
  730. mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
  731. bl _vpaes_schedule_low_round
  732. mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
  733. b .Loop_schedule_256
  734. //
  735. // .aes_schedule_mangle_last
  736. //
  737. // Mangler for last round of key schedule
  738. // Mangles %xmm0
  739. // when encrypting, outputs out(%xmm0) ^ 63
  740. // when decrypting, outputs unskew(%xmm0)
  741. //
  742. // Always called right before return... jumps to cleanup and exits
  743. //
  744. .align 4
  745. .Lschedule_mangle_last:
  746. // schedule last round key from xmm0
  747. adr x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
  748. cbnz $dir, .Lschedule_mangle_last_dec
  749. // encrypting
  750. ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
  751. adr x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
  752. add $out, $out, #32 // add \$32, %rdx
  753. tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
  754. .Lschedule_mangle_last_dec:
  755. ld1 {v20.2d-v21.2d}, [x11] // reload constants
  756. sub $out, $out, #16 // add \$-16, %rdx
  757. eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
  758. bl _vpaes_schedule_transform // output transform
  759. st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx) # save last key
  760. // cleanup
  761. eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
  762. eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
  763. eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
  764. eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
  765. eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
  766. eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
  767. eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
  768. eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
  769. ldp x29, x30, [sp],#16
  770. .inst 0xd50323bf // autiasp
  771. ret
  772. .size _vpaes_schedule_core,.-_vpaes_schedule_core
  773. //
  774. // .aes_schedule_192_smear
  775. //
  776. // Smear the short, low side in the 192-bit key schedule.
  777. //
  778. // Inputs:
  779. // %xmm7: high side, b a x y
  780. // %xmm6: low side, d c 0 0
  781. // %xmm13: 0
  782. //
  783. // Outputs:
  784. // %xmm6: b+c+d b+c 0 0
  785. // %xmm0: b+c+d b+c b a
  786. //
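// In this ARMv8 translation the zero input (xmm13 above) is not kept in a
// dedicated register; it is recreated locally with "movi v1.16b, #0", and
// v6/v7/v0 play the roles of xmm6/xmm7/xmm0.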
  787. .type _vpaes_schedule_192_smear,%function
  788. .align 4
  789. _vpaes_schedule_192_smear:
  790. movi v1.16b, #0
  791. dup v0.4s, v7.s[3]
  792. ins v1.s[3], v6.s[2] // vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
  793. ins v0.s[0], v7.s[2] // vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
  794. eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
  795. eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
  796. eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
  797. mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
  798. ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
  799. ret
  800. .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
  801. //
  802. // .aes_schedule_round
  803. //
  804. // Runs one main round of the key schedule on %xmm0, %xmm7
  805. //
  806. // Specifically, runs subbytes on the high dword of %xmm0
  807. // then rotates it by one byte and xors into the low dword of
  808. // %xmm7.
  809. //
  810. // Adds rcon from low byte of %xmm8, then rotates %xmm8 for
  811. // next rcon.
  812. //
  813. // Smears the dwords of %xmm7 by xoring the low into the
  814. // second low, result into third, result into highest.
  815. //
  816. // Returns results in %xmm7 = %xmm0.
  817. // Clobbers %xmm1-%xmm4, %r11.
  818. //
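// In this ARMv8 translation v0 and v7 play the roles of xmm0 and xmm7, the
// rcon word lives in v8, v1-v4 are clobbered, and the .Lk_s63 and .Lk_s0F
// constants are kept in v16 and v17.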
  819. .type _vpaes_schedule_round,%function
  820. .align 4
  821. _vpaes_schedule_round:
  822. // extract rcon from xmm8
  823. movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
  824. ext v1.16b, $rcon, v4.16b, #15 // vpalignr \$15, %xmm8, %xmm4, %xmm1
  825. ext $rcon, $rcon, $rcon, #15 // vpalignr \$15, %xmm8, %xmm8, %xmm8
  826. eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
  827. // rotate
  828. dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
  829. ext v0.16b, v0.16b, v0.16b, #1 // vpalignr \$1, %xmm0, %xmm0, %xmm0
  830. // fall through...
  831. // low round: same as high round, but no rotation and no rcon.
  832. _vpaes_schedule_low_round:
  833. // smear xmm7
  834. ext v1.16b, v4.16b, v7.16b, #12 // vpslldq \$4, %xmm7, %xmm1
  835. eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
  836. ext v4.16b, v4.16b, v7.16b, #8 // vpslldq \$8, %xmm7, %xmm4
  837. // subbytes
  838. and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
  839. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
  840. eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
  841. tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
  842. eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
  843. tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
  844. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
  845. tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
  846. eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
  847. tbl v3.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
  848. eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
  849. tbl v2.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
  850. eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
  851. eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
  852. tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
  853. tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
  854. eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
  855. // add in smeared stuff
  856. eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
  857. eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
  858. ret
  859. .size _vpaes_schedule_round,.-_vpaes_schedule_round
  860. //
  861. // .aes_schedule_transform
  862. //
  863. // Linear-transform %xmm0 according to tables at (%r11)
  864. //
  865. // Requires that %xmm9 = 0x0F0F... as in preheat
  866. // Output in %xmm0
  867. // Clobbers %xmm1, %xmm2
  868. //
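// Here the lo/hi tables are whatever pair is currently in v20/v21 (.Lk_ipt
// after _vpaes_key_preheat, later reloaded with .Lk_deskew or .Lk_opt), and
// v17 supplies the 0x0F nibble mask.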
  869. .type _vpaes_schedule_transform,%function
  870. .align 4
  871. _vpaes_schedule_transform:
  872. and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
  873. ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
  874. // vmovdqa (%r11), %xmm2 # lo
  875. tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
  876. // vmovdqa 16(%r11), %xmm1 # hi
  877. tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
  878. eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
  879. ret
  880. .size _vpaes_schedule_transform,.-_vpaes_schedule_transform
  881. //
  882. // .aes_schedule_mangle
  883. //
  884. // Mangle xmm0 from (basis-transformed) standard version
  885. // to our version.
  886. //
  887. // On encrypt,
  888. // xor with 0x63
  889. // multiply by circulant 0,1,1,1
  890. // apply shiftrows transform
  891. //
  892. // On decrypt,
  893. // xor with 0x63
  894. // multiply by "inverse mixcolumns" circulant E,B,D,9
  895. // deskew
  896. // apply shiftrows transform
  897. //
  898. //
  899. // Writes out to (%rdx), and increments or decrements it
  900. // Keeps track of round number mod 4 in %r8
  901. // Preserves xmm0
  902. // Clobbers xmm1-xmm5
  903. //
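// In this ARMv8 translation the output pointer is x2 (rdx in the comments
// above), the round-number-mod-4 offset is tracked in x8, v0 is preserved,
// v1-v4 are clobbered, and .Lk_mc_forward[0] is preloaded in v9.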
  904. .type _vpaes_schedule_mangle,%function
  905. .align 4
  906. _vpaes_schedule_mangle:
  907. mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
  908. // vmovdqa .Lk_mc_forward(%rip),%xmm5
  909. cbnz $dir, .Lschedule_mangle_dec
  910. // encrypting
  911. eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
  912. add $out, $out, #16 // add \$16, %rdx
  913. tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
  914. tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
  915. tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
  916. eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
  917. ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
  918. eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
  919. b .Lschedule_mangle_both
  920. .align 4
  921. .Lschedule_mangle_dec:
  922. // inverse mix columns
  923. // lea .Lk_dksd(%rip),%r11
  924. ushr v1.16b, v4.16b, #4 // vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
  925. and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo
  926. // vmovdqa 0x00(%r11), %xmm2
  927. tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
  928. // vmovdqa 0x10(%r11), %xmm3
  929. tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
  930. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
  931. tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
  932. // vmovdqa 0x20(%r11), %xmm2
  933. tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
  934. eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
  935. // vmovdqa 0x30(%r11), %xmm3
  936. tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
  937. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
  938. tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
  939. // vmovdqa 0x40(%r11), %xmm2
  940. tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
  941. eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
  942. // vmovdqa 0x50(%r11), %xmm3
  943. tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
  944. eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
  945. // vmovdqa 0x60(%r11), %xmm2
  946. tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
  947. tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
  948. // vmovdqa 0x70(%r11), %xmm4
  949. tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4
  950. ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
  951. eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
  952. eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3
  953. sub $out, $out, #16 // add \$-16, %rdx
  954. .Lschedule_mangle_both:
  955. tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
  956. add x8, x8, #64-16 // add \$-16, %r8
  957. and x8, x8, #~(1<<6) // and \$0x30, %r8
  958. st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
  959. ret
  960. .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
  961. .globl vpaes_set_encrypt_key
  962. .type vpaes_set_encrypt_key,%function
  963. .align 4
  964. vpaes_set_encrypt_key:
  965. .inst 0xd503233f // paciasp
  966. stp x29,x30,[sp,#-16]!
  967. add x29,sp,#0
  968. stp d8,d9,[sp,#-16]! // ABI spec says so
  969. lsr w9, $bits, #5 // shr \$5,%eax
  970. add w9, w9, #5 // \$5,%eax
  971. str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
  972. mov $dir, #0 // mov \$0,%ecx
  973. mov x8, #0x30 // mov \$0x30,%r8d
  974. bl _vpaes_schedule_core
  975. eor x0, x0, x0
  976. ldp d8,d9,[sp],#16
  977. ldp x29,x30,[sp],#16
  978. .inst 0xd50323bf // autiasp
  979. ret
  980. .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
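// Assumed C-level prototype (mirroring AES_set_encrypt_key; an assumption
// based on the register usage above, not stated in this file):
//
//   int vpaes_set_encrypt_key(const unsigned char *userKey, const int bits,
//                             AES_KEY *key);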
  981. .globl vpaes_set_decrypt_key
  982. .type vpaes_set_decrypt_key,%function
  983. .align 4
  984. vpaes_set_decrypt_key:
  985. .inst 0xd503233f // paciasp
  986. stp x29,x30,[sp,#-16]!
  987. add x29,sp,#0
  988. stp d8,d9,[sp,#-16]! // ABI spec says so
  989. lsr w9, $bits, #5 // shr \$5,%eax
  990. add w9, w9, #5 // \$5,%eax
  991. str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
  992. lsl w9, w9, #4 // shl \$4,%eax
  993. add $out, $out, #16 // lea 16(%rdx,%rax),%rdx
  994. add $out, $out, x9
  995. mov $dir, #1 // mov \$1,%ecx
  996. lsr w8, $bits, #1 // shr \$1,%r8d
  997. and x8, x8, #32 // and \$32,%r8d
  998. eor x8, x8, #32 // xor \$32,%r8d # nbits==192?0:32
  999. bl _vpaes_schedule_core
  1000. ldp d8,d9,[sp],#16
  1001. ldp x29,x30,[sp],#16
  1002. .inst 0xd50323bf // autiasp
  1003. ret
  1004. .size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
  1005. ___
  1006. }
  1007. {
  1008. my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));
  1009. $code.=<<___;
  1010. .globl vpaes_cbc_encrypt
  1011. .type vpaes_cbc_encrypt,%function
  1012. .align 4
  1013. vpaes_cbc_encrypt:
  1014. cbz $len, .Lcbc_abort
  1015. cmp w5, #0 // check direction
  1016. b.eq vpaes_cbc_decrypt
  1017. .inst 0xd503233f // paciasp
  1018. stp x29,x30,[sp,#-16]!
  1019. add x29,sp,#0
  1020. mov x17, $len // reassign
  1021. mov x2, $key // reassign
  1022. ld1 {v0.16b}, [$ivec] // load ivec
  1023. bl _vpaes_encrypt_preheat
  1024. b .Lcbc_enc_loop
  1025. .align 4
  1026. .Lcbc_enc_loop:
  1027. ld1 {v7.16b}, [$inp],#16 // load input
  1028. eor v7.16b, v7.16b, v0.16b // xor with ivec
  1029. bl _vpaes_encrypt_core
  1030. st1 {v0.16b}, [$out],#16 // save output
  1031. subs x17, x17, #16
  1032. b.hi .Lcbc_enc_loop
  1033. st1 {v0.16b}, [$ivec] // write ivec
  1034. ldp x29,x30,[sp],#16
  1035. .inst 0xd50323bf // autiasp
  1036. .Lcbc_abort:
  1037. ret
  1038. .size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
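// Assumed C-level prototype; the sixth argument is the direction flag tested
// against zero above (non-zero encrypts, zero falls through to
// vpaes_cbc_decrypt):
//
//   void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
//                          size_t length, const AES_KEY *key,
//                          unsigned char *ivec, const int enc);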
  1039. .type vpaes_cbc_decrypt,%function
  1040. .align 4
  1041. vpaes_cbc_decrypt:
  1042. .inst 0xd503233f // paciasp
  1043. stp x29,x30,[sp,#-16]!
  1044. add x29,sp,#0
  1045. stp d8,d9,[sp,#-16]! // ABI spec says so
  1046. stp d10,d11,[sp,#-16]!
  1047. stp d12,d13,[sp,#-16]!
  1048. stp d14,d15,[sp,#-16]!
  1049. mov x17, $len // reassign
  1050. mov x2, $key // reassign
  1051. ld1 {v6.16b}, [$ivec] // load ivec
  1052. bl _vpaes_decrypt_preheat
  1053. tst x17, #16
  1054. b.eq .Lcbc_dec_loop2x
  1055. ld1 {v7.16b}, [$inp], #16 // load input
  1056. bl _vpaes_decrypt_core
  1057. eor v0.16b, v0.16b, v6.16b // xor with ivec
  1058. orr v6.16b, v7.16b, v7.16b // next ivec value
  1059. st1 {v0.16b}, [$out], #16
  1060. subs x17, x17, #16
  1061. b.ls .Lcbc_dec_done
  1062. .align 4
  1063. .Lcbc_dec_loop2x:
  1064. ld1 {v14.16b,v15.16b}, [$inp], #32
  1065. bl _vpaes_decrypt_2x
  1066. eor v0.16b, v0.16b, v6.16b // xor with ivec
  1067. eor v1.16b, v1.16b, v14.16b
  1068. orr v6.16b, v15.16b, v15.16b
  1069. st1 {v0.16b,v1.16b}, [$out], #32
  1070. subs x17, x17, #32
  1071. b.hi .Lcbc_dec_loop2x
  1072. .Lcbc_dec_done:
  1073. st1 {v6.16b}, [$ivec]
  1074. ldp d14,d15,[sp],#16
  1075. ldp d12,d13,[sp],#16
  1076. ldp d10,d11,[sp],#16
  1077. ldp d8,d9,[sp],#16
  1078. ldp x29,x30,[sp],#16
  1079. .inst 0xd50323bf // autiasp
  1080. ret
  1081. .size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
  1082. ___
  1083. if (1) {
  1084. $code.=<<___;
  1085. .globl vpaes_ecb_encrypt
  1086. .type vpaes_ecb_encrypt,%function
  1087. .align 4
  1088. vpaes_ecb_encrypt:
  1089. .inst 0xd503233f // paciasp
  1090. stp x29,x30,[sp,#-16]!
  1091. add x29,sp,#0
  1092. stp d8,d9,[sp,#-16]! // ABI spec says so
  1093. stp d10,d11,[sp,#-16]!
  1094. stp d12,d13,[sp,#-16]!
  1095. stp d14,d15,[sp,#-16]!
  1096. mov x17, $len
  1097. mov x2, $key
  1098. bl _vpaes_encrypt_preheat
  1099. tst x17, #16
  1100. b.eq .Lecb_enc_loop
  1101. ld1 {v7.16b}, [$inp],#16
  1102. bl _vpaes_encrypt_core
  1103. st1 {v0.16b}, [$out],#16
  1104. subs x17, x17, #16
  1105. b.ls .Lecb_enc_done
  1106. .align 4
  1107. .Lecb_enc_loop:
  1108. ld1 {v14.16b,v15.16b}, [$inp], #32
  1109. bl _vpaes_encrypt_2x
  1110. st1 {v0.16b,v1.16b}, [$out], #32
  1111. subs x17, x17, #32
  1112. b.hi .Lecb_enc_loop
  1113. .Lecb_enc_done:
  1114. ldp d14,d15,[sp],#16
  1115. ldp d12,d13,[sp],#16
  1116. ldp d10,d11,[sp],#16
  1117. ldp d8,d9,[sp],#16
  1118. ldp x29,x30,[sp],#16
  1119. .inst 0xd50323bf // autiasp
  1120. ret
  1121. .size vpaes_ecb_encrypt,.-vpaes_ecb_encrypt
  1122. .globl vpaes_ecb_decrypt
  1123. .type vpaes_ecb_decrypt,%function
  1124. .align 4
  1125. vpaes_ecb_decrypt:
  1126. .inst 0xd503233f // paciasp
  1127. stp x29,x30,[sp,#-16]!
  1128. add x29,sp,#0
  1129. stp d8,d9,[sp,#-16]! // ABI spec says so
  1130. stp d10,d11,[sp,#-16]!
  1131. stp d12,d13,[sp,#-16]!
  1132. stp d14,d15,[sp,#-16]!
  1133. mov x17, $len
  1134. mov x2, $key
  1135. bl _vpaes_decrypt_preheat
  1136. tst x17, #16
  1137. b.eq .Lecb_dec_loop
  1138. ld1 {v7.16b}, [$inp],#16
  1139. bl _vpaes_decrypt_core
  1140. st1 {v0.16b}, [$out],#16
  1141. subs x17, x17, #16
  1142. b.ls .Lecb_dec_done
  1143. .align 4
  1144. .Lecb_dec_loop:
  1145. ld1 {v14.16b,v15.16b}, [$inp], #32
  1146. bl _vpaes_decrypt_2x
  1147. st1 {v0.16b,v1.16b}, [$out], #32
  1148. subs x17, x17, #32
  1149. b.hi .Lecb_dec_loop
  1150. .Lecb_dec_done:
  1151. ldp d14,d15,[sp],#16
  1152. ldp d12,d13,[sp],#16
  1153. ldp d10,d11,[sp],#16
  1154. ldp d8,d9,[sp],#16
  1155. ldp x29,x30,[sp],#16
  1156. .inst 0xd50323bf // autiasp
  1157. ret
  1158. .size vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
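// Assumed C-level prototypes for the two ECB helpers above, inferred from
// the x0-x3 register usage (input, output, length in bytes, key schedule):
//
//   void vpaes_ecb_encrypt(const unsigned char *in, unsigned char *out,
//                          size_t length, const AES_KEY *key);
//   void vpaes_ecb_decrypt(const unsigned char *in, unsigned char *out,
//                          size_t length, const AES_KEY *key);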
  1159. ___
  1160. } }
  1161. print $code;
  1162. close STDOUT or die "error closing STDOUT: $!";