aesv8-armx.pl 30 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410
  1. #! /usr/bin/env perl
  2. # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License 2.0 (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # This module implements support for ARMv8 AES instructions. The
  17. # module is endian-agnostic in sense that it supports both big- and
  18. # little-endian cases. As does it support both 32- and 64-bit modes
  19. # of operation. Latter is achieved by limiting amount of utilized
  20. # registers to 16, which implies additional NEON load and integer
  21. # instructions. This has no effect on mighty Apple A7, where results
  22. # are literally equal to the theoretical estimates based on AES
  23. # instruction latencies and issue rates. On Cortex-A53, an in-order
  24. # execution core, this costs up to 10-15%, which is partially
  25. # compensated by implementing dedicated code path for 128-bit
  26. # CBC encrypt case. On Cortex-A57 parallelizable mode performance
  27. # seems to be limited by sheer amount of NEON instructions...
  28. #
  29. # April 2019
  30. #
  31. # Key to performance of parallelize-able modes is round instruction
  32. # interleaving. But which factor to use? There is optimal one for
  33. # each combination of instruction latency and issue rate, beyond
  34. # which increasing interleave factor doesn't pay off. While on cons
  35. # side we have code size increase and resource waste on platforms for
  36. # which interleave factor is too high. In other words you want it to
  37. # be just right. So far interleave factor of 3x was serving well all
  38. # platforms. But for ThunderX2 optimal interleave factor was measured
  39. # to be 5x...
  40. #
  41. # Performance in cycles per byte processed with 128-bit key:
  42. #
  43. # CBC enc CBC dec CTR
  44. # Apple A7 2.39 1.20 1.20
  45. # Cortex-A53 1.32 1.17/1.29(**) 1.36/1.46
  46. # Cortex-A57(*) 1.95 0.82/0.85 0.89/0.93
  47. # Cortex-A72 1.33 0.85/0.88 0.92/0.96
  48. # Denver 1.96 0.65/0.86 0.76/0.80
  49. # Mongoose 1.33 1.23/1.20 1.30/1.20
  50. # Kryo 1.26 0.87/0.94 1.00/1.00
  51. # ThunderX2 5.95 1.25 1.30
  52. #
  53. # (*) original 3.64/1.34/1.32 results were for r0p0 revision
  54. # and are still same even for updated module;
  55. # (**) numbers after slash are for 32-bit code, which is 3x-
  56. # interleaved;
  57. # $output is the last argument if it looks like a file (it has an extension)
  58. # $flavour is the first argument if it doesn't look like a file
  59. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  60. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
  61. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  62. ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
  63. ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
  64. die "can't locate arm-xlate.pl";
  65. open OUT,"| \"$^X\" $xlate $flavour \"$output\""
  66. or die "can't call $xlate: $!";
  67. *STDOUT=*OUT;
  68. $prefix="aes_v8";
  69. $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
  70. $code=<<___;
  71. #include "arm_arch.h"
  72. #if __ARM_MAX_ARCH__>=7
  73. ___
  74. $code.=".arch armv8-a+crypto\n.text\n" if ($flavour =~ /64/);
  75. $code.=<<___ if ($flavour !~ /64/);
  76. .arch armv7-a // don't confuse not-so-latest binutils with argv8 :-)
  77. .fpu neon
  78. #ifdef __thumb2__
  79. .syntax unified
  80. .thumb
  81. # define INST(a,b,c,d) $_byte c,d|0xc,a,b
  82. #else
  83. .code 32
  84. # define INST(a,b,c,d) $_byte a,b,c,d
  85. #endif
  86. .text
  87. ___
  88. # Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax,
  89. # NEON is mostly 32-bit mnemonics, integer - mostly 64. Goal is to
  90. # maintain both 32- and 64-bit codes within single module and
  91. # transliterate common code to either flavour with regex vodoo.
  92. #
  93. {{{
  94. my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
  95. my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
  96. $flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
  97. $code.=<<___;
  98. .align 5
  99. .Lrcon:
  100. .long 0x01,0x01,0x01,0x01
  101. .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
  102. .long 0x1b,0x1b,0x1b,0x1b
  103. .globl ${prefix}_set_encrypt_key
  104. .type ${prefix}_set_encrypt_key,%function
  105. .align 5
  106. ${prefix}_set_encrypt_key:
  107. .Lenc_key:
  108. ___
  109. $code.=<<___ if ($flavour =~ /64/);
  110. stp x29,x30,[sp,#-16]!
  111. add x29,sp,#0
  112. ___
  113. $code.=<<___;
  114. mov $ptr,#-1
  115. cmp $inp,#0
  116. b.eq .Lenc_key_abort
  117. cmp $out,#0
  118. b.eq .Lenc_key_abort
  119. mov $ptr,#-2
  120. cmp $bits,#128
  121. b.lt .Lenc_key_abort
  122. cmp $bits,#256
  123. b.gt .Lenc_key_abort
  124. tst $bits,#0x3f
  125. b.ne .Lenc_key_abort
  126. adr $ptr,.Lrcon
  127. cmp $bits,#192
  128. veor $zero,$zero,$zero
  129. vld1.8 {$in0},[$inp],#16
  130. mov $bits,#8 // reuse $bits
  131. vld1.32 {$rcon,$mask},[$ptr],#32
  132. b.lt .Loop128
  133. b.eq .L192
  134. b .L256
  135. .align 4
  136. .Loop128:
  137. vtbl.8 $key,{$in0},$mask
  138. vext.8 $tmp,$zero,$in0,#12
  139. vst1.32 {$in0},[$out],#16
  140. aese $key,$zero
  141. subs $bits,$bits,#1
  142. veor $in0,$in0,$tmp
  143. vext.8 $tmp,$zero,$tmp,#12
  144. veor $in0,$in0,$tmp
  145. vext.8 $tmp,$zero,$tmp,#12
  146. veor $key,$key,$rcon
  147. veor $in0,$in0,$tmp
  148. vshl.u8 $rcon,$rcon,#1
  149. veor $in0,$in0,$key
  150. b.ne .Loop128
  151. vld1.32 {$rcon},[$ptr]
  152. vtbl.8 $key,{$in0},$mask
  153. vext.8 $tmp,$zero,$in0,#12
  154. vst1.32 {$in0},[$out],#16
  155. aese $key,$zero
  156. veor $in0,$in0,$tmp
  157. vext.8 $tmp,$zero,$tmp,#12
  158. veor $in0,$in0,$tmp
  159. vext.8 $tmp,$zero,$tmp,#12
  160. veor $key,$key,$rcon
  161. veor $in0,$in0,$tmp
  162. vshl.u8 $rcon,$rcon,#1
  163. veor $in0,$in0,$key
  164. vtbl.8 $key,{$in0},$mask
  165. vext.8 $tmp,$zero,$in0,#12
  166. vst1.32 {$in0},[$out],#16
  167. aese $key,$zero
  168. veor $in0,$in0,$tmp
  169. vext.8 $tmp,$zero,$tmp,#12
  170. veor $in0,$in0,$tmp
  171. vext.8 $tmp,$zero,$tmp,#12
  172. veor $key,$key,$rcon
  173. veor $in0,$in0,$tmp
  174. veor $in0,$in0,$key
  175. vst1.32 {$in0},[$out]
  176. add $out,$out,#0x50
  177. mov $rounds,#10
  178. b .Ldone
  179. .align 4
  180. .L192:
  181. vld1.8 {$in1},[$inp],#8
  182. vmov.i8 $key,#8 // borrow $key
  183. vst1.32 {$in0},[$out],#16
  184. vsub.i8 $mask,$mask,$key // adjust the mask
  185. .Loop192:
  186. vtbl.8 $key,{$in1},$mask
  187. vext.8 $tmp,$zero,$in0,#12
  188. vst1.32 {$in1},[$out],#8
  189. aese $key,$zero
  190. subs $bits,$bits,#1
  191. veor $in0,$in0,$tmp
  192. vext.8 $tmp,$zero,$tmp,#12
  193. veor $in0,$in0,$tmp
  194. vext.8 $tmp,$zero,$tmp,#12
  195. veor $in0,$in0,$tmp
  196. vdup.32 $tmp,${in0}[3]
  197. veor $tmp,$tmp,$in1
  198. veor $key,$key,$rcon
  199. vext.8 $in1,$zero,$in1,#12
  200. vshl.u8 $rcon,$rcon,#1
  201. veor $in1,$in1,$tmp
  202. veor $in0,$in0,$key
  203. veor $in1,$in1,$key
  204. vst1.32 {$in0},[$out],#16
  205. b.ne .Loop192
  206. mov $rounds,#12
  207. add $out,$out,#0x20
  208. b .Ldone
  209. .align 4
  210. .L256:
  211. vld1.8 {$in1},[$inp]
  212. mov $bits,#7
  213. mov $rounds,#14
  214. vst1.32 {$in0},[$out],#16
  215. .Loop256:
  216. vtbl.8 $key,{$in1},$mask
  217. vext.8 $tmp,$zero,$in0,#12
  218. vst1.32 {$in1},[$out],#16
  219. aese $key,$zero
  220. subs $bits,$bits,#1
  221. veor $in0,$in0,$tmp
  222. vext.8 $tmp,$zero,$tmp,#12
  223. veor $in0,$in0,$tmp
  224. vext.8 $tmp,$zero,$tmp,#12
  225. veor $key,$key,$rcon
  226. veor $in0,$in0,$tmp
  227. vshl.u8 $rcon,$rcon,#1
  228. veor $in0,$in0,$key
  229. vst1.32 {$in0},[$out],#16
  230. b.eq .Ldone
  231. vdup.32 $key,${in0}[3] // just splat
  232. vext.8 $tmp,$zero,$in1,#12
  233. aese $key,$zero
  234. veor $in1,$in1,$tmp
  235. vext.8 $tmp,$zero,$tmp,#12
  236. veor $in1,$in1,$tmp
  237. vext.8 $tmp,$zero,$tmp,#12
  238. veor $in1,$in1,$tmp
  239. veor $in1,$in1,$key
  240. b .Loop256
  241. .Ldone:
  242. str $rounds,[$out]
  243. mov $ptr,#0
  244. .Lenc_key_abort:
  245. mov x0,$ptr // return value
  246. `"ldr x29,[sp],#16" if ($flavour =~ /64/)`
  247. ret
  248. .size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
  249. .globl ${prefix}_set_decrypt_key
  250. .type ${prefix}_set_decrypt_key,%function
  251. .align 5
  252. ${prefix}_set_decrypt_key:
  253. ___
  254. $code.=<<___ if ($flavour =~ /64/);
  255. .inst 0xd503233f // paciasp
  256. stp x29,x30,[sp,#-16]!
  257. add x29,sp,#0
  258. ___
  259. $code.=<<___ if ($flavour !~ /64/);
  260. stmdb sp!,{r4,lr}
  261. ___
  262. $code.=<<___;
  263. bl .Lenc_key
  264. cmp x0,#0
  265. b.ne .Ldec_key_abort
  266. sub $out,$out,#240 // restore original $out
  267. mov x4,#-16
  268. add $inp,$out,x12,lsl#4 // end of key schedule
  269. vld1.32 {v0.16b},[$out]
  270. vld1.32 {v1.16b},[$inp]
  271. vst1.32 {v0.16b},[$inp],x4
  272. vst1.32 {v1.16b},[$out],#16
  273. .Loop_imc:
  274. vld1.32 {v0.16b},[$out]
  275. vld1.32 {v1.16b},[$inp]
  276. aesimc v0.16b,v0.16b
  277. aesimc v1.16b,v1.16b
  278. vst1.32 {v0.16b},[$inp],x4
  279. vst1.32 {v1.16b},[$out],#16
  280. cmp $inp,$out
  281. b.hi .Loop_imc
  282. vld1.32 {v0.16b},[$out]
  283. aesimc v0.16b,v0.16b
  284. vst1.32 {v0.16b},[$inp]
  285. eor x0,x0,x0 // return value
  286. .Ldec_key_abort:
  287. ___
  288. $code.=<<___ if ($flavour !~ /64/);
  289. ldmia sp!,{r4,pc}
  290. ___
  291. $code.=<<___ if ($flavour =~ /64/);
  292. ldp x29,x30,[sp],#16
  293. .inst 0xd50323bf // autiasp
  294. ret
  295. ___
  296. $code.=<<___;
  297. .size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
  298. ___
  299. }}}
  300. {{{
  301. sub gen_block () {
  302. my $dir = shift;
  303. my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
  304. my ($inp,$out,$key)=map("x$_",(0..2));
  305. my $rounds="w3";
  306. my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
  307. $code.=<<___;
  308. .globl ${prefix}_${dir}crypt
  309. .type ${prefix}_${dir}crypt,%function
  310. .align 5
  311. ${prefix}_${dir}crypt:
  312. ldr $rounds,[$key,#240]
  313. vld1.32 {$rndkey0},[$key],#16
  314. vld1.8 {$inout},[$inp]
  315. sub $rounds,$rounds,#2
  316. vld1.32 {$rndkey1},[$key],#16
  317. .Loop_${dir}c:
  318. aes$e $inout,$rndkey0
  319. aes$mc $inout,$inout
  320. vld1.32 {$rndkey0},[$key],#16
  321. subs $rounds,$rounds,#2
  322. aes$e $inout,$rndkey1
  323. aes$mc $inout,$inout
  324. vld1.32 {$rndkey1},[$key],#16
  325. b.gt .Loop_${dir}c
  326. aes$e $inout,$rndkey0
  327. aes$mc $inout,$inout
  328. vld1.32 {$rndkey0},[$key]
  329. aes$e $inout,$rndkey1
  330. veor $inout,$inout,$rndkey0
  331. vst1.8 {$inout},[$out]
  332. ret
  333. .size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
  334. ___
  335. }
  336. &gen_block("en");
  337. &gen_block("de");
  338. }}}
  339. {{{
  340. my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
  341. my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
  342. my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
  343. my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
  344. my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
  345. ### q8-q15 preloaded key schedule
  346. $code.=<<___;
  347. .globl ${prefix}_cbc_encrypt
  348. .type ${prefix}_cbc_encrypt,%function
  349. .align 5
  350. ${prefix}_cbc_encrypt:
  351. ___
  352. $code.=<<___ if ($flavour =~ /64/);
  353. stp x29,x30,[sp,#-16]!
  354. add x29,sp,#0
  355. ___
  356. $code.=<<___ if ($flavour !~ /64/);
  357. mov ip,sp
  358. stmdb sp!,{r4-r8,lr}
  359. vstmdb sp!,{d8-d15} @ ABI specification says so
  360. ldmia ip,{r4-r5} @ load remaining args
  361. ___
  362. $code.=<<___;
  363. subs $len,$len,#16
  364. mov $step,#16
  365. b.lo .Lcbc_abort
  366. cclr $step,eq
  367. cmp $enc,#0 // en- or decrypting?
  368. ldr $rounds,[$key,#240]
  369. and $len,$len,#-16
  370. vld1.8 {$ivec},[$ivp]
  371. vld1.8 {$dat},[$inp],$step
  372. vld1.32 {q8-q9},[$key] // load key schedule...
  373. sub $rounds,$rounds,#6
  374. add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
  375. sub $rounds,$rounds,#2
  376. vld1.32 {q10-q11},[$key_],#32
  377. vld1.32 {q12-q13},[$key_],#32
  378. vld1.32 {q14-q15},[$key_],#32
  379. vld1.32 {$rndlast},[$key_]
  380. add $key_,$key,#32
  381. mov $cnt,$rounds
  382. b.eq .Lcbc_dec
  383. cmp $rounds,#2
  384. veor $dat,$dat,$ivec
  385. veor $rndzero_n_last,q8,$rndlast
  386. b.eq .Lcbc_enc128
  387. vld1.32 {$in0-$in1},[$key_]
  388. add $key_,$key,#16
  389. add $key4,$key,#16*4
  390. add $key5,$key,#16*5
  391. aese $dat,q8
  392. aesmc $dat,$dat
  393. add $key6,$key,#16*6
  394. add $key7,$key,#16*7
  395. b .Lenter_cbc_enc
  396. .align 4
  397. .Loop_cbc_enc:
  398. aese $dat,q8
  399. aesmc $dat,$dat
  400. vst1.8 {$ivec},[$out],#16
  401. .Lenter_cbc_enc:
  402. aese $dat,q9
  403. aesmc $dat,$dat
  404. aese $dat,$in0
  405. aesmc $dat,$dat
  406. vld1.32 {q8},[$key4]
  407. cmp $rounds,#4
  408. aese $dat,$in1
  409. aesmc $dat,$dat
  410. vld1.32 {q9},[$key5]
  411. b.eq .Lcbc_enc192
  412. aese $dat,q8
  413. aesmc $dat,$dat
  414. vld1.32 {q8},[$key6]
  415. aese $dat,q9
  416. aesmc $dat,$dat
  417. vld1.32 {q9},[$key7]
  418. nop
  419. .Lcbc_enc192:
  420. aese $dat,q8
  421. aesmc $dat,$dat
  422. subs $len,$len,#16
  423. aese $dat,q9
  424. aesmc $dat,$dat
  425. cclr $step,eq
  426. aese $dat,q10
  427. aesmc $dat,$dat
  428. aese $dat,q11
  429. aesmc $dat,$dat
  430. vld1.8 {q8},[$inp],$step
  431. aese $dat,q12
  432. aesmc $dat,$dat
  433. veor q8,q8,$rndzero_n_last
  434. aese $dat,q13
  435. aesmc $dat,$dat
  436. vld1.32 {q9},[$key_] // re-pre-load rndkey[1]
  437. aese $dat,q14
  438. aesmc $dat,$dat
  439. aese $dat,q15
  440. veor $ivec,$dat,$rndlast
  441. b.hs .Loop_cbc_enc
  442. vst1.8 {$ivec},[$out],#16
  443. b .Lcbc_done
  444. .align 5
  445. .Lcbc_enc128:
  446. vld1.32 {$in0-$in1},[$key_]
  447. aese $dat,q8
  448. aesmc $dat,$dat
  449. b .Lenter_cbc_enc128
  450. .Loop_cbc_enc128:
  451. aese $dat,q8
  452. aesmc $dat,$dat
  453. vst1.8 {$ivec},[$out],#16
  454. .Lenter_cbc_enc128:
  455. aese $dat,q9
  456. aesmc $dat,$dat
  457. subs $len,$len,#16
  458. aese $dat,$in0
  459. aesmc $dat,$dat
  460. cclr $step,eq
  461. aese $dat,$in1
  462. aesmc $dat,$dat
  463. aese $dat,q10
  464. aesmc $dat,$dat
  465. aese $dat,q11
  466. aesmc $dat,$dat
  467. vld1.8 {q8},[$inp],$step
  468. aese $dat,q12
  469. aesmc $dat,$dat
  470. aese $dat,q13
  471. aesmc $dat,$dat
  472. aese $dat,q14
  473. aesmc $dat,$dat
  474. veor q8,q8,$rndzero_n_last
  475. aese $dat,q15
  476. veor $ivec,$dat,$rndlast
  477. b.hs .Loop_cbc_enc128
  478. vst1.8 {$ivec},[$out],#16
  479. b .Lcbc_done
  480. ___
  481. {
  482. my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
  483. my ($dat3,$in3,$tmp3); # used only in 64-bit mode
  484. my ($dat4,$in4,$tmp4);
  485. if ($flavour =~ /64/) {
  486. ($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
  487. }
  488. $code.=<<___;
  489. .align 5
  490. .Lcbc_dec:
  491. vld1.8 {$dat2},[$inp],#16
  492. subs $len,$len,#32 // bias
  493. add $cnt,$rounds,#2
  494. vorr $in1,$dat,$dat
  495. vorr $dat1,$dat,$dat
  496. vorr $in2,$dat2,$dat2
  497. b.lo .Lcbc_dec_tail
  498. vorr $dat1,$dat2,$dat2
  499. vld1.8 {$dat2},[$inp],#16
  500. vorr $in0,$dat,$dat
  501. vorr $in1,$dat1,$dat1
  502. vorr $in2,$dat2,$dat2
  503. ___
  504. $code.=<<___ if ($flavour =~ /64/);
  505. cmp $len,#32
  506. b.lo .Loop3x_cbc_dec
  507. vld1.8 {$dat3},[$inp],#16
  508. vld1.8 {$dat4},[$inp],#16
  509. sub $len,$len,#32 // bias
  510. mov $cnt,$rounds
  511. vorr $in3,$dat3,$dat3
  512. vorr $in4,$dat4,$dat4
  513. .Loop5x_cbc_dec:
  514. aesd $dat0,q8
  515. aesimc $dat0,$dat0
  516. aesd $dat1,q8
  517. aesimc $dat1,$dat1
  518. aesd $dat2,q8
  519. aesimc $dat2,$dat2
  520. aesd $dat3,q8
  521. aesimc $dat3,$dat3
  522. aesd $dat4,q8
  523. aesimc $dat4,$dat4
  524. vld1.32 {q8},[$key_],#16
  525. subs $cnt,$cnt,#2
  526. aesd $dat0,q9
  527. aesimc $dat0,$dat0
  528. aesd $dat1,q9
  529. aesimc $dat1,$dat1
  530. aesd $dat2,q9
  531. aesimc $dat2,$dat2
  532. aesd $dat3,q9
  533. aesimc $dat3,$dat3
  534. aesd $dat4,q9
  535. aesimc $dat4,$dat4
  536. vld1.32 {q9},[$key_],#16
  537. b.gt .Loop5x_cbc_dec
  538. aesd $dat0,q8
  539. aesimc $dat0,$dat0
  540. aesd $dat1,q8
  541. aesimc $dat1,$dat1
  542. aesd $dat2,q8
  543. aesimc $dat2,$dat2
  544. aesd $dat3,q8
  545. aesimc $dat3,$dat3
  546. aesd $dat4,q8
  547. aesimc $dat4,$dat4
  548. cmp $len,#0x40 // because .Lcbc_tail4x
  549. sub $len,$len,#0x50
  550. aesd $dat0,q9
  551. aesimc $dat0,$dat0
  552. aesd $dat1,q9
  553. aesimc $dat1,$dat1
  554. aesd $dat2,q9
  555. aesimc $dat2,$dat2
  556. aesd $dat3,q9
  557. aesimc $dat3,$dat3
  558. aesd $dat4,q9
  559. aesimc $dat4,$dat4
  560. csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not typo
  561. mov $key_,$key
  562. aesd $dat0,q10
  563. aesimc $dat0,$dat0
  564. aesd $dat1,q10
  565. aesimc $dat1,$dat1
  566. aesd $dat2,q10
  567. aesimc $dat2,$dat2
  568. aesd $dat3,q10
  569. aesimc $dat3,$dat3
  570. aesd $dat4,q10
  571. aesimc $dat4,$dat4
  572. add $inp,$inp,x6 // $inp is adjusted in such way that
  573. // at exit from the loop $dat1-$dat4
  574. // are loaded with last "words"
  575. add x6,$len,#0x60 // because .Lcbc_tail4x
  576. aesd $dat0,q11
  577. aesimc $dat0,$dat0
  578. aesd $dat1,q11
  579. aesimc $dat1,$dat1
  580. aesd $dat2,q11
  581. aesimc $dat2,$dat2
  582. aesd $dat3,q11
  583. aesimc $dat3,$dat3
  584. aesd $dat4,q11
  585. aesimc $dat4,$dat4
  586. aesd $dat0,q12
  587. aesimc $dat0,$dat0
  588. aesd $dat1,q12
  589. aesimc $dat1,$dat1
  590. aesd $dat2,q12
  591. aesimc $dat2,$dat2
  592. aesd $dat3,q12
  593. aesimc $dat3,$dat3
  594. aesd $dat4,q12
  595. aesimc $dat4,$dat4
  596. aesd $dat0,q13
  597. aesimc $dat0,$dat0
  598. aesd $dat1,q13
  599. aesimc $dat1,$dat1
  600. aesd $dat2,q13
  601. aesimc $dat2,$dat2
  602. aesd $dat3,q13
  603. aesimc $dat3,$dat3
  604. aesd $dat4,q13
  605. aesimc $dat4,$dat4
  606. aesd $dat0,q14
  607. aesimc $dat0,$dat0
  608. aesd $dat1,q14
  609. aesimc $dat1,$dat1
  610. aesd $dat2,q14
  611. aesimc $dat2,$dat2
  612. aesd $dat3,q14
  613. aesimc $dat3,$dat3
  614. aesd $dat4,q14
  615. aesimc $dat4,$dat4
  616. veor $tmp0,$ivec,$rndlast
  617. aesd $dat0,q15
  618. veor $tmp1,$in0,$rndlast
  619. vld1.8 {$in0},[$inp],#16
  620. aesd $dat1,q15
  621. veor $tmp2,$in1,$rndlast
  622. vld1.8 {$in1},[$inp],#16
  623. aesd $dat2,q15
  624. veor $tmp3,$in2,$rndlast
  625. vld1.8 {$in2},[$inp],#16
  626. aesd $dat3,q15
  627. veor $tmp4,$in3,$rndlast
  628. vld1.8 {$in3},[$inp],#16
  629. aesd $dat4,q15
  630. vorr $ivec,$in4,$in4
  631. vld1.8 {$in4},[$inp],#16
  632. cbz x6,.Lcbc_tail4x
  633. vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
  634. veor $tmp0,$tmp0,$dat0
  635. vorr $dat0,$in0,$in0
  636. veor $tmp1,$tmp1,$dat1
  637. vorr $dat1,$in1,$in1
  638. veor $tmp2,$tmp2,$dat2
  639. vorr $dat2,$in2,$in2
  640. veor $tmp3,$tmp3,$dat3
  641. vorr $dat3,$in3,$in3
  642. veor $tmp4,$tmp4,$dat4
  643. vst1.8 {$tmp0},[$out],#16
  644. vorr $dat4,$in4,$in4
  645. vst1.8 {$tmp1},[$out],#16
  646. mov $cnt,$rounds
  647. vst1.8 {$tmp2},[$out],#16
  648. vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
  649. vst1.8 {$tmp3},[$out],#16
  650. vst1.8 {$tmp4},[$out],#16
  651. b.hs .Loop5x_cbc_dec
  652. add $len,$len,#0x50
  653. cbz $len,.Lcbc_done
  654. add $cnt,$rounds,#2
  655. subs $len,$len,#0x30
  656. vorr $dat0,$in2,$in2
  657. vorr $in0,$in2,$in2
  658. vorr $dat1,$in3,$in3
  659. vorr $in1,$in3,$in3
  660. vorr $dat2,$in4,$in4
  661. vorr $in2,$in4,$in4
  662. b.lo .Lcbc_dec_tail
  663. b .Loop3x_cbc_dec
  664. .align 4
  665. .Lcbc_tail4x:
  666. veor $tmp1,$tmp0,$dat1
  667. veor $tmp2,$tmp2,$dat2
  668. veor $tmp3,$tmp3,$dat3
  669. veor $tmp4,$tmp4,$dat4
  670. vst1.8 {$tmp1},[$out],#16
  671. vst1.8 {$tmp2},[$out],#16
  672. vst1.8 {$tmp3},[$out],#16
  673. vst1.8 {$tmp4},[$out],#16
  674. b .Lcbc_done
  675. .align 4
  676. ___
  677. $code.=<<___;
  678. .Loop3x_cbc_dec:
  679. aesd $dat0,q8
  680. aesimc $dat0,$dat0
  681. aesd $dat1,q8
  682. aesimc $dat1,$dat1
  683. aesd $dat2,q8
  684. aesimc $dat2,$dat2
  685. vld1.32 {q8},[$key_],#16
  686. subs $cnt,$cnt,#2
  687. aesd $dat0,q9
  688. aesimc $dat0,$dat0
  689. aesd $dat1,q9
  690. aesimc $dat1,$dat1
  691. aesd $dat2,q9
  692. aesimc $dat2,$dat2
  693. vld1.32 {q9},[$key_],#16
  694. b.gt .Loop3x_cbc_dec
  695. aesd $dat0,q8
  696. aesimc $dat0,$dat0
  697. aesd $dat1,q8
  698. aesimc $dat1,$dat1
  699. aesd $dat2,q8
  700. aesimc $dat2,$dat2
  701. veor $tmp0,$ivec,$rndlast
  702. subs $len,$len,#0x30
  703. veor $tmp1,$in0,$rndlast
  704. mov.lo x6,$len // x6, $cnt, is zero at this point
  705. aesd $dat0,q9
  706. aesimc $dat0,$dat0
  707. aesd $dat1,q9
  708. aesimc $dat1,$dat1
  709. aesd $dat2,q9
  710. aesimc $dat2,$dat2
  711. veor $tmp2,$in1,$rndlast
  712. add $inp,$inp,x6 // $inp is adjusted in such way that
  713. // at exit from the loop $dat1-$dat2
  714. // are loaded with last "words"
  715. vorr $ivec,$in2,$in2
  716. mov $key_,$key
  717. aesd $dat0,q12
  718. aesimc $dat0,$dat0
  719. aesd $dat1,q12
  720. aesimc $dat1,$dat1
  721. aesd $dat2,q12
  722. aesimc $dat2,$dat2
  723. vld1.8 {$in0},[$inp],#16
  724. aesd $dat0,q13
  725. aesimc $dat0,$dat0
  726. aesd $dat1,q13
  727. aesimc $dat1,$dat1
  728. aesd $dat2,q13
  729. aesimc $dat2,$dat2
  730. vld1.8 {$in1},[$inp],#16
  731. aesd $dat0,q14
  732. aesimc $dat0,$dat0
  733. aesd $dat1,q14
  734. aesimc $dat1,$dat1
  735. aesd $dat2,q14
  736. aesimc $dat2,$dat2
  737. vld1.8 {$in2},[$inp],#16
  738. aesd $dat0,q15
  739. aesd $dat1,q15
  740. aesd $dat2,q15
  741. vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
  742. add $cnt,$rounds,#2
  743. veor $tmp0,$tmp0,$dat0
  744. veor $tmp1,$tmp1,$dat1
  745. veor $dat2,$dat2,$tmp2
  746. vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
  747. vst1.8 {$tmp0},[$out],#16
  748. vorr $dat0,$in0,$in0
  749. vst1.8 {$tmp1},[$out],#16
  750. vorr $dat1,$in1,$in1
  751. vst1.8 {$dat2},[$out],#16
  752. vorr $dat2,$in2,$in2
  753. b.hs .Loop3x_cbc_dec
  754. cmn $len,#0x30
  755. b.eq .Lcbc_done
  756. nop
  757. .Lcbc_dec_tail:
  758. aesd $dat1,q8
  759. aesimc $dat1,$dat1
  760. aesd $dat2,q8
  761. aesimc $dat2,$dat2
  762. vld1.32 {q8},[$key_],#16
  763. subs $cnt,$cnt,#2
  764. aesd $dat1,q9
  765. aesimc $dat1,$dat1
  766. aesd $dat2,q9
  767. aesimc $dat2,$dat2
  768. vld1.32 {q9},[$key_],#16
  769. b.gt .Lcbc_dec_tail
  770. aesd $dat1,q8
  771. aesimc $dat1,$dat1
  772. aesd $dat2,q8
  773. aesimc $dat2,$dat2
  774. aesd $dat1,q9
  775. aesimc $dat1,$dat1
  776. aesd $dat2,q9
  777. aesimc $dat2,$dat2
  778. aesd $dat1,q12
  779. aesimc $dat1,$dat1
  780. aesd $dat2,q12
  781. aesimc $dat2,$dat2
  782. cmn $len,#0x20
  783. aesd $dat1,q13
  784. aesimc $dat1,$dat1
  785. aesd $dat2,q13
  786. aesimc $dat2,$dat2
  787. veor $tmp1,$ivec,$rndlast
  788. aesd $dat1,q14
  789. aesimc $dat1,$dat1
  790. aesd $dat2,q14
  791. aesimc $dat2,$dat2
  792. veor $tmp2,$in1,$rndlast
  793. aesd $dat1,q15
  794. aesd $dat2,q15
  795. b.eq .Lcbc_dec_one
  796. veor $tmp1,$tmp1,$dat1
  797. veor $tmp2,$tmp2,$dat2
  798. vorr $ivec,$in2,$in2
  799. vst1.8 {$tmp1},[$out],#16
  800. vst1.8 {$tmp2},[$out],#16
  801. b .Lcbc_done
  802. .Lcbc_dec_one:
  803. veor $tmp1,$tmp1,$dat2
  804. vorr $ivec,$in2,$in2
  805. vst1.8 {$tmp1},[$out],#16
  806. .Lcbc_done:
  807. vst1.8 {$ivec},[$ivp]
  808. .Lcbc_abort:
  809. ___
  810. }
  811. $code.=<<___ if ($flavour !~ /64/);
  812. vldmia sp!,{d8-d15}
  813. ldmia sp!,{r4-r8,pc}
  814. ___
  815. $code.=<<___ if ($flavour =~ /64/);
  816. ldr x29,[sp],#16
  817. ret
  818. ___
  819. $code.=<<___;
  820. .size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
  821. ___
  822. }}}
  823. {{{
  824. my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
  825. my ($rounds,$cnt,$key_)=("w5","w6","x7");
  826. my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
  827. my $step="x12"; # aliases with $tctr2
  828. my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
  829. my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
  830. # used only in 64-bit mode...
  831. my ($dat3,$dat4,$in3,$in4)=map("q$_",(16..23));
  832. my ($dat,$tmp)=($dat0,$tmp0);
  833. ### q8-q15 preloaded key schedule
  834. $code.=<<___;
  835. .globl ${prefix}_ctr32_encrypt_blocks
  836. .type ${prefix}_ctr32_encrypt_blocks,%function
  837. .align 5
  838. ${prefix}_ctr32_encrypt_blocks:
  839. ___
  840. $code.=<<___ if ($flavour =~ /64/);
  841. stp x29,x30,[sp,#-16]!
  842. add x29,sp,#0
  843. ___
  844. $code.=<<___ if ($flavour !~ /64/);
  845. mov ip,sp
  846. stmdb sp!,{r4-r10,lr}
  847. vstmdb sp!,{d8-d15} @ ABI specification says so
  848. ldr r4, [ip] @ load remaining arg
  849. ___
  850. $code.=<<___;
  851. ldr $rounds,[$key,#240]
  852. ldr $ctr, [$ivp, #12]
  853. vld1.32 {$dat0},[$ivp]
  854. vld1.32 {q8-q9},[$key] // load key schedule...
  855. sub $rounds,$rounds,#4
  856. mov $step,#16
  857. cmp $len,#2
  858. add $key_,$key,x5,lsl#4 // pointer to last 5 round keys
  859. sub $rounds,$rounds,#2
  860. vld1.32 {q12-q13},[$key_],#32
  861. vld1.32 {q14-q15},[$key_],#32
  862. vld1.32 {$rndlast},[$key_]
  863. add $key_,$key,#32
  864. mov $cnt,$rounds
  865. cclr $step,lo
  866. #ifndef __ARMEB__
  867. rev $ctr, $ctr
  868. #endif
  869. vorr $dat1,$dat0,$dat0
  870. add $tctr1, $ctr, #1
  871. vorr $dat2,$dat0,$dat0
  872. add $ctr, $ctr, #2
  873. vorr $ivec,$dat0,$dat0
  874. rev $tctr1, $tctr1
  875. vmov.32 ${dat1}[3],$tctr1
  876. b.ls .Lctr32_tail
  877. rev $tctr2, $ctr
  878. sub $len,$len,#3 // bias
  879. vmov.32 ${dat2}[3],$tctr2
  880. ___
  881. $code.=<<___ if ($flavour =~ /64/);
  882. cmp $len,#2
  883. b.lo .Loop3x_ctr32
  884. add w13,$ctr,#1
  885. add w14,$ctr,#2
  886. vorr $dat3,$dat0,$dat0
  887. rev w13,w13
  888. vorr $dat4,$dat0,$dat0
  889. rev w14,w14
  890. vmov.32 ${dat3}[3],w13
  891. sub $len,$len,#2 // bias
  892. vmov.32 ${dat4}[3],w14
  893. add $ctr,$ctr,#2
  894. b .Loop5x_ctr32
  895. .align 4
  896. .Loop5x_ctr32:
  897. aese $dat0,q8
  898. aesmc $dat0,$dat0
  899. aese $dat1,q8
  900. aesmc $dat1,$dat1
  901. aese $dat2,q8
  902. aesmc $dat2,$dat2
  903. aese $dat3,q8
  904. aesmc $dat3,$dat3
  905. aese $dat4,q8
  906. aesmc $dat4,$dat4
  907. vld1.32 {q8},[$key_],#16
  908. subs $cnt,$cnt,#2
  909. aese $dat0,q9
  910. aesmc $dat0,$dat0
  911. aese $dat1,q9
  912. aesmc $dat1,$dat1
  913. aese $dat2,q9
  914. aesmc $dat2,$dat2
  915. aese $dat3,q9
  916. aesmc $dat3,$dat3
  917. aese $dat4,q9
  918. aesmc $dat4,$dat4
  919. vld1.32 {q9},[$key_],#16
  920. b.gt .Loop5x_ctr32
  921. mov $key_,$key
  922. aese $dat0,q8
  923. aesmc $dat0,$dat0
  924. aese $dat1,q8
  925. aesmc $dat1,$dat1
  926. aese $dat2,q8
  927. aesmc $dat2,$dat2
  928. aese $dat3,q8
  929. aesmc $dat3,$dat3
  930. aese $dat4,q8
  931. aesmc $dat4,$dat4
  932. vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
  933. aese $dat0,q9
  934. aesmc $dat0,$dat0
  935. aese $dat1,q9
  936. aesmc $dat1,$dat1
  937. aese $dat2,q9
  938. aesmc $dat2,$dat2
  939. aese $dat3,q9
  940. aesmc $dat3,$dat3
  941. aese $dat4,q9
  942. aesmc $dat4,$dat4
  943. vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
  944. aese $dat0,q12
  945. aesmc $dat0,$dat0
  946. add $tctr0,$ctr,#1
  947. add $tctr1,$ctr,#2
  948. aese $dat1,q12
  949. aesmc $dat1,$dat1
  950. add $tctr2,$ctr,#3
  951. add w13,$ctr,#4
  952. aese $dat2,q12
  953. aesmc $dat2,$dat2
  954. add w14,$ctr,#5
  955. rev $tctr0,$tctr0
  956. aese $dat3,q12
  957. aesmc $dat3,$dat3
  958. rev $tctr1,$tctr1
  959. rev $tctr2,$tctr2
  960. aese $dat4,q12
  961. aesmc $dat4,$dat4
  962. rev w13,w13
  963. rev w14,w14
  964. aese $dat0,q13
  965. aesmc $dat0,$dat0
  966. aese $dat1,q13
  967. aesmc $dat1,$dat1
  968. aese $dat2,q13
  969. aesmc $dat2,$dat2
  970. aese $dat3,q13
  971. aesmc $dat3,$dat3
  972. aese $dat4,q13
  973. aesmc $dat4,$dat4
  974. aese $dat0,q14
  975. aesmc $dat0,$dat0
  976. vld1.8 {$in0},[$inp],#16
  977. aese $dat1,q14
  978. aesmc $dat1,$dat1
  979. vld1.8 {$in1},[$inp],#16
  980. aese $dat2,q14
  981. aesmc $dat2,$dat2
  982. vld1.8 {$in2},[$inp],#16
  983. aese $dat3,q14
  984. aesmc $dat3,$dat3
  985. vld1.8 {$in3},[$inp],#16
  986. aese $dat4,q14
  987. aesmc $dat4,$dat4
  988. vld1.8 {$in4},[$inp],#16
  989. aese $dat0,q15
  990. veor $in0,$in0,$rndlast
  991. aese $dat1,q15
  992. veor $in1,$in1,$rndlast
  993. aese $dat2,q15
  994. veor $in2,$in2,$rndlast
  995. aese $dat3,q15
  996. veor $in3,$in3,$rndlast
  997. aese $dat4,q15
  998. veor $in4,$in4,$rndlast
  999. veor $in0,$in0,$dat0
  1000. vorr $dat0,$ivec,$ivec
  1001. veor $in1,$in1,$dat1
  1002. vorr $dat1,$ivec,$ivec
  1003. veor $in2,$in2,$dat2
  1004. vorr $dat2,$ivec,$ivec
  1005. veor $in3,$in3,$dat3
  1006. vorr $dat3,$ivec,$ivec
  1007. veor $in4,$in4,$dat4
  1008. vorr $dat4,$ivec,$ivec
  1009. vst1.8 {$in0},[$out],#16
  1010. vmov.32 ${dat0}[3],$tctr0
  1011. vst1.8 {$in1},[$out],#16
  1012. vmov.32 ${dat1}[3],$tctr1
  1013. vst1.8 {$in2},[$out],#16
  1014. vmov.32 ${dat2}[3],$tctr2
  1015. vst1.8 {$in3},[$out],#16
  1016. vmov.32 ${dat3}[3],w13
  1017. vst1.8 {$in4},[$out],#16
  1018. vmov.32 ${dat4}[3],w14
  1019. mov $cnt,$rounds
  1020. cbz $len,.Lctr32_done
  1021. add $ctr,$ctr,#5
  1022. subs $len,$len,#5
  1023. b.hs .Loop5x_ctr32
  1024. add $len,$len,#5
  1025. sub $ctr,$ctr,#5
  1026. cmp $len,#2
  1027. mov $step,#16
  1028. cclr $step,lo
  1029. b.ls .Lctr32_tail
  1030. sub $len,$len,#3 // bias
  1031. add $ctr,$ctr,#3
  1032. ___
  1033. $code.=<<___;
  1034. b .Loop3x_ctr32
  1035. .align 4
  1036. .Loop3x_ctr32:
  1037. aese $dat0,q8
  1038. aesmc $dat0,$dat0
  1039. aese $dat1,q8
  1040. aesmc $dat1,$dat1
  1041. aese $dat2,q8
  1042. aesmc $dat2,$dat2
  1043. vld1.32 {q8},[$key_],#16
  1044. subs $cnt,$cnt,#2
  1045. aese $dat0,q9
  1046. aesmc $dat0,$dat0
  1047. aese $dat1,q9
  1048. aesmc $dat1,$dat1
  1049. aese $dat2,q9
  1050. aesmc $dat2,$dat2
  1051. vld1.32 {q9},[$key_],#16
  1052. b.gt .Loop3x_ctr32
  1053. aese $dat0,q8
  1054. aesmc $tmp0,$dat0
  1055. aese $dat1,q8
  1056. aesmc $tmp1,$dat1
  1057. vld1.8 {$in0},[$inp],#16
  1058. vorr $dat0,$ivec,$ivec
  1059. aese $dat2,q8
  1060. aesmc $dat2,$dat2
  1061. vld1.8 {$in1},[$inp],#16
  1062. vorr $dat1,$ivec,$ivec
  1063. aese $tmp0,q9
  1064. aesmc $tmp0,$tmp0
  1065. aese $tmp1,q9
  1066. aesmc $tmp1,$tmp1
  1067. vld1.8 {$in2},[$inp],#16
  1068. mov $key_,$key
  1069. aese $dat2,q9
  1070. aesmc $tmp2,$dat2
  1071. vorr $dat2,$ivec,$ivec
  1072. add $tctr0,$ctr,#1
  1073. aese $tmp0,q12
  1074. aesmc $tmp0,$tmp0
  1075. aese $tmp1,q12
  1076. aesmc $tmp1,$tmp1
  1077. veor $in0,$in0,$rndlast
  1078. add $tctr1,$ctr,#2
  1079. aese $tmp2,q12
  1080. aesmc $tmp2,$tmp2
  1081. veor $in1,$in1,$rndlast
  1082. add $ctr,$ctr,#3
  1083. aese $tmp0,q13
  1084. aesmc $tmp0,$tmp0
  1085. aese $tmp1,q13
  1086. aesmc $tmp1,$tmp1
  1087. veor $in2,$in2,$rndlast
  1088. rev $tctr0,$tctr0
  1089. aese $tmp2,q13
  1090. aesmc $tmp2,$tmp2
  1091. vmov.32 ${dat0}[3], $tctr0
  1092. rev $tctr1,$tctr1
  1093. aese $tmp0,q14
  1094. aesmc $tmp0,$tmp0
  1095. aese $tmp1,q14
  1096. aesmc $tmp1,$tmp1
  1097. vmov.32 ${dat1}[3], $tctr1
  1098. rev $tctr2,$ctr
  1099. aese $tmp2,q14
  1100. aesmc $tmp2,$tmp2
  1101. vmov.32 ${dat2}[3], $tctr2
  1102. subs $len,$len,#3
  1103. aese $tmp0,q15
  1104. aese $tmp1,q15
  1105. aese $tmp2,q15
  1106. veor $in0,$in0,$tmp0
  1107. vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
  1108. vst1.8 {$in0},[$out],#16
  1109. veor $in1,$in1,$tmp1
  1110. mov $cnt,$rounds
  1111. vst1.8 {$in1},[$out],#16
  1112. veor $in2,$in2,$tmp2
  1113. vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
  1114. vst1.8 {$in2},[$out],#16
  1115. b.hs .Loop3x_ctr32
  1116. adds $len,$len,#3
  1117. b.eq .Lctr32_done
  1118. cmp $len,#1
  1119. mov $step,#16
  1120. cclr $step,eq
  1121. .Lctr32_tail:
  1122. aese $dat0,q8
  1123. aesmc $dat0,$dat0
  1124. aese $dat1,q8
  1125. aesmc $dat1,$dat1
  1126. vld1.32 {q8},[$key_],#16
  1127. subs $cnt,$cnt,#2
  1128. aese $dat0,q9
  1129. aesmc $dat0,$dat0
  1130. aese $dat1,q9
  1131. aesmc $dat1,$dat1
  1132. vld1.32 {q9},[$key_],#16
  1133. b.gt .Lctr32_tail
  1134. aese $dat0,q8
  1135. aesmc $dat0,$dat0
  1136. aese $dat1,q8
  1137. aesmc $dat1,$dat1
  1138. aese $dat0,q9
  1139. aesmc $dat0,$dat0
  1140. aese $dat1,q9
  1141. aesmc $dat1,$dat1
  1142. vld1.8 {$in0},[$inp],$step
  1143. aese $dat0,q12
  1144. aesmc $dat0,$dat0
  1145. aese $dat1,q12
  1146. aesmc $dat1,$dat1
  1147. vld1.8 {$in1},[$inp]
  1148. aese $dat0,q13
  1149. aesmc $dat0,$dat0
  1150. aese $dat1,q13
  1151. aesmc $dat1,$dat1
  1152. veor $in0,$in0,$rndlast
  1153. aese $dat0,q14
  1154. aesmc $dat0,$dat0
  1155. aese $dat1,q14
  1156. aesmc $dat1,$dat1
  1157. veor $in1,$in1,$rndlast
  1158. aese $dat0,q15
  1159. aese $dat1,q15
  1160. cmp $len,#1
  1161. veor $in0,$in0,$dat0
  1162. veor $in1,$in1,$dat1
  1163. vst1.8 {$in0},[$out],#16
  1164. b.eq .Lctr32_done
  1165. vst1.8 {$in1},[$out]
  1166. .Lctr32_done:
  1167. ___
  1168. $code.=<<___ if ($flavour !~ /64/);
  1169. vldmia sp!,{d8-d15}
  1170. ldmia sp!,{r4-r10,pc}
  1171. ___
  1172. $code.=<<___ if ($flavour =~ /64/);
  1173. ldr x29,[sp],#16
  1174. ret
  1175. ___
  1176. $code.=<<___;
  1177. .size ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
  1178. ___
  1179. }}}
  1180. $code.=<<___;
  1181. #endif
  1182. ___
  1183. ########################################
  1184. if ($flavour =~ /64/) { ######## 64-bit code
  1185. my %opcode = (
  1186. "aesd" => 0x4e285800, "aese" => 0x4e284800,
  1187. "aesimc"=> 0x4e287800, "aesmc" => 0x4e286800 );
  1188. local *unaes = sub {
  1189. my ($mnemonic,$arg)=@_;
  1190. $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o &&
  1191. sprintf ".inst\t0x%08x\t//%s %s",
  1192. $opcode{$mnemonic}|$1|($2<<5),
  1193. $mnemonic,$arg;
  1194. };
  1195. foreach(split("\n",$code)) {
  1196. s/\`([^\`]*)\`/eval($1)/geo;
  1197. s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
  1198. s/@\s/\/\//o; # old->new style commentary
  1199. #s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
  1200. s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
  1201. s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel $2,$3,$2,$1/o or
  1202. s/vmov\.i8/movi/o or # fix up legacy mnemonics
  1203. s/vext\.8/ext/o or
  1204. s/vrev32\.8/rev32/o or
  1205. s/vtst\.8/cmtst/o or
  1206. s/vshr/ushr/o or
  1207. s/^(\s+)v/$1/o or # strip off v prefix
  1208. s/\bbx\s+lr\b/ret/o;
  1209. # fix up remaining legacy suffixes
  1210. s/\.[ui]?8//o;
  1211. m/\],#8/o and s/\.16b/\.8b/go;
  1212. s/\.[ui]?32//o and s/\.16b/\.4s/go;
  1213. s/\.[ui]?64//o and s/\.16b/\.2d/go;
  1214. s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
  1215. print $_,"\n";
  1216. }
  1217. } else { ######## 32-bit code
  1218. my %opcode = (
  1219. "aesd" => 0xf3b00340, "aese" => 0xf3b00300,
  1220. "aesimc"=> 0xf3b003c0, "aesmc" => 0xf3b00380 );
  1221. local *unaes = sub {
  1222. my ($mnemonic,$arg)=@_;
  1223. if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
  1224. my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
  1225. |(($2&7)<<1) |(($2&8)<<2);
  1226. # since ARMv7 instructions are always encoded little-endian.
  1227. # correct solution is to use .inst directive, but older
  1228. # assemblers don't implement it:-(
  1229. sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
  1230. $word&0xff,($word>>8)&0xff,
  1231. ($word>>16)&0xff,($word>>24)&0xff,
  1232. $mnemonic,$arg;
  1233. }
  1234. };
  1235. sub unvtbl {
  1236. my $arg=shift;
  1237. $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
  1238. sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
  1239. "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
  1240. }
  1241. sub unvdup32 {
  1242. my $arg=shift;
  1243. $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
  1244. sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
  1245. }
  1246. sub unvmov32 {
  1247. my $arg=shift;
  1248. $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
  1249. sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
  1250. }
  1251. foreach(split("\n",$code)) {
  1252. s/\`([^\`]*)\`/eval($1)/geo;
  1253. s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
  1254. s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
  1255. s/\/\/\s?/@ /o; # new->old style commentary
  1256. # fix up remaining new-style suffixes
  1257. s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
  1258. s/\],#[0-9]+/]!/o;
  1259. s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
  1260. s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2 $1,#0/o or
  1261. s/vtbl\.8\s+(.*)/unvtbl($1)/geo or
  1262. s/vdup\.32\s+(.*)/unvdup32($1)/geo or
  1263. s/vmov\.32\s+(.*)/unvmov32($1)/geo or
  1264. s/^(\s+)b\./$1b/o or
  1265. s/^(\s+)ret/$1bx\tlr/o;
  1266. if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
  1267. print " it $2\n";
  1268. }
  1269. print $_,"\n";
  1270. }
  1271. }
  1272. close STDOUT;