aesv8-armx.pl
#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements support for ARMv8 AES instructions. The
# module is endian-agnostic in the sense that it supports both big-
# and little-endian cases, and it supports both 32- and 64-bit modes
# of operation. The latter is achieved by limiting the number of
# utilized registers to 16, which implies additional NEON load and
# integer instructions. This has no effect on the mighty Apple A7,
# where results are literally equal to the theoretical estimates
# based on AES instruction latencies and issue rates. On Cortex-A53,
# an in-order execution core, this costs up to 10-15%, which is
# partially compensated by a dedicated code path for the 128-bit
# CBC encrypt case. On Cortex-A57 performance of parallelizable modes
# appears to be limited by the sheer number of NEON instructions...
#
# April 2019
#
# The key to performance of parallelizable modes is round-instruction
# interleaving. But which factor to use? There is an optimal one for
# each combination of instruction latency and issue rate, beyond
# which increasing the interleave factor doesn't pay off. On the cons
# side are code size growth and resource waste on platforms for which
# the interleave factor is too high. In other words, you want it to
# be just right. So far an interleave factor of 3x has served all
# platforms well, but for ThunderX2 the optimal interleave factor was
# measured to be 5x...
#
# Performance in cycles per byte processed with 128-bit key:
#
#                 CBC enc      CBC dec        CTR
# Apple A7        2.39         1.20           1.20
# Cortex-A53      1.32         1.17/1.29(**)  1.36/1.46
# Cortex-A57(*)   1.95         0.82/0.85      0.89/0.93
# Cortex-A72      1.33         0.85/0.88      0.92/0.96
# Denver          1.96         0.65/0.86      0.76/0.80
# Mongoose        1.33         1.23/1.20      1.30/1.20
# Kryo            1.26         0.87/0.94      1.00/1.00
# ThunderX2       5.95         1.25           1.30
#
# (*)  original 3.64/1.34/1.32 results were for r0p0 revision
#      and are still the same even with the updated module;
# (**) numbers after slash are for 32-bit code, which is 3x-
#      interleaved;

$flavour = shift;
$output = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
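
# The script follows the usual perlasm convention: the first argument
# selects the target flavour and the second names the output file, both
# of which are forwarded to arm-xlate.pl above. A typical invocation
# (file names here are only illustrative) would be:
#
#   perl aesv8-armx.pl linux64 aesv8-armx.S
#   perl aesv8-armx.pl linux32 aesv8-armx32.S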

$prefix="aes_v8";

$_byte = ($flavour =~ /win/ ? "DCB" : ".byte");

$code=<<___;
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
___
$code.=".arch armv8-a+crypto\n.text\n" if ($flavour =~ /64/);
$code.=<<___ if ($flavour !~ /64/);
.arch armv7-a // don't confuse not-so-latest binutils with armv8 :-)
.fpu neon
#ifdef __thumb2__
.syntax unified
.thumb
# define INST(a,b,c,d) $_byte c,d|0xc,a,b
#else
.code 32
# define INST(a,b,c,d) $_byte a,b,c,d
#endif
.text
___
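
# A note on the Thumb variant of INST() above: Thumb-2 stores a 32-bit
# instruction as two little-endian halfwords, most significant halfword
# first, hence the re-ordered bytes; d|0xc turns the 0xf3 leading byte of
# the ARM-mode NEON encodings used here into its 0xff Thumb-mode
# counterpart.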

# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
# NEON code uses mostly 32-bit mnemonics, integer code mostly 64-bit
# ones. The goal is to maintain both 32- and 64-bit code within a
# single module and transliterate common code to either flavour with
# regex voodoo.
#
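# For instance (a sketch of what the post-processing at the bottom of this
# file does, not literal output copied from it), a common-code line such as
#
#	vld1.32	{q8},[$key_],#16
#
# is emitted for the 64-bit flavour roughly as
#
#	ld1	{v16.4s},[x7],#16
#
# (q8-q15 map to v16-v23 and the .32 suffix selects the .4s arrangement),
# while for the 32-bit flavour it keeps the vld1.32 form with the
# post-index rewritten as [r7]!.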
{{{
my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
	$flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));

$code.=<<___;
.align 5
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
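
// The first row above is the initial round constant 0x01 splatted across
// all four lanes and the last row is the 0x1b value needed once the round
// constant overflows a byte.  The middle row is a vtbl byte-index mask
// ("rotate-n-splat"): it selects bytes 13,14,15,12 of the previous round
// key, i.e. RotWord of its last word, replicated into every lane, so that
// a subsequent aese against an all-zero key reduces to SubWord (ShiftRows
// cannot change a splatted vector).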

.globl ${prefix}_set_encrypt_key
.type ${prefix}_set_encrypt_key,%function
.align 5
${prefix}_set_encrypt_key:
.Lenc_key:
___
$code.=<<___ if ($flavour =~ /64/);
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
___
$code.=<<___;
	mov $ptr,#-1
	cmp $inp,#0
	b.eq .Lenc_key_abort
	cmp $out,#0
	b.eq .Lenc_key_abort
	mov $ptr,#-2
	cmp $bits,#128
	b.lt .Lenc_key_abort
	cmp $bits,#256
	b.gt .Lenc_key_abort
	tst $bits,#0x3f
	b.ne .Lenc_key_abort

	adr $ptr,.Lrcon
	cmp $bits,#192

	veor $zero,$zero,$zero
	vld1.8 {$in0},[$inp],#16
	mov $bits,#8 // reuse $bits
	vld1.32 {$rcon,$mask},[$ptr],#32

	b.lt .Loop128
	b.eq .L192
	b .L256

.align 4
.Loop128:
	vtbl.8 $key,{$in0},$mask
	vext.8 $tmp,$zero,$in0,#12
	vst1.32 {$in0},[$out],#16
	aese $key,$zero
	subs $bits,$bits,#1

	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $key,$key,$rcon
	veor $in0,$in0,$tmp
	vshl.u8 $rcon,$rcon,#1
	veor $in0,$in0,$key
	b.ne .Loop128

	vld1.32 {$rcon},[$ptr]

	vtbl.8 $key,{$in0},$mask
	vext.8 $tmp,$zero,$in0,#12
	vst1.32 {$in0},[$out],#16
	aese $key,$zero

	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $key,$key,$rcon
	veor $in0,$in0,$tmp
	vshl.u8 $rcon,$rcon,#1
	veor $in0,$in0,$key

	vtbl.8 $key,{$in0},$mask
	vext.8 $tmp,$zero,$in0,#12
	vst1.32 {$in0},[$out],#16
	aese $key,$zero

	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $key,$key,$rcon
	veor $in0,$in0,$tmp
	veor $in0,$in0,$key
	vst1.32 {$in0},[$out]
	add $out,$out,#0x50

	mov $rounds,#10
	b .Ldone

.align 4
.L192:
	vld1.8 {$in1},[$inp],#8
	vmov.i8 $key,#8 // borrow $key
	vst1.32 {$in0},[$out],#16
	vsub.i8 $mask,$mask,$key // adjust the mask

.Loop192:
	vtbl.8 $key,{$in1},$mask
	vext.8 $tmp,$zero,$in0,#12
	vst1.32 {$in1},[$out],#8
	aese $key,$zero
	subs $bits,$bits,#1

	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp

	vdup.32 $tmp,${in0}[3]
	veor $tmp,$tmp,$in1
	veor $key,$key,$rcon
	vext.8 $in1,$zero,$in1,#12
	vshl.u8 $rcon,$rcon,#1
	veor $in1,$in1,$tmp
	veor $in0,$in0,$key
	veor $in1,$in1,$key
	vst1.32 {$in0},[$out],#16
	b.ne .Loop192

	mov $rounds,#12
	add $out,$out,#0x20
	b .Ldone

.align 4
.L256:
	vld1.8 {$in1},[$inp]
	mov $bits,#7
	mov $rounds,#14
	vst1.32 {$in0},[$out],#16

.Loop256:
	vtbl.8 $key,{$in1},$mask
	vext.8 $tmp,$zero,$in0,#12
	vst1.32 {$in1},[$out],#16
	aese $key,$zero
	subs $bits,$bits,#1

	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in0,$in0,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $key,$key,$rcon
	veor $in0,$in0,$tmp
	vshl.u8 $rcon,$rcon,#1
	veor $in0,$in0,$key
	vst1.32 {$in0},[$out],#16
	b.eq .Ldone

	vdup.32 $key,${in0}[3] // just splat
	vext.8 $tmp,$zero,$in1,#12
	aese $key,$zero

	veor $in1,$in1,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in1,$in1,$tmp
	vext.8 $tmp,$zero,$tmp,#12
	veor $in1,$in1,$tmp

	veor $in1,$in1,$key
	b .Loop256

.Ldone:
	str $rounds,[$out]
	mov $ptr,#0

.Lenc_key_abort:
	mov x0,$ptr // return value
	`"ldr x29,[sp],#16" if ($flavour =~ /64/)`
	ret
.size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key

.globl ${prefix}_set_decrypt_key
.type ${prefix}_set_decrypt_key,%function
.align 5
${prefix}_set_decrypt_key:
___
$code.=<<___ if ($flavour =~ /64/);
	.inst 0xd503233f // paciasp
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
	stmdb sp!,{r4,lr}
___
$code.=<<___;
	bl .Lenc_key

	cmp x0,#0
	b.ne .Ldec_key_abort

	sub $out,$out,#240 // restore original $out
	mov x4,#-16
	add $inp,$out,x12,lsl#4 // end of key schedule

	vld1.32 {v0.16b},[$out]
	vld1.32 {v1.16b},[$inp]
	vst1.32 {v0.16b},[$inp],x4
	vst1.32 {v1.16b},[$out],#16

.Loop_imc:
	vld1.32 {v0.16b},[$out]
	vld1.32 {v1.16b},[$inp]
	aesimc v0.16b,v0.16b
	aesimc v1.16b,v1.16b
	vst1.32 {v0.16b},[$inp],x4
	vst1.32 {v1.16b},[$out],#16
	cmp $inp,$out
	b.hi .Loop_imc

	vld1.32 {v0.16b},[$out]
	aesimc v0.16b,v0.16b
	vst1.32 {v0.16b},[$inp]

	eor x0,x0,x0 // return value
.Ldec_key_abort:
___
$code.=<<___ if ($flavour !~ /64/);
	ldmia sp!,{r4,pc}
___
$code.=<<___ if ($flavour =~ /64/);
	ldp x29,x30,[sp],#16
	.inst 0xd50323bf // autiasp
	ret
___
$code.=<<___;
.size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
___
}}}

{{{
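# gen_block() emits the one-block ${prefix}_encrypt and ${prefix}_decrypt
# entry points: the round count is loaded from the key schedule (offset
# 240), rounds are processed two at a time as paired aes(e|d)/aes(i)mc
# instructions, and the final round is followed by a plain veor with the
# last round key.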
sub gen_block () {
my $dir = shift;
my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
my ($inp,$out,$key)=map("x$_",(0..2));
my $rounds="w3";
my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));

$code.=<<___;
.globl ${prefix}_${dir}crypt
.type ${prefix}_${dir}crypt,%function
.align 5
${prefix}_${dir}crypt:
	ldr $rounds,[$key,#240]
	vld1.32 {$rndkey0},[$key],#16
	vld1.8 {$inout},[$inp]
	sub $rounds,$rounds,#2
	vld1.32 {$rndkey1},[$key],#16

.Loop_${dir}c:
	aes$e $inout,$rndkey0
	aes$mc $inout,$inout
	vld1.32 {$rndkey0},[$key],#16
	subs $rounds,$rounds,#2
	aes$e $inout,$rndkey1
	aes$mc $inout,$inout
	vld1.32 {$rndkey1},[$key],#16
	b.gt .Loop_${dir}c

	aes$e $inout,$rndkey0
	aes$mc $inout,$inout
	vld1.32 {$rndkey0},[$key]
	aes$e $inout,$rndkey1
	veor $inout,$inout,$rndkey0

	vst1.8 {$inout},[$out]
	ret
.size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
___
}
&gen_block("en");
&gen_block("de");
}}}

{{{
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));

my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);

### q8-q15 preloaded key schedule

$code.=<<___;
.globl ${prefix}_cbc_encrypt
.type ${prefix}_cbc_encrypt,%function
.align 5
${prefix}_cbc_encrypt:
___
$code.=<<___ if ($flavour =~ /64/);
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
	mov ip,sp
	stmdb sp!,{r4-r8,lr}
	vstmdb sp!,{d8-d15} @ ABI specification says so
	ldmia ip,{r4-r5} @ load remaining args
___
$code.=<<___;
	subs $len,$len,#16
	mov $step,#16
	b.lo .Lcbc_abort
	cclr $step,eq

	cmp $enc,#0 // en- or decrypting?
	ldr $rounds,[$key,#240]
	and $len,$len,#-16
	vld1.8 {$ivec},[$ivp]
	vld1.8 {$dat},[$inp],$step

	vld1.32 {q8-q9},[$key] // load key schedule...
	sub $rounds,$rounds,#6
	add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
	sub $rounds,$rounds,#2
	vld1.32 {q10-q11},[$key_],#32
	vld1.32 {q12-q13},[$key_],#32
	vld1.32 {q14-q15},[$key_],#32
	vld1.32 {$rndlast},[$key_]

	add $key_,$key,#32
	mov $cnt,$rounds
	b.eq .Lcbc_dec

	cmp $rounds,#2
	veor $dat,$dat,$ivec
	veor $rndzero_n_last,q8,$rndlast
	b.eq .Lcbc_enc128

	vld1.32 {$in0-$in1},[$key_]
	add $key_,$key,#16
	add $key4,$key,#16*4
	add $key5,$key,#16*5
	aese $dat,q8
	aesmc $dat,$dat
	add $key6,$key,#16*6
	add $key7,$key,#16*7
	b .Lenter_cbc_enc

.align 4
.Loop_cbc_enc:
	aese $dat,q8
	aesmc $dat,$dat
	vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc:
	aese $dat,q9
	aesmc $dat,$dat
	aese $dat,$in0
	aesmc $dat,$dat
	vld1.32 {q8},[$key4]
	cmp $rounds,#4
	aese $dat,$in1
	aesmc $dat,$dat
	vld1.32 {q9},[$key5]
	b.eq .Lcbc_enc192

	aese $dat,q8
	aesmc $dat,$dat
	vld1.32 {q8},[$key6]
	aese $dat,q9
	aesmc $dat,$dat
	vld1.32 {q9},[$key7]
	nop

.Lcbc_enc192:
	aese $dat,q8
	aesmc $dat,$dat
	subs $len,$len,#16
	aese $dat,q9
	aesmc $dat,$dat
	cclr $step,eq
	aese $dat,q10
	aesmc $dat,$dat
	aese $dat,q11
	aesmc $dat,$dat
	vld1.8 {q8},[$inp],$step
	aese $dat,q12
	aesmc $dat,$dat
	veor q8,q8,$rndzero_n_last
	aese $dat,q13
	aesmc $dat,$dat
	vld1.32 {q9},[$key_] // re-pre-load rndkey[1]
	aese $dat,q14
	aesmc $dat,$dat
	aese $dat,q15
	veor $ivec,$dat,$rndlast
	b.hs .Loop_cbc_enc

	vst1.8 {$ivec},[$out],#16
	b .Lcbc_done

.align 5
.Lcbc_enc128:
	vld1.32 {$in0-$in1},[$key_]
	aese $dat,q8
	aesmc $dat,$dat
	b .Lenter_cbc_enc128
.Loop_cbc_enc128:
	aese $dat,q8
	aesmc $dat,$dat
	vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc128:
	aese $dat,q9
	aesmc $dat,$dat
	subs $len,$len,#16
	aese $dat,$in0
	aesmc $dat,$dat
	cclr $step,eq
	aese $dat,$in1
	aesmc $dat,$dat
	aese $dat,q10
	aesmc $dat,$dat
	aese $dat,q11
	aesmc $dat,$dat
	vld1.8 {q8},[$inp],$step
	aese $dat,q12
	aesmc $dat,$dat
	aese $dat,q13
	aesmc $dat,$dat
	aese $dat,q14
	aesmc $dat,$dat
	veor q8,q8,$rndzero_n_last
	aese $dat,q15
	veor $ivec,$dat,$rndlast
	b.hs .Loop_cbc_enc128

	vst1.8 {$ivec},[$out],#16
	b .Lcbc_done
___
{
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));

my ($dat3,$in3,$tmp3); # used only in 64-bit mode
my ($dat4,$in4,$tmp4);
if ($flavour =~ /64/) {
	($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
}
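
# The 5x-interleaved decrypt path below is emitted for the 64-bit flavour
# only, which has enough vector registers to keep five blocks in flight;
# the 32-bit flavour goes straight to the common 3x-interleaved loop
# (see the interleave-factor notes at the top of the file).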
$code.=<<___;
.align 5
.Lcbc_dec:
	vld1.8 {$dat2},[$inp],#16
	subs $len,$len,#32 // bias
	add $cnt,$rounds,#2
	vorr $in1,$dat,$dat
	vorr $dat1,$dat,$dat
	vorr $in2,$dat2,$dat2
	b.lo .Lcbc_dec_tail

	vorr $dat1,$dat2,$dat2
	vld1.8 {$dat2},[$inp],#16
	vorr $in0,$dat,$dat
	vorr $in1,$dat1,$dat1
	vorr $in2,$dat2,$dat2
___
$code.=<<___ if ($flavour =~ /64/);
	cmp $len,#32
	b.lo .Loop3x_cbc_dec

	vld1.8 {$dat3},[$inp],#16
	vld1.8 {$dat4},[$inp],#16
	sub $len,$len,#32 // bias
	mov $cnt,$rounds
	vorr $in3,$dat3,$dat3
	vorr $in4,$dat4,$dat4

.Loop5x_cbc_dec:
	aesd $dat0,q8
	aesimc $dat0,$dat0
	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	aesd $dat3,q8
	aesimc $dat3,$dat3
	aesd $dat4,q8
	aesimc $dat4,$dat4
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aesd $dat0,q9
	aesimc $dat0,$dat0
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	aesd $dat3,q9
	aesimc $dat3,$dat3
	aesd $dat4,q9
	aesimc $dat4,$dat4
	vld1.32 {q9},[$key_],#16
	b.gt .Loop5x_cbc_dec

	aesd $dat0,q8
	aesimc $dat0,$dat0
	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	aesd $dat3,q8
	aesimc $dat3,$dat3
	aesd $dat4,q8
	aesimc $dat4,$dat4
	cmp $len,#0x40 // because .Lcbc_tail4x
	sub $len,$len,#0x50

	aesd $dat0,q9
	aesimc $dat0,$dat0
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	aesd $dat3,q9
	aesimc $dat3,$dat3
	aesd $dat4,q9
	aesimc $dat4,$dat4
	csel x6,xzr,$len,gt // borrow x6, $cnt; "gt" is not a typo
	mov $key_,$key

	aesd $dat0,q10
	aesimc $dat0,$dat0
	aesd $dat1,q10
	aesimc $dat1,$dat1
	aesd $dat2,q10
	aesimc $dat2,$dat2
	aesd $dat3,q10
	aesimc $dat3,$dat3
	aesd $dat4,q10
	aesimc $dat4,$dat4
	add $inp,$inp,x6 // $inp is adjusted in such a way that
			 // at exit from the loop $dat1-$dat4
			 // are loaded with the last "words"
	add x6,$len,#0x60 // because .Lcbc_tail4x

	aesd $dat0,q11
	aesimc $dat0,$dat0
	aesd $dat1,q11
	aesimc $dat1,$dat1
	aesd $dat2,q11
	aesimc $dat2,$dat2
	aesd $dat3,q11
	aesimc $dat3,$dat3
	aesd $dat4,q11
	aesimc $dat4,$dat4

	aesd $dat0,q12
	aesimc $dat0,$dat0
	aesd $dat1,q12
	aesimc $dat1,$dat1
	aesd $dat2,q12
	aesimc $dat2,$dat2
	aesd $dat3,q12
	aesimc $dat3,$dat3
	aesd $dat4,q12
	aesimc $dat4,$dat4

	aesd $dat0,q13
	aesimc $dat0,$dat0
	aesd $dat1,q13
	aesimc $dat1,$dat1
	aesd $dat2,q13
	aesimc $dat2,$dat2
	aesd $dat3,q13
	aesimc $dat3,$dat3
	aesd $dat4,q13
	aesimc $dat4,$dat4

	aesd $dat0,q14
	aesimc $dat0,$dat0
	aesd $dat1,q14
	aesimc $dat1,$dat1
	aesd $dat2,q14
	aesimc $dat2,$dat2
	aesd $dat3,q14
	aesimc $dat3,$dat3
	aesd $dat4,q14
	aesimc $dat4,$dat4

	veor $tmp0,$ivec,$rndlast
	aesd $dat0,q15
	veor $tmp1,$in0,$rndlast
	vld1.8 {$in0},[$inp],#16
	aesd $dat1,q15
	veor $tmp2,$in1,$rndlast
	vld1.8 {$in1},[$inp],#16
	aesd $dat2,q15
	veor $tmp3,$in2,$rndlast
	vld1.8 {$in2},[$inp],#16
	aesd $dat3,q15
	veor $tmp4,$in3,$rndlast
	vld1.8 {$in3},[$inp],#16
	aesd $dat4,q15
	vorr $ivec,$in4,$in4
	vld1.8 {$in4},[$inp],#16
	cbz x6,.Lcbc_tail4x
	vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
	veor $tmp0,$tmp0,$dat0
	vorr $dat0,$in0,$in0
	veor $tmp1,$tmp1,$dat1
	vorr $dat1,$in1,$in1
	veor $tmp2,$tmp2,$dat2
	vorr $dat2,$in2,$in2
	veor $tmp3,$tmp3,$dat3
	vorr $dat3,$in3,$in3
	veor $tmp4,$tmp4,$dat4
	vst1.8 {$tmp0},[$out],#16
	vorr $dat4,$in4,$in4
	vst1.8 {$tmp1},[$out],#16
	mov $cnt,$rounds
	vst1.8 {$tmp2},[$out],#16
	vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
	vst1.8 {$tmp3},[$out],#16
	vst1.8 {$tmp4},[$out],#16
	b.hs .Loop5x_cbc_dec

	add $len,$len,#0x50
	cbz $len,.Lcbc_done

	add $cnt,$rounds,#2
	subs $len,$len,#0x30
	vorr $dat0,$in2,$in2
	vorr $in0,$in2,$in2
	vorr $dat1,$in3,$in3
	vorr $in1,$in3,$in3
	vorr $dat2,$in4,$in4
	vorr $in2,$in4,$in4
	b.lo .Lcbc_dec_tail

	b .Loop3x_cbc_dec

.align 4
.Lcbc_tail4x:
	veor $tmp1,$tmp0,$dat1
	veor $tmp2,$tmp2,$dat2
	veor $tmp3,$tmp3,$dat3
	veor $tmp4,$tmp4,$dat4
	vst1.8 {$tmp1},[$out],#16
	vst1.8 {$tmp2},[$out],#16
	vst1.8 {$tmp3},[$out],#16
	vst1.8 {$tmp4},[$out],#16

	b .Lcbc_done
.align 4
___
$code.=<<___;
.Loop3x_cbc_dec:
	aesd $dat0,q8
	aesimc $dat0,$dat0
	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aesd $dat0,q9
	aesimc $dat0,$dat0
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	vld1.32 {q9},[$key_],#16
	b.gt .Loop3x_cbc_dec

	aesd $dat0,q8
	aesimc $dat0,$dat0
	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	veor $tmp0,$ivec,$rndlast
	subs $len,$len,#0x30
	veor $tmp1,$in0,$rndlast
	mov.lo x6,$len // x6, $cnt, is zero at this point
	aesd $dat0,q9
	aesimc $dat0,$dat0
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	veor $tmp2,$in1,$rndlast
	add $inp,$inp,x6 // $inp is adjusted in such a way that
			 // at exit from the loop $dat1-$dat2
			 // are loaded with the last "words"
	vorr $ivec,$in2,$in2
	mov $key_,$key
	aesd $dat0,q12
	aesimc $dat0,$dat0
	aesd $dat1,q12
	aesimc $dat1,$dat1
	aesd $dat2,q12
	aesimc $dat2,$dat2
	vld1.8 {$in0},[$inp],#16
	aesd $dat0,q13
	aesimc $dat0,$dat0
	aesd $dat1,q13
	aesimc $dat1,$dat1
	aesd $dat2,q13
	aesimc $dat2,$dat2
	vld1.8 {$in1},[$inp],#16
	aesd $dat0,q14
	aesimc $dat0,$dat0
	aesd $dat1,q14
	aesimc $dat1,$dat1
	aesd $dat2,q14
	aesimc $dat2,$dat2
	vld1.8 {$in2},[$inp],#16
	aesd $dat0,q15
	aesd $dat1,q15
	aesd $dat2,q15
	vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
	add $cnt,$rounds,#2
	veor $tmp0,$tmp0,$dat0
	veor $tmp1,$tmp1,$dat1
	veor $dat2,$dat2,$tmp2
	vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
	vst1.8 {$tmp0},[$out],#16
	vorr $dat0,$in0,$in0
	vst1.8 {$tmp1},[$out],#16
	vorr $dat1,$in1,$in1
	vst1.8 {$dat2},[$out],#16
	vorr $dat2,$in2,$in2
	b.hs .Loop3x_cbc_dec

	cmn $len,#0x30
	b.eq .Lcbc_done
	nop

.Lcbc_dec_tail:
	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	vld1.32 {q9},[$key_],#16
	b.gt .Lcbc_dec_tail

	aesd $dat1,q8
	aesimc $dat1,$dat1
	aesd $dat2,q8
	aesimc $dat2,$dat2
	aesd $dat1,q9
	aesimc $dat1,$dat1
	aesd $dat2,q9
	aesimc $dat2,$dat2
	aesd $dat1,q12
	aesimc $dat1,$dat1
	aesd $dat2,q12
	aesimc $dat2,$dat2
	cmn $len,#0x20
	aesd $dat1,q13
	aesimc $dat1,$dat1
	aesd $dat2,q13
	aesimc $dat2,$dat2
	veor $tmp1,$ivec,$rndlast
	aesd $dat1,q14
	aesimc $dat1,$dat1
	aesd $dat2,q14
	aesimc $dat2,$dat2
	veor $tmp2,$in1,$rndlast
	aesd $dat1,q15
	aesd $dat2,q15
	b.eq .Lcbc_dec_one
	veor $tmp1,$tmp1,$dat1
	veor $tmp2,$tmp2,$dat2
	vorr $ivec,$in2,$in2
	vst1.8 {$tmp1},[$out],#16
	vst1.8 {$tmp2},[$out],#16
	b .Lcbc_done

.Lcbc_dec_one:
	veor $tmp1,$tmp1,$dat2
	vorr $ivec,$in2,$in2
	vst1.8 {$tmp1},[$out],#16

.Lcbc_done:
	vst1.8 {$ivec},[$ivp]
.Lcbc_abort:
___
}
$code.=<<___ if ($flavour !~ /64/);
	vldmia sp!,{d8-d15}
	ldmia sp!,{r4-r8,pc}
___
$code.=<<___ if ($flavour =~ /64/);
	ldr x29,[sp],#16
	ret
___
$code.=<<___;
.size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
___
}}}

{{{
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
my ($rounds,$cnt,$key_)=("w5","w6","x7");
my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
my $step="x12"; # aliases with $tctr2
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));

# used only in 64-bit mode...
my ($dat3,$dat4,$in3,$in4)=map("q$_",(16..23));

my ($dat,$tmp)=($dat0,$tmp0);

### q8-q15 preloaded key schedule

$code.=<<___;
.globl ${prefix}_ctr32_encrypt_blocks
.type ${prefix}_ctr32_encrypt_blocks,%function
.align 5
${prefix}_ctr32_encrypt_blocks:
___
$code.=<<___ if ($flavour =~ /64/);
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
	mov ip,sp
	stmdb sp!,{r4-r10,lr}
	vstmdb sp!,{d8-d15} @ ABI specification says so
	ldr r4, [ip] @ load remaining arg
___
$code.=<<___;
	ldr $rounds,[$key,#240]

	ldr $ctr, [$ivp, #12]
	vld1.32 {$dat0},[$ivp]

	vld1.32 {q8-q9},[$key] // load key schedule...
	sub $rounds,$rounds,#4
	mov $step,#16
	cmp $len,#2
	add $key_,$key,x5,lsl#4 // pointer to last 5 round keys
	sub $rounds,$rounds,#2
	vld1.32 {q12-q13},[$key_],#32
	vld1.32 {q14-q15},[$key_],#32
	vld1.32 {$rndlast},[$key_]
	add $key_,$key,#32
	mov $cnt,$rounds
	cclr $step,lo
#ifndef __ARMEB__
	rev $ctr, $ctr
#endif
	vorr $dat1,$dat0,$dat0
	add $tctr1, $ctr, #1
	vorr $dat2,$dat0,$dat0
	add $ctr, $ctr, #2
	vorr $ivec,$dat0,$dat0
	rev $tctr1, $tctr1
	vmov.32 ${dat1}[3],$tctr1
	b.ls .Lctr32_tail
	rev $tctr2, $ctr
	sub $len,$len,#3 // bias
	vmov.32 ${dat2}[3],$tctr2
___
$code.=<<___ if ($flavour =~ /64/);
	cmp $len,#2
	b.lo .Loop3x_ctr32

	add w13,$ctr,#1
	add w14,$ctr,#2
	vorr $dat3,$dat0,$dat0
	rev w13,w13
	vorr $dat4,$dat0,$dat0
	rev w14,w14
	vmov.32 ${dat3}[3],w13
	sub $len,$len,#2 // bias
	vmov.32 ${dat4}[3],w14
	add $ctr,$ctr,#2
	b .Loop5x_ctr32

.align 4
.Loop5x_ctr32:
	aese $dat0,q8
	aesmc $dat0,$dat0
	aese $dat1,q8
	aesmc $dat1,$dat1
	aese $dat2,q8
	aesmc $dat2,$dat2
	aese $dat3,q8
	aesmc $dat3,$dat3
	aese $dat4,q8
	aesmc $dat4,$dat4
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aese $dat0,q9
	aesmc $dat0,$dat0
	aese $dat1,q9
	aesmc $dat1,$dat1
	aese $dat2,q9
	aesmc $dat2,$dat2
	aese $dat3,q9
	aesmc $dat3,$dat3
	aese $dat4,q9
	aesmc $dat4,$dat4
	vld1.32 {q9},[$key_],#16
	b.gt .Loop5x_ctr32

	mov $key_,$key
	aese $dat0,q8
	aesmc $dat0,$dat0
	aese $dat1,q8
	aesmc $dat1,$dat1
	aese $dat2,q8
	aesmc $dat2,$dat2
	aese $dat3,q8
	aesmc $dat3,$dat3
	aese $dat4,q8
	aesmc $dat4,$dat4
	vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]

	aese $dat0,q9
	aesmc $dat0,$dat0
	aese $dat1,q9
	aesmc $dat1,$dat1
	aese $dat2,q9
	aesmc $dat2,$dat2
	aese $dat3,q9
	aesmc $dat3,$dat3
	aese $dat4,q9
	aesmc $dat4,$dat4
	vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]

	aese $dat0,q12
	aesmc $dat0,$dat0
	add $tctr0,$ctr,#1
	add $tctr1,$ctr,#2
	aese $dat1,q12
	aesmc $dat1,$dat1
	add $tctr2,$ctr,#3
	add w13,$ctr,#4
	aese $dat2,q12
	aesmc $dat2,$dat2
	add w14,$ctr,#5
	rev $tctr0,$tctr0
	aese $dat3,q12
	aesmc $dat3,$dat3
	rev $tctr1,$tctr1
	rev $tctr2,$tctr2
	aese $dat4,q12
	aesmc $dat4,$dat4
	rev w13,w13
	rev w14,w14

	aese $dat0,q13
	aesmc $dat0,$dat0
	aese $dat1,q13
	aesmc $dat1,$dat1
	aese $dat2,q13
	aesmc $dat2,$dat2
	aese $dat3,q13
	aesmc $dat3,$dat3
	aese $dat4,q13
	aesmc $dat4,$dat4

	aese $dat0,q14
	aesmc $dat0,$dat0
	vld1.8 {$in0},[$inp],#16
	aese $dat1,q14
	aesmc $dat1,$dat1
	vld1.8 {$in1},[$inp],#16
	aese $dat2,q14
	aesmc $dat2,$dat2
	vld1.8 {$in2},[$inp],#16
	aese $dat3,q14
	aesmc $dat3,$dat3
	vld1.8 {$in3},[$inp],#16
	aese $dat4,q14
	aesmc $dat4,$dat4
	vld1.8 {$in4},[$inp],#16

	aese $dat0,q15
	veor $in0,$in0,$rndlast
	aese $dat1,q15
	veor $in1,$in1,$rndlast
	aese $dat2,q15
	veor $in2,$in2,$rndlast
	aese $dat3,q15
	veor $in3,$in3,$rndlast
	aese $dat4,q15
	veor $in4,$in4,$rndlast

	veor $in0,$in0,$dat0
	vorr $dat0,$ivec,$ivec
	veor $in1,$in1,$dat1
	vorr $dat1,$ivec,$ivec
	veor $in2,$in2,$dat2
	vorr $dat2,$ivec,$ivec
	veor $in3,$in3,$dat3
	vorr $dat3,$ivec,$ivec
	veor $in4,$in4,$dat4
	vorr $dat4,$ivec,$ivec

	vst1.8 {$in0},[$out],#16
	vmov.32 ${dat0}[3],$tctr0
	vst1.8 {$in1},[$out],#16
	vmov.32 ${dat1}[3],$tctr1
	vst1.8 {$in2},[$out],#16
	vmov.32 ${dat2}[3],$tctr2
	vst1.8 {$in3},[$out],#16
	vmov.32 ${dat3}[3],w13
	vst1.8 {$in4},[$out],#16
	vmov.32 ${dat4}[3],w14

	mov $cnt,$rounds
	cbz $len,.Lctr32_done

	add $ctr,$ctr,#5
	subs $len,$len,#5
	b.hs .Loop5x_ctr32

	add $len,$len,#5
	sub $ctr,$ctr,#5

	cmp $len,#2
	mov $step,#16
	cclr $step,lo
	b.ls .Lctr32_tail

	sub $len,$len,#3 // bias
	add $ctr,$ctr,#3
___
$code.=<<___;
	b .Loop3x_ctr32

.align 4
.Loop3x_ctr32:
	aese $dat0,q8
	aesmc $dat0,$dat0
	aese $dat1,q8
	aesmc $dat1,$dat1
	aese $dat2,q8
	aesmc $dat2,$dat2
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aese $dat0,q9
	aesmc $dat0,$dat0
	aese $dat1,q9
	aesmc $dat1,$dat1
	aese $dat2,q9
	aesmc $dat2,$dat2
	vld1.32 {q9},[$key_],#16
	b.gt .Loop3x_ctr32

	aese $dat0,q8
	aesmc $tmp0,$dat0
	aese $dat1,q8
	aesmc $tmp1,$dat1
	vld1.8 {$in0},[$inp],#16
	vorr $dat0,$ivec,$ivec
	aese $dat2,q8
	aesmc $dat2,$dat2
	vld1.8 {$in1},[$inp],#16
	vorr $dat1,$ivec,$ivec
	aese $tmp0,q9
	aesmc $tmp0,$tmp0
	aese $tmp1,q9
	aesmc $tmp1,$tmp1
	vld1.8 {$in2},[$inp],#16
	mov $key_,$key
	aese $dat2,q9
	aesmc $tmp2,$dat2
	vorr $dat2,$ivec,$ivec
	add $tctr0,$ctr,#1
	aese $tmp0,q12
	aesmc $tmp0,$tmp0
	aese $tmp1,q12
	aesmc $tmp1,$tmp1
	veor $in0,$in0,$rndlast
	add $tctr1,$ctr,#2
	aese $tmp2,q12
	aesmc $tmp2,$tmp2
	veor $in1,$in1,$rndlast
	add $ctr,$ctr,#3
	aese $tmp0,q13
	aesmc $tmp0,$tmp0
	aese $tmp1,q13
	aesmc $tmp1,$tmp1
	veor $in2,$in2,$rndlast
	rev $tctr0,$tctr0
	aese $tmp2,q13
	aesmc $tmp2,$tmp2
	vmov.32 ${dat0}[3], $tctr0
	rev $tctr1,$tctr1
	aese $tmp0,q14
	aesmc $tmp0,$tmp0
	aese $tmp1,q14
	aesmc $tmp1,$tmp1
	vmov.32 ${dat1}[3], $tctr1
	rev $tctr2,$ctr
	aese $tmp2,q14
	aesmc $tmp2,$tmp2
	vmov.32 ${dat2}[3], $tctr2
	subs $len,$len,#3
	aese $tmp0,q15
	aese $tmp1,q15
	aese $tmp2,q15

	veor $in0,$in0,$tmp0
	vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
	vst1.8 {$in0},[$out],#16
	veor $in1,$in1,$tmp1
	mov $cnt,$rounds
	vst1.8 {$in1},[$out],#16
	veor $in2,$in2,$tmp2
	vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
	vst1.8 {$in2},[$out],#16
	b.hs .Loop3x_ctr32

	adds $len,$len,#3
	b.eq .Lctr32_done
	cmp $len,#1
	mov $step,#16
	cclr $step,eq

.Lctr32_tail:
	aese $dat0,q8
	aesmc $dat0,$dat0
	aese $dat1,q8
	aesmc $dat1,$dat1
	vld1.32 {q8},[$key_],#16
	subs $cnt,$cnt,#2
	aese $dat0,q9
	aesmc $dat0,$dat0
	aese $dat1,q9
	aesmc $dat1,$dat1
	vld1.32 {q9},[$key_],#16
	b.gt .Lctr32_tail

	aese $dat0,q8
	aesmc $dat0,$dat0
	aese $dat1,q8
	aesmc $dat1,$dat1
	aese $dat0,q9
	aesmc $dat0,$dat0
	aese $dat1,q9
	aesmc $dat1,$dat1
	vld1.8 {$in0},[$inp],$step
	aese $dat0,q12
	aesmc $dat0,$dat0
	aese $dat1,q12
	aesmc $dat1,$dat1
	vld1.8 {$in1},[$inp]
	aese $dat0,q13
	aesmc $dat0,$dat0
	aese $dat1,q13
	aesmc $dat1,$dat1
	veor $in0,$in0,$rndlast
	aese $dat0,q14
	aesmc $dat0,$dat0
	aese $dat1,q14
	aesmc $dat1,$dat1
	veor $in1,$in1,$rndlast
	aese $dat0,q15
	aese $dat1,q15

	cmp $len,#1
	veor $in0,$in0,$dat0
	veor $in1,$in1,$dat1
	vst1.8 {$in0},[$out],#16
	b.eq .Lctr32_done
	vst1.8 {$in1},[$out]

.Lctr32_done:
___
$code.=<<___ if ($flavour !~ /64/);
	vldmia sp!,{d8-d15}
	ldmia sp!,{r4-r10,pc}
___
$code.=<<___ if ($flavour =~ /64/);
	ldr x29,[sp],#16
	ret
___
$code.=<<___;
.size ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
___
}}}
$code.=<<___;
#endif
___
########################################
if ($flavour =~ /64/) { ######## 64-bit code
my %opcode = (
	"aesd"	=> 0x4e285800,	"aese"	=> 0x4e284800,
	"aesimc"=> 0x4e287800,	"aesmc"	=> 0x4e286800	);

local *unaes = sub {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o &&
	sprintf ".inst\t0x%08x\t//%s %s",
		$opcode{$mnemonic}|$1|($2<<5),
		$mnemonic,$arg;
};
foreach(split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/geo;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;					# old->new style commentary

	#s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
	s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel $2,$3,$2,$1/o or
	s/vmov\.i8/movi/o or	# fix up legacy mnemonics
	s/vext\.8/ext/o or
	s/vrev32\.8/rev32/o or
	s/vtst\.8/cmtst/o or
	s/vshr/ushr/o or
	s/^(\s+)v/$1/o or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	# fix up remaining legacy suffixes
	s/\.[ui]?8//o;
	m/\],#8/o and s/\.16b/\.8b/go;
	s/\.[ui]?32//o and s/\.16b/\.4s/go;
	s/\.[ui]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
}
} else { ######## 32-bit code
my %opcode = (
	"aesd"	=> 0xf3b00340,	"aese"	=> 0xf3b00300,
	"aesimc"=> 0xf3b003c0,	"aesmc"	=> 0xf3b00380	);

local *unaes = sub {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
		my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
					     |(($2&7)<<1) |(($2&8)<<2);
		# The opcode bytes are emitted in little-endian order because
		# ARMv7 instructions are always encoded little-endian. The
		# proper solution would be the .inst directive, but older
		# assemblers don't implement it:-(
		sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
};

sub unvtbl {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
	sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
		"vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
}

sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
}

sub unvmov32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
	sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
}

foreach(split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/geo;

	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
	s/\],#[0-9]+/]!/o;

	s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
	s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2 $1,#0/o or
	s/vtbl\.8\s+(.*)/unvtbl($1)/geo or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo or
	s/vmov\.32\s+(.*)/unvmov32($1)/geo or
	s/^(\s+)b\./$1b/o or
	s/^(\s+)ret/$1bx\tlr/o;

	if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
		print " it $2\n";
	}

	print $_,"\n";
}
}

close STDOUT;