#! /usr/bin/env perl
# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements support for ARMv8 AES instructions. The
# module is endian-agnostic in the sense that it supports both big-
# and little-endian cases. It likewise supports both 32- and 64-bit
# modes of operation. The latter is achieved by limiting the number
# of utilized registers to 16, which implies additional NEON load
# and integer instructions. This has no effect on the mighty Apple
# A7, where results are literally equal to the theoretical estimates
# based on AES instruction latencies and issue rates. On Cortex-A53,
# an in-order execution core, this costs up to 10-15%, which is
# partially compensated by implementing a dedicated code path for
# the 128-bit CBC encrypt case. On Cortex-A57, performance of the
# parallelizable modes seems to be limited by the sheer amount of
# NEON instructions...
#
# April 2019
#
# Key to the performance of the parallelizable modes is round-
# instruction interleaving. But which factor to use? There is an
# optimal one for each combination of instruction latency and issue
# rate, beyond which increasing the interleave factor doesn't pay
# off. On the con side are code size increase and resource waste on
# platforms for which the interleave factor is too high. In other
# words, you want it to be just right. So far an interleave factor
# of 3x has served all platforms well, but for ThunderX2 the optimal
# factor was measured to be 5x (see the illustrative sketch after
# this comment block).
#
# Performance in cycles per byte processed with 128-bit key:
#
#		CBC enc		CBC dec		CTR
# Apple A7	2.39		1.20		1.20
# Cortex-A53	1.32		1.17/1.29(**)	1.36/1.46
# Cortex-A57(*)	1.95		0.82/0.85	0.89/0.93
# Cortex-A72	1.33		0.85/0.88	0.92/0.96
# Denver	1.96		0.65/0.86	0.76/0.80
# Mongoose	1.33		1.23/1.20	1.30/1.20
# Kryo		1.26		0.87/0.94	1.00/1.00
# ThunderX2	5.95		1.25		1.30
#
# (*)	original 3.64/1.34/1.32 results were for r0p0 revision
#	and remain the same even for the updated module;
# (**)	numbers after the slash are for 32-bit code, which is
#	3x-interleaved;
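# A minimal sketch (hypothetical, not used by this module) of how an
# interleave factor would parameterize generated round code: with
# factor $n, one aese/aesmc pair per round is emitted for $n
# independent blocks before advancing to the next round key, which
# hides instruction latency on wider cores.
sub _demo_interleave {
	my ($n, $rounds) = @_;		# interleave factor, inner round count
	my $asm = "";
	for my $r (1 .. $rounds) {
		for my $b (0 .. $n-1) {	# $n independent data registers
			$asm .= "\taese\tq$b,q8\n\taesmc\tq$b,q$b\n";
		}
		$asm .= "\tvld1.32\t{q8},[x7],#16\n";	# next round key
	}
	return $asm;
}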
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
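# For example (illustrative invocation, file names hypothetical):
#	perl aesv8-armx.pl linux64 aesv8-armx.S
# leaves $flavour="linux64" and $output="aesv8-armx.S".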
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour \"$output\""
or die "can't call $xlate: $!";
*STDOUT=*OUT;
$prefix="aes_v8";
$_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
$code=<<___;
#include "arm_arch.h"
#if __ARM_MAX_ARCH__>=7
___
$code.=".arch armv8-a+crypto\n.text\n" if ($flavour =~ /64/);
$code.=<<___ if ($flavour !~ /64/);
.arch armv7-a // don't confuse not-so-latest binutils with armv8 :-)
.fpu neon
#ifdef __thumb2__
.syntax unified
.thumb
# define INST(a,b,c,d) $_byte c,d|0xc,a,b
#else
.code 32
# define INST(a,b,c,d) $_byte a,b,c,d
#endif
.text
___
# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
# NEON is mostly 32-bit mnemonics, integer mostly 64-bit ones. The
# goal is to maintain both the 32- and 64-bit code within a single
# module and transliterate the common code to either flavour with
# regex voodoo.
#
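# An illustrative sketch (hypothetical helper, not part of this
# module) of the kind of regex transliteration arm-xlate.pl performs
# on the common code when targeting the 64-bit flavour:
sub _demo_xlate64 {
	my $line = shift;
	# e.g. "vld1.32 {q8},[x7]" becomes "ld1 {v8.4s},[x7]"
	$line =~ s/\bvld1\.32\s*\{q(\d+)\}/ld1 {v$1.4s}/;
	$line =~ s/\bveor\b/eor/;	# NEON veor -> AArch64 eor
	return $line;
}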
{{{
my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
$flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
$code.=<<___;
.align 5
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.globl ${prefix}_set_encrypt_key
.type ${prefix}_set_encrypt_key,%function
.align 5
${prefix}_set_encrypt_key:
.Lenc_key:
___
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
add x29,sp,#0
___
$code.=<<___;
mov $ptr,#-1
cmp $inp,#0
b.eq .Lenc_key_abort
cmp $out,#0
b.eq .Lenc_key_abort
mov $ptr,#-2
cmp $bits,#128
b.lt .Lenc_key_abort
cmp $bits,#256
b.gt .Lenc_key_abort
tst $bits,#0x3f
b.ne .Lenc_key_abort
adr $ptr,.Lrcon
cmp $bits,#192
veor $zero,$zero,$zero
vld1.8 {$in0},[$inp],#16
mov $bits,#8 // reuse $bits
vld1.32 {$rcon,$mask},[$ptr],#32
b.lt .Loop128
b.eq .L192
b .L256
.align 4
.Loop128:
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
aese $key,$zero
subs $bits,$bits,#1
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $key,$key,$rcon
veor $in0,$in0,$tmp
vshl.u8 $rcon,$rcon,#1
veor $in0,$in0,$key
b.ne .Loop128
vld1.32 {$rcon},[$ptr]
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
aese $key,$zero
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $key,$key,$rcon
veor $in0,$in0,$tmp
vshl.u8 $rcon,$rcon,#1
veor $in0,$in0,$key
vtbl.8 $key,{$in0},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in0},[$out],#16
aese $key,$zero
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $key,$key,$rcon
veor $in0,$in0,$tmp
veor $in0,$in0,$key
vst1.32 {$in0},[$out]
add $out,$out,#0x50
mov $rounds,#10
b .Ldone
.align 4
.L192:
vld1.8 {$in1},[$inp],#8
vmov.i8 $key,#8 // borrow $key
vst1.32 {$in0},[$out],#16
vsub.i8 $mask,$mask,$key // adjust the mask
.Loop192:
vtbl.8 $key,{$in1},$mask
vext.8 $tmp,$zero,$in0,#12
#ifdef __ARMEB__
vst1.32 {$in1},[$out],#16
sub $out,$out,#8
#else
vst1.32 {$in1},[$out],#8
#endif
aese $key,$zero
subs $bits,$bits,#1
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vdup.32 $tmp,${in0}[3]
veor $tmp,$tmp,$in1
veor $key,$key,$rcon
vext.8 $in1,$zero,$in1,#12
vshl.u8 $rcon,$rcon,#1
veor $in1,$in1,$tmp
veor $in0,$in0,$key
veor $in1,$in1,$key
vst1.32 {$in0},[$out],#16
b.ne .Loop192
mov $rounds,#12
add $out,$out,#0x20
b .Ldone
.align 4
.L256:
vld1.8 {$in1},[$inp]
mov $bits,#7
mov $rounds,#14
vst1.32 {$in0},[$out],#16
.Loop256:
vtbl.8 $key,{$in1},$mask
vext.8 $tmp,$zero,$in0,#12
vst1.32 {$in1},[$out],#16
aese $key,$zero
subs $bits,$bits,#1
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in0,$in0,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $key,$key,$rcon
veor $in0,$in0,$tmp
vshl.u8 $rcon,$rcon,#1
veor $in0,$in0,$key
vst1.32 {$in0},[$out],#16
b.eq .Ldone
vdup.32 $key,${in0}[3] // just splat
vext.8 $tmp,$zero,$in1,#12
aese $key,$zero
veor $in1,$in1,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in1,$in1,$tmp
vext.8 $tmp,$zero,$tmp,#12
veor $in1,$in1,$tmp
veor $in1,$in1,$key
b .Loop256
.Ldone:
str $rounds,[$out]
mov $ptr,#0
.Lenc_key_abort:
mov x0,$ptr // return value
`"ldr x29,[sp],#16" if ($flavour =~ /64/)`
ret
.size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
.globl ${prefix}_set_decrypt_key
.type ${prefix}_set_decrypt_key,%function
.align 5
${prefix}_set_decrypt_key:
___
$code.=<<___ if ($flavour =~ /64/);
.inst 0xd503233f // paciasp
stp x29,x30,[sp,#-16]!
add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
stmdb sp!,{r4,lr}
___
$code.=<<___;
bl .Lenc_key
cmp x0,#0
b.ne .Ldec_key_abort
sub $out,$out,#240 // restore original $out
mov x4,#-16
add $inp,$out,x12,lsl#4 // end of key schedule
vld1.32 {v0.16b},[$out]
vld1.32 {v1.16b},[$inp]
vst1.32 {v0.16b},[$inp],x4
vst1.32 {v1.16b},[$out],#16
.Loop_imc:
vld1.32 {v0.16b},[$out]
vld1.32 {v1.16b},[$inp]
aesimc v0.16b,v0.16b
aesimc v1.16b,v1.16b
vst1.32 {v0.16b},[$inp],x4
vst1.32 {v1.16b},[$out],#16
cmp $inp,$out
b.hi .Loop_imc
vld1.32 {v0.16b},[$out]
aesimc v0.16b,v0.16b
vst1.32 {v0.16b},[$inp]
eor x0,x0,x0 // return value
.Ldec_key_abort:
___
$code.=<<___ if ($flavour !~ /64/);
ldmia sp!,{r4,pc}
___
$code.=<<___ if ($flavour =~ /64/);
ldp x29,x30,[sp],#16
.inst 0xd50323bf // autiasp
ret
___
$code.=<<___;
.size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
___
}}}
{{{
sub gen_block () {
my $dir = shift;
my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
my ($inp,$out,$key)=map("x$_",(0..2));
my $rounds="w3";
my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
$code.=<<___;
.globl ${prefix}_${dir}crypt
.type ${prefix}_${dir}crypt,%function
.align 5
${prefix}_${dir}crypt:
ldr $rounds,[$key,#240]
vld1.32 {$rndkey0},[$key],#16
vld1.8 {$inout},[$inp]
sub $rounds,$rounds,#2
vld1.32 {$rndkey1},[$key],#16
.Loop_${dir}c:
aes$e $inout,$rndkey0
aes$mc $inout,$inout
vld1.32 {$rndkey0},[$key],#16
subs $rounds,$rounds,#2
aes$e $inout,$rndkey1
aes$mc $inout,$inout
vld1.32 {$rndkey1},[$key],#16
b.gt .Loop_${dir}c
aes$e $inout,$rndkey0
aes$mc $inout,$inout
vld1.32 {$rndkey0},[$key]
aes$e $inout,$rndkey1
veor $inout,$inout,$rndkey0
vst1.8 {$inout},[$out]
ret
.size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
___
}
&gen_block("en");
&gen_block("de");
}}}
# Performance in cycles per byte.
# Processed with AES-ECB for different key sizes.
# Values before and after the optimization are shown below
# (before/after):
#
#		AES-128-ECB	AES-192-ECB	AES-256-ECB
# Cortex-A57	1.85/0.82	2.16/0.96	2.47/1.10
# Cortex-A72	1.64/0.85	1.82/0.99	2.13/1.14
#
# The optimization is implemented by loop unrolling and interleaving.
# Commonly, we choose 5 as the unrolling factor; if the input data
# size is smaller than 5 blocks, but not smaller than 3 blocks, we
# choose 3 as the unrolling factor.
# If the input data size dsize >= 5*16 bytes, then take 5 blocks as
# one iteration; on every loop the remaining size lsize -= 5*16.
# If 5*16 > lsize >= 3*16 bytes, take 3 blocks as one iteration; on
# every loop lsize -= 3*16.
# If lsize < 3*16 bytes, treat them as the tail and interleave the
# AES instructions for the two blocks.
# There is one special case: if the original input data size dsize
# = 16 bytes, we treat it separately to improve performance: one
# independent code block without LR and FP load/store, just like the
# original ECB implementation does. (A plain-Perl sketch of this
# dispatch follows this comment.)
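# A minimal plain-Perl sketch (hypothetical, for illustration only)
# of the block-count dispatch described above; it returns the
# sequence of per-iteration block counts the generated code
# effectively executes for a 16-byte-multiple input size:
sub _demo_ecb_schedule {
	my $dsize = shift;	# input size in bytes (multiple of 16)
	return ("single-block path") if ($dsize == 16);
	my @iters;
	my $lsize = $dsize;
	while ($lsize >= 5*16) { push @iters, 5; $lsize -= 5*16; }
	while ($lsize >= 3*16) { push @iters, 3; $lsize -= 3*16; }
	push @iters, $lsize/16 if ($lsize > 0);	# 1- or 2-block tail
	return @iters;
}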
{{{
my ($inp,$out,$len,$key)=map("x$_",(0..3));
my ($enc,$rounds,$cnt,$key_,$step)=("w4","w5","w6","x7","x8");
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$tmp2,$rndlast)=map("q$_",(0..7));
my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
### q7		last round key
### q10-q15, q7	last 7 round keys
### q8-q9	preloaded round keys except last 7 keys for big size
### q5, q6, q8-q9	preloaded round keys except last 7 keys for the 16-byte-only case
{
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
my ($dat3,$in3,$tmp3); # used only in 64-bit mode
my ($dat4,$in4,$tmp4);
if ($flavour =~ /64/) {
($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
}
$code.=<<___;
.globl ${prefix}_ecb_encrypt
.type ${prefix}_ecb_encrypt,%function
.align 5
${prefix}_ecb_encrypt:
___
$code.=<<___ if ($flavour =~ /64/);
subs $len,$len,#16
// Original input data size is bigger than 16: jump to big-size processing.
b.ne .Lecb_big_size
vld1.8 {$dat0},[$inp]
cmp $enc,#0 // en- or decrypting?
ldr $rounds,[$key,#240]
vld1.32 {q5-q6},[$key],#32 // load key schedule...
b.eq .Lecb_small_dec
aese $dat0,q5
aesmc $dat0,$dat0
vld1.32 {q8-q9},[$key],#32 // load key schedule...
aese $dat0,q6
aesmc $dat0,$dat0
subs $rounds,$rounds,#10 // if rounds==10, jump to aes-128-ecb processing
b.eq .Lecb_128_enc
.Lecb_round_loop:
aese $dat0,q8
aesmc $dat0,$dat0
vld1.32 {q8},[$key],#16 // load key schedule...
aese $dat0,q9
aesmc $dat0,$dat0
vld1.32 {q9},[$key],#16 // load key schedule...
subs $rounds,$rounds,#2 // bias
b.gt .Lecb_round_loop
.Lecb_128_enc:
vld1.32 {q10-q11},[$key],#32 // load key schedule...
aese $dat0,q8
aesmc $dat0,$dat0
aese $dat0,q9
aesmc $dat0,$dat0
vld1.32 {q12-q13},[$key],#32 // load key schedule...
aese $dat0,q10
aesmc $dat0,$dat0
aese $dat0,q11
aesmc $dat0,$dat0
vld1.32 {q14-q15},[$key],#32 // load key schedule...
aese $dat0,q12
aesmc $dat0,$dat0
aese $dat0,q13
aesmc $dat0,$dat0
vld1.32 {$rndlast},[$key]
aese $dat0,q14
aesmc $dat0,$dat0
aese $dat0,q15
veor $dat0,$dat0,$rndlast
vst1.8 {$dat0},[$out]
b .Lecb_Final_abort
.Lecb_small_dec:
aesd $dat0,q5
aesimc $dat0,$dat0
vld1.32 {q8-q9},[$key],#32 // load key schedule...
aesd $dat0,q6
aesimc $dat0,$dat0
subs $rounds,$rounds,#10 // bias
b.eq .Lecb_128_dec
.Lecb_dec_round_loop:
aesd $dat0,q8
aesimc $dat0,$dat0
vld1.32 {q8},[$key],#16 // load key schedule...
aesd $dat0,q9
aesimc $dat0,$dat0
vld1.32 {q9},[$key],#16 // load key schedule...
subs $rounds,$rounds,#2 // bias
b.gt .Lecb_dec_round_loop
.Lecb_128_dec:
vld1.32 {q10-q11},[$key],#32 // load key schedule...
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat0,q9
aesimc $dat0,$dat0
vld1.32 {q12-q13},[$key],#32 // load key schedule...
aesd $dat0,q10
aesimc $dat0,$dat0
aesd $dat0,q11
aesimc $dat0,$dat0
vld1.32 {q14-q15},[$key],#32 // load key schedule...
aesd $dat0,q12
aesimc $dat0,$dat0
aesd $dat0,q13
aesimc $dat0,$dat0
vld1.32 {$rndlast},[$key]
aesd $dat0,q14
aesimc $dat0,$dat0
aesd $dat0,q15
veor $dat0,$dat0,$rndlast
vst1.8 {$dat0},[$out]
b .Lecb_Final_abort
.Lecb_big_size:
___
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
mov ip,sp
stmdb sp!,{r4-r8,lr}
vstmdb sp!,{d8-d15} @ ABI specification says so
ldmia ip,{r4-r5} @ load remaining args
subs $len,$len,#16
___
$code.=<<___;
mov $step,#16
b.lo .Lecb_done
cclr $step,eq
cmp $enc,#0 // en- or decrypting?
ldr $rounds,[$key,#240]
and $len,$len,#-16
vld1.8 {$dat},[$inp],$step
vld1.32 {q8-q9},[$key] // load key schedule...
sub $rounds,$rounds,#6
add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
sub $rounds,$rounds,#2
vld1.32 {q10-q11},[$key_],#32
vld1.32 {q12-q13},[$key_],#32
vld1.32 {q14-q15},[$key_],#32
vld1.32 {$rndlast},[$key_]
add $key_,$key,#32
mov $cnt,$rounds
b.eq .Lecb_dec
vld1.8 {$dat1},[$inp],#16
subs $len,$len,#32 // bias
add $cnt,$rounds,#2
vorr $in1,$dat1,$dat1
vorr $dat2,$dat1,$dat1
vorr $dat1,$dat,$dat
b.lo .Lecb_enc_tail
vorr $dat1,$in1,$in1
vld1.8 {$dat2},[$inp],#16
___
$code.=<<___ if ($flavour =~ /64/);
cmp $len,#32
b.lo .Loop3x_ecb_enc
vld1.8 {$dat3},[$inp],#16
vld1.8 {$dat4},[$inp],#16
sub $len,$len,#32 // bias
mov $cnt,$rounds
.Loop5x_ecb_enc:
aese $dat0,q8
aesmc $dat0,$dat0
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
aese $dat3,q8
aesmc $dat3,$dat3
aese $dat4,q8
aesmc $dat4,$dat4
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aese $dat0,q9
aesmc $dat0,$dat0
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
aese $dat3,q9
aesmc $dat3,$dat3
aese $dat4,q9
aesmc $dat4,$dat4
vld1.32 {q9},[$key_],#16
b.gt .Loop5x_ecb_enc
aese $dat0,q8
aesmc $dat0,$dat0
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
aese $dat3,q8
aesmc $dat3,$dat3
aese $dat4,q8
aesmc $dat4,$dat4
cmp $len,#0x40 // because .Lecb_enc_tail4x
sub $len,$len,#0x50
aese $dat0,q9
aesmc $dat0,$dat0
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
aese $dat3,q9
aesmc $dat3,$dat3
aese $dat4,q9
aesmc $dat4,$dat4
csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
mov $key_,$key
aese $dat0,q10
aesmc $dat0,$dat0
aese $dat1,q10
aesmc $dat1,$dat1
aese $dat2,q10
aesmc $dat2,$dat2
aese $dat3,q10
aesmc $dat3,$dat3
aese $dat4,q10
aesmc $dat4,$dat4
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat4
// are loaded with last "words"
add x6,$len,#0x60 // because .Lecb_enc_tail4x
aese $dat0,q11
aesmc $dat0,$dat0
aese $dat1,q11
aesmc $dat1,$dat1
aese $dat2,q11
aesmc $dat2,$dat2
aese $dat3,q11
aesmc $dat3,$dat3
aese $dat4,q11
aesmc $dat4,$dat4
aese $dat0,q12
aesmc $dat0,$dat0
aese $dat1,q12
aesmc $dat1,$dat1
aese $dat2,q12
aesmc $dat2,$dat2
aese $dat3,q12
aesmc $dat3,$dat3
aese $dat4,q12
aesmc $dat4,$dat4
aese $dat0,q13
aesmc $dat0,$dat0
aese $dat1,q13
aesmc $dat1,$dat1
aese $dat2,q13
aesmc $dat2,$dat2
aese $dat3,q13
aesmc $dat3,$dat3
aese $dat4,q13
aesmc $dat4,$dat4
aese $dat0,q14
aesmc $dat0,$dat0
aese $dat1,q14
aesmc $dat1,$dat1
aese $dat2,q14
aesmc $dat2,$dat2
aese $dat3,q14
aesmc $dat3,$dat3
aese $dat4,q14
aesmc $dat4,$dat4
aese $dat0,q15
vld1.8 {$in0},[$inp],#16
aese $dat1,q15
vld1.8 {$in1},[$inp],#16
aese $dat2,q15
vld1.8 {$in2},[$inp],#16
aese $dat3,q15
vld1.8 {$in3},[$inp],#16
aese $dat4,q15
vld1.8 {$in4},[$inp],#16
cbz x6,.Lecb_enc_tail4x
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
veor $tmp0,$rndlast,$dat0
vorr $dat0,$in0,$in0
veor $tmp1,$rndlast,$dat1
vorr $dat1,$in1,$in1
veor $tmp2,$rndlast,$dat2
vorr $dat2,$in2,$in2
veor $tmp3,$rndlast,$dat3
vorr $dat3,$in3,$in3
veor $tmp4,$rndlast,$dat4
vst1.8 {$tmp0},[$out],#16
vorr $dat4,$in4,$in4
vst1.8 {$tmp1},[$out],#16
mov $cnt,$rounds
vst1.8 {$tmp2},[$out],#16
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b.hs .Loop5x_ecb_enc
add $len,$len,#0x50
cbz $len,.Lecb_done
add $cnt,$rounds,#2
subs $len,$len,#0x30
vorr $dat0,$in2,$in2
vorr $dat1,$in3,$in3
vorr $dat2,$in4,$in4
b.lo .Lecb_enc_tail
b .Loop3x_ecb_enc
.align 4
.Lecb_enc_tail4x:
veor $tmp1,$rndlast,$dat1
veor $tmp2,$rndlast,$dat2
veor $tmp3,$rndlast,$dat3
veor $tmp4,$rndlast,$dat4
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b .Lecb_done
.align 4
___
$code.=<<___;
.Loop3x_ecb_enc:
aese $dat0,q8
aesmc $dat0,$dat0
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aese $dat0,q9
aesmc $dat0,$dat0
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Loop3x_ecb_enc
aese $dat0,q8
aesmc $dat0,$dat0
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
subs $len,$len,#0x30
mov.lo x6,$len // x6 ($cnt) is zero at this point
aese $dat0,q9
aesmc $dat0,$dat0
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat2
// are loaded with last "words"
mov $key_,$key
aese $dat0,q12
aesmc $dat0,$dat0
aese $dat1,q12
aesmc $dat1,$dat1
aese $dat2,q12
aesmc $dat2,$dat2
vld1.8 {$in0},[$inp],#16
aese $dat0,q13
aesmc $dat0,$dat0
aese $dat1,q13
aesmc $dat1,$dat1
aese $dat2,q13
aesmc $dat2,$dat2
vld1.8 {$in1},[$inp],#16
aese $dat0,q14
aesmc $dat0,$dat0
aese $dat1,q14
aesmc $dat1,$dat1
aese $dat2,q14
aesmc $dat2,$dat2
vld1.8 {$in2},[$inp],#16
aese $dat0,q15
aese $dat1,q15
aese $dat2,q15
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
add $cnt,$rounds,#2
veor $tmp0,$rndlast,$dat0
veor $tmp1,$rndlast,$dat1
veor $dat2,$dat2,$rndlast
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp0},[$out],#16
vorr $dat0,$in0,$in0
vst1.8 {$tmp1},[$out],#16
vorr $dat1,$in1,$in1
vst1.8 {$dat2},[$out],#16
vorr $dat2,$in2,$in2
b.hs .Loop3x_ecb_enc
cmn $len,#0x30
b.eq .Lecb_done
nop
.Lecb_enc_tail:
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Lecb_enc_tail
aese $dat1,q8
aesmc $dat1,$dat1
aese $dat2,q8
aesmc $dat2,$dat2
aese $dat1,q9
aesmc $dat1,$dat1
aese $dat2,q9
aesmc $dat2,$dat2
aese $dat1,q12
aesmc $dat1,$dat1
aese $dat2,q12
aesmc $dat2,$dat2
cmn $len,#0x20
aese $dat1,q13
aesmc $dat1,$dat1
aese $dat2,q13
aesmc $dat2,$dat2
aese $dat1,q14
aesmc $dat1,$dat1
aese $dat2,q14
aesmc $dat2,$dat2
aese $dat1,q15
aese $dat2,q15
b.eq .Lecb_enc_one
veor $tmp1,$rndlast,$dat1
veor $tmp2,$rndlast,$dat2
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
b .Lecb_done
.Lecb_enc_one:
veor $tmp1,$rndlast,$dat2
vst1.8 {$tmp1},[$out],#16
b .Lecb_done
___
$code.=<<___;
.align 5
.Lecb_dec:
vld1.8 {$dat1},[$inp],#16
subs $len,$len,#32 // bias
add $cnt,$rounds,#2
vorr $in1,$dat1,$dat1
vorr $dat2,$dat1,$dat1
vorr $dat1,$dat,$dat
b.lo .Lecb_dec_tail
vorr $dat1,$in1,$in1
vld1.8 {$dat2},[$inp],#16
___
$code.=<<___ if ($flavour =~ /64/);
cmp $len,#32
b.lo .Loop3x_ecb_dec
vld1.8 {$dat3},[$inp],#16
vld1.8 {$dat4},[$inp],#16
sub $len,$len,#32 // bias
mov $cnt,$rounds
.Loop5x_ecb_dec:
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat3,q8
aesimc $dat3,$dat3
aesd $dat4,q8
aesimc $dat4,$dat4
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat3,q9
aesimc $dat3,$dat3
aesd $dat4,q9
aesimc $dat4,$dat4
vld1.32 {q9},[$key_],#16
b.gt .Loop5x_ecb_dec
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat3,q8
aesimc $dat3,$dat3
aesd $dat4,q8
aesimc $dat4,$dat4
cmp $len,#0x40 // because .Lecb_tail4x
sub $len,$len,#0x50
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat3,q9
aesimc $dat3,$dat3
aesd $dat4,q9
aesimc $dat4,$dat4
csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
mov $key_,$key
aesd $dat0,q10
aesimc $dat0,$dat0
aesd $dat1,q10
aesimc $dat1,$dat1
aesd $dat2,q10
aesimc $dat2,$dat2
aesd $dat3,q10
aesimc $dat3,$dat3
aesd $dat4,q10
aesimc $dat4,$dat4
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat4
// are loaded with last "words"
add x6,$len,#0x60 // because .Lecb_tail4x
aesd $dat0,q11
aesimc $dat0,$dat0
aesd $dat1,q11
aesimc $dat1,$dat1
aesd $dat2,q11
aesimc $dat2,$dat2
aesd $dat3,q11
aesimc $dat3,$dat3
aesd $dat4,q11
aesimc $dat4,$dat4
aesd $dat0,q12
aesimc $dat0,$dat0
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
aesd $dat3,q12
aesimc $dat3,$dat3
aesd $dat4,q12
aesimc $dat4,$dat4
aesd $dat0,q13
aesimc $dat0,$dat0
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
aesd $dat3,q13
aesimc $dat3,$dat3
aesd $dat4,q13
aesimc $dat4,$dat4
aesd $dat0,q14
aesimc $dat0,$dat0
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
aesd $dat3,q14
aesimc $dat3,$dat3
aesd $dat4,q14
aesimc $dat4,$dat4
aesd $dat0,q15
vld1.8 {$in0},[$inp],#16
aesd $dat1,q15
vld1.8 {$in1},[$inp],#16
aesd $dat2,q15
vld1.8 {$in2},[$inp],#16
aesd $dat3,q15
vld1.8 {$in3},[$inp],#16
aesd $dat4,q15
vld1.8 {$in4},[$inp],#16
cbz x6,.Lecb_tail4x
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
veor $tmp0,$rndlast,$dat0
vorr $dat0,$in0,$in0
veor $tmp1,$rndlast,$dat1
vorr $dat1,$in1,$in1
veor $tmp2,$rndlast,$dat2
vorr $dat2,$in2,$in2
veor $tmp3,$rndlast,$dat3
vorr $dat3,$in3,$in3
veor $tmp4,$rndlast,$dat4
vst1.8 {$tmp0},[$out],#16
vorr $dat4,$in4,$in4
vst1.8 {$tmp1},[$out],#16
mov $cnt,$rounds
vst1.8 {$tmp2},[$out],#16
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b.hs .Loop5x_ecb_dec
add $len,$len,#0x50
cbz $len,.Lecb_done
add $cnt,$rounds,#2
subs $len,$len,#0x30
vorr $dat0,$in2,$in2
vorr $dat1,$in3,$in3
vorr $dat2,$in4,$in4
b.lo .Lecb_dec_tail
b .Loop3x_ecb_dec
.align 4
.Lecb_tail4x:
veor $tmp1,$rndlast,$dat1
veor $tmp2,$rndlast,$dat2
veor $tmp3,$rndlast,$dat3
veor $tmp4,$rndlast,$dat4
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b .Lecb_done
.align 4
___
$code.=<<___;
.Loop3x_ecb_dec:
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Loop3x_ecb_dec
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
subs $len,$len,#0x30
mov.lo x6,$len // x6 ($cnt) is zero at this point
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat2
// are loaded with last "words"
mov $key_,$key
aesd $dat0,q12
aesimc $dat0,$dat0
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
vld1.8 {$in0},[$inp],#16
aesd $dat0,q13
aesimc $dat0,$dat0
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
vld1.8 {$in1},[$inp],#16
aesd $dat0,q14
aesimc $dat0,$dat0
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
vld1.8 {$in2},[$inp],#16
aesd $dat0,q15
aesd $dat1,q15
aesd $dat2,q15
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
add $cnt,$rounds,#2
veor $tmp0,$rndlast,$dat0
veor $tmp1,$rndlast,$dat1
veor $dat2,$dat2,$rndlast
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp0},[$out],#16
vorr $dat0,$in0,$in0
vst1.8 {$tmp1},[$out],#16
vorr $dat1,$in1,$in1
vst1.8 {$dat2},[$out],#16
vorr $dat2,$in2,$in2
b.hs .Loop3x_ecb_dec
cmn $len,#0x30
b.eq .Lecb_done
nop
.Lecb_dec_tail:
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Lecb_dec_tail
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
cmn $len,#0x20
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
aesd $dat1,q15
aesd $dat2,q15
b.eq .Lecb_dec_one
veor $tmp1,$rndlast,$dat1
veor $tmp2,$rndlast,$dat2
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
b .Lecb_done
.Lecb_dec_one:
veor $tmp1,$rndlast,$dat2
vst1.8 {$tmp1},[$out],#16
.Lecb_done:
___
}
$code.=<<___ if ($flavour !~ /64/);
vldmia sp!,{d8-d15}
ldmia sp!,{r4-r8,pc}
___
$code.=<<___ if ($flavour =~ /64/);
ldr x29,[sp],#16
___
$code.=<<___ if ($flavour =~ /64/);
.Lecb_Final_abort:
ret
___
$code.=<<___;
.size ${prefix}_ecb_encrypt,.-${prefix}_ecb_encrypt
___
}}}
{{{
my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
### q8-q15 preloaded key schedule
$code.=<<___;
.globl ${prefix}_cbc_encrypt
.type ${prefix}_cbc_encrypt,%function
.align 5
${prefix}_cbc_encrypt:
___
$code.=<<___ if ($flavour =~ /64/);
stp x29,x30,[sp,#-16]!
add x29,sp,#0
___
$code.=<<___ if ($flavour !~ /64/);
mov ip,sp
stmdb sp!,{r4-r8,lr}
vstmdb sp!,{d8-d15} @ ABI specification says so
ldmia ip,{r4-r5} @ load remaining args
___
$code.=<<___;
subs $len,$len,#16
mov $step,#16
b.lo .Lcbc_abort
cclr $step,eq
cmp $enc,#0 // en- or decrypting?
ldr $rounds,[$key,#240]
and $len,$len,#-16
vld1.8 {$ivec},[$ivp]
vld1.8 {$dat},[$inp],$step
vld1.32 {q8-q9},[$key] // load key schedule...
sub $rounds,$rounds,#6
add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
sub $rounds,$rounds,#2
vld1.32 {q10-q11},[$key_],#32
vld1.32 {q12-q13},[$key_],#32
vld1.32 {q14-q15},[$key_],#32
vld1.32 {$rndlast},[$key_]
add $key_,$key,#32
mov $cnt,$rounds
b.eq .Lcbc_dec
cmp $rounds,#2
veor $dat,$dat,$ivec
veor $rndzero_n_last,q8,$rndlast
b.eq .Lcbc_enc128
vld1.32 {$in0-$in1},[$key_]
add $key_,$key,#16
add $key4,$key,#16*4
add $key5,$key,#16*5
aese $dat,q8
aesmc $dat,$dat
add $key6,$key,#16*6
add $key7,$key,#16*7
b .Lenter_cbc_enc
.align 4
.Loop_cbc_enc:
aese $dat,q8
aesmc $dat,$dat
vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc:
aese $dat,q9
aesmc $dat,$dat
aese $dat,$in0
aesmc $dat,$dat
vld1.32 {q8},[$key4]
cmp $rounds,#4
aese $dat,$in1
aesmc $dat,$dat
vld1.32 {q9},[$key5]
b.eq .Lcbc_enc192
aese $dat,q8
aesmc $dat,$dat
vld1.32 {q8},[$key6]
aese $dat,q9
aesmc $dat,$dat
vld1.32 {q9},[$key7]
nop
.Lcbc_enc192:
aese $dat,q8
aesmc $dat,$dat
subs $len,$len,#16
aese $dat,q9
aesmc $dat,$dat
cclr $step,eq
aese $dat,q10
aesmc $dat,$dat
aese $dat,q11
aesmc $dat,$dat
vld1.8 {q8},[$inp],$step
aese $dat,q12
aesmc $dat,$dat
veor q8,q8,$rndzero_n_last
aese $dat,q13
aesmc $dat,$dat
vld1.32 {q9},[$key_] // re-pre-load rndkey[1]
aese $dat,q14
aesmc $dat,$dat
aese $dat,q15
veor $ivec,$dat,$rndlast
b.hs .Loop_cbc_enc
vst1.8 {$ivec},[$out],#16
b .Lcbc_done
.align 5
.Lcbc_enc128:
vld1.32 {$in0-$in1},[$key_]
aese $dat,q8
aesmc $dat,$dat
b .Lenter_cbc_enc128
.Loop_cbc_enc128:
aese $dat,q8
aesmc $dat,$dat
vst1.8 {$ivec},[$out],#16
.Lenter_cbc_enc128:
aese $dat,q9
aesmc $dat,$dat
subs $len,$len,#16
aese $dat,$in0
aesmc $dat,$dat
cclr $step,eq
aese $dat,$in1
aesmc $dat,$dat
aese $dat,q10
aesmc $dat,$dat
aese $dat,q11
aesmc $dat,$dat
vld1.8 {q8},[$inp],$step
aese $dat,q12
aesmc $dat,$dat
aese $dat,q13
aesmc $dat,$dat
aese $dat,q14
aesmc $dat,$dat
veor q8,q8,$rndzero_n_last
aese $dat,q15
veor $ivec,$dat,$rndlast
b.hs .Loop_cbc_enc128
vst1.8 {$ivec},[$out],#16
b .Lcbc_done
___
{
my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
my ($dat3,$in3,$tmp3); # used only in 64-bit mode
my ($dat4,$in4,$tmp4);
if ($flavour =~ /64/) {
($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
}
$code.=<<___;
.align 5
.Lcbc_dec:
vld1.8 {$dat2},[$inp],#16
subs $len,$len,#32 // bias
add $cnt,$rounds,#2
vorr $in1,$dat,$dat
vorr $dat1,$dat,$dat
vorr $in2,$dat2,$dat2
b.lo .Lcbc_dec_tail
vorr $dat1,$dat2,$dat2
vld1.8 {$dat2},[$inp],#16
vorr $in0,$dat,$dat
vorr $in1,$dat1,$dat1
vorr $in2,$dat2,$dat2
___
$code.=<<___ if ($flavour =~ /64/);
cmp $len,#32
b.lo .Loop3x_cbc_dec
vld1.8 {$dat3},[$inp],#16
vld1.8 {$dat4},[$inp],#16
sub $len,$len,#32 // bias
mov $cnt,$rounds
vorr $in3,$dat3,$dat3
vorr $in4,$dat4,$dat4
.Loop5x_cbc_dec:
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat3,q8
aesimc $dat3,$dat3
aesd $dat4,q8
aesimc $dat4,$dat4
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat3,q9
aesimc $dat3,$dat3
aesd $dat4,q9
aesimc $dat4,$dat4
vld1.32 {q9},[$key_],#16
b.gt .Loop5x_cbc_dec
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat3,q8
aesimc $dat3,$dat3
aesd $dat4,q8
aesimc $dat4,$dat4
cmp $len,#0x40 // because .Lcbc_tail4x
sub $len,$len,#0x50
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat3,q9
aesimc $dat3,$dat3
aesd $dat4,q9
aesimc $dat4,$dat4
csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
mov $key_,$key
aesd $dat0,q10
aesimc $dat0,$dat0
aesd $dat1,q10
aesimc $dat1,$dat1
aesd $dat2,q10
aesimc $dat2,$dat2
aesd $dat3,q10
aesimc $dat3,$dat3
aesd $dat4,q10
aesimc $dat4,$dat4
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat4
// are loaded with last "words"
add x6,$len,#0x60 // because .Lcbc_tail4x
aesd $dat0,q11
aesimc $dat0,$dat0
aesd $dat1,q11
aesimc $dat1,$dat1
aesd $dat2,q11
aesimc $dat2,$dat2
aesd $dat3,q11
aesimc $dat3,$dat3
aesd $dat4,q11
aesimc $dat4,$dat4
aesd $dat0,q12
aesimc $dat0,$dat0
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
aesd $dat3,q12
aesimc $dat3,$dat3
aesd $dat4,q12
aesimc $dat4,$dat4
aesd $dat0,q13
aesimc $dat0,$dat0
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
aesd $dat3,q13
aesimc $dat3,$dat3
aesd $dat4,q13
aesimc $dat4,$dat4
aesd $dat0,q14
aesimc $dat0,$dat0
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
aesd $dat3,q14
aesimc $dat3,$dat3
aesd $dat4,q14
aesimc $dat4,$dat4
veor $tmp0,$ivec,$rndlast
aesd $dat0,q15
veor $tmp1,$in0,$rndlast
vld1.8 {$in0},[$inp],#16
aesd $dat1,q15
veor $tmp2,$in1,$rndlast
vld1.8 {$in1},[$inp],#16
aesd $dat2,q15
veor $tmp3,$in2,$rndlast
vld1.8 {$in2},[$inp],#16
aesd $dat3,q15
veor $tmp4,$in3,$rndlast
vld1.8 {$in3},[$inp],#16
aesd $dat4,q15
vorr $ivec,$in4,$in4
vld1.8 {$in4},[$inp],#16
cbz x6,.Lcbc_tail4x
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
veor $tmp0,$tmp0,$dat0
vorr $dat0,$in0,$in0
veor $tmp1,$tmp1,$dat1
vorr $dat1,$in1,$in1
veor $tmp2,$tmp2,$dat2
vorr $dat2,$in2,$in2
veor $tmp3,$tmp3,$dat3
vorr $dat3,$in3,$in3
veor $tmp4,$tmp4,$dat4
vst1.8 {$tmp0},[$out],#16
vorr $dat4,$in4,$in4
vst1.8 {$tmp1},[$out],#16
mov $cnt,$rounds
vst1.8 {$tmp2},[$out],#16
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b.hs .Loop5x_cbc_dec
add $len,$len,#0x50
cbz $len,.Lcbc_done
add $cnt,$rounds,#2
subs $len,$len,#0x30
vorr $dat0,$in2,$in2
vorr $in0,$in2,$in2
vorr $dat1,$in3,$in3
vorr $in1,$in3,$in3
vorr $dat2,$in4,$in4
vorr $in2,$in4,$in4
b.lo .Lcbc_dec_tail
b .Loop3x_cbc_dec
.align 4
.Lcbc_tail4x:
veor $tmp1,$tmp0,$dat1
veor $tmp2,$tmp2,$dat2
veor $tmp3,$tmp3,$dat3
veor $tmp4,$tmp4,$dat4
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
vst1.8 {$tmp3},[$out],#16
vst1.8 {$tmp4},[$out],#16
b .Lcbc_done
.align 4
___
$code.=<<___;
.Loop3x_cbc_dec:
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Loop3x_cbc_dec
aesd $dat0,q8
aesimc $dat0,$dat0
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
veor $tmp0,$ivec,$rndlast
subs $len,$len,#0x30
veor $tmp1,$in0,$rndlast
mov.lo x6,$len // x6 ($cnt) is zero at this point
aesd $dat0,q9
aesimc $dat0,$dat0
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
veor $tmp2,$in1,$rndlast
add $inp,$inp,x6 // $inp is adjusted in such a way that
// at exit from the loop $dat1-$dat2
// are loaded with last "words"
vorr $ivec,$in2,$in2
mov $key_,$key
aesd $dat0,q12
aesimc $dat0,$dat0
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
vld1.8 {$in0},[$inp],#16
aesd $dat0,q13
aesimc $dat0,$dat0
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
vld1.8 {$in1},[$inp],#16
aesd $dat0,q14
aesimc $dat0,$dat0
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
vld1.8 {$in2},[$inp],#16
aesd $dat0,q15
aesd $dat1,q15
aesd $dat2,q15
vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
add $cnt,$rounds,#2
veor $tmp0,$tmp0,$dat0
veor $tmp1,$tmp1,$dat1
veor $dat2,$dat2,$tmp2
vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
vst1.8 {$tmp0},[$out],#16
vorr $dat0,$in0,$in0
vst1.8 {$tmp1},[$out],#16
vorr $dat1,$in1,$in1
vst1.8 {$dat2},[$out],#16
vorr $dat2,$in2,$in2
b.hs .Loop3x_cbc_dec
cmn $len,#0x30
b.eq .Lcbc_done
nop
.Lcbc_dec_tail:
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
vld1.32 {q8},[$key_],#16
subs $cnt,$cnt,#2
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
vld1.32 {q9},[$key_],#16
b.gt .Lcbc_dec_tail
aesd $dat1,q8
aesimc $dat1,$dat1
aesd $dat2,q8
aesimc $dat2,$dat2
aesd $dat1,q9
aesimc $dat1,$dat1
aesd $dat2,q9
aesimc $dat2,$dat2
aesd $dat1,q12
aesimc $dat1,$dat1
aesd $dat2,q12
aesimc $dat2,$dat2
cmn $len,#0x20
aesd $dat1,q13
aesimc $dat1,$dat1
aesd $dat2,q13
aesimc $dat2,$dat2
veor $tmp1,$ivec,$rndlast
aesd $dat1,q14
aesimc $dat1,$dat1
aesd $dat2,q14
aesimc $dat2,$dat2
veor $tmp2,$in1,$rndlast
aesd $dat1,q15
aesd $dat2,q15
b.eq .Lcbc_dec_one
veor $tmp1,$tmp1,$dat1
veor $tmp2,$tmp2,$dat2
vorr $ivec,$in2,$in2
vst1.8 {$tmp1},[$out],#16
vst1.8 {$tmp2},[$out],#16
b .Lcbc_done
.Lcbc_dec_one:
veor $tmp1,$tmp1,$dat2
vorr $ivec,$in2,$in2
vst1.8 {$tmp1},[$out],#16
.Lcbc_done:
vst1.8 {$ivec},[$ivp]
.Lcbc_abort:
___
}
$code.=<<___ if ($flavour !~ /64/);
vldmia sp!,{d8-d15}
ldmia sp!,{r4-r8,pc}
___
$code.=<<___ if ($flavour =~ /64/);
ldr x29,[sp],#16
ret
___
$code.=<<___;
.size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
___
}}}
  1601. {{{
  1602. my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
  1603. my ($rounds,$cnt,$key_)=("w5","w6","x7");
  1604. my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
  1605. my $step="x12"; # aliases with $tctr2
  1606. my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
  1607. my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
  1608. # used only in 64-bit mode...
  1609. my ($dat3,$dat4,$in3,$in4)=map("q$_",(16..23));
  1610. my ($dat,$tmp)=($dat0,$tmp0);
  1611. ### q8-q15 preloaded key schedule
$code.=<<___;
.globl	${prefix}_ctr32_encrypt_blocks
.type	${prefix}_ctr32_encrypt_blocks,%function
.align	5
${prefix}_ctr32_encrypt_blocks:
___
$code.=<<___	if ($flavour =~ /64/);
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
___
$code.=<<___	if ($flavour !~ /64/);
	mov	ip,sp
	stmdb	sp!,{r4-r10,lr}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldr	r4, [ip]		@ load remaining arg
___
$code.=<<___;
	ldr	$rounds,[$key,#240]

	ldr	$ctr, [$ivp, #12]
#ifdef __ARMEB__
	vld1.8	{$dat0},[$ivp]
#else
	vld1.32	{$dat0},[$ivp]
#endif
	vld1.32	{q8-q9},[$key]		// load key schedule...
	sub	$rounds,$rounds,#4
	mov	$step,#16
	cmp	$len,#2
	add	$key_,$key,x5,lsl#4	// pointer to last 5 round keys
	sub	$rounds,$rounds,#2
	vld1.32	{q12-q13},[$key_],#32
	vld1.32	{q14-q15},[$key_],#32
	vld1.32	{$rndlast},[$key_]
	add	$key_,$key,#32
	mov	$cnt,$rounds
	cclr	$step,lo
#ifndef __ARMEB__
	rev	$ctr, $ctr
#endif
	vorr	$dat1,$dat0,$dat0
	add	$tctr1, $ctr, #1
	vorr	$dat2,$dat0,$dat0
	add	$ctr, $ctr, #2
	vorr	$ivec,$dat0,$dat0
	rev	$tctr1, $tctr1
	vmov.32	${dat1}[3],$tctr1
	b.ls	.Lctr32_tail
	rev	$tctr2, $ctr
	sub	$len,$len,#3		// bias
	vmov.32	${dat2}[3],$tctr2
___
$code.=<<___	if ($flavour =~ /64/);
	cmp	$len,#2
	b.lo	.Loop3x_ctr32
	add	w13,$ctr,#1
	add	w14,$ctr,#2
	vorr	$dat3,$dat0,$dat0
	rev	w13,w13
	vorr	$dat4,$dat0,$dat0
	rev	w14,w14
	vmov.32	${dat3}[3],w13
	sub	$len,$len,#2		// bias
	vmov.32	${dat4}[3],w14
	add	$ctr,$ctr,#2
	b	.Loop5x_ctr32
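// Main 5x loop (64-bit only): $len was biased by a total of 5 above,
// so the subs/b.hs at the bottom iterates while at least five more
// blocks remain.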
.align	4
.Loop5x_ctr32:
	aese	$dat0,q8
	aesmc	$dat0,$dat0
	aese	$dat1,q8
	aesmc	$dat1,$dat1
	aese	$dat2,q8
	aesmc	$dat2,$dat2
	aese	$dat3,q8
	aesmc	$dat3,$dat3
	aese	$dat4,q8
	aesmc	$dat4,$dat4
	vld1.32	{q8},[$key_],#16
	subs	$cnt,$cnt,#2
	aese	$dat0,q9
	aesmc	$dat0,$dat0
	aese	$dat1,q9
	aesmc	$dat1,$dat1
	aese	$dat2,q9
	aesmc	$dat2,$dat2
	aese	$dat3,q9
	aesmc	$dat3,$dat3
	aese	$dat4,q9
	aesmc	$dat4,$dat4
	vld1.32	{q9},[$key_],#16
	b.gt	.Loop5x_ctr32

	mov	$key_,$key
	aese	$dat0,q8
	aesmc	$dat0,$dat0
	aese	$dat1,q8
	aesmc	$dat1,$dat1
	aese	$dat2,q8
	aesmc	$dat2,$dat2
	aese	$dat3,q8
	aesmc	$dat3,$dat3
	aese	$dat4,q8
	aesmc	$dat4,$dat4
	vld1.32	{q8},[$key_],#16	// re-pre-load rndkey[0]

	aese	$dat0,q9
	aesmc	$dat0,$dat0
	aese	$dat1,q9
	aesmc	$dat1,$dat1
	aese	$dat2,q9
	aesmc	$dat2,$dat2
	aese	$dat3,q9
	aesmc	$dat3,$dat3
	aese	$dat4,q9
	aesmc	$dat4,$dat4
	vld1.32	{q9},[$key_],#16	// re-pre-load rndkey[1]

	aese	$dat0,q12
	aesmc	$dat0,$dat0
	add	$tctr0,$ctr,#1
	add	$tctr1,$ctr,#2
	aese	$dat1,q12
	aesmc	$dat1,$dat1
	add	$tctr2,$ctr,#3
	add	w13,$ctr,#4
	aese	$dat2,q12
	aesmc	$dat2,$dat2
	add	w14,$ctr,#5
	rev	$tctr0,$tctr0
	aese	$dat3,q12
	aesmc	$dat3,$dat3
	rev	$tctr1,$tctr1
	rev	$tctr2,$tctr2
	aese	$dat4,q12
	aesmc	$dat4,$dat4
	rev	w13,w13
	rev	w14,w14

	aese	$dat0,q13
	aesmc	$dat0,$dat0
	aese	$dat1,q13
	aesmc	$dat1,$dat1
	aese	$dat2,q13
	aesmc	$dat2,$dat2
	aese	$dat3,q13
	aesmc	$dat3,$dat3
	aese	$dat4,q13
	aesmc	$dat4,$dat4

	aese	$dat0,q14
	aesmc	$dat0,$dat0
	vld1.8	{$in0},[$inp],#16
	aese	$dat1,q14
	aesmc	$dat1,$dat1
	vld1.8	{$in1},[$inp],#16
	aese	$dat2,q14
	aesmc	$dat2,$dat2
	vld1.8	{$in2},[$inp],#16
	aese	$dat3,q14
	aesmc	$dat3,$dat3
	vld1.8	{$in3},[$inp],#16
	aese	$dat4,q14
	aesmc	$dat4,$dat4
	vld1.8	{$in4},[$inp],#16

	aese	$dat0,q15
	veor	$in0,$in0,$rndlast
	aese	$dat1,q15
	veor	$in1,$in1,$rndlast
	aese	$dat2,q15
	veor	$in2,$in2,$rndlast
	aese	$dat3,q15
	veor	$in3,$in3,$rndlast
	aese	$dat4,q15
	veor	$in4,$in4,$rndlast

	veor	$in0,$in0,$dat0
	vorr	$dat0,$ivec,$ivec
	veor	$in1,$in1,$dat1
	vorr	$dat1,$ivec,$ivec
	veor	$in2,$in2,$dat2
	vorr	$dat2,$ivec,$ivec
	veor	$in3,$in3,$dat3
	vorr	$dat3,$ivec,$ivec
	veor	$in4,$in4,$dat4
	vorr	$dat4,$ivec,$ivec

	vst1.8	{$in0},[$out],#16
	vmov.32	${dat0}[3],$tctr0
	vst1.8	{$in1},[$out],#16
	vmov.32	${dat1}[3],$tctr1
	vst1.8	{$in2},[$out],#16
	vmov.32	${dat2}[3],$tctr2
	vst1.8	{$in3},[$out],#16
	vmov.32	${dat3}[3],w13
	vst1.8	{$in4},[$out],#16
	vmov.32	${dat4}[3],w14

	mov	$cnt,$rounds
	cbz	$len,.Lctr32_done

	add	$ctr,$ctr,#5
	subs	$len,$len,#5
	b.hs	.Loop5x_ctr32

	add	$len,$len,#5
	sub	$ctr,$ctr,#5

	cmp	$len,#2
	mov	$step,#16
	cclr	$step,lo
	b.ls	.Lctr32_tail

	sub	$len,$len,#3		// bias
	add	$ctr,$ctr,#3
___
$code.=<<___;
	b	.Loop3x_ctr32
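// 3x interleaved loop, shared by both flavours; $len carries a bias
// of 3, so the subs/b.hs at the bottom keeps looping while at least
// three more blocks remain.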
.align	4
.Loop3x_ctr32:
	aese	$dat0,q8
	aesmc	$dat0,$dat0
	aese	$dat1,q8
	aesmc	$dat1,$dat1
	aese	$dat2,q8
	aesmc	$dat2,$dat2
	vld1.32	{q8},[$key_],#16
	subs	$cnt,$cnt,#2
	aese	$dat0,q9
	aesmc	$dat0,$dat0
	aese	$dat1,q9
	aesmc	$dat1,$dat1
	aese	$dat2,q9
	aesmc	$dat2,$dat2
	vld1.32	{q9},[$key_],#16
	b.gt	.Loop3x_ctr32

	aese	$dat0,q8
	aesmc	$tmp0,$dat0
	aese	$dat1,q8
	aesmc	$tmp1,$dat1
	vld1.8	{$in0},[$inp],#16
	vorr	$dat0,$ivec,$ivec
	aese	$dat2,q8
	aesmc	$dat2,$dat2
	vld1.8	{$in1},[$inp],#16
	vorr	$dat1,$ivec,$ivec
	aese	$tmp0,q9
	aesmc	$tmp0,$tmp0
	aese	$tmp1,q9
	aesmc	$tmp1,$tmp1
	vld1.8	{$in2},[$inp],#16
	mov	$key_,$key
	aese	$dat2,q9
	aesmc	$tmp2,$dat2
	vorr	$dat2,$ivec,$ivec
	add	$tctr0,$ctr,#1
	aese	$tmp0,q12
	aesmc	$tmp0,$tmp0
	aese	$tmp1,q12
	aesmc	$tmp1,$tmp1
	veor	$in0,$in0,$rndlast
	add	$tctr1,$ctr,#2
	aese	$tmp2,q12
	aesmc	$tmp2,$tmp2
	veor	$in1,$in1,$rndlast
	add	$ctr,$ctr,#3
	aese	$tmp0,q13
	aesmc	$tmp0,$tmp0
	aese	$tmp1,q13
	aesmc	$tmp1,$tmp1
	veor	$in2,$in2,$rndlast
	rev	$tctr0,$tctr0
	aese	$tmp2,q13
	aesmc	$tmp2,$tmp2
	vmov.32	${dat0}[3], $tctr0
	rev	$tctr1,$tctr1
	aese	$tmp0,q14
	aesmc	$tmp0,$tmp0
	aese	$tmp1,q14
	aesmc	$tmp1,$tmp1
	vmov.32	${dat1}[3], $tctr1
	rev	$tctr2,$ctr
	aese	$tmp2,q14
	aesmc	$tmp2,$tmp2
	vmov.32	${dat2}[3], $tctr2
	subs	$len,$len,#3
	aese	$tmp0,q15
	aese	$tmp1,q15
	aese	$tmp2,q15

	veor	$in0,$in0,$tmp0
	vld1.32	{q8},[$key_],#16	// re-pre-load rndkey[0]
	vst1.8	{$in0},[$out],#16
	veor	$in1,$in1,$tmp1
	mov	$cnt,$rounds
	vst1.8	{$in1},[$out],#16
	veor	$in2,$in2,$tmp2
	vld1.32	{q9},[$key_],#16	// re-pre-load rndkey[1]
	vst1.8	{$in2},[$out],#16
	b.hs	.Loop3x_ctr32

	adds	$len,$len,#3
	b.eq	.Lctr32_done
	cmp	$len,#1
	mov	$step,#16
	cclr	$step,eq
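// Tail: one or two blocks left.  With a single block $step is zero, so
// both loads below fetch the same input block and only the first result
// is stored.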
.Lctr32_tail:
	aese	$dat0,q8
	aesmc	$dat0,$dat0
	aese	$dat1,q8
	aesmc	$dat1,$dat1
	vld1.32	{q8},[$key_],#16
	subs	$cnt,$cnt,#2
	aese	$dat0,q9
	aesmc	$dat0,$dat0
	aese	$dat1,q9
	aesmc	$dat1,$dat1
	vld1.32	{q9},[$key_],#16
	b.gt	.Lctr32_tail

	aese	$dat0,q8
	aesmc	$dat0,$dat0
	aese	$dat1,q8
	aesmc	$dat1,$dat1
	aese	$dat0,q9
	aesmc	$dat0,$dat0
	aese	$dat1,q9
	aesmc	$dat1,$dat1
	vld1.8	{$in0},[$inp],$step
	aese	$dat0,q12
	aesmc	$dat0,$dat0
	aese	$dat1,q12
	aesmc	$dat1,$dat1
	vld1.8	{$in1},[$inp]
	aese	$dat0,q13
	aesmc	$dat0,$dat0
	aese	$dat1,q13
	aesmc	$dat1,$dat1
	veor	$in0,$in0,$rndlast
	aese	$dat0,q14
	aesmc	$dat0,$dat0
	aese	$dat1,q14
	aesmc	$dat1,$dat1
	veor	$in1,$in1,$rndlast
	aese	$dat0,q15
	aese	$dat1,q15

	cmp	$len,#1
	veor	$in0,$in0,$dat0
	veor	$in1,$in1,$dat1
	vst1.8	{$in0},[$out],#16
	b.eq	.Lctr32_done
	vst1.8	{$in1},[$out]

.Lctr32_done:
___
$code.=<<___	if ($flavour !~ /64/);
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r10,pc}
___
$code.=<<___	if ($flavour =~ /64/);
	ldr	x29,[sp],#16
	ret
___
$code.=<<___;
.size	${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
___
}}}
$code.=<<___;
#endif
___
########################################
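# The perlasm above is written in a single "neutral" dialect; the loops
# below translate it into either AArch64 or ARMv7 NEON syntax, encoding
# the AES instructions by hand where the assembler might not know them.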
if ($flavour =~ /64/) {			######## 64-bit code
    my %opcode = (
	"aesd"	=>	0x4e285800,	"aese"	=>	0x4e284800,
	"aesimc"=>	0x4e287800,	"aesmc"	=>	0x4e286800	);

    local *unaes = sub {
	my ($mnemonic,$arg)=@_;

	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o	&&
	sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5),
			$mnemonic,$arg;
    };
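    # For reference (the unaes substitution below is commented out):
    # unaes("aese","v0.16b,v8.16b") would yield
    #	.inst	0x4e284900	//aese v0.16b,v8.16b
    # i.e. base opcode 0x4e284800 with Rd=0 in bits 0-4, Rn=8 in bits 5-9.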
    foreach(split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/geo;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	#s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel	$2,$3,$2,$1/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vext\.8/ext/o		or
	s/vrev32\.8/rev32/o	or
	s/vtst\.8/cmtst/o	or
	s/vshr/ushr/o		or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	# fix up remaining legacy suffixes
	s/\.[ui]?8//o;
	m/\],#8/o and s/\.16b/\.8b/go;
	s/\.[ui]?32//o and s/\.16b/\.4s/go;
	s/\.[ui]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
} else {				######## 32-bit code
    my %opcode = (
	"aesd"	=>	0xf3b00340,	"aese"	=>	0xf3b00300,
	"aesimc"=>	0xf3b003c0,	"aesmc"	=>	0xf3b00380	);

    local *unaes = sub {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
					 |(($2&7)<<1) |(($2&8)<<2);
	    # emit the word byte-wise, since ARMv7 instructions are always
	    # encoded little-endian. The correct solution is to use the
	    # .inst directive, but older assemblers don't implement it:-(
	    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    };
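    # Example: unaes("aese","q0,q8") computes 0xf3b00320 (base 0xf3b00300
    # with M:Vm selecting d16, i.e. q8) and emits it byte-wise as
    #	INST(0x20,0x03,0xb0,0xf3)	@ aese q0,q8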
    sub unvtbl {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
	sprintf	"vtbl.8	d%d,{q%d},d%d\n\t".
		"vtbl.8	d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
    }
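    # e.g. "vtbl.8 q0,{q8},q9" expands to the d-register pair
    #	vtbl.8	d0,{q8},d18
    #	vtbl.8	d1,{q8},d19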
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
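    # e.g. "vdup.32 q0,q1[2]" becomes "vdup.32 q0,d3[0]": lane 2 of q1
    # lives in the low half of d3.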
    sub unvmov32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
	sprintf	"vmov.32	d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
    }
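    # e.g. "vmov.32 q0[3],r8" becomes "vmov.32 d1[1],r8": lane 3 of q0
    # is lane 1 of d1.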
    foreach(split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/geo;

	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo	or
	s/\],#[0-9]+/]!/o;

	s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
	s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2	$1,#0/o	or
	s/vtbl\.8\s+(.*)/unvtbl($1)/geo			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo		or
	s/vmov\.32\s+(.*)/unvmov32($1)/geo		or
	s/^(\s+)b\./$1b/o				or
	s/^(\s+)ret/$1bx\tlr/o;

	if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
	    print "	it	$2\n";
	}

	print $_,"\n";
    }
}

close STDOUT or die "error closing STDOUT: $!";