- #! /usr/bin/env perl
- # Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
- #
- # Licensed under the Apache License 2.0 (the "License"). You may not use
- # this file except in compliance with the License. You can obtain a copy
- # in the file LICENSE in the source distribution or at
- # https://www.openssl.org/source/license.html
- #
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # This module implements support for ARMv8 AES instructions. The
- # module is endian-agnostic in the sense that it supports both big-
- # and little-endian cases, and it likewise supports both 32- and
- # 64-bit modes of operation. The latter is achieved by limiting the
- # number of utilized registers to 16, which implies additional NEON
- # load and integer instructions. This has no effect on the mighty
- # Apple A7, where results are literally equal to the theoretical
- # estimates based on AES instruction latencies and issue rates. On
- # Cortex-A53, an in-order execution core, this costs up to 10-15%,
- # which is partially compensated by implementing a dedicated code
- # path for the 128-bit CBC encrypt case. On Cortex-A57, performance
- # of the parallelizable modes seems to be limited by the sheer
- # amount of NEON instructions...
- #
- # April 2019
- #
- # Key to the performance of the parallelizable modes is round
- # instruction interleaving. But which factor to use? There is an
- # optimal one for each combination of instruction latency and issue
- # rate, beyond which increasing the interleave factor doesn't pay
- # off. On the cons side are code size increase and resource waste on
- # platforms for which the interleave factor is too high. In other
- # words, you want it to be just right. So far an interleave factor
- # of 3x has served all platforms well, but for ThunderX2 the optimal
- # interleave factor was measured to be 5x...
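- #
- # ("3x interleaving" here means that three blocks are kept in flight
- # and each pair of round instructions is issued for all three blocks
- # before advancing to the next round key; an illustrative sketch
- # only, with qK standing for the current round-key register:
- #
- #     aese q0,qK; aesmc q0,q0
- #     aese q1,qK; aesmc q1,q1
- #     aese q2,qK; aesmc q2,q2
- #
- # as in the .Loop3x_* loops below; the 5x loops extend the same
- # pattern to five blocks.)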
- #
- # Performance in cycles per byte processed with 128-bit key:
- #
- #                CBC enc      CBC dec        CTR
- # Apple A7       2.39         1.20           1.20
- # Cortex-A53     1.32         1.17/1.29(**)  1.36/1.46
- # Cortex-A57(*)  1.95         0.82/0.85      0.89/0.93
- # Cortex-A72     1.33         0.85/0.88      0.92/0.96
- # Denver         1.96         0.65/0.86      0.76/0.80
- # Mongoose       1.33         1.23/1.20      1.30/1.20
- # Kryo           1.26         0.87/0.94      1.00/1.00
- # ThunderX2      5.95         1.25           1.30
- #
- # (*)  original 3.64/1.34/1.32 results were for the r0p0 revision
- #      and are still the same even for the updated module;
- # (**) numbers after the slash are for 32-bit code, which is
- #      3x-interleaved;
- # $output is the last argument if it looks like a file (it has an extension)
- # $flavour is the first argument if it doesn't look like a file
- $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
- $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
- die "can't locate arm-xlate.pl";
- open OUT,"| \"$^X\" $xlate $flavour \"$output\""
- or die "can't call $xlate: $!";
- *STDOUT=*OUT;
- $prefix="aes_v8";
- $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
- $code=<<___;
- #include "arm_arch.h"
- #if __ARM_MAX_ARCH__>=7
- ___
- $code.=".arch armv8-a+crypto\n.text\n" if ($flavour =~ /64/);
- $code.=<<___ if ($flavour !~ /64/);
- .arch armv7-a // don't confuse not-so-latest binutils with armv8 :-)
- .fpu neon
- #ifdef __thumb2__
- .syntax unified
- .thumb
- # define INST(a,b,c,d) $_byte c,d|0xc,a,b
- #else
- .code 32
- # define INST(a,b,c,d) $_byte a,b,c,d
- #endif
- .text
- ___
- # Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
- # NEON is mostly 32-bit mnemonics, integer code mostly 64-bit. The
- # goal is to maintain both 32- and 64-bit code within a single module
- # and transliterate the common code to either flavour with regex
- # voodoo.
- #
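- # As a sketch of the intended mapping (the authoritative rules are
- # the substitution loops at the bottom of this file), a shared line
- # such as
- #
- #     vld1.32 {q8},[x7],#16
- #
- # is emitted for the 64-bit flavour as
- #
- #     ld1 {v16.4s},[x7],#16
- #
- # (q8-q15 are remapped to v16-v23), and for the 32-bit flavour as
- #
- #     vld1.32 {q8},[r7]!
- #
- # (x registers become r registers and explicit post-increments
- # become writeback).
- #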
- {{{
- my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
- my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
- $flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
- $code.=<<___;
- .align 5
- .Lrcon:
- .long 0x01,0x01,0x01,0x01
- .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
- .long 0x1b,0x1b,0x1b,0x1b
- .globl ${prefix}_set_encrypt_key
- .type ${prefix}_set_encrypt_key,%function
- .align 5
- ${prefix}_set_encrypt_key:
- .Lenc_key:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ___
- $code.=<<___;
- mov $ptr,#-1
- cmp $inp,#0
- b.eq .Lenc_key_abort
- cmp $out,#0
- b.eq .Lenc_key_abort
- mov $ptr,#-2
- cmp $bits,#128
- b.lt .Lenc_key_abort
- cmp $bits,#256
- b.gt .Lenc_key_abort
- tst $bits,#0x3f
- b.ne .Lenc_key_abort
- adr $ptr,.Lrcon
- cmp $bits,#192
- veor $zero,$zero,$zero
- vld1.8 {$in0},[$inp],#16
- mov $bits,#8 // reuse $bits
- vld1.32 {$rcon,$mask},[$ptr],#32
- b.lt .Loop128
- b.eq .L192
- b .L256
- .align 4
- .Loop128:
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
- subs $bits,$bits,#1
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
- b.ne .Loop128
- vld1.32 {$rcon},[$ptr]
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- veor $in0,$in0,$key
- vst1.32 {$in0},[$out]
- add $out,$out,#0x50
- mov $rounds,#10
- b .Ldone
- .align 4
- .L192:
- vld1.8 {$in1},[$inp],#8
- vmov.i8 $key,#8 // borrow $key
- vst1.32 {$in0},[$out],#16
- vsub.i8 $mask,$mask,$key // adjust the mask
- .Loop192:
- vtbl.8 $key,{$in1},$mask
- vext.8 $tmp,$zero,$in0,#12
- #ifdef __ARMEB__
- vst1.32 {$in1},[$out],#16
- sub $out,$out,#8
- #else
- vst1.32 {$in1},[$out],#8
- #endif
- aese $key,$zero
- subs $bits,$bits,#1
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vdup.32 $tmp,${in0}[3]
- veor $tmp,$tmp,$in1
- veor $key,$key,$rcon
- vext.8 $in1,$zero,$in1,#12
- vshl.u8 $rcon,$rcon,#1
- veor $in1,$in1,$tmp
- veor $in0,$in0,$key
- veor $in1,$in1,$key
- vst1.32 {$in0},[$out],#16
- b.ne .Loop192
- mov $rounds,#12
- add $out,$out,#0x20
- b .Ldone
- .align 4
- .L256:
- vld1.8 {$in1},[$inp]
- mov $bits,#7
- mov $rounds,#14
- vst1.32 {$in0},[$out],#16
- .Loop256:
- vtbl.8 $key,{$in1},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in1},[$out],#16
- aese $key,$zero
- subs $bits,$bits,#1
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
- vst1.32 {$in0},[$out],#16
- b.eq .Ldone
- vdup.32 $key,${in0}[3] // just splat
- vext.8 $tmp,$zero,$in1,#12
- aese $key,$zero
- veor $in1,$in1,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in1,$in1,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in1,$in1,$tmp
- veor $in1,$in1,$key
- b .Loop256
- .Ldone:
- str $rounds,[$out]
- mov $ptr,#0
- .Lenc_key_abort:
- mov x0,$ptr // return value
- `"ldr x29,[sp],#16" if ($flavour =~ /64/)`
- ret
- .size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
- .globl ${prefix}_set_decrypt_key
- .type ${prefix}_set_decrypt_key,%function
- .align 5
- ${prefix}_set_decrypt_key:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ___
- $code.=<<___ if ($flavour !~ /64/);
- stmdb sp!,{r4,lr}
- ___
- $code.=<<___;
- bl .Lenc_key
- cmp x0,#0
- b.ne .Ldec_key_abort
- sub $out,$out,#240 // restore original $out
- mov x4,#-16
- add $inp,$out,x12,lsl#4 // end of key schedule
- vld1.32 {v0.16b},[$out]
- vld1.32 {v1.16b},[$inp]
- vst1.32 {v0.16b},[$inp],x4
- vst1.32 {v1.16b},[$out],#16
- .Loop_imc:
- vld1.32 {v0.16b},[$out]
- vld1.32 {v1.16b},[$inp]
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- vst1.32 {v0.16b},[$inp],x4
- vst1.32 {v1.16b},[$out],#16
- cmp $inp,$out
- b.hi .Loop_imc
- vld1.32 {v0.16b},[$out]
- aesimc v0.16b,v0.16b
- vst1.32 {v0.16b},[$inp]
- eor x0,x0,x0 // return value
- .Ldec_key_abort:
- ___
- $code.=<<___ if ($flavour !~ /64/);
- ldmia sp!,{r4,pc}
- ___
- $code.=<<___ if ($flavour =~ /64/);
- ldp x29,x30,[sp],#16
- .inst 0xd50323bf // autiasp
- ret
- ___
- $code.=<<___;
- .size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
- ___
- }}}
- {{{
- sub gen_block () {
- my $dir = shift;
- my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
- my ($inp,$out,$key)=map("x$_",(0..2));
- my $rounds="w3";
- my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
- $code.=<<___;
- .globl ${prefix}_${dir}crypt
- .type ${prefix}_${dir}crypt,%function
- .align 5
- ${prefix}_${dir}crypt:
- ldr $rounds,[$key,#240]
- vld1.32 {$rndkey0},[$key],#16
- vld1.8 {$inout},[$inp]
- sub $rounds,$rounds,#2
- vld1.32 {$rndkey1},[$key],#16
- .Loop_${dir}c:
- aes$e $inout,$rndkey0
- aes$mc $inout,$inout
- vld1.32 {$rndkey0},[$key],#16
- subs $rounds,$rounds,#2
- aes$e $inout,$rndkey1
- aes$mc $inout,$inout
- vld1.32 {$rndkey1},[$key],#16
- b.gt .Loop_${dir}c
- aes$e $inout,$rndkey0
- aes$mc $inout,$inout
- vld1.32 {$rndkey0},[$key]
- aes$e $inout,$rndkey1
- veor $inout,$inout,$rndkey0
- vst1.8 {$inout},[$out]
- ret
- .size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
- ___
- }
- &gen_block("en");
- &gen_block("de");
- }}}
- # Performance in cycles per byte, measured with AES-ECB at different
- # key sizes. The table shows the values before and after the
- # optimization (before/after):
- #
- #              AES-128-ECB    AES-192-ECB    AES-256-ECB
- # Cortex-A57   1.85/0.82      2.16/0.96      2.47/1.10
- # Cortex-A72   1.64/0.85      1.82/0.99      2.13/1.14
- #
- # The optimization is implemented by loop unrolling and interleaving.
- # Commonly the unrolling factor is 5; if the input data size is
- # smaller than 5 blocks but not smaller than 3 blocks, the unrolling
- # factor is 3.
- # If the input data size dsize >= 5*16 bytes, 5 blocks are processed
- # per iteration and the remaining size lsize is decreased by 5*16 on
- # every loop.
- # If 5*16 > lsize >= 3*16 bytes, 3 blocks are processed per iteration
- # and lsize is decreased by 3*16 on every loop.
- # If lsize < 3*16 bytes, the remainder is treated as the tail and the
- # AES instructions for the last two blocks are interleaved.
- # There is one special case: if the original input data size dsize
- # == 16 bytes, it is treated separately to improve performance, as
- # one independent code block without LR/FP load and store, just like
- # the original ECB implementation.
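- #
- # A rough sketch of the resulting control flow (illustrative
- # pseudocode only; the 16-byte fast path and the 5x loops exist only
- # in the 64-bit flavour, and the label names match the code below):
- #
- #     if (dsize == 16)       -> single-block path, no FP/LR save
- #     while (lsize >= 5*16)  -> .Loop5x_ecb_enc / .Loop5x_ecb_dec
- #     while (lsize >= 3*16)  -> .Loop3x_ecb_enc / .Loop3x_ecb_dec
- #     1-2 remaining blocks   -> .Lecb_enc_tail / .Lecb_dec_tail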
- {{{
- my ($inp,$out,$len,$key)=map("x$_",(0..3));
- my ($enc,$rounds,$cnt,$key_,$step)=("w4","w5","w6","x7","x8");
- my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$tmp2,$rndlast)=map("q$_",(0..7));
- my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
- ### q7               last round key
- ### q10-q15, q7      last 7 round keys
- ### q8-q9            preloaded round keys except the last 7 keys (big sizes)
- ### q5-q6, q8-q9     preloaded round keys except the last 7 keys (16-byte-only path)
- {
- my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
- my ($dat3,$in3,$tmp3); # used only in 64-bit mode
- my ($dat4,$in4,$tmp4);
- if ($flavour =~ /64/) {
- ($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
- }
- $code.=<<___;
- .globl ${prefix}_ecb_encrypt
- .type ${prefix}_ecb_encrypt,%function
- .align 5
- ${prefix}_ecb_encrypt:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- subs $len,$len,#16
- // if the original input data size is bigger than 16, jump to big-size processing
- b.ne .Lecb_big_size
- vld1.8 {$dat0},[$inp]
- cmp $enc,#0 // en- or decrypting?
- ldr $rounds,[$key,#240]
- vld1.32 {q5-q6},[$key],#32 // load key schedule...
- b.eq .Lecb_small_dec
- aese $dat0,q5
- aesmc $dat0,$dat0
- vld1.32 {q8-q9},[$key],#32 // load key schedule...
- aese $dat0,q6
- aesmc $dat0,$dat0
- subs $rounds,$rounds,#10 // if rounds==10, jump to aes-128-ecb processing
- b.eq .Lecb_128_enc
- .Lecb_round_loop:
- aese $dat0,q8
- aesmc $dat0,$dat0
- vld1.32 {q8},[$key],#16 // load key schedule...
- aese $dat0,q9
- aesmc $dat0,$dat0
- vld1.32 {q9},[$key],#16 // load key schedule...
- subs $rounds,$rounds,#2 // bias
- b.gt .Lecb_round_loop
- .Lecb_128_enc:
- vld1.32 {q10-q11},[$key],#32 // load key schedule...
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat0,q9
- aesmc $dat0,$dat0
- vld1.32 {q12-q13},[$key],#32 // load key schedule...
- aese $dat0,q10
- aesmc $dat0,$dat0
- aese $dat0,q11
- aesmc $dat0,$dat0
- vld1.32 {q14-q15},[$key],#32 // load key schedule...
- aese $dat0,q12
- aesmc $dat0,$dat0
- aese $dat0,q13
- aesmc $dat0,$dat0
- vld1.32 {$rndlast},[$key]
- aese $dat0,q14
- aesmc $dat0,$dat0
- aese $dat0,q15
- veor $dat0,$dat0,$rndlast
- vst1.8 {$dat0},[$out]
- b .Lecb_Final_abort
- .Lecb_small_dec:
- aesd $dat0,q5
- aesimc $dat0,$dat0
- vld1.32 {q8-q9},[$key],#32 // load key schedule...
- aesd $dat0,q6
- aesimc $dat0,$dat0
- subs $rounds,$rounds,#10 // bias
- b.eq .Lecb_128_dec
- .Lecb_dec_round_loop:
- aesd $dat0,q8
- aesimc $dat0,$dat0
- vld1.32 {q8},[$key],#16 // load key schedule...
- aesd $dat0,q9
- aesimc $dat0,$dat0
- vld1.32 {q9},[$key],#16 // load key schedule...
- subs $rounds,$rounds,#2 // bias
- b.gt .Lecb_dec_round_loop
- .Lecb_128_dec:
- vld1.32 {q10-q11},[$key],#32 // load key schedule...
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat0,q9
- aesimc $dat0,$dat0
- vld1.32 {q12-q13},[$key],#32 // load key schedule...
- aesd $dat0,q10
- aesimc $dat0,$dat0
- aesd $dat0,q11
- aesimc $dat0,$dat0
- vld1.32 {q14-q15},[$key],#32 // load key schedule...
- aesd $dat0,q12
- aesimc $dat0,$dat0
- aesd $dat0,q13
- aesimc $dat0,$dat0
- vld1.32 {$rndlast},[$key]
- aesd $dat0,q14
- aesimc $dat0,$dat0
- aesd $dat0,q15
- veor $dat0,$dat0,$rndlast
- vst1.8 {$dat0},[$out]
- b .Lecb_Final_abort
- .Lecb_big_size:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ___
- $code.=<<___ if ($flavour !~ /64/);
- mov ip,sp
- stmdb sp!,{r4-r8,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldmia ip,{r4-r5} @ load remaining args
- subs $len,$len,#16
- ___
- $code.=<<___;
- mov $step,#16
- b.lo .Lecb_done
- cclr $step,eq
- cmp $enc,#0 // en- or decrypting?
- ldr $rounds,[$key,#240]
- and $len,$len,#-16
- vld1.8 {$dat},[$inp],$step
- vld1.32 {q8-q9},[$key] // load key schedule...
- sub $rounds,$rounds,#6
- add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
- sub $rounds,$rounds,#2
- vld1.32 {q10-q11},[$key_],#32
- vld1.32 {q12-q13},[$key_],#32
- vld1.32 {q14-q15},[$key_],#32
- vld1.32 {$rndlast},[$key_]
- add $key_,$key,#32
- mov $cnt,$rounds
- b.eq .Lecb_dec
- vld1.8 {$dat1},[$inp],#16
- subs $len,$len,#32 // bias
- add $cnt,$rounds,#2
- vorr $in1,$dat1,$dat1
- vorr $dat2,$dat1,$dat1
- vorr $dat1,$dat,$dat
- b.lo .Lecb_enc_tail
- vorr $dat1,$in1,$in1
- vld1.8 {$dat2},[$inp],#16
- ___
- $code.=<<___ if ($flavour =~ /64/);
- cmp $len,#32
- b.lo .Loop3x_ecb_enc
- vld1.8 {$dat3},[$inp],#16
- vld1.8 {$dat4},[$inp],#16
- sub $len,$len,#32 // bias
- mov $cnt,$rounds
- .Loop5x_ecb_enc:
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- aese $dat3,q8
- aesmc $dat3,$dat3
- aese $dat4,q8
- aesmc $dat4,$dat4
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- aese $dat3,q9
- aesmc $dat3,$dat3
- aese $dat4,q9
- aesmc $dat4,$dat4
- vld1.32 {q9},[$key_],#16
- b.gt .Loop5x_ecb_enc
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- aese $dat3,q8
- aesmc $dat3,$dat3
- aese $dat4,q8
- aesmc $dat4,$dat4
- cmp $len,#0x40 // because .Lecb_enc_tail4x
- sub $len,$len,#0x50
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- aese $dat3,q9
- aesmc $dat3,$dat3
- aese $dat4,q9
- aesmc $dat4,$dat4
- csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
- mov $key_,$key
- aese $dat0,q10
- aesmc $dat0,$dat0
- aese $dat1,q10
- aesmc $dat1,$dat1
- aese $dat2,q10
- aesmc $dat2,$dat2
- aese $dat3,q10
- aesmc $dat3,$dat3
- aese $dat4,q10
- aesmc $dat4,$dat4
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat4
- // are loaded with last "words"
- add x6,$len,#0x60 // because .Lecb_enc_tail4x
- aese $dat0,q11
- aesmc $dat0,$dat0
- aese $dat1,q11
- aesmc $dat1,$dat1
- aese $dat2,q11
- aesmc $dat2,$dat2
- aese $dat3,q11
- aesmc $dat3,$dat3
- aese $dat4,q11
- aesmc $dat4,$dat4
- aese $dat0,q12
- aesmc $dat0,$dat0
- aese $dat1,q12
- aesmc $dat1,$dat1
- aese $dat2,q12
- aesmc $dat2,$dat2
- aese $dat3,q12
- aesmc $dat3,$dat3
- aese $dat4,q12
- aesmc $dat4,$dat4
- aese $dat0,q13
- aesmc $dat0,$dat0
- aese $dat1,q13
- aesmc $dat1,$dat1
- aese $dat2,q13
- aesmc $dat2,$dat2
- aese $dat3,q13
- aesmc $dat3,$dat3
- aese $dat4,q13
- aesmc $dat4,$dat4
- aese $dat0,q14
- aesmc $dat0,$dat0
- aese $dat1,q14
- aesmc $dat1,$dat1
- aese $dat2,q14
- aesmc $dat2,$dat2
- aese $dat3,q14
- aesmc $dat3,$dat3
- aese $dat4,q14
- aesmc $dat4,$dat4
- aese $dat0,q15
- vld1.8 {$in0},[$inp],#16
- aese $dat1,q15
- vld1.8 {$in1},[$inp],#16
- aese $dat2,q15
- vld1.8 {$in2},[$inp],#16
- aese $dat3,q15
- vld1.8 {$in3},[$inp],#16
- aese $dat4,q15
- vld1.8 {$in4},[$inp],#16
- cbz x6,.Lecb_enc_tail4x
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- veor $tmp0,$rndlast,$dat0
- vorr $dat0,$in0,$in0
- veor $tmp1,$rndlast,$dat1
- vorr $dat1,$in1,$in1
- veor $tmp2,$rndlast,$dat2
- vorr $dat2,$in2,$in2
- veor $tmp3,$rndlast,$dat3
- vorr $dat3,$in3,$in3
- veor $tmp4,$rndlast,$dat4
- vst1.8 {$tmp0},[$out],#16
- vorr $dat4,$in4,$in4
- vst1.8 {$tmp1},[$out],#16
- mov $cnt,$rounds
- vst1.8 {$tmp2},[$out],#16
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b.hs .Loop5x_ecb_enc
- add $len,$len,#0x50
- cbz $len,.Lecb_done
- add $cnt,$rounds,#2
- subs $len,$len,#0x30
- vorr $dat0,$in2,$in2
- vorr $dat1,$in3,$in3
- vorr $dat2,$in4,$in4
- b.lo .Lecb_enc_tail
- b .Loop3x_ecb_enc
- .align 4
- .Lecb_enc_tail4x:
- veor $tmp1,$rndlast,$dat1
- veor $tmp2,$rndlast,$dat2
- veor $tmp3,$rndlast,$dat3
- veor $tmp4,$rndlast,$dat4
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b .Lecb_done
- .align 4
- ___
- $code.=<<___;
- .Loop3x_ecb_enc:
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Loop3x_ecb_enc
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- subs $len,$len,#0x30
- mov.lo x6,$len // x6, $cnt, is zero at this point
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat2
- // are loaded with last "words"
- mov $key_,$key
- aese $dat0,q12
- aesmc $dat0,$dat0
- aese $dat1,q12
- aesmc $dat1,$dat1
- aese $dat2,q12
- aesmc $dat2,$dat2
- vld1.8 {$in0},[$inp],#16
- aese $dat0,q13
- aesmc $dat0,$dat0
- aese $dat1,q13
- aesmc $dat1,$dat1
- aese $dat2,q13
- aesmc $dat2,$dat2
- vld1.8 {$in1},[$inp],#16
- aese $dat0,q14
- aesmc $dat0,$dat0
- aese $dat1,q14
- aesmc $dat1,$dat1
- aese $dat2,q14
- aesmc $dat2,$dat2
- vld1.8 {$in2},[$inp],#16
- aese $dat0,q15
- aese $dat1,q15
- aese $dat2,q15
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- add $cnt,$rounds,#2
- veor $tmp0,$rndlast,$dat0
- veor $tmp1,$rndlast,$dat1
- veor $dat2,$dat2,$rndlast
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp0},[$out],#16
- vorr $dat0,$in0,$in0
- vst1.8 {$tmp1},[$out],#16
- vorr $dat1,$in1,$in1
- vst1.8 {$dat2},[$out],#16
- vorr $dat2,$in2,$in2
- b.hs .Loop3x_ecb_enc
- cmn $len,#0x30
- b.eq .Lecb_done
- nop
- .Lecb_enc_tail:
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Lecb_enc_tail
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- aese $dat1,q12
- aesmc $dat1,$dat1
- aese $dat2,q12
- aesmc $dat2,$dat2
- cmn $len,#0x20
- aese $dat1,q13
- aesmc $dat1,$dat1
- aese $dat2,q13
- aesmc $dat2,$dat2
- aese $dat1,q14
- aesmc $dat1,$dat1
- aese $dat2,q14
- aesmc $dat2,$dat2
- aese $dat1,q15
- aese $dat2,q15
- b.eq .Lecb_enc_one
- veor $tmp1,$rndlast,$dat1
- veor $tmp2,$rndlast,$dat2
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- b .Lecb_done
- .Lecb_enc_one:
- veor $tmp1,$rndlast,$dat2
- vst1.8 {$tmp1},[$out],#16
- b .Lecb_done
- ___
- $code.=<<___;
- .align 5
- .Lecb_dec:
- vld1.8 {$dat1},[$inp],#16
- subs $len,$len,#32 // bias
- add $cnt,$rounds,#2
- vorr $in1,$dat1,$dat1
- vorr $dat2,$dat1,$dat1
- vorr $dat1,$dat,$dat
- b.lo .Lecb_dec_tail
- vorr $dat1,$in1,$in1
- vld1.8 {$dat2},[$inp],#16
- ___
- $code.=<<___ if ($flavour =~ /64/);
- cmp $len,#32
- b.lo .Loop3x_ecb_dec
- vld1.8 {$dat3},[$inp],#16
- vld1.8 {$dat4},[$inp],#16
- sub $len,$len,#32 // bias
- mov $cnt,$rounds
- .Loop5x_ecb_dec:
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat3,q8
- aesimc $dat3,$dat3
- aesd $dat4,q8
- aesimc $dat4,$dat4
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat3,q9
- aesimc $dat3,$dat3
- aesd $dat4,q9
- aesimc $dat4,$dat4
- vld1.32 {q9},[$key_],#16
- b.gt .Loop5x_ecb_dec
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat3,q8
- aesimc $dat3,$dat3
- aesd $dat4,q8
- aesimc $dat4,$dat4
- cmp $len,#0x40 // because .Lecb_tail4x
- sub $len,$len,#0x50
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat3,q9
- aesimc $dat3,$dat3
- aesd $dat4,q9
- aesimc $dat4,$dat4
- csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
- mov $key_,$key
- aesd $dat0,q10
- aesimc $dat0,$dat0
- aesd $dat1,q10
- aesimc $dat1,$dat1
- aesd $dat2,q10
- aesimc $dat2,$dat2
- aesd $dat3,q10
- aesimc $dat3,$dat3
- aesd $dat4,q10
- aesimc $dat4,$dat4
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat4
- // are loaded with last "words"
- add x6,$len,#0x60 // because .Lecb_tail4x
- aesd $dat0,q11
- aesimc $dat0,$dat0
- aesd $dat1,q11
- aesimc $dat1,$dat1
- aesd $dat2,q11
- aesimc $dat2,$dat2
- aesd $dat3,q11
- aesimc $dat3,$dat3
- aesd $dat4,q11
- aesimc $dat4,$dat4
- aesd $dat0,q12
- aesimc $dat0,$dat0
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- aesd $dat3,q12
- aesimc $dat3,$dat3
- aesd $dat4,q12
- aesimc $dat4,$dat4
- aesd $dat0,q13
- aesimc $dat0,$dat0
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- aesd $dat3,q13
- aesimc $dat3,$dat3
- aesd $dat4,q13
- aesimc $dat4,$dat4
- aesd $dat0,q14
- aesimc $dat0,$dat0
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- aesd $dat3,q14
- aesimc $dat3,$dat3
- aesd $dat4,q14
- aesimc $dat4,$dat4
- aesd $dat0,q15
- vld1.8 {$in0},[$inp],#16
- aesd $dat1,q15
- vld1.8 {$in1},[$inp],#16
- aesd $dat2,q15
- vld1.8 {$in2},[$inp],#16
- aesd $dat3,q15
- vld1.8 {$in3},[$inp],#16
- aesd $dat4,q15
- vld1.8 {$in4},[$inp],#16
- cbz x6,.Lecb_tail4x
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- veor $tmp0,$rndlast,$dat0
- vorr $dat0,$in0,$in0
- veor $tmp1,$rndlast,$dat1
- vorr $dat1,$in1,$in1
- veor $tmp2,$rndlast,$dat2
- vorr $dat2,$in2,$in2
- veor $tmp3,$rndlast,$dat3
- vorr $dat3,$in3,$in3
- veor $tmp4,$rndlast,$dat4
- vst1.8 {$tmp0},[$out],#16
- vorr $dat4,$in4,$in4
- vst1.8 {$tmp1},[$out],#16
- mov $cnt,$rounds
- vst1.8 {$tmp2},[$out],#16
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b.hs .Loop5x_ecb_dec
- add $len,$len,#0x50
- cbz $len,.Lecb_done
- add $cnt,$rounds,#2
- subs $len,$len,#0x30
- vorr $dat0,$in2,$in2
- vorr $dat1,$in3,$in3
- vorr $dat2,$in4,$in4
- b.lo .Lecb_dec_tail
- b .Loop3x_ecb_dec
- .align 4
- .Lecb_tail4x:
- veor $tmp1,$rndlast,$dat1
- veor $tmp2,$rndlast,$dat2
- veor $tmp3,$rndlast,$dat3
- veor $tmp4,$rndlast,$dat4
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b .Lecb_done
- .align 4
- ___
- $code.=<<___;
- .Loop3x_ecb_dec:
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Loop3x_ecb_dec
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- subs $len,$len,#0x30
- mov.lo x6,$len // x6, $cnt, is zero at this point
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat2
- // are loaded with last "words"
- mov $key_,$key
- aesd $dat0,q12
- aesimc $dat0,$dat0
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- vld1.8 {$in0},[$inp],#16
- aesd $dat0,q13
- aesimc $dat0,$dat0
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- vld1.8 {$in1},[$inp],#16
- aesd $dat0,q14
- aesimc $dat0,$dat0
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- vld1.8 {$in2},[$inp],#16
- aesd $dat0,q15
- aesd $dat1,q15
- aesd $dat2,q15
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- add $cnt,$rounds,#2
- veor $tmp0,$rndlast,$dat0
- veor $tmp1,$rndlast,$dat1
- veor $dat2,$dat2,$rndlast
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp0},[$out],#16
- vorr $dat0,$in0,$in0
- vst1.8 {$tmp1},[$out],#16
- vorr $dat1,$in1,$in1
- vst1.8 {$dat2},[$out],#16
- vorr $dat2,$in2,$in2
- b.hs .Loop3x_ecb_dec
- cmn $len,#0x30
- b.eq .Lecb_done
- nop
- .Lecb_dec_tail:
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Lecb_dec_tail
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- cmn $len,#0x20
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- aesd $dat1,q15
- aesd $dat2,q15
- b.eq .Lecb_dec_one
- veor $tmp1,$rndlast,$dat1
- veor $tmp2,$rndlast,$dat2
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- b .Lecb_done
- .Lecb_dec_one:
- veor $tmp1,$rndlast,$dat2
- vst1.8 {$tmp1},[$out],#16
- .Lecb_done:
- ___
- }
- $code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r8,pc}
- ___
- $code.=<<___ if ($flavour =~ /64/);
- ldr x29,[sp],#16
- ___
- $code.=<<___ if ($flavour =~ /64/);
- .Lecb_Final_abort:
- ret
- ___
- $code.=<<___;
- .size ${prefix}_ecb_encrypt,.-${prefix}_ecb_encrypt
- ___
- }}}
- {{{
- my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
- my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
- my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
- my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
- my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
- ### q8-q15 preloaded key schedule
- $code.=<<___;
- .globl ${prefix}_cbc_encrypt
- .type ${prefix}_cbc_encrypt,%function
- .align 5
- ${prefix}_cbc_encrypt:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ___
- $code.=<<___ if ($flavour !~ /64/);
- mov ip,sp
- stmdb sp!,{r4-r8,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldmia ip,{r4-r5} @ load remaining args
- ___
- $code.=<<___;
- subs $len,$len,#16
- mov $step,#16
- b.lo .Lcbc_abort
- cclr $step,eq
- cmp $enc,#0 // en- or decrypting?
- ldr $rounds,[$key,#240]
- and $len,$len,#-16
- vld1.8 {$ivec},[$ivp]
- vld1.8 {$dat},[$inp],$step
- vld1.32 {q8-q9},[$key] // load key schedule...
- sub $rounds,$rounds,#6
- add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
- sub $rounds,$rounds,#2
- vld1.32 {q10-q11},[$key_],#32
- vld1.32 {q12-q13},[$key_],#32
- vld1.32 {q14-q15},[$key_],#32
- vld1.32 {$rndlast},[$key_]
- add $key_,$key,#32
- mov $cnt,$rounds
- b.eq .Lcbc_dec
- cmp $rounds,#2
- veor $dat,$dat,$ivec
- veor $rndzero_n_last,q8,$rndlast
- b.eq .Lcbc_enc128
- vld1.32 {$in0-$in1},[$key_]
- add $key_,$key,#16
- add $key4,$key,#16*4
- add $key5,$key,#16*5
- aese $dat,q8
- aesmc $dat,$dat
- add $key6,$key,#16*6
- add $key7,$key,#16*7
- b .Lenter_cbc_enc
- .align 4
- .Loop_cbc_enc:
- aese $dat,q8
- aesmc $dat,$dat
- vst1.8 {$ivec},[$out],#16
- .Lenter_cbc_enc:
- aese $dat,q9
- aesmc $dat,$dat
- aese $dat,$in0
- aesmc $dat,$dat
- vld1.32 {q8},[$key4]
- cmp $rounds,#4
- aese $dat,$in1
- aesmc $dat,$dat
- vld1.32 {q9},[$key5]
- b.eq .Lcbc_enc192
- aese $dat,q8
- aesmc $dat,$dat
- vld1.32 {q8},[$key6]
- aese $dat,q9
- aesmc $dat,$dat
- vld1.32 {q9},[$key7]
- nop
- .Lcbc_enc192:
- aese $dat,q8
- aesmc $dat,$dat
- subs $len,$len,#16
- aese $dat,q9
- aesmc $dat,$dat
- cclr $step,eq
- aese $dat,q10
- aesmc $dat,$dat
- aese $dat,q11
- aesmc $dat,$dat
- vld1.8 {q8},[$inp],$step
- aese $dat,q12
- aesmc $dat,$dat
- veor q8,q8,$rndzero_n_last
- aese $dat,q13
- aesmc $dat,$dat
- vld1.32 {q9},[$key_] // re-pre-load rndkey[1]
- aese $dat,q14
- aesmc $dat,$dat
- aese $dat,q15
- veor $ivec,$dat,$rndlast
- b.hs .Loop_cbc_enc
- vst1.8 {$ivec},[$out],#16
- b .Lcbc_done
- .align 5
- .Lcbc_enc128:
- vld1.32 {$in0-$in1},[$key_]
- aese $dat,q8
- aesmc $dat,$dat
- b .Lenter_cbc_enc128
- .Loop_cbc_enc128:
- aese $dat,q8
- aesmc $dat,$dat
- vst1.8 {$ivec},[$out],#16
- .Lenter_cbc_enc128:
- aese $dat,q9
- aesmc $dat,$dat
- subs $len,$len,#16
- aese $dat,$in0
- aesmc $dat,$dat
- cclr $step,eq
- aese $dat,$in1
- aesmc $dat,$dat
- aese $dat,q10
- aesmc $dat,$dat
- aese $dat,q11
- aesmc $dat,$dat
- vld1.8 {q8},[$inp],$step
- aese $dat,q12
- aesmc $dat,$dat
- aese $dat,q13
- aesmc $dat,$dat
- aese $dat,q14
- aesmc $dat,$dat
- veor q8,q8,$rndzero_n_last
- aese $dat,q15
- veor $ivec,$dat,$rndlast
- b.hs .Loop_cbc_enc128
- vst1.8 {$ivec},[$out],#16
- b .Lcbc_done
- ___
- {
- my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
- my ($dat3,$in3,$tmp3); # used only in 64-bit mode
- my ($dat4,$in4,$tmp4);
- if ($flavour =~ /64/) {
- ($dat2,$dat3,$dat4,$in2,$in3,$in4,$tmp3,$tmp4)=map("q$_",(16..23));
- }
- $code.=<<___;
- .align 5
- .Lcbc_dec:
- vld1.8 {$dat2},[$inp],#16
- subs $len,$len,#32 // bias
- add $cnt,$rounds,#2
- vorr $in1,$dat,$dat
- vorr $dat1,$dat,$dat
- vorr $in2,$dat2,$dat2
- b.lo .Lcbc_dec_tail
- vorr $dat1,$dat2,$dat2
- vld1.8 {$dat2},[$inp],#16
- vorr $in0,$dat,$dat
- vorr $in1,$dat1,$dat1
- vorr $in2,$dat2,$dat2
- ___
- $code.=<<___ if ($flavour =~ /64/);
- cmp $len,#32
- b.lo .Loop3x_cbc_dec
- vld1.8 {$dat3},[$inp],#16
- vld1.8 {$dat4},[$inp],#16
- sub $len,$len,#32 // bias
- mov $cnt,$rounds
- vorr $in3,$dat3,$dat3
- vorr $in4,$dat4,$dat4
- .Loop5x_cbc_dec:
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat3,q8
- aesimc $dat3,$dat3
- aesd $dat4,q8
- aesimc $dat4,$dat4
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat3,q9
- aesimc $dat3,$dat3
- aesd $dat4,q9
- aesimc $dat4,$dat4
- vld1.32 {q9},[$key_],#16
- b.gt .Loop5x_cbc_dec
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat3,q8
- aesimc $dat3,$dat3
- aesd $dat4,q8
- aesimc $dat4,$dat4
- cmp $len,#0x40 // because .Lcbc_tail4x
- sub $len,$len,#0x50
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat3,q9
- aesimc $dat3,$dat3
- aesd $dat4,q9
- aesimc $dat4,$dat4
- csel x6,xzr,$len,gt // borrow x6, $cnt, "gt" is not a typo
- mov $key_,$key
- aesd $dat0,q10
- aesimc $dat0,$dat0
- aesd $dat1,q10
- aesimc $dat1,$dat1
- aesd $dat2,q10
- aesimc $dat2,$dat2
- aesd $dat3,q10
- aesimc $dat3,$dat3
- aesd $dat4,q10
- aesimc $dat4,$dat4
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat4
- // are loaded with last "words"
- add x6,$len,#0x60 // because .Lcbc_tail4x
- aesd $dat0,q11
- aesimc $dat0,$dat0
- aesd $dat1,q11
- aesimc $dat1,$dat1
- aesd $dat2,q11
- aesimc $dat2,$dat2
- aesd $dat3,q11
- aesimc $dat3,$dat3
- aesd $dat4,q11
- aesimc $dat4,$dat4
- aesd $dat0,q12
- aesimc $dat0,$dat0
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- aesd $dat3,q12
- aesimc $dat3,$dat3
- aesd $dat4,q12
- aesimc $dat4,$dat4
- aesd $dat0,q13
- aesimc $dat0,$dat0
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- aesd $dat3,q13
- aesimc $dat3,$dat3
- aesd $dat4,q13
- aesimc $dat4,$dat4
- aesd $dat0,q14
- aesimc $dat0,$dat0
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- aesd $dat3,q14
- aesimc $dat3,$dat3
- aesd $dat4,q14
- aesimc $dat4,$dat4
- veor $tmp0,$ivec,$rndlast
- aesd $dat0,q15
- veor $tmp1,$in0,$rndlast
- vld1.8 {$in0},[$inp],#16
- aesd $dat1,q15
- veor $tmp2,$in1,$rndlast
- vld1.8 {$in1},[$inp],#16
- aesd $dat2,q15
- veor $tmp3,$in2,$rndlast
- vld1.8 {$in2},[$inp],#16
- aesd $dat3,q15
- veor $tmp4,$in3,$rndlast
- vld1.8 {$in3},[$inp],#16
- aesd $dat4,q15
- vorr $ivec,$in4,$in4
- vld1.8 {$in4},[$inp],#16
- cbz x6,.Lcbc_tail4x
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- veor $tmp0,$tmp0,$dat0
- vorr $dat0,$in0,$in0
- veor $tmp1,$tmp1,$dat1
- vorr $dat1,$in1,$in1
- veor $tmp2,$tmp2,$dat2
- vorr $dat2,$in2,$in2
- veor $tmp3,$tmp3,$dat3
- vorr $dat3,$in3,$in3
- veor $tmp4,$tmp4,$dat4
- vst1.8 {$tmp0},[$out],#16
- vorr $dat4,$in4,$in4
- vst1.8 {$tmp1},[$out],#16
- mov $cnt,$rounds
- vst1.8 {$tmp2},[$out],#16
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b.hs .Loop5x_cbc_dec
- add $len,$len,#0x50
- cbz $len,.Lcbc_done
- add $cnt,$rounds,#2
- subs $len,$len,#0x30
- vorr $dat0,$in2,$in2
- vorr $in0,$in2,$in2
- vorr $dat1,$in3,$in3
- vorr $in1,$in3,$in3
- vorr $dat2,$in4,$in4
- vorr $in2,$in4,$in4
- b.lo .Lcbc_dec_tail
- b .Loop3x_cbc_dec
- .align 4
- .Lcbc_tail4x:
- veor $tmp1,$tmp0,$dat1
- veor $tmp2,$tmp2,$dat2
- veor $tmp3,$tmp3,$dat3
- veor $tmp4,$tmp4,$dat4
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- vst1.8 {$tmp3},[$out],#16
- vst1.8 {$tmp4},[$out],#16
- b .Lcbc_done
- .align 4
- ___
- $code.=<<___;
- .Loop3x_cbc_dec:
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Loop3x_cbc_dec
- aesd $dat0,q8
- aesimc $dat0,$dat0
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- veor $tmp0,$ivec,$rndlast
- subs $len,$len,#0x30
- veor $tmp1,$in0,$rndlast
- mov.lo x6,$len // x6, $cnt, is zero at this point
- aesd $dat0,q9
- aesimc $dat0,$dat0
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- veor $tmp2,$in1,$rndlast
- add $inp,$inp,x6 // $inp is adjusted in such a way that
- // at exit from the loop $dat1-$dat2
- // are loaded with last "words"
- vorr $ivec,$in2,$in2
- mov $key_,$key
- aesd $dat0,q12
- aesimc $dat0,$dat0
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- vld1.8 {$in0},[$inp],#16
- aesd $dat0,q13
- aesimc $dat0,$dat0
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- vld1.8 {$in1},[$inp],#16
- aesd $dat0,q14
- aesimc $dat0,$dat0
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- vld1.8 {$in2},[$inp],#16
- aesd $dat0,q15
- aesd $dat1,q15
- aesd $dat2,q15
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- add $cnt,$rounds,#2
- veor $tmp0,$tmp0,$dat0
- veor $tmp1,$tmp1,$dat1
- veor $dat2,$dat2,$tmp2
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$tmp0},[$out],#16
- vorr $dat0,$in0,$in0
- vst1.8 {$tmp1},[$out],#16
- vorr $dat1,$in1,$in1
- vst1.8 {$dat2},[$out],#16
- vorr $dat2,$in2,$in2
- b.hs .Loop3x_cbc_dec
- cmn $len,#0x30
- b.eq .Lcbc_done
- nop
- .Lcbc_dec_tail:
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Lcbc_dec_tail
- aesd $dat1,q8
- aesimc $dat1,$dat1
- aesd $dat2,q8
- aesimc $dat2,$dat2
- aesd $dat1,q9
- aesimc $dat1,$dat1
- aesd $dat2,q9
- aesimc $dat2,$dat2
- aesd $dat1,q12
- aesimc $dat1,$dat1
- aesd $dat2,q12
- aesimc $dat2,$dat2
- cmn $len,#0x20
- aesd $dat1,q13
- aesimc $dat1,$dat1
- aesd $dat2,q13
- aesimc $dat2,$dat2
- veor $tmp1,$ivec,$rndlast
- aesd $dat1,q14
- aesimc $dat1,$dat1
- aesd $dat2,q14
- aesimc $dat2,$dat2
- veor $tmp2,$in1,$rndlast
- aesd $dat1,q15
- aesd $dat2,q15
- b.eq .Lcbc_dec_one
- veor $tmp1,$tmp1,$dat1
- veor $tmp2,$tmp2,$dat2
- vorr $ivec,$in2,$in2
- vst1.8 {$tmp1},[$out],#16
- vst1.8 {$tmp2},[$out],#16
- b .Lcbc_done
- .Lcbc_dec_one:
- veor $tmp1,$tmp1,$dat2
- vorr $ivec,$in2,$in2
- vst1.8 {$tmp1},[$out],#16
- .Lcbc_done:
- vst1.8 {$ivec},[$ivp]
- .Lcbc_abort:
- ___
- }
- $code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r8,pc}
- ___
- $code.=<<___ if ($flavour =~ /64/);
- ldr x29,[sp],#16
- ret
- ___
- $code.=<<___;
- .size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
- ___
- }}}
- {{{
- my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
- my ($rounds,$cnt,$key_)=("w5","w6","x7");
- my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
- my $step="x12"; # aliases with $tctr2
- my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
- my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
- # used only in 64-bit mode...
- my ($dat3,$dat4,$in3,$in4)=map("q$_",(16..23));
- my ($dat,$tmp)=($dat0,$tmp0);
- ### q8-q15 preloaded key schedule
- $code.=<<___;
- .globl ${prefix}_ctr32_encrypt_blocks
- .type ${prefix}_ctr32_encrypt_blocks,%function
- .align 5
- ${prefix}_ctr32_encrypt_blocks:
- ___
- $code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ___
- $code.=<<___ if ($flavour !~ /64/);
- mov ip,sp
- stmdb sp!,{r4-r10,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldr r4, [ip] @ load remaining arg
- ___
- $code.=<<___;
- ldr $rounds,[$key,#240]
- ldr $ctr, [$ivp, #12]
- #ifdef __ARMEB__
- vld1.8 {$dat0},[$ivp]
- #else
- vld1.32 {$dat0},[$ivp]
- #endif
- vld1.32 {q8-q9},[$key] // load key schedule...
- sub $rounds,$rounds,#4
- mov $step,#16
- cmp $len,#2
- add $key_,$key,x5,lsl#4 // pointer to last 5 round keys
- sub $rounds,$rounds,#2
- vld1.32 {q12-q13},[$key_],#32
- vld1.32 {q14-q15},[$key_],#32
- vld1.32 {$rndlast},[$key_]
- add $key_,$key,#32
- mov $cnt,$rounds
- cclr $step,lo
- #ifndef __ARMEB__
- rev $ctr, $ctr
- #endif
- vorr $dat1,$dat0,$dat0
- add $tctr1, $ctr, #1
- vorr $dat2,$dat0,$dat0
- add $ctr, $ctr, #2
- vorr $ivec,$dat0,$dat0
- rev $tctr1, $tctr1
- vmov.32 ${dat1}[3],$tctr1
- b.ls .Lctr32_tail
- rev $tctr2, $ctr
- sub $len,$len,#3 // bias
- vmov.32 ${dat2}[3],$tctr2
- ___
- $code.=<<___ if ($flavour =~ /64/);
- cmp $len,#2
- b.lo .Loop3x_ctr32
- add w13,$ctr,#1
- add w14,$ctr,#2
- vorr $dat3,$dat0,$dat0
- rev w13,w13
- vorr $dat4,$dat0,$dat0
- rev w14,w14
- vmov.32 ${dat3}[3],w13
- sub $len,$len,#2 // bias
- vmov.32 ${dat4}[3],w14
- add $ctr,$ctr,#2
- b .Loop5x_ctr32
- .align 4
- .Loop5x_ctr32:
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- aese $dat3,q8
- aesmc $dat3,$dat3
- aese $dat4,q8
- aesmc $dat4,$dat4
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- aese $dat3,q9
- aesmc $dat3,$dat3
- aese $dat4,q9
- aesmc $dat4,$dat4
- vld1.32 {q9},[$key_],#16
- b.gt .Loop5x_ctr32
- mov $key_,$key
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- aese $dat3,q8
- aesmc $dat3,$dat3
- aese $dat4,q8
- aesmc $dat4,$dat4
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- aese $dat3,q9
- aesmc $dat3,$dat3
- aese $dat4,q9
- aesmc $dat4,$dat4
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- aese $dat0,q12
- aesmc $dat0,$dat0
- add $tctr0,$ctr,#1
- add $tctr1,$ctr,#2
- aese $dat1,q12
- aesmc $dat1,$dat1
- add $tctr2,$ctr,#3
- add w13,$ctr,#4
- aese $dat2,q12
- aesmc $dat2,$dat2
- add w14,$ctr,#5
- rev $tctr0,$tctr0
- aese $dat3,q12
- aesmc $dat3,$dat3
- rev $tctr1,$tctr1
- rev $tctr2,$tctr2
- aese $dat4,q12
- aesmc $dat4,$dat4
- rev w13,w13
- rev w14,w14
- aese $dat0,q13
- aesmc $dat0,$dat0
- aese $dat1,q13
- aesmc $dat1,$dat1
- aese $dat2,q13
- aesmc $dat2,$dat2
- aese $dat3,q13
- aesmc $dat3,$dat3
- aese $dat4,q13
- aesmc $dat4,$dat4
- aese $dat0,q14
- aesmc $dat0,$dat0
- vld1.8 {$in0},[$inp],#16
- aese $dat1,q14
- aesmc $dat1,$dat1
- vld1.8 {$in1},[$inp],#16
- aese $dat2,q14
- aesmc $dat2,$dat2
- vld1.8 {$in2},[$inp],#16
- aese $dat3,q14
- aesmc $dat3,$dat3
- vld1.8 {$in3},[$inp],#16
- aese $dat4,q14
- aesmc $dat4,$dat4
- vld1.8 {$in4},[$inp],#16
- aese $dat0,q15
- veor $in0,$in0,$rndlast
- aese $dat1,q15
- veor $in1,$in1,$rndlast
- aese $dat2,q15
- veor $in2,$in2,$rndlast
- aese $dat3,q15
- veor $in3,$in3,$rndlast
- aese $dat4,q15
- veor $in4,$in4,$rndlast
- veor $in0,$in0,$dat0
- vorr $dat0,$ivec,$ivec
- veor $in1,$in1,$dat1
- vorr $dat1,$ivec,$ivec
- veor $in2,$in2,$dat2
- vorr $dat2,$ivec,$ivec
- veor $in3,$in3,$dat3
- vorr $dat3,$ivec,$ivec
- veor $in4,$in4,$dat4
- vorr $dat4,$ivec,$ivec
- vst1.8 {$in0},[$out],#16
- vmov.32 ${dat0}[3],$tctr0
- vst1.8 {$in1},[$out],#16
- vmov.32 ${dat1}[3],$tctr1
- vst1.8 {$in2},[$out],#16
- vmov.32 ${dat2}[3],$tctr2
- vst1.8 {$in3},[$out],#16
- vmov.32 ${dat3}[3],w13
- vst1.8 {$in4},[$out],#16
- vmov.32 ${dat4}[3],w14
- mov $cnt,$rounds
- cbz $len,.Lctr32_done
- add $ctr,$ctr,#5
- subs $len,$len,#5
- b.hs .Loop5x_ctr32
- add $len,$len,#5
- sub $ctr,$ctr,#5
- cmp $len,#2
- mov $step,#16
- cclr $step,lo
- b.ls .Lctr32_tail
- sub $len,$len,#3 // bias
- add $ctr,$ctr,#3
- ___
- $code.=<<___;
- b .Loop3x_ctr32
- .align 4
- .Loop3x_ctr32:
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat2,q8
- aesmc $dat2,$dat2
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- aese $dat2,q9
- aesmc $dat2,$dat2
- vld1.32 {q9},[$key_],#16
- b.gt .Loop3x_ctr32
- aese $dat0,q8
- aesmc $tmp0,$dat0
- aese $dat1,q8
- aesmc $tmp1,$dat1
- vld1.8 {$in0},[$inp],#16
- vorr $dat0,$ivec,$ivec
- aese $dat2,q8
- aesmc $dat2,$dat2
- vld1.8 {$in1},[$inp],#16
- vorr $dat1,$ivec,$ivec
- aese $tmp0,q9
- aesmc $tmp0,$tmp0
- aese $tmp1,q9
- aesmc $tmp1,$tmp1
- vld1.8 {$in2},[$inp],#16
- mov $key_,$key
- aese $dat2,q9
- aesmc $tmp2,$dat2
- vorr $dat2,$ivec,$ivec
- add $tctr0,$ctr,#1
- aese $tmp0,q12
- aesmc $tmp0,$tmp0
- aese $tmp1,q12
- aesmc $tmp1,$tmp1
- veor $in0,$in0,$rndlast
- add $tctr1,$ctr,#2
- aese $tmp2,q12
- aesmc $tmp2,$tmp2
- veor $in1,$in1,$rndlast
- add $ctr,$ctr,#3
- aese $tmp0,q13
- aesmc $tmp0,$tmp0
- aese $tmp1,q13
- aesmc $tmp1,$tmp1
- veor $in2,$in2,$rndlast
- rev $tctr0,$tctr0
- aese $tmp2,q13
- aesmc $tmp2,$tmp2
- vmov.32 ${dat0}[3], $tctr0
- rev $tctr1,$tctr1
- aese $tmp0,q14
- aesmc $tmp0,$tmp0
- aese $tmp1,q14
- aesmc $tmp1,$tmp1
- vmov.32 ${dat1}[3], $tctr1
- rev $tctr2,$ctr
- aese $tmp2,q14
- aesmc $tmp2,$tmp2
- vmov.32 ${dat2}[3], $tctr2
- subs $len,$len,#3
- aese $tmp0,q15
- aese $tmp1,q15
- aese $tmp2,q15
- veor $in0,$in0,$tmp0
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- vst1.8 {$in0},[$out],#16
- veor $in1,$in1,$tmp1
- mov $cnt,$rounds
- vst1.8 {$in1},[$out],#16
- veor $in2,$in2,$tmp2
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- vst1.8 {$in2},[$out],#16
- b.hs .Loop3x_ctr32
- adds $len,$len,#3
- b.eq .Lctr32_done
- cmp $len,#1
- mov $step,#16
- cclr $step,eq
- .Lctr32_tail:
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- vld1.32 {q8},[$key_],#16
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- vld1.32 {q9},[$key_],#16
- b.gt .Lctr32_tail
- aese $dat0,q8
- aesmc $dat0,$dat0
- aese $dat1,q8
- aesmc $dat1,$dat1
- aese $dat0,q9
- aesmc $dat0,$dat0
- aese $dat1,q9
- aesmc $dat1,$dat1
- vld1.8 {$in0},[$inp],$step
- aese $dat0,q12
- aesmc $dat0,$dat0
- aese $dat1,q12
- aesmc $dat1,$dat1
- vld1.8 {$in1},[$inp]
- aese $dat0,q13
- aesmc $dat0,$dat0
- aese $dat1,q13
- aesmc $dat1,$dat1
- veor $in0,$in0,$rndlast
- aese $dat0,q14
- aesmc $dat0,$dat0
- aese $dat1,q14
- aesmc $dat1,$dat1
- veor $in1,$in1,$rndlast
- aese $dat0,q15
- aese $dat1,q15
- cmp $len,#1
- veor $in0,$in0,$dat0
- veor $in1,$in1,$dat1
- vst1.8 {$in0},[$out],#16
- b.eq .Lctr32_done
- vst1.8 {$in1},[$out]
- .Lctr32_done:
- ___
- $code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r10,pc}
- ___
- $code.=<<___ if ($flavour =~ /64/);
- ldr x29,[sp],#16
- ret
- ___
- $code.=<<___;
- .size ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
- ___
- }}}
- $code.=<<___;
- #endif
- ___
- ########################################
- if ($flavour =~ /64/) { ######## 64-bit code
- my %opcode = (
- "aesd" => 0x4e285800, "aese" => 0x4e284800,
- "aesimc"=> 0x4e287800, "aesmc" => 0x4e286800 );
- local *unaes = sub {
- my ($mnemonic,$arg)=@_;
- $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o &&
- sprintf ".inst\t0x%08x\t//%s %s",
- $opcode{$mnemonic}|$1|($2<<5),
- $mnemonic,$arg;
- };
- foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
- s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
- s/@\s/\/\//o; # old->new style commentary
- #s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
- s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
- s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel $2,$3,$2,$1/o or
- s/vmov\.i8/movi/o or # fix up legacy mnemonics
- s/vext\.8/ext/o or
- s/vrev32\.8/rev32/o or
- s/vtst\.8/cmtst/o or
- s/vshr/ushr/o or
- s/^(\s+)v/$1/o or # strip off v prefix
- s/\bbx\s+lr\b/ret/o;
- # fix up remaining legacy suffixes
- s/\.[ui]?8//o;
- m/\],#8/o and s/\.16b/\.8b/go;
- s/\.[ui]?32//o and s/\.16b/\.4s/go;
- s/\.[ui]?64//o and s/\.16b/\.2d/go;
- s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
- print $_,"\n";
- }
- } else { ######## 32-bit code
- my %opcode = (
- "aesd" => 0xf3b00340, "aese" => 0xf3b00300,
- "aesimc"=> 0xf3b003c0, "aesmc" => 0xf3b00380 );
- local *unaes = sub {
- my ($mnemonic,$arg)=@_;
- if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
- my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
- |(($2&7)<<1) |(($2&8)<<2);
- # ARMv7 instructions are always encoded little-endian, hence
- # the byte order below. The correct solution would be to use the
- # .inst directive, but older assemblers don't implement it:-(
- sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
- $word&0xff,($word>>8)&0xff,
- ($word>>16)&0xff,($word>>24)&0xff,
- $mnemonic,$arg;
- }
- };
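- # For illustration (derived from the opcode table and shifts above,
- # not from an authoritative encoding reference): "aese q0,q1" is
- # rewritten by the helper above as
- #     INST(0x02,0x03,0xb0,0xf3)   @ aese q0,q1
- # and the INST macro emitted near the top of this file then arranges
- # those bytes as required for ARM or Thumb-2 mode.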
- sub unvtbl {
- my $arg=shift;
- $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
- sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
- "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
- }
- sub unvdup32 {
- my $arg=shift;
- $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
- sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
- }
- sub unvmov32 {
- my $arg=shift;
- $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
- sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
- }
- foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
- s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
- s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
- s/\/\/\s?/@ /o; # new->old style commentary
- # fix up remaining new-style suffixes
- s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
- s/\],#[0-9]+/]!/o;
- s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
- s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2 $1,#0/o or
- s/vtbl\.8\s+(.*)/unvtbl($1)/geo or
- s/vdup\.32\s+(.*)/unvdup32($1)/geo or
- s/vmov\.32\s+(.*)/unvmov32($1)/geo or
- s/^(\s+)b\./$1b/o or
- s/^(\s+)ret/$1bx\tlr/o;
- if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
- print " it $2\n";
- }
- print $_,"\n";
- }
- }
- close STDOUT or die "error closing STDOUT: $!";