- #! /usr/bin/env perl
- # Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
- #
- # Licensed under the Apache License 2.0 (the "License"). You may not use
- # this file except in compliance with the License. You can obtain a copy
- # in the file LICENSE in the source distribution or at
- # https://www.openssl.org/source/license.html
- #
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # October 2015
- #
- # ChaCha20 for PowerPC/AltiVec.
- #
- # June 2018
- #
- # Add VSX 2.07 code path. Original 3xAltiVec+1xIALU is well-suited for
- # processors that can't issue more than one vector instruction per
- # cycle. But POWER8 (and POWER9) can issue a pair, and vector-only 4x
- # interleave would perform better. Incidentally PowerISA 2.07 (first
- # implemented by POWER8) defined new usable instructions, hence 4xVSX
- # code path...
- #
- # Performance in cycles per byte out of large buffer.
- #
- # IALU/gcc-4.x 3xAltiVec+1xIALU 4xVSX
- #
- # Freescale e300 13.6/+115% - -
- # PPC74x0/G4e 6.81/+310% 3.81 -
- # PPC970/G5 9.29/+160% ? -
- # POWER7 8.62/+61% 3.35 -
- # POWER8 8.70/+51% 2.91 2.09
- # POWER9 8.80/+29% 4.44(*) 2.45(**)
- #
- # (*) this is a trade-off result; it's possible to improve it, but
- # then it would negatively affect all others;
- # (**) POWER9 seems to be "allergic" to mixing vector and integer
- # instructions, which is why the switch to vector-only code pays
- # off that much;
- # $output is the last argument if it looks like a file (it has an extension)
- # $flavour is the first argument if it doesn't look like a file
- $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
- $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
- if ($flavour =~ /64/) {
- $SIZE_T =8;
- $LRSAVE =2*$SIZE_T;
- $STU ="stdu";
- $POP ="ld";
- $PUSH ="std";
- $UCMP ="cmpld";
- } elsif ($flavour =~ /32/) {
- $SIZE_T =4;
- $LRSAVE =$SIZE_T;
- $STU ="stwu";
- $POP ="lwz";
- $PUSH ="stw";
- $UCMP ="cmplw";
- } else { die "nonsense $flavour"; }
- $LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
- die "can't locate ppc-xlate.pl";
- open STDOUT,"| $^X $xlate $flavour \"$output\""
- or die "can't call $xlate: $!";
- $LOCALS=6*$SIZE_T;
- $FRAME=$LOCALS+64+18*$SIZE_T; # 64 is for local variables
- sub AUTOLOAD() # thunk [simplified] x86-style perlasm
- { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
- $code .= "\t$opcode\t".join(',',@_)."\n";
- }
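- # e.g. &vadduwm("v0","v1","v2") appends "\tvadduwm\tv0,v1,v2\n" to $code;
- # an underscore in the sub name is emitted as a dot in the mnemonic.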
- my $sp = "r1";
- my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
- {{{
- my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
- $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
- my @K = map("v$_",(16..19));
- my $CTR = "v26";
- my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
- my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
- my $beperm = "v31";
- my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
- my $FRAME=$LOCALS+64+7*16; # 7*16 is for v26-v31 offload
- sub VSX_lane_ROUND_4x {
- my ($a0,$b0,$c0,$d0)=@_;
- my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
- my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
- my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
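- # ($_&~3)+(($_+1)&3) bumps the low two bits of each index mod 4, so the
- # three derived tuples cover the remaining columns (or diagonals) of the
- # ChaCha state alongside the tuple passed in.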
- my @x=map("\"v$_\"",(0..15));
- (
- "&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
- "&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
- "&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
- "&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
- "&vxor (@x[$d0],@x[$d0],@x[$a0])",
- "&vxor (@x[$d1],@x[$d1],@x[$a1])",
- "&vxor (@x[$d2],@x[$d2],@x[$a2])",
- "&vxor (@x[$d3],@x[$d3],@x[$a3])",
- "&vrlw (@x[$d0],@x[$d0],'$sixteen')",
- "&vrlw (@x[$d1],@x[$d1],'$sixteen')",
- "&vrlw (@x[$d2],@x[$d2],'$sixteen')",
- "&vrlw (@x[$d3],@x[$d3],'$sixteen')",
- "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
- "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
- "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
- "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
- "&vxor (@x[$b0],@x[$b0],@x[$c0])",
- "&vxor (@x[$b1],@x[$b1],@x[$c1])",
- "&vxor (@x[$b2],@x[$b2],@x[$c2])",
- "&vxor (@x[$b3],@x[$b3],@x[$c3])",
- "&vrlw (@x[$b0],@x[$b0],'$twelve')",
- "&vrlw (@x[$b1],@x[$b1],'$twelve')",
- "&vrlw (@x[$b2],@x[$b2],'$twelve')",
- "&vrlw (@x[$b3],@x[$b3],'$twelve')",
- "&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
- "&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
- "&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
- "&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
- "&vxor (@x[$d0],@x[$d0],@x[$a0])",
- "&vxor (@x[$d1],@x[$d1],@x[$a1])",
- "&vxor (@x[$d2],@x[$d2],@x[$a2])",
- "&vxor (@x[$d3],@x[$d3],@x[$a3])",
- "&vrlw (@x[$d0],@x[$d0],'$eight')",
- "&vrlw (@x[$d1],@x[$d1],'$eight')",
- "&vrlw (@x[$d2],@x[$d2],'$eight')",
- "&vrlw (@x[$d3],@x[$d3],'$eight')",
- "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
- "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
- "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
- "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
- "&vxor (@x[$b0],@x[$b0],@x[$c0])",
- "&vxor (@x[$b1],@x[$b1],@x[$c1])",
- "&vxor (@x[$b2],@x[$b2],@x[$c2])",
- "&vxor (@x[$b3],@x[$b3],@x[$c3])",
- "&vrlw (@x[$b0],@x[$b0],'$seven')",
- "&vrlw (@x[$b1],@x[$b1],'$seven')",
- "&vrlw (@x[$b2],@x[$b2],'$seven')",
- "&vrlw (@x[$b3],@x[$b3],'$seven')"
- );
- }
- $code.=<<___;
- .globl .ChaCha20_ctr32_vsx_p10
- .align 5
- .ChaCha20_ctr32_vsx_p10:
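- # dispatch: inputs longer than 255 bytes take the 8-block routine,
- # shorter ones are handled by the 4xVSX code below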
- ${UCMP}i $len,255
- ble .Not_greater_than_8x
- b ChaCha20_ctr32_vsx_8x
- .Not_greater_than_8x:
- $STU $sp,-$FRAME($sp)
- mflr r0
- li r10,`15+$LOCALS+64`
- li r11,`31+$LOCALS+64`
- mfspr r12,256
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- stw r12,`$FRAME-4`($sp) # save vrsave
- li r12,-4096+63
- $PUSH r0, `$FRAME+$LRSAVE`($sp)
- mtspr 256,r12 # preserve 29 AltiVec registers
- bl Lconsts # returns pointer Lsigma in r12
- lvx_4w @K[0],0,r12 # load sigma
- addi r12,r12,0x70
- li $x10,16
- li $x20,32
- li $x30,48
- li r11,64
- lvx_4w @K[1],0,$key # load key
- lvx_4w @K[2],$x10,$key
- lvx_4w @K[3],0,$ctr # load counter
- vxor $xt0,$xt0,$xt0
- lvx_4w $xt1,r11,r12
- vspltw $CTR,@K[3],0
- vsldoi @K[3],@K[3],$xt0,4
- vsldoi @K[3],$xt0,@K[3],12 # clear @K[3].word[0]
- vadduwm $CTR,$CTR,$xt1
- be?lvsl $beperm,0,$x10 # 0x00..0f
- be?vspltisb $xt0,3 # 0x03..03
- be?vxor $beperm,$beperm,$xt0 # swap bytes within words
- li r0,10 # inner loop counter
- mtctr r0
- b Loop_outer_vsx
- .align 5
- Loop_outer_vsx:
- lvx $xa0,$x00,r12 # load [smashed] sigma
- lvx $xa1,$x10,r12
- lvx $xa2,$x20,r12
- lvx $xa3,$x30,r12
- vspltw $xb0,@K[1],0 # smash the key
- vspltw $xb1,@K[1],1
- vspltw $xb2,@K[1],2
- vspltw $xb3,@K[1],3
- vspltw $xc0,@K[2],0
- vspltw $xc1,@K[2],1
- vspltw $xc2,@K[2],2
- vspltw $xc3,@K[2],3
- vmr $xd0,$CTR # smash the counter
- vspltw $xd1,@K[3],1
- vspltw $xd2,@K[3],2
- vspltw $xd3,@K[3],3
- vspltisw $sixteen,-16 # synthesize constants
- vspltisw $twelve,12
- vspltisw $eight,8
- vspltisw $seven,7
- Loop_vsx_4x:
- ___
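- # (0,4,8,12) works the columns, (0,5,10,15) the diagonals; one pass through
- # both is a ChaCha double round, and ctr counts 10 of them (20 rounds).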
- foreach (&VSX_lane_ROUND_4x(0, 4, 8,12)) { eval; }
- foreach (&VSX_lane_ROUND_4x(0, 5,10,15)) { eval; }
- $code.=<<___;
- bdnz Loop_vsx_4x
- vadduwm $xd0,$xd0,$CTR
- vmrgew $xt0,$xa0,$xa1 # transpose data
- vmrgew $xt1,$xa2,$xa3
- vmrgow $xa0,$xa0,$xa1
- vmrgow $xa2,$xa2,$xa3
- vmrgew $xt2,$xb0,$xb1
- vmrgew $xt3,$xb2,$xb3
- vpermdi $xa1,$xa0,$xa2,0b00
- vpermdi $xa3,$xa0,$xa2,0b11
- vpermdi $xa0,$xt0,$xt1,0b00
- vpermdi $xa2,$xt0,$xt1,0b11
- vmrgow $xb0,$xb0,$xb1
- vmrgow $xb2,$xb2,$xb3
- vmrgew $xt0,$xc0,$xc1
- vmrgew $xt1,$xc2,$xc3
- vpermdi $xb1,$xb0,$xb2,0b00
- vpermdi $xb3,$xb0,$xb2,0b11
- vpermdi $xb0,$xt2,$xt3,0b00
- vpermdi $xb2,$xt2,$xt3,0b11
- vmrgow $xc0,$xc0,$xc1
- vmrgow $xc2,$xc2,$xc3
- vmrgew $xt2,$xd0,$xd1
- vmrgew $xt3,$xd2,$xd3
- vpermdi $xc1,$xc0,$xc2,0b00
- vpermdi $xc3,$xc0,$xc2,0b11
- vpermdi $xc0,$xt0,$xt1,0b00
- vpermdi $xc2,$xt0,$xt1,0b11
- vmrgow $xd0,$xd0,$xd1
- vmrgow $xd2,$xd2,$xd3
- vspltisw $xt0,4
- vadduwm $CTR,$CTR,$xt0 # next counter value
- vpermdi $xd1,$xd0,$xd2,0b00
- vpermdi $xd3,$xd0,$xd2,0b11
- vpermdi $xd0,$xt2,$xt3,0b00
- vpermdi $xd2,$xt2,$xt3,0b11
- vadduwm $xa0,$xa0,@K[0]
- vadduwm $xb0,$xb0,@K[1]
- vadduwm $xc0,$xc0,@K[2]
- vadduwm $xd0,$xd0,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx
- vadduwm $xa0,$xa1,@K[0]
- vadduwm $xb0,$xb1,@K[1]
- vadduwm $xc0,$xc1,@K[2]
- vadduwm $xd0,$xd1,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx
- vadduwm $xa0,$xa2,@K[0]
- vadduwm $xb0,$xb2,@K[1]
- vadduwm $xc0,$xc2,@K[2]
- vadduwm $xd0,$xd2,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx
- vadduwm $xa0,$xa3,@K[0]
- vadduwm $xb0,$xb3,@K[1]
- vadduwm $xc0,$xc3,@K[2]
- vadduwm $xd0,$xd3,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- mtctr r0
- bne Loop_outer_vsx
- Ldone_vsx:
- lwz r12,`$FRAME-4`($sp) # pull vrsave
- li r10,`15+$LOCALS+64`
- li r11,`31+$LOCALS+64`
- $POP r0, `$FRAME+$LRSAVE`($sp)
- mtspr 256,r12 # restore vrsave
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- mtlr r0
- addi $sp,$sp,$FRAME
- blr
- .align 4
- Ltail_vsx:
- addi r11,$sp,$LOCALS
- mtctr $len
- stvx_4w $xa0,$x00,r11 # offload block to stack
- stvx_4w $xb0,$x10,r11
- stvx_4w $xc0,$x20,r11
- stvx_4w $xd0,$x30,r11
- subi r12,r11,1 # prepare for *++ptr
- subi $inp,$inp,1
- subi $out,$out,1
- Loop_tail_vsx:
- lbzu r6,1(r12)
- lbzu r7,1($inp)
- xor r6,r6,r7
- stbu r6,1($out)
- bdnz Loop_tail_vsx
- stvx_4w $K[0],$x00,r11 # wipe copy of the block
- stvx_4w $K[0],$x10,r11
- stvx_4w $K[0],$x20,r11
- stvx_4w $K[0],$x30,r11
- b Ldone_vsx
- .long 0
- .byte 0,12,0x04,1,0x80,0,5,0
- .long 0
- .size .ChaCha20_ctr32_vsx_p10,.-.ChaCha20_ctr32_vsx_p10
- ___
- }}}
- ## This is the 8-block parallel implementation. The heart of the ChaCha
- # round uses vector instructions that can only address VSR[32+X], i.e. the
- # 32 VRs, and holding 8 blocks of state in parallel consumes all 32 of them.
- # A few register values therefore have to be staged off to the side: the
- # intermediates (rotate counts, saved state words) live in VSR[0]-VSR[31]
- # and are shuttled back into VRs as the round operations need them.
- #
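- # For example, the rotate counts 16/12/8/7 are synthesized once into
- # $xa4-$xd4 and parked in $xv9-$xv12; VSX_lane_ROUND_8x pulls them back
- # into a spare VR just before each group of vrlw instructions.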
- {{{
- #### ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
- my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
- $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3,
- $xa4,$xa5,$xa6,$xa7, $xb4,$xb5,$xb6,$xb7,
- $xc4,$xc5,$xc6,$xc7, $xd4,$xd5,$xd6,$xd7) = map("v$_",(0..31));
- my ($xcn4,$xcn5,$xcn6,$xcn7, $xdn4,$xdn5,$xdn6,$xdn7) = map("v$_",(8..15));
- my ($xan0,$xbn0,$xcn0,$xdn0) = map("v$_",(0..3));
- my @K = map("v$_",27,(24..26));
- my ($xt0,$xt1,$xt2,$xt3,$xt4) = map("v$_",23,(28..31));
- my $xr0 = "v4";
- my $CTR0 = "v22";
- my $CTR1 = "v5";
- my $beperm = "v31";
- my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
- my ($xv0,$xv1,$xv2,$xv3,$xv4,$xv5,$xv6,$xv7) = map("v$_",(0..7));
- my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("v$_",(8..17));
- my ($xv18,$xv19,$xv20,$xv21) = map("v$_",(18..21));
- my ($xv22,$xv23,$xv24,$xv25,$xv26) = map("v$_",(22..26));
- my $FRAME=$LOCALS+64+9*16; # 8*16 is for v24-v31 offload
- sub VSX_lane_ROUND_8x {
- my ($a0,$b0,$c0,$d0,$a4,$b4,$c4,$d4)=@_;
- my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
- my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
- my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
- my ($a5,$b5,$c5,$d5)=map(($_&~3)+(($_+1)&3),($a4,$b4,$c4,$d4));
- my ($a6,$b6,$c6,$d6)=map(($_&~3)+(($_+1)&3),($a5,$b5,$c5,$d5));
- my ($a7,$b7,$c7,$d7)=map(($_&~3)+(($_+1)&3),($a6,$b6,$c6,$d6));
- my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("\"v$_\"",(8..17));
- my @x=map("\"v$_\"",(0..31));
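- # @x[$c7] and @x[$a7] are spilled to $xv15/$xv13 so their VRs can hold the
- # rotate counts from $xv9-$xv12, and are restored before the quarter-round
- # steps that need their real contents.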
- (
- "&vxxlor ($xv15 ,@x[$c7],@x[$c7])", #copy v30 to v13
- "&vxxlorc (@x[$c7], $xv9,$xv9)",
- "&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
- "&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
- "&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
- "&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
- "&vadduwm (@x[$a4],@x[$a4],@x[$b4])", # Q1
- "&vadduwm (@x[$a5],@x[$a5],@x[$b5])", # Q2
- "&vadduwm (@x[$a6],@x[$a6],@x[$b6])", # Q3
- "&vadduwm (@x[$a7],@x[$a7],@x[$b7])", # Q4
- "&vxor (@x[$d0],@x[$d0],@x[$a0])",
- "&vxor (@x[$d1],@x[$d1],@x[$a1])",
- "&vxor (@x[$d2],@x[$d2],@x[$a2])",
- "&vxor (@x[$d3],@x[$d3],@x[$a3])",
- "&vxor (@x[$d4],@x[$d4],@x[$a4])",
- "&vxor (@x[$d5],@x[$d5],@x[$a5])",
- "&vxor (@x[$d6],@x[$d6],@x[$a6])",
- "&vxor (@x[$d7],@x[$d7],@x[$a7])",
- "&vrlw (@x[$d0],@x[$d0],@x[$c7])",
- "&vrlw (@x[$d1],@x[$d1],@x[$c7])",
- "&vrlw (@x[$d2],@x[$d2],@x[$c7])",
- "&vrlw (@x[$d3],@x[$d3],@x[$c7])",
- "&vrlw (@x[$d4],@x[$d4],@x[$c7])",
- "&vrlw (@x[$d5],@x[$d5],@x[$c7])",
- "&vrlw (@x[$d6],@x[$d6],@x[$c7])",
- "&vrlw (@x[$d7],@x[$d7],@x[$c7])",
- "&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
- "&vxxlorc (@x[$c7], $xv15,$xv15)",
- "&vxxlorc (@x[$a7], $xv10,$xv10)",
- "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
- "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
- "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
- "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
- "&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
- "&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
- "&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
- "&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
- "&vxor (@x[$b0],@x[$b0],@x[$c0])",
- "&vxor (@x[$b1],@x[$b1],@x[$c1])",
- "&vxor (@x[$b2],@x[$b2],@x[$c2])",
- "&vxor (@x[$b3],@x[$b3],@x[$c3])",
- "&vxor (@x[$b4],@x[$b4],@x[$c4])",
- "&vxor (@x[$b5],@x[$b5],@x[$c5])",
- "&vxor (@x[$b6],@x[$b6],@x[$c6])",
- "&vxor (@x[$b7],@x[$b7],@x[$c7])",
- "&vrlw (@x[$b0],@x[$b0],@x[$a7])",
- "&vrlw (@x[$b1],@x[$b1],@x[$a7])",
- "&vrlw (@x[$b2],@x[$b2],@x[$a7])",
- "&vrlw (@x[$b3],@x[$b3],@x[$a7])",
- "&vrlw (@x[$b4],@x[$b4],@x[$a7])",
- "&vrlw (@x[$b5],@x[$b5],@x[$a7])",
- "&vrlw (@x[$b6],@x[$b6],@x[$a7])",
- "&vrlw (@x[$b7],@x[$b7],@x[$a7])",
- "&vxxlorc (@x[$a7], $xv13,$xv13)",
- "&vxxlor ($xv15 ,@x[$c7],@x[$c7])",
- "&vxxlorc (@x[$c7], $xv11,$xv11)",
- "&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
- "&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
- "&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
- "&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
- "&vadduwm (@x[$a4],@x[$a4],@x[$b4])",
- "&vadduwm (@x[$a5],@x[$a5],@x[$b5])",
- "&vadduwm (@x[$a6],@x[$a6],@x[$b6])",
- "&vadduwm (@x[$a7],@x[$a7],@x[$b7])",
- "&vxor (@x[$d0],@x[$d0],@x[$a0])",
- "&vxor (@x[$d1],@x[$d1],@x[$a1])",
- "&vxor (@x[$d2],@x[$d2],@x[$a2])",
- "&vxor (@x[$d3],@x[$d3],@x[$a3])",
- "&vxor (@x[$d4],@x[$d4],@x[$a4])",
- "&vxor (@x[$d5],@x[$d5],@x[$a5])",
- "&vxor (@x[$d6],@x[$d6],@x[$a6])",
- "&vxor (@x[$d7],@x[$d7],@x[$a7])",
- "&vrlw (@x[$d0],@x[$d0],@x[$c7])",
- "&vrlw (@x[$d1],@x[$d1],@x[$c7])",
- "&vrlw (@x[$d2],@x[$d2],@x[$c7])",
- "&vrlw (@x[$d3],@x[$d3],@x[$c7])",
- "&vrlw (@x[$d4],@x[$d4],@x[$c7])",
- "&vrlw (@x[$d5],@x[$d5],@x[$c7])",
- "&vrlw (@x[$d6],@x[$d6],@x[$c7])",
- "&vrlw (@x[$d7],@x[$d7],@x[$c7])",
- "&vxxlorc (@x[$c7], $xv15,$xv15)",
- "&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
- "&vxxlorc (@x[$a7], $xv12,$xv12)",
- "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
- "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
- "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
- "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
- "&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
- "&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
- "&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
- "&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
- "&vxor (@x[$b0],@x[$b0],@x[$c0])",
- "&vxor (@x[$b1],@x[$b1],@x[$c1])",
- "&vxor (@x[$b2],@x[$b2],@x[$c2])",
- "&vxor (@x[$b3],@x[$b3],@x[$c3])",
- "&vxor (@x[$b4],@x[$b4],@x[$c4])",
- "&vxor (@x[$b5],@x[$b5],@x[$c5])",
- "&vxor (@x[$b6],@x[$b6],@x[$c6])",
- "&vxor (@x[$b7],@x[$b7],@x[$c7])",
- "&vrlw (@x[$b0],@x[$b0],@x[$a7])",
- "&vrlw (@x[$b1],@x[$b1],@x[$a7])",
- "&vrlw (@x[$b2],@x[$b2],@x[$a7])",
- "&vrlw (@x[$b3],@x[$b3],@x[$a7])",
- "&vrlw (@x[$b4],@x[$b4],@x[$a7])",
- "&vrlw (@x[$b5],@x[$b5],@x[$a7])",
- "&vrlw (@x[$b6],@x[$b6],@x[$a7])",
- "&vrlw (@x[$b7],@x[$b7],@x[$a7])",
- "&vxxlorc (@x[$a7], $xv13,$xv13)",
- );
- }
- $code.=<<___;
- .globl .ChaCha20_ctr32_vsx_8x
- .align 5
- .ChaCha20_ctr32_vsx_8x:
- $STU $sp,-$FRAME($sp)
- mflr r0
- li r10,`15+$LOCALS+64`
- li r11,`31+$LOCALS+64`
- mfspr r12,256
- stvx v24,r10,$sp
- addi r10,r10,32
- stvx v25,r11,$sp
- addi r11,r11,32
- stvx v26,r10,$sp
- addi r10,r10,32
- stvx v27,r11,$sp
- addi r11,r11,32
- stvx v28,r10,$sp
- addi r10,r10,32
- stvx v29,r11,$sp
- addi r11,r11,32
- stvx v30,r10,$sp
- stvx v31,r11,$sp
- stw r12,`$FRAME-4`($sp) # save vrsave
- li r12,-4096+63
- $PUSH r0, `$FRAME+$LRSAVE`($sp)
- mtspr 256,r12 # preserve 29 AltiVec registers
- bl Lconsts # returns pointer Lsigma in r12
- lvx_4w @K[0],0,r12 # load sigma
- addi r12,r12,0x70
- li $x10,16
- li $x20,32
- li $x30,48
- li r11,64
- vspltisw $xa4,-16 # synthesize constants
- vspltisw $xb4,12 # synthesize constants
- vspltisw $xc4,8 # synthesize constants
- vspltisw $xd4,7 # synthesize constants
- lvx $xa0,$x00,r12 # load [smashed] sigma
- lvx $xa1,$x10,r12
- lvx $xa2,$x20,r12
- lvx $xa3,$x30,r12
- vxxlor $xv9 ,$xa4,$xa4 #save shift val in vr9-12
- vxxlor $xv10 ,$xb4,$xb4
- vxxlor $xv11 ,$xc4,$xc4
- vxxlor $xv12 ,$xd4,$xd4
- vxxlor $xv22 ,$xa0,$xa0 #save sigma in vr22-25
- vxxlor $xv23 ,$xa1,$xa1
- vxxlor $xv24 ,$xa2,$xa2
- vxxlor $xv25 ,$xa3,$xa3
- lvx_4w @K[1],0,$key # load key
- lvx_4w @K[2],$x10,$key
- lvx_4w @K[3],0,$ctr # load counter
- vspltisw $xt3,4
- vxor $xt2,$xt2,$xt2
- lvx_4w $xt1,r11,r12
- vspltw $xa2,@K[3],0 #save the original count after spltw
- vsldoi @K[3],@K[3],$xt2,4
- vsldoi @K[3],$xt2,@K[3],12 # clear @K[3].word[0]
- vadduwm $xt1,$xa2,$xt1
- vadduwm $xt3,$xt1,$xt3 # next counter value
- vspltw $xa0,@K[2],2 # splat @K[2] word 2; stashed in $xv8 below
- be?lvsl $beperm,0,$x10 # 0x00..0f
- be?vspltisb $xt0,3 # 0x03..03
- be?vxor $beperm,$beperm,$xt0 # swap bytes within words
- be?vxxlor $xv26 ,$beperm,$beperm
- vxxlor $xv0 ,@K[0],@K[0] # K0,k1,k2 to vr0,1,2
- vxxlor $xv1 ,@K[1],@K[1]
- vxxlor $xv2 ,@K[2],@K[2]
- vxxlor $xv3 ,@K[3],@K[3]
- vxxlor $xv4 ,$xt1,$xt1 #CTR ->4, CTR+4-> 5
- vxxlor $xv5 ,$xt3,$xt3
- vxxlor $xv8 ,$xa0,$xa0
- li r0,10 # inner loop counter
- mtctr r0
- b Loop_outer_vsx_8x
- .align 5
- Loop_outer_vsx_8x:
- vxxlorc $xa0,$xv22,$xv22 # load [smashed] sigma
- vxxlorc $xa1,$xv23,$xv23
- vxxlorc $xa2,$xv24,$xv24
- vxxlorc $xa3,$xv25,$xv25
- vxxlorc $xa4,$xv22,$xv22
- vxxlorc $xa5,$xv23,$xv23
- vxxlorc $xa6,$xv24,$xv24
- vxxlorc $xa7,$xv25,$xv25
- vspltw $xb0,@K[1],0 # smash the key
- vspltw $xb1,@K[1],1
- vspltw $xb2,@K[1],2
- vspltw $xb3,@K[1],3
- vspltw $xb4,@K[1],0 # smash the key
- vspltw $xb5,@K[1],1
- vspltw $xb6,@K[1],2
- vspltw $xb7,@K[1],3
- vspltw $xc0,@K[2],0
- vspltw $xc1,@K[2],1
- vspltw $xc2,@K[2],2
- vspltw $xc3,@K[2],3
- vspltw $xc4,@K[2],0
- vspltw $xc7,@K[2],3
- vspltw $xc5,@K[2],1
- vxxlorc $xd0,$xv4,$xv4 # smash the counter
- vspltw $xd1,@K[3],1
- vspltw $xd2,@K[3],2
- vspltw $xd3,@K[3],3
- vxxlorc $xd4,$xv5,$xv5 # smash the counter
- vspltw $xd5,@K[3],1
- vspltw $xd6,@K[3],2
- vspltw $xd7,@K[3],3
- vxxlorc $xc6,$xv8,$xv8 # splatted @K[2] word 2 saved in $xv8; v26 holds @K[3], so this restore waits until the K[3] splats above are done
- Loop_vsx_8x:
- ___
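- # column round for both 4-block groups, then the diagonal round; ctr runs
- # 10 such double rounds (20 ChaCha rounds) per outer iteration.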
- foreach (&VSX_lane_ROUND_8x(0,4, 8,12,16,20,24,28)) { eval; }
- foreach (&VSX_lane_ROUND_8x(0,5,10,15,16,21,26,31)) { eval; }
- $code.=<<___;
- bdnz Loop_vsx_8x
- vxxlor $xv13 ,$xd4,$xd4 # save the register vr24-31
- vxxlor $xv14 ,$xd5,$xd5 #
- vxxlor $xv15 ,$xd6,$xd6 #
- vxxlor $xv16 ,$xd7,$xd7 #
- vxxlor $xv18 ,$xc4,$xc4 #
- vxxlor $xv19 ,$xc5,$xc5 #
- vxxlor $xv20 ,$xc6,$xc6 #
- vxxlor $xv21 ,$xc7,$xc7 #
- vxxlor $xv6 ,$xb6,$xb6 # save v22 ($xb6)
- vxxlor $xv7 ,$xb7,$xb7 # save v23 ($xb7), freeing regs for blocks 4-7
- be?vxxlorc $beperm,$xv26,$xv26 # copy back the beperm.
- vxxlorc @K[0],$xv0,$xv0 #27
- vxxlorc @K[1],$xv1,$xv1 #24
- vxxlorc @K[2],$xv2,$xv2 #25
- vxxlorc @K[3],$xv3,$xv3 #26
- vxxlorc $CTR0,$xv4,$xv4
- ###changing to vertical
- vmrgew $xt0,$xa0,$xa1 # transpose data
- vmrgew $xt1,$xa2,$xa3
- vmrgow $xa0,$xa0,$xa1
- vmrgow $xa2,$xa2,$xa3
- vmrgew $xt2,$xb0,$xb1
- vmrgew $xt3,$xb2,$xb3
- vmrgow $xb0,$xb0,$xb1
- vmrgow $xb2,$xb2,$xb3
- vadduwm $xd0,$xd0,$CTR0
- vpermdi $xa1,$xa0,$xa2,0b00
- vpermdi $xa3,$xa0,$xa2,0b11
- vpermdi $xa0,$xt0,$xt1,0b00
- vpermdi $xa2,$xt0,$xt1,0b11
- vpermdi $xb1,$xb0,$xb2,0b00
- vpermdi $xb3,$xb0,$xb2,0b11
- vpermdi $xb0,$xt2,$xt3,0b00
- vpermdi $xb2,$xt2,$xt3,0b11
- vmrgew $xt0,$xc0,$xc1
- vmrgew $xt1,$xc2,$xc3
- vmrgow $xc0,$xc0,$xc1
- vmrgow $xc2,$xc2,$xc3
- vmrgew $xt2,$xd0,$xd1
- vmrgew $xt3,$xd2,$xd3
- vmrgow $xd0,$xd0,$xd1
- vmrgow $xd2,$xd2,$xd3
- vpermdi $xc1,$xc0,$xc2,0b00
- vpermdi $xc3,$xc0,$xc2,0b11
- vpermdi $xc0,$xt0,$xt1,0b00
- vpermdi $xc2,$xt0,$xt1,0b11
- vpermdi $xd1,$xd0,$xd2,0b00
- vpermdi $xd3,$xd0,$xd2,0b11
- vpermdi $xd0,$xt2,$xt3,0b00
- vpermdi $xd2,$xt2,$xt3,0b11
- vspltisw $xt0,8
- vadduwm $CTR0,$CTR0,$xt0 # next counter value
- vxxlor $xv4 ,$CTR0,$CTR0 # save advanced CTR0 back to $xv4
- vadduwm $xa0,$xa0,@K[0]
- vadduwm $xb0,$xb0,@K[1]
- vadduwm $xc0,$xc0,@K[2]
- vadduwm $xd0,$xd0,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xa0,$xa1,@K[0]
- vadduwm $xb0,$xb1,@K[1]
- vadduwm $xc0,$xc1,@K[2]
- vadduwm $xd0,$xd1,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xa0,$xa2,@K[0]
- vadduwm $xb0,$xb2,@K[1]
- vadduwm $xc0,$xc2,@K[2]
- vadduwm $xd0,$xd2,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xa0,$xa3,@K[0]
- vadduwm $xb0,$xb3,@K[1]
- vadduwm $xc0,$xc3,@K[2]
- vadduwm $xd0,$xd3,@K[3]
- be?vperm $xa0,$xa0,$xa0,$beperm
- be?vperm $xb0,$xb0,$xb0,$beperm
- be?vperm $xc0,$xc0,$xc0,$beperm
- be?vperm $xd0,$xd0,$xd0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x
- lvx_4w $xt0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xt0,$xt0,$xa0
- vxor $xt1,$xt1,$xb0
- vxor $xt2,$xt2,$xc0
- vxor $xt3,$xt3,$xd0
- stvx_4w $xt0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- # Blocks 4-7: the same logic as above is reused. $xa4-$xb7 keep their
- # values; $xc4-$xd7 are reloaded into v8-v15 (the $xcn4-$xdn7 aliases), so
- # v24-v31 can be reused. v0-v3 hold temporaries and v4 ($xr0) takes the
- # role of $xt0.
- vxxlorc $CTR1 ,$xv5,$xv5
- vxxlorc $xcn4 ,$xv18,$xv18
- vxxlorc $xcn5 ,$xv19,$xv19
- vxxlorc $xcn6 ,$xv20,$xv20
- vxxlorc $xcn7 ,$xv21,$xv21
- vxxlorc $xdn4 ,$xv13,$xv13
- vxxlorc $xdn5 ,$xv14,$xv14
- vxxlorc $xdn6 ,$xv15,$xv15
- vxxlorc $xdn7 ,$xv16,$xv16
- vadduwm $xdn4,$xdn4,$CTR1
- vxxlorc $xb6 ,$xv6,$xv6
- vxxlorc $xb7 ,$xv7,$xv7
- #use xa1->xr0, as xt0...in the block 4-7
- vmrgew $xr0,$xa4,$xa5 # transpose data
- vmrgew $xt1,$xa6,$xa7
- vmrgow $xa4,$xa4,$xa5
- vmrgow $xa6,$xa6,$xa7
- vmrgew $xt2,$xb4,$xb5
- vmrgew $xt3,$xb6,$xb7
- vmrgow $xb4,$xb4,$xb5
- vmrgow $xb6,$xb6,$xb7
- vpermdi $xa5,$xa4,$xa6,0b00
- vpermdi $xa7,$xa4,$xa6,0b11
- vpermdi $xa4,$xr0,$xt1,0b00
- vpermdi $xa6,$xr0,$xt1,0b11
- vpermdi $xb5,$xb4,$xb6,0b00
- vpermdi $xb7,$xb4,$xb6,0b11
- vpermdi $xb4,$xt2,$xt3,0b00
- vpermdi $xb6,$xt2,$xt3,0b11
- vmrgew $xr0,$xcn4,$xcn5
- vmrgew $xt1,$xcn6,$xcn7
- vmrgow $xcn4,$xcn4,$xcn5
- vmrgow $xcn6,$xcn6,$xcn7
- vmrgew $xt2,$xdn4,$xdn5
- vmrgew $xt3,$xdn6,$xdn7
- vmrgow $xdn4,$xdn4,$xdn5
- vmrgow $xdn6,$xdn6,$xdn7
- vpermdi $xcn5,$xcn4,$xcn6,0b00
- vpermdi $xcn7,$xcn4,$xcn6,0b11
- vpermdi $xcn4,$xr0,$xt1,0b00
- vpermdi $xcn6,$xr0,$xt1,0b11
- vpermdi $xdn5,$xdn4,$xdn6,0b00
- vpermdi $xdn7,$xdn4,$xdn6,0b11
- vpermdi $xdn4,$xt2,$xt3,0b00
- vpermdi $xdn6,$xt2,$xt3,0b11
- vspltisw $xr0,8
- vadduwm $CTR1,$CTR1,$xr0 # next counter value
- vxxlor $xv5 ,$CTR1,$CTR1 # save advanced CTR1 back to $xv5
- vadduwm $xan0,$xa4,@K[0]
- vadduwm $xbn0,$xb4,@K[1]
- vadduwm $xcn0,$xcn4,@K[2]
- vadduwm $xdn0,$xdn4,@K[3]
- be?vperm $xan0,$xa4,$xa4,$beperm
- be?vperm $xbn0,$xb4,$xb4,$beperm
- be?vperm $xcn0,$xcn4,$xcn4,$beperm
- be?vperm $xdn0,$xdn4,$xdn4,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x_1
- lvx_4w $xr0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xr0,$xr0,$xan0
- vxor $xt1,$xt1,$xbn0
- vxor $xt2,$xt2,$xcn0
- vxor $xt3,$xt3,$xdn0
- stvx_4w $xr0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xan0,$xa5,@K[0]
- vadduwm $xbn0,$xb5,@K[1]
- vadduwm $xcn0,$xcn5,@K[2]
- vadduwm $xdn0,$xdn5,@K[3]
- be?vperm $xan0,$xan0,$xan0,$beperm
- be?vperm $xbn0,$xbn0,$xbn0,$beperm
- be?vperm $xcn0,$xcn0,$xcn0,$beperm
- be?vperm $xdn0,$xdn0,$xdn0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x_1
- lvx_4w $xr0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xr0,$xr0,$xan0
- vxor $xt1,$xt1,$xbn0
- vxor $xt2,$xt2,$xcn0
- vxor $xt3,$xt3,$xdn0
- stvx_4w $xr0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xan0,$xa6,@K[0]
- vadduwm $xbn0,$xb6,@K[1]
- vadduwm $xcn0,$xcn6,@K[2]
- vadduwm $xdn0,$xdn6,@K[3]
- be?vperm $xan0,$xan0,$xan0,$beperm
- be?vperm $xbn0,$xbn0,$xbn0,$beperm
- be?vperm $xcn0,$xcn0,$xcn0,$beperm
- be?vperm $xdn0,$xdn0,$xdn0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x_1
- lvx_4w $xr0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xr0,$xr0,$xan0
- vxor $xt1,$xt1,$xbn0
- vxor $xt2,$xt2,$xcn0
- vxor $xt3,$xt3,$xdn0
- stvx_4w $xr0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- vadduwm $xan0,$xa7,@K[0]
- vadduwm $xbn0,$xb7,@K[1]
- vadduwm $xcn0,$xcn7,@K[2]
- vadduwm $xdn0,$xdn7,@K[3]
- be?vperm $xan0,$xan0,$xan0,$beperm
- be?vperm $xbn0,$xbn0,$xbn0,$beperm
- be?vperm $xcn0,$xcn0,$xcn0,$beperm
- be?vperm $xdn0,$xdn0,$xdn0,$beperm
- ${UCMP}i $len,0x40
- blt Ltail_vsx_8x_1
- lvx_4w $xr0,$x00,$inp
- lvx_4w $xt1,$x10,$inp
- lvx_4w $xt2,$x20,$inp
- lvx_4w $xt3,$x30,$inp
- vxor $xr0,$xr0,$xan0
- vxor $xt1,$xt1,$xbn0
- vxor $xt2,$xt2,$xcn0
- vxor $xt3,$xt3,$xdn0
- stvx_4w $xr0,$x00,$out
- stvx_4w $xt1,$x10,$out
- addi $inp,$inp,0x40
- stvx_4w $xt2,$x20,$out
- subi $len,$len,0x40
- stvx_4w $xt3,$x30,$out
- addi $out,$out,0x40
- beq Ldone_vsx_8x
- mtctr r0
- bne Loop_outer_vsx_8x
- Ldone_vsx_8x:
- lwz r12,`$FRAME-4`($sp) # pull vrsave
- li r10,`15+$LOCALS+64`
- li r11,`31+$LOCALS+64`
- $POP r0, `$FRAME+$LRSAVE`($sp)
- mtspr 256,r12 # restore vrsave
- lvx v24,r10,$sp
- addi r10,r10,32
- lvx v25,r11,$sp
- addi r11,r11,32
- lvx v26,r10,$sp
- addi r10,r10,32
- lvx v27,r11,$sp
- addi r11,r11,32
- lvx v28,r10,$sp
- addi r10,r10,32
- lvx v29,r11,$sp
- addi r11,r11,32
- lvx v30,r10,$sp
- lvx v31,r11,$sp
- mtlr r0
- addi $sp,$sp,$FRAME
- blr
- .align 4
- Ltail_vsx_8x:
- addi r11,$sp,$LOCALS
- mtctr $len
- stvx_4w $xa0,$x00,r11 # offload block to stack
- stvx_4w $xb0,$x10,r11
- stvx_4w $xc0,$x20,r11
- stvx_4w $xd0,$x30,r11
- subi r12,r11,1 # prepare for *++ptr
- subi $inp,$inp,1
- subi $out,$out,1
- bl Loop_tail_vsx_8x
- Ltail_vsx_8x_1:
- addi r11,$sp,$LOCALS
- mtctr $len
- stvx_4w $xan0,$x00,r11 # offload block to stack
- stvx_4w $xbn0,$x10,r11
- stvx_4w $xcn0,$x20,r11
- stvx_4w $xdn0,$x30,r11
- subi r12,r11,1 # prepare for *++ptr
- subi $inp,$inp,1
- subi $out,$out,1
- bl Loop_tail_vsx_8x
- Loop_tail_vsx_8x:
- lbzu r6,1(r12)
- lbzu r7,1($inp)
- xor r6,r6,r7
- stbu r6,1($out)
- bdnz Loop_tail_vsx_8x
- stvx_4w $K[0],$x00,r11 # wipe copy of the block
- stvx_4w $K[0],$x10,r11
- stvx_4w $K[0],$x20,r11
- stvx_4w $K[0],$x30,r11
- b Ldone_vsx_8x
- .long 0
- .byte 0,12,0x04,1,0x80,0,5,0
- .long 0
- .size .ChaCha20_ctr32_vsx_8x,.-.ChaCha20_ctr32_vsx_8x
- ___
- }}}
- $code.=<<___;
- .align 5
- Lconsts:
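- # compute the address of Lsigma PC-relatively: bcl loads LR with the
- # address of the following mflr, and Lsigma is found 56 (64-8) bytes past it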
- mflr r0
- bcl 20,31,\$+4
- mflr r12 #vvvvv "distance between . and Lsigma
- addi r12,r12,`64-8`
- mtlr r0
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
- .space `64-9*4`
- Lsigma:
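- # "expand 32-byte k" constant, followed by block-counter addends 1..4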
- .long 0x61707865,0x3320646e,0x79622d32,0x6b206574
- .long 1,0,0,0
- .long 2,0,0,0
- .long 3,0,0,0
- .long 4,0,0,0
- ___
- $code.=<<___ if ($LITTLE_ENDIAN);
- .long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
- .long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
- ___
- $code.=<<___ if (!$LITTLE_ENDIAN); # flipped words
- .long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
- .long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
- ___
- $code.=<<___;
- .long 0x61707865,0x61707865,0x61707865,0x61707865
- .long 0x3320646e,0x3320646e,0x3320646e,0x3320646e
- .long 0x79622d32,0x79622d32,0x79622d32,0x79622d32
- .long 0x6b206574,0x6b206574,0x6b206574,0x6b206574
- .long 0,1,2,3
- .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c
- .asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
- .align 2
- ___
- foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/ge;
- # instructions prefixed with '?' are endian-specific and need
- # to be adjusted accordingly...
- if ($flavour !~ /le$/) { # big-endian
- s/be\?// or
- s/le\?/#le#/ or
- s/\?lvsr/lvsl/ or
- s/\?lvsl/lvsr/ or
- s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
- s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
- } else { # little-endian
- s/le\?// or
- s/be\?/#be#/ or
- s/\?([a-z]+)/$1/ or
- s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
- }
- print $_,"\n";
- }
- close STDOUT or die "error closing STDOUT: $!";