chachap10-ppc.pl
  1. #! /usr/bin/env perl
  2. # Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License 2.0 (the "License"). You may not use
  5. # this file except in compliance with the License. You can obtain a copy
  6. # in the file LICENSE in the source distribution or at
  7. # https://www.openssl.org/source/license.html
  8. #
  9. # ====================================================================
  10. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  11. # project. The module is, however, dual licensed under OpenSSL and
  12. # CRYPTOGAMS licenses depending on where you obtain it. For further
  13. # details see http://www.openssl.org/~appro/cryptogams/.
  14. # ====================================================================
  15. #
  16. # October 2015
  17. #
  18. # ChaCha20 for PowerPC/AltiVec.
  19. #
  20. # June 2018
  21. #
  22. # Add VSX 2.07 code path. Original 3xAltiVec+1xIALU is well-suited for
  23. # processors that can't issue more than one vector instruction per
  24. # cycle. But POWER8 (and POWER9) can issue a pair, and vector-only 4x
  25. # interleave would perform better. Incidentally PowerISA 2.07 (first
  26. # implemented by POWER8) defined new usable instructions, hence 4xVSX
  27. # code path...
  28. #
  29. # Performance in cycles per byte out of large buffer.
  30. #
  31. #                     IALU/gcc-4.x    3xAltiVec+1xIALU    4xVSX
  32. #
  33. # Freescale e300      13.6/+115%      -                   -
  34. # PPC74x0/G4e         6.81/+310%      3.81                -
  35. # PPC970/G5           9.29/+160%      ?                   -
  36. # POWER7              8.62/+61%       3.35                -
  37. # POWER8              8.70/+51%       2.91                2.09
  38. # POWER9              8.80/+29%       4.44(*)             2.45(**)
  39. #
  40. # (*)  this is a trade-off result; it is possible to improve it, but
  41. #      then it would negatively affect all others;
  42. # (**) POWER9 seems to be "allergic" to mixing vector and integer
  43. #      instructions, which is why the switch to vector-only code pays
  44. #      off that much;
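# For orientation: a minimal scalar sketch of the ChaCha quarter-round that the
# vector paths below implement lane-wise (rotations 16/12/8/7, matching the vrlw
# constants). It is illustrative only, is never called, and its name is made up
# for this sketch.
sub __chacha_quarter_round_ref {
    my ($a,$b,$c,$d) = @_;              # four 32-bit state words
    my $rotl = sub { my ($x,$n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff };
    $a = ($a + $b) & 0xffffffff; $d ^= $a; $d = $rotl->($d, 16);
    $c = ($c + $d) & 0xffffffff; $b ^= $c; $b = $rotl->($b, 12);
    $a = ($a + $b) & 0xffffffff; $d ^= $a; $d = $rotl->($d, 8);
    $c = ($c + $d) & 0xffffffff; $b ^= $c; $b = $rotl->($b, 7);
    return ($a, $b, $c, $d);
}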
  45. # $output is the last argument if it looks like a file (it has an extension)
  46. # $flavour is the first argument if it doesn't look like a file
  47. $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
  48. $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
  49. if ($flavour =~ /64/) {
  50. $SIZE_T =8;
  51. $LRSAVE =2*$SIZE_T;
  52. $STU ="stdu";
  53. $POP ="ld";
  54. $PUSH ="std";
  55. $UCMP ="cmpld";
  56. } elsif ($flavour =~ /32/) {
  57. $SIZE_T =4;
  58. $LRSAVE =$SIZE_T;
  59. $STU ="stwu";
  60. $POP ="lwz";
  61. $PUSH ="stw";
  62. $UCMP ="cmplw";
  63. } else { die "nonsense $flavour"; }
  64. $LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
  65. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  66. ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
  67. ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
  68. die "can't locate ppc-xlate.pl";
  69. open STDOUT,"| $^X $xlate $flavour \"$output\""
  70. or die "can't call $xlate: $!";
  71. $LOCALS=6*$SIZE_T;
  72. $FRAME=$LOCALS+64+18*$SIZE_T; # 64 is for local variables
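# Any instruction sub that is not defined anywhere (e.g. &vadduwm, &vxor as called
# from the ROUND generators below) is caught by the AUTOLOAD thunk that follows
# and emitted verbatim into $code, with '_' in the name mapped to '.'.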
  73. sub AUTOLOAD() # thunk [simplified] x86-style perlasm
  74. { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  75. $code .= "\t$opcode\t".join(',',@_)."\n";
  76. }
  77. my $sp = "r1";
  78. my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
  79. {{{
  80. my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  81. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
  82. my @K = map("v$_",(16..19));
  83. my $CTR = "v26";
  84. my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
  85. my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
  86. my $beperm = "v31";
  87. my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
  88. my $FRAME=$LOCALS+64+7*16; # 7*16 is for v26-v31 offload
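# VSX_lane_ROUND_4x returns the instruction strings for one ChaCha quarter-round
# applied to four blocks at once: each of v0-v15 holds one state word of all four
# blocks, the index set (0,4,8,12) selects the column round, (0,5,10,15) the
# diagonal round, and the other three lanes follow by stepping the column index
# within each row of the 4x4 state.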
  89. sub VSX_lane_ROUND_4x {
  90. my ($a0,$b0,$c0,$d0)=@_;
  91. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  92. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  93. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  94. my @x=map("\"v$_\"",(0..15));
  95. (
  96. "&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
  97. "&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
  98. "&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
  99. "&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
  100. "&vxor (@x[$d0],@x[$d0],@x[$a0])",
  101. "&vxor (@x[$d1],@x[$d1],@x[$a1])",
  102. "&vxor (@x[$d2],@x[$d2],@x[$a2])",
  103. "&vxor (@x[$d3],@x[$d3],@x[$a3])",
  104. "&vrlw (@x[$d0],@x[$d0],'$sixteen')",
  105. "&vrlw (@x[$d1],@x[$d1],'$sixteen')",
  106. "&vrlw (@x[$d2],@x[$d2],'$sixteen')",
  107. "&vrlw (@x[$d3],@x[$d3],'$sixteen')",
  108. "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
  109. "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
  110. "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
  111. "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
  112. "&vxor (@x[$b0],@x[$b0],@x[$c0])",
  113. "&vxor (@x[$b1],@x[$b1],@x[$c1])",
  114. "&vxor (@x[$b2],@x[$b2],@x[$c2])",
  115. "&vxor (@x[$b3],@x[$b3],@x[$c3])",
  116. "&vrlw (@x[$b0],@x[$b0],'$twelve')",
  117. "&vrlw (@x[$b1],@x[$b1],'$twelve')",
  118. "&vrlw (@x[$b2],@x[$b2],'$twelve')",
  119. "&vrlw (@x[$b3],@x[$b3],'$twelve')",
  120. "&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
  121. "&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
  122. "&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
  123. "&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
  124. "&vxor (@x[$d0],@x[$d0],@x[$a0])",
  125. "&vxor (@x[$d1],@x[$d1],@x[$a1])",
  126. "&vxor (@x[$d2],@x[$d2],@x[$a2])",
  127. "&vxor (@x[$d3],@x[$d3],@x[$a3])",
  128. "&vrlw (@x[$d0],@x[$d0],'$eight')",
  129. "&vrlw (@x[$d1],@x[$d1],'$eight')",
  130. "&vrlw (@x[$d2],@x[$d2],'$eight')",
  131. "&vrlw (@x[$d3],@x[$d3],'$eight')",
  132. "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
  133. "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
  134. "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
  135. "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
  136. "&vxor (@x[$b0],@x[$b0],@x[$c0])",
  137. "&vxor (@x[$b1],@x[$b1],@x[$c1])",
  138. "&vxor (@x[$b2],@x[$b2],@x[$c2])",
  139. "&vxor (@x[$b3],@x[$b3],@x[$c3])",
  140. "&vrlw (@x[$b0],@x[$b0],'$seven')",
  141. "&vrlw (@x[$b1],@x[$b1],'$seven')",
  142. "&vrlw (@x[$b2],@x[$b2],'$seven')",
  143. "&vrlw (@x[$b3],@x[$b3],'$seven')"
  144. );
  145. }
  146. $code.=<<___;
  147. .globl .ChaCha20_ctr32_vsx_p10
  148. .align 5
  149. .ChaCha20_ctr32_vsx_p10:
  150. ${UCMP}i $len,255
  151. ble .Not_greater_than_8x
  152. b ChaCha20_ctr32_vsx_8x
  153. .Not_greater_than_8x:
  154. $STU $sp,-$FRAME($sp)
  155. mflr r0
  156. li r10,`15+$LOCALS+64`
  157. li r11,`31+$LOCALS+64`
  158. mfspr r12,256
  159. stvx v26,r10,$sp
  160. addi r10,r10,32
  161. stvx v27,r11,$sp
  162. addi r11,r11,32
  163. stvx v28,r10,$sp
  164. addi r10,r10,32
  165. stvx v29,r11,$sp
  166. addi r11,r11,32
  167. stvx v30,r10,$sp
  168. stvx v31,r11,$sp
  169. stw r12,`$FRAME-4`($sp) # save vrsave
  170. li r12,-4096+63
  171. $PUSH r0, `$FRAME+$LRSAVE`($sp)
  172. mtspr 256,r12 # preserve 29 AltiVec registers
  173. bl Lconsts # returns pointer Lsigma in r12
  174. lvx_4w @K[0],0,r12 # load sigma
  175. addi r12,r12,0x70
  176. li $x10,16
  177. li $x20,32
  178. li $x30,48
  179. li r11,64
  180. lvx_4w @K[1],0,$key # load key
  181. lvx_4w @K[2],$x10,$key
  182. lvx_4w @K[3],0,$ctr # load counter
  183. vxor $xt0,$xt0,$xt0
  184. lvx_4w $xt1,r11,r12
  185. vspltw $CTR,@K[3],0
  186. vsldoi @K[3],@K[3],$xt0,4
  187. vsldoi @K[3],$xt0,@K[3],12 # clear @K[3].word[0]
  188. vadduwm $CTR,$CTR,$xt1
  189. be?lvsl $beperm,0,$x10 # 0x00..0f
  190. be?vspltisb $xt0,3 # 0x03..03
  191. be?vxor $beperm,$beperm,$xt0 # swap bytes within words
  192. li r0,10 # inner loop counter
  193. mtctr r0
  194. b Loop_outer_vsx
  195. .align 5
  196. Loop_outer_vsx:
  197. lvx $xa0,$x00,r12 # load [smashed] sigma
  198. lvx $xa1,$x10,r12
  199. lvx $xa2,$x20,r12
  200. lvx $xa3,$x30,r12
  201. vspltw $xb0,@K[1],0 # smash the key
  202. vspltw $xb1,@K[1],1
  203. vspltw $xb2,@K[1],2
  204. vspltw $xb3,@K[1],3
  205. vspltw $xc0,@K[2],0
  206. vspltw $xc1,@K[2],1
  207. vspltw $xc2,@K[2],2
  208. vspltw $xc3,@K[2],3
  209. vmr $xd0,$CTR # smash the counter
  210. vspltw $xd1,@K[3],1
  211. vspltw $xd2,@K[3],2
  212. vspltw $xd3,@K[3],3
  213. vspltisw $sixteen,-16 # synthesize constants
  214. vspltisw $twelve,12
  215. vspltisw $eight,8
  216. vspltisw $seven,7
  217. Loop_vsx_4x:
  218. ___
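# The quarter-round strings produced by VSX_lane_ROUND_4x are eval'ed by the two
# foreach loops below; the instruction subs they call reach the AUTOLOAD thunk
# above and are appended to $code. Each pass of Loop_vsx_4x is one column round
# plus one diagonal round, and CTR (loaded with 10) makes that 20 ChaCha rounds.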
  219. foreach (&VSX_lane_ROUND_4x(0, 4, 8,12)) { eval; }
  220. foreach (&VSX_lane_ROUND_4x(0, 5,10,15)) { eval; }
  221. $code.=<<___;
  222. bdnz Loop_vsx_4x
  223. vadduwm $xd0,$xd0,$CTR
  224. vmrgew $xt0,$xa0,$xa1 # transpose data
  225. vmrgew $xt1,$xa2,$xa3
  226. vmrgow $xa0,$xa0,$xa1
  227. vmrgow $xa2,$xa2,$xa3
  228. vmrgew $xt2,$xb0,$xb1
  229. vmrgew $xt3,$xb2,$xb3
  230. vpermdi $xa1,$xa0,$xa2,0b00
  231. vpermdi $xa3,$xa0,$xa2,0b11
  232. vpermdi $xa0,$xt0,$xt1,0b00
  233. vpermdi $xa2,$xt0,$xt1,0b11
  234. vmrgow $xb0,$xb0,$xb1
  235. vmrgow $xb2,$xb2,$xb3
  236. vmrgew $xt0,$xc0,$xc1
  237. vmrgew $xt1,$xc2,$xc3
  238. vpermdi $xb1,$xb0,$xb2,0b00
  239. vpermdi $xb3,$xb0,$xb2,0b11
  240. vpermdi $xb0,$xt2,$xt3,0b00
  241. vpermdi $xb2,$xt2,$xt3,0b11
  242. vmrgow $xc0,$xc0,$xc1
  243. vmrgow $xc2,$xc2,$xc3
  244. vmrgew $xt2,$xd0,$xd1
  245. vmrgew $xt3,$xd2,$xd3
  246. vpermdi $xc1,$xc0,$xc2,0b00
  247. vpermdi $xc3,$xc0,$xc2,0b11
  248. vpermdi $xc0,$xt0,$xt1,0b00
  249. vpermdi $xc2,$xt0,$xt1,0b11
  250. vmrgow $xd0,$xd0,$xd1
  251. vmrgow $xd2,$xd2,$xd3
  252. vspltisw $xt0,4
  253. vadduwm $CTR,$CTR,$xt0 # next counter value
  254. vpermdi $xd1,$xd0,$xd2,0b00
  255. vpermdi $xd3,$xd0,$xd2,0b11
  256. vpermdi $xd0,$xt2,$xt3,0b00
  257. vpermdi $xd2,$xt2,$xt3,0b11
  258. vadduwm $xa0,$xa0,@K[0]
  259. vadduwm $xb0,$xb0,@K[1]
  260. vadduwm $xc0,$xc0,@K[2]
  261. vadduwm $xd0,$xd0,@K[3]
  262. be?vperm $xa0,$xa0,$xa0,$beperm
  263. be?vperm $xb0,$xb0,$xb0,$beperm
  264. be?vperm $xc0,$xc0,$xc0,$beperm
  265. be?vperm $xd0,$xd0,$xd0,$beperm
  266. ${UCMP}i $len,0x40
  267. blt Ltail_vsx
  268. lvx_4w $xt0,$x00,$inp
  269. lvx_4w $xt1,$x10,$inp
  270. lvx_4w $xt2,$x20,$inp
  271. lvx_4w $xt3,$x30,$inp
  272. vxor $xt0,$xt0,$xa0
  273. vxor $xt1,$xt1,$xb0
  274. vxor $xt2,$xt2,$xc0
  275. vxor $xt3,$xt3,$xd0
  276. stvx_4w $xt0,$x00,$out
  277. stvx_4w $xt1,$x10,$out
  278. addi $inp,$inp,0x40
  279. stvx_4w $xt2,$x20,$out
  280. subi $len,$len,0x40
  281. stvx_4w $xt3,$x30,$out
  282. addi $out,$out,0x40
  283. beq Ldone_vsx
  284. vadduwm $xa0,$xa1,@K[0]
  285. vadduwm $xb0,$xb1,@K[1]
  286. vadduwm $xc0,$xc1,@K[2]
  287. vadduwm $xd0,$xd1,@K[3]
  288. be?vperm $xa0,$xa0,$xa0,$beperm
  289. be?vperm $xb0,$xb0,$xb0,$beperm
  290. be?vperm $xc0,$xc0,$xc0,$beperm
  291. be?vperm $xd0,$xd0,$xd0,$beperm
  292. ${UCMP}i $len,0x40
  293. blt Ltail_vsx
  294. lvx_4w $xt0,$x00,$inp
  295. lvx_4w $xt1,$x10,$inp
  296. lvx_4w $xt2,$x20,$inp
  297. lvx_4w $xt3,$x30,$inp
  298. vxor $xt0,$xt0,$xa0
  299. vxor $xt1,$xt1,$xb0
  300. vxor $xt2,$xt2,$xc0
  301. vxor $xt3,$xt3,$xd0
  302. stvx_4w $xt0,$x00,$out
  303. stvx_4w $xt1,$x10,$out
  304. addi $inp,$inp,0x40
  305. stvx_4w $xt2,$x20,$out
  306. subi $len,$len,0x40
  307. stvx_4w $xt3,$x30,$out
  308. addi $out,$out,0x40
  309. beq Ldone_vsx
  310. vadduwm $xa0,$xa2,@K[0]
  311. vadduwm $xb0,$xb2,@K[1]
  312. vadduwm $xc0,$xc2,@K[2]
  313. vadduwm $xd0,$xd2,@K[3]
  314. be?vperm $xa0,$xa0,$xa0,$beperm
  315. be?vperm $xb0,$xb0,$xb0,$beperm
  316. be?vperm $xc0,$xc0,$xc0,$beperm
  317. be?vperm $xd0,$xd0,$xd0,$beperm
  318. ${UCMP}i $len,0x40
  319. blt Ltail_vsx
  320. lvx_4w $xt0,$x00,$inp
  321. lvx_4w $xt1,$x10,$inp
  322. lvx_4w $xt2,$x20,$inp
  323. lvx_4w $xt3,$x30,$inp
  324. vxor $xt0,$xt0,$xa0
  325. vxor $xt1,$xt1,$xb0
  326. vxor $xt2,$xt2,$xc0
  327. vxor $xt3,$xt3,$xd0
  328. stvx_4w $xt0,$x00,$out
  329. stvx_4w $xt1,$x10,$out
  330. addi $inp,$inp,0x40
  331. stvx_4w $xt2,$x20,$out
  332. subi $len,$len,0x40
  333. stvx_4w $xt3,$x30,$out
  334. addi $out,$out,0x40
  335. beq Ldone_vsx
  336. vadduwm $xa0,$xa3,@K[0]
  337. vadduwm $xb0,$xb3,@K[1]
  338. vadduwm $xc0,$xc3,@K[2]
  339. vadduwm $xd0,$xd3,@K[3]
  340. be?vperm $xa0,$xa0,$xa0,$beperm
  341. be?vperm $xb0,$xb0,$xb0,$beperm
  342. be?vperm $xc0,$xc0,$xc0,$beperm
  343. be?vperm $xd0,$xd0,$xd0,$beperm
  344. ${UCMP}i $len,0x40
  345. blt Ltail_vsx
  346. lvx_4w $xt0,$x00,$inp
  347. lvx_4w $xt1,$x10,$inp
  348. lvx_4w $xt2,$x20,$inp
  349. lvx_4w $xt3,$x30,$inp
  350. vxor $xt0,$xt0,$xa0
  351. vxor $xt1,$xt1,$xb0
  352. vxor $xt2,$xt2,$xc0
  353. vxor $xt3,$xt3,$xd0
  354. stvx_4w $xt0,$x00,$out
  355. stvx_4w $xt1,$x10,$out
  356. addi $inp,$inp,0x40
  357. stvx_4w $xt2,$x20,$out
  358. subi $len,$len,0x40
  359. stvx_4w $xt3,$x30,$out
  360. addi $out,$out,0x40
  361. mtctr r0
  362. bne Loop_outer_vsx
  363. Ldone_vsx:
  364. lwz r12,`$FRAME-4`($sp) # pull vrsave
  365. li r10,`15+$LOCALS+64`
  366. li r11,`31+$LOCALS+64`
  367. $POP r0, `$FRAME+$LRSAVE`($sp)
  368. mtspr 256,r12 # restore vrsave
  369. lvx v26,r10,$sp
  370. addi r10,r10,32
  371. lvx v27,r11,$sp
  372. addi r11,r11,32
  373. lvx v28,r10,$sp
  374. addi r10,r10,32
  375. lvx v29,r11,$sp
  376. addi r11,r11,32
  377. lvx v30,r10,$sp
  378. lvx v31,r11,$sp
  379. mtlr r0
  380. addi $sp,$sp,$FRAME
  381. blr
  382. .align 4
  383. Ltail_vsx:
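# partial (<64-byte) final block: spill the keystream block to the stack, XOR it
# into the output one byte at a time (CTR holds the remaining length), then wipe
# the stack copy before returning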
  384. addi r11,$sp,$LOCALS
  385. mtctr $len
  386. stvx_4w $xa0,$x00,r11 # offload block to stack
  387. stvx_4w $xb0,$x10,r11
  388. stvx_4w $xc0,$x20,r11
  389. stvx_4w $xd0,$x30,r11
  390. subi r12,r11,1 # prepare for *++ptr
  391. subi $inp,$inp,1
  392. subi $out,$out,1
  393. Loop_tail_vsx:
  394. lbzu r6,1(r12)
  395. lbzu r7,1($inp)
  396. xor r6,r6,r7
  397. stbu r6,1($out)
  398. bdnz Loop_tail_vsx
  399. stvx_4w $K[0],$x00,r11 # wipe copy of the block
  400. stvx_4w $K[0],$x10,r11
  401. stvx_4w $K[0],$x20,r11
  402. stvx_4w $K[0],$x30,r11
  403. b Ldone_vsx
  404. .long 0
  405. .byte 0,12,0x04,1,0x80,0,5,0
  406. .long 0
  407. .size .ChaCha20_ctr32_vsx_p10,.-.ChaCha20_ctr32_vsx_p10
  408. ___
  409. }}}
  410. ## This is the 8-blocks-in-parallel implementation. The heart of the ChaCha round uses vector instructions that
  411. # operate on VSR[32+X] (the vector registers). To process 8 blocks in parallel, all 32 vector registers are needed
  412. # to hold the block state, so a few values have to be parked on the side to keep VSR[32+X] free for the vector
  413. # instructions used in the round and for their intermediate values; VSR[0]-VSR[31] serve as that parking/scratch area.
  414. #
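# Register layout for this path:
#   v0 -v15  : the 16 state words of blocks 0-3 (one word of all four blocks per register)
#   v16-v31  : the 16 state words of blocks 4-7
#   low VSRs : parking area for the rotate amounts, splatted sigma, key/counter copies and
#              displaced state words, moved back and forth with the vxxlor/vxxlorc helpers
#              (xxlor moves provided by ppc-xlate.pl).
#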
  415. {{{
  416. #### ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
  417. my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
  418. $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3,
  419. $xa4,$xa5,$xa6,$xa7, $xb4,$xb5,$xb6,$xb7,
  420. $xc4,$xc5,$xc6,$xc7, $xd4,$xd5,$xd6,$xd7) = map("v$_",(0..31));
  421. my ($xcn4,$xcn5,$xcn6,$xcn7, $xdn4,$xdn5,$xdn6,$xdn7) = map("v$_",(8..15));
  422. my ($xan0,$xbn0,$xcn0,$xdn0) = map("v$_",(0..3));
  423. my @K = map("v$_",27,(24..26));
  424. my ($xt0,$xt1,$xt2,$xt3,$xt4) = map("v$_",23,(28..31));
  425. my $xr0 = "v4";
  426. my $CTR0 = "v22";
  427. my $CTR1 = "v5";
  428. my $beperm = "v31";
  429. my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
  430. my ($xv0,$xv1,$xv2,$xv3,$xv4,$xv5,$xv6,$xv7) = map("v$_",(0..7));
  431. my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("v$_",(8..17));
  432. my ($xv18,$xv19,$xv20,$xv21) = map("v$_",(18..21));
  433. my ($xv22,$xv23,$xv24,$xv25,$xv26) = map("v$_",(22..26));
  434. my $FRAME=$LOCALS+64+9*16; # 8*16 is for v24-v31 offload
  435. sub VSX_lane_ROUND_8x {
  436. my ($a0,$b0,$c0,$d0,$a4,$b4,$c4,$d4)=@_;
  437. my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
  438. my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
  439. my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
  440. my ($a5,$b5,$c5,$d5)=map(($_&~3)+(($_+1)&3),($a4,$b4,$c4,$d4));
  441. my ($a6,$b6,$c6,$d6)=map(($_&~3)+(($_+1)&3),($a5,$b5,$c5,$d5));
  442. my ($a7,$b7,$c7,$d7)=map(($_&~3)+(($_+1)&3),($a6,$b6,$c6,$d6));
  443. my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("\"v$_\"",(8..17));
  444. my @x=map("\"v$_\"",(0..31));
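# All 32 VRs hold live state at this point, so the rotate amounts parked in
# $xv9-$xv12 are swapped into @x[$c7] or @x[$a7] just for the vrlw steps; the
# vxxlor/vxxlorc pairs below park the displaced state word in $xv15/$xv13 and
# restore it once the rotates are done.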
  445. (
  446. "&vxxlor ($xv15 ,@x[$c7],@x[$c7])", #copy v30 to v13
  447. "&vxxlorc (@x[$c7], $xv9,$xv9)",
  448. "&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
  449. "&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
  450. "&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
  451. "&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
  452. "&vadduwm (@x[$a4],@x[$a4],@x[$b4])", # Q1
  453. "&vadduwm (@x[$a5],@x[$a5],@x[$b5])", # Q2
  454. "&vadduwm (@x[$a6],@x[$a6],@x[$b6])", # Q3
  455. "&vadduwm (@x[$a7],@x[$a7],@x[$b7])", # Q4
  456. "&vxor (@x[$d0],@x[$d0],@x[$a0])",
  457. "&vxor (@x[$d1],@x[$d1],@x[$a1])",
  458. "&vxor (@x[$d2],@x[$d2],@x[$a2])",
  459. "&vxor (@x[$d3],@x[$d3],@x[$a3])",
  460. "&vxor (@x[$d4],@x[$d4],@x[$a4])",
  461. "&vxor (@x[$d5],@x[$d5],@x[$a5])",
  462. "&vxor (@x[$d6],@x[$d6],@x[$a6])",
  463. "&vxor (@x[$d7],@x[$d7],@x[$a7])",
  464. "&vrlw (@x[$d0],@x[$d0],@x[$c7])",
  465. "&vrlw (@x[$d1],@x[$d1],@x[$c7])",
  466. "&vrlw (@x[$d2],@x[$d2],@x[$c7])",
  467. "&vrlw (@x[$d3],@x[$d3],@x[$c7])",
  468. "&vrlw (@x[$d4],@x[$d4],@x[$c7])",
  469. "&vrlw (@x[$d5],@x[$d5],@x[$c7])",
  470. "&vrlw (@x[$d6],@x[$d6],@x[$c7])",
  471. "&vrlw (@x[$d7],@x[$d7],@x[$c7])",
  472. "&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
  473. "&vxxlorc (@x[$c7], $xv15,$xv15)",
  474. "&vxxlorc (@x[$a7], $xv10,$xv10)",
  475. "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
  476. "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
  477. "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
  478. "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
  479. "&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
  480. "&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
  481. "&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
  482. "&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
  483. "&vxor (@x[$b0],@x[$b0],@x[$c0])",
  484. "&vxor (@x[$b1],@x[$b1],@x[$c1])",
  485. "&vxor (@x[$b2],@x[$b2],@x[$c2])",
  486. "&vxor (@x[$b3],@x[$b3],@x[$c3])",
  487. "&vxor (@x[$b4],@x[$b4],@x[$c4])",
  488. "&vxor (@x[$b5],@x[$b5],@x[$c5])",
  489. "&vxor (@x[$b6],@x[$b6],@x[$c6])",
  490. "&vxor (@x[$b7],@x[$b7],@x[$c7])",
  491. "&vrlw (@x[$b0],@x[$b0],@x[$a7])",
  492. "&vrlw (@x[$b1],@x[$b1],@x[$a7])",
  493. "&vrlw (@x[$b2],@x[$b2],@x[$a7])",
  494. "&vrlw (@x[$b3],@x[$b3],@x[$a7])",
  495. "&vrlw (@x[$b4],@x[$b4],@x[$a7])",
  496. "&vrlw (@x[$b5],@x[$b5],@x[$a7])",
  497. "&vrlw (@x[$b6],@x[$b6],@x[$a7])",
  498. "&vrlw (@x[$b7],@x[$b7],@x[$a7])",
  499. "&vxxlorc (@x[$a7], $xv13,$xv13)",
  500. "&vxxlor ($xv15 ,@x[$c7],@x[$c7])",
  501. "&vxxlorc (@x[$c7], $xv11,$xv11)",
  502. "&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
  503. "&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
  504. "&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
  505. "&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
  506. "&vadduwm (@x[$a4],@x[$a4],@x[$b4])",
  507. "&vadduwm (@x[$a5],@x[$a5],@x[$b5])",
  508. "&vadduwm (@x[$a6],@x[$a6],@x[$b6])",
  509. "&vadduwm (@x[$a7],@x[$a7],@x[$b7])",
  510. "&vxor (@x[$d0],@x[$d0],@x[$a0])",
  511. "&vxor (@x[$d1],@x[$d1],@x[$a1])",
  512. "&vxor (@x[$d2],@x[$d2],@x[$a2])",
  513. "&vxor (@x[$d3],@x[$d3],@x[$a3])",
  514. "&vxor (@x[$d4],@x[$d4],@x[$a4])",
  515. "&vxor (@x[$d5],@x[$d5],@x[$a5])",
  516. "&vxor (@x[$d6],@x[$d6],@x[$a6])",
  517. "&vxor (@x[$d7],@x[$d7],@x[$a7])",
  518. "&vrlw (@x[$d0],@x[$d0],@x[$c7])",
  519. "&vrlw (@x[$d1],@x[$d1],@x[$c7])",
  520. "&vrlw (@x[$d2],@x[$d2],@x[$c7])",
  521. "&vrlw (@x[$d3],@x[$d3],@x[$c7])",
  522. "&vrlw (@x[$d4],@x[$d4],@x[$c7])",
  523. "&vrlw (@x[$d5],@x[$d5],@x[$c7])",
  524. "&vrlw (@x[$d6],@x[$d6],@x[$c7])",
  525. "&vrlw (@x[$d7],@x[$d7],@x[$c7])",
  526. "&vxxlorc (@x[$c7], $xv15,$xv15)",
  527. "&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
  528. "&vxxlorc (@x[$a7], $xv12,$xv12)",
  529. "&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
  530. "&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
  531. "&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
  532. "&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
  533. "&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
  534. "&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
  535. "&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
  536. "&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
  537. "&vxor (@x[$b0],@x[$b0],@x[$c0])",
  538. "&vxor (@x[$b1],@x[$b1],@x[$c1])",
  539. "&vxor (@x[$b2],@x[$b2],@x[$c2])",
  540. "&vxor (@x[$b3],@x[$b3],@x[$c3])",
  541. "&vxor (@x[$b4],@x[$b4],@x[$c4])",
  542. "&vxor (@x[$b5],@x[$b5],@x[$c5])",
  543. "&vxor (@x[$b6],@x[$b6],@x[$c6])",
  544. "&vxor (@x[$b7],@x[$b7],@x[$c7])",
  545. "&vrlw (@x[$b0],@x[$b0],@x[$a7])",
  546. "&vrlw (@x[$b1],@x[$b1],@x[$a7])",
  547. "&vrlw (@x[$b2],@x[$b2],@x[$a7])",
  548. "&vrlw (@x[$b3],@x[$b3],@x[$a7])",
  549. "&vrlw (@x[$b4],@x[$b4],@x[$a7])",
  550. "&vrlw (@x[$b5],@x[$b5],@x[$a7])",
  551. "&vrlw (@x[$b6],@x[$b6],@x[$a7])",
  552. "&vrlw (@x[$b7],@x[$b7],@x[$a7])",
  553. "&vxxlorc (@x[$a7], $xv13,$xv13)",
  554. );
  555. }
  556. $code.=<<___;
  557. .globl .ChaCha20_ctr32_vsx_8x
  558. .align 5
  559. .ChaCha20_ctr32_vsx_8x:
  560. $STU $sp,-$FRAME($sp)
  561. mflr r0
  562. li r10,`15+$LOCALS+64`
  563. li r11,`31+$LOCALS+64`
  564. mfspr r12,256
  565. stvx v24,r10,$sp
  566. addi r10,r10,32
  567. stvx v25,r11,$sp
  568. addi r11,r11,32
  569. stvx v26,r10,$sp
  570. addi r10,r10,32
  571. stvx v27,r11,$sp
  572. addi r11,r11,32
  573. stvx v28,r10,$sp
  574. addi r10,r10,32
  575. stvx v29,r11,$sp
  576. addi r11,r11,32
  577. stvx v30,r10,$sp
  578. stvx v31,r11,$sp
  579. stw r12,`$FRAME-4`($sp) # save vrsave
  580. li r12,-4096+63
  581. $PUSH r0, `$FRAME+$LRSAVE`($sp)
  582. mtspr 256,r12 # preserve 29 AltiVec registers
  583. bl Lconsts # returns pointer Lsigma in r12
  584. lvx_4w @K[0],0,r12 # load sigma
  585. addi r12,r12,0x70
  586. li $x10,16
  587. li $x20,32
  588. li $x30,48
  589. li r11,64
  590. vspltisw $xa4,-16 # synthesize constants
  591. vspltisw $xb4,12 # synthesize constants
  592. vspltisw $xc4,8 # synthesize constants
  593. vspltisw $xd4,7 # synthesize constants
  594. lvx $xa0,$x00,r12 # load [smashed] sigma
  595. lvx $xa1,$x10,r12
  596. lvx $xa2,$x20,r12
  597. lvx $xa3,$x30,r12
  598. vxxlor $xv9 ,$xa4,$xa4 # park the rotate amounts (16,12,8,7) in $xv9-$xv12
  599. vxxlor $xv10 ,$xb4,$xb4
  600. vxxlor $xv11 ,$xc4,$xc4
  601. vxxlor $xv12 ,$xd4,$xd4
  602. vxxlor $xv22 ,$xa0,$xa0 # park the splatted sigma words in $xv22-$xv25
  603. vxxlor $xv23 ,$xa1,$xa1
  604. vxxlor $xv24 ,$xa2,$xa2
  605. vxxlor $xv25 ,$xa3,$xa3
  606. lvx_4w @K[1],0,$key # load key
  607. lvx_4w @K[2],$x10,$key
  608. lvx_4w @K[3],0,$ctr # load counter
  609. vspltisw $xt3,4
  610. vxor $xt2,$xt2,$xt2
  611. lvx_4w $xt1,r11,r12
  612. vspltw $xa2,@K[3],0 # splat the original counter word before @K[3].word[0] is cleared below
  613. vsldoi @K[3],@K[3],$xt2,4
  614. vsldoi @K[3],$xt2,@K[3],12 # clear @K[3].word[0]
  615. vadduwm $xt1,$xa2,$xt1
  616. vadduwm $xt3,$xt1,$xt3 # next counter value
  617. vspltw $xa0,@K[2],2 # splat word 2 of @K[2]; it is parked in $xv8 below
  618. be?lvsl $beperm,0,$x10 # 0x00..0f
  619. be?vspltisb $xt0,3 # 0x03..03
  620. be?vxor $beperm,$beperm,$xt0 # swap bytes within words
  621. be?vxxlor $xv26 ,$beperm,$beperm
  622. vxxlor $xv0 ,@K[0],@K[0] # K0,k1,k2 to vr0,1,2
  623. vxxlor $xv1 ,@K[1],@K[1]
  624. vxxlor $xv2 ,@K[2],@K[2]
  625. vxxlor $xv3 ,@K[3],@K[3]
  626. vxxlor $xv4 ,$xt1,$xt1 #CTR ->4, CTR+4-> 5
  627. vxxlor $xv5 ,$xt3,$xt3
  628. vxxlor $xv8 ,$xa0,$xa0
  629. li r0,10 # inner loop counter
  630. mtctr r0
  631. b Loop_outer_vsx_8x
  632. .align 5
  633. Loop_outer_vsx_8x:
  634. vxxlorc $xa0,$xv22,$xv22 # load [smashed] sigma
  635. vxxlorc $xa1,$xv23,$xv23
  636. vxxlorc $xa2,$xv24,$xv24
  637. vxxlorc $xa3,$xv25,$xv25
  638. vxxlorc $xa4,$xv22,$xv22
  639. vxxlorc $xa5,$xv23,$xv23
  640. vxxlorc $xa6,$xv24,$xv24
  641. vxxlorc $xa7,$xv25,$xv25
  642. vspltw $xb0,@K[1],0 # smash the key
  643. vspltw $xb1,@K[1],1
  644. vspltw $xb2,@K[1],2
  645. vspltw $xb3,@K[1],3
  646. vspltw $xb4,@K[1],0 # smash the key
  647. vspltw $xb5,@K[1],1
  648. vspltw $xb6,@K[1],2
  649. vspltw $xb7,@K[1],3
  650. vspltw $xc0,@K[2],0
  651. vspltw $xc1,@K[2],1
  652. vspltw $xc2,@K[2],2
  653. vspltw $xc3,@K[2],3
  654. vspltw $xc4,@K[2],0
  655. vspltw $xc7,@K[2],3
  656. vspltw $xc5,@K[2],1
  657. vxxlorc $xd0,$xv4,$xv4 # smash the counter
  658. vspltw $xd1,@K[3],1
  659. vspltw $xd2,@K[3],2
  660. vspltw $xd3,@K[3],3
  661. vxxlorc $xd4,$xv5,$xv5 # smash the counter
  662. vspltw $xd5,@K[3],1
  663. vspltw $xd6,@K[3],2
  664. vspltw $xd7,@K[3],3
  665. vxxlorc $xc6,$xv8,$xv8 # restore the saved vspltw @K[2],2 from $xv8; $xc6 is v26, which held @K[3], so this must wait until the @K[3] splats above are done
  666. Loop_vsx_8x:
  667. ___
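# Same 20-round schedule as the 4x path, but across eight blocks: the first call
# emits the column rounds and the second the diagonal rounds, covering blocks 0-3
# (v0-v15) and blocks 4-7 (v16-v31) together.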
  668. foreach (&VSX_lane_ROUND_8x(0,4, 8,12,16,20,24,28)) { eval; }
  669. foreach (&VSX_lane_ROUND_8x(0,5,10,15,16,21,26,31)) { eval; }
  670. $code.=<<___;
  671. bdnz Loop_vsx_8x
  672. vxxlor $xv13 ,$xd4,$xd4 # save the register vr24-31
  673. vxxlor $xv14 ,$xd5,$xd5 #
  674. vxxlor $xv15 ,$xd6,$xd6 #
  675. vxxlor $xv16 ,$xd7,$xd7 #
  676. vxxlor $xv18 ,$xc4,$xc4 #
  677. vxxlor $xv19 ,$xc5,$xc5 #
  678. vxxlor $xv20 ,$xc6,$xc6 #
  679. vxxlor $xv21 ,$xc7,$xc7 #
  680. vxxlor $xv6 ,$xb6,$xb6 # save v22, so we get 8 regs
  681. vxxlor $xv7 ,$xb7,$xb7 # save v23, so we get 8 regs
  682. be?vxxlorc $beperm,$xv26,$xv26 # copy back the beperm.
  683. vxxlorc @K[0],$xv0,$xv0 #27
  684. vxxlorc @K[1],$xv1,$xv1 #24
  685. vxxlorc @K[2],$xv2,$xv2 #25
  686. vxxlorc @K[3],$xv3,$xv3 #26
  687. vxxlorc $CTR0,$xv4,$xv4
  688. ### switch back to per-block (vertical) layout
  689. vmrgew $xt0,$xa0,$xa1 # transpose data
  690. vmrgew $xt1,$xa2,$xa3
  691. vmrgow $xa0,$xa0,$xa1
  692. vmrgow $xa2,$xa2,$xa3
  693. vmrgew $xt2,$xb0,$xb1
  694. vmrgew $xt3,$xb2,$xb3
  695. vmrgow $xb0,$xb0,$xb1
  696. vmrgow $xb2,$xb2,$xb3
  697. vadduwm $xd0,$xd0,$CTR0
  698. vpermdi $xa1,$xa0,$xa2,0b00
  699. vpermdi $xa3,$xa0,$xa2,0b11
  700. vpermdi $xa0,$xt0,$xt1,0b00
  701. vpermdi $xa2,$xt0,$xt1,0b11
  702. vpermdi $xb1,$xb0,$xb2,0b00
  703. vpermdi $xb3,$xb0,$xb2,0b11
  704. vpermdi $xb0,$xt2,$xt3,0b00
  705. vpermdi $xb2,$xt2,$xt3,0b11
  706. vmrgew $xt0,$xc0,$xc1
  707. vmrgew $xt1,$xc2,$xc3
  708. vmrgow $xc0,$xc0,$xc1
  709. vmrgow $xc2,$xc2,$xc3
  710. vmrgew $xt2,$xd0,$xd1
  711. vmrgew $xt3,$xd2,$xd3
  712. vmrgow $xd0,$xd0,$xd1
  713. vmrgow $xd2,$xd2,$xd3
  714. vpermdi $xc1,$xc0,$xc2,0b00
  715. vpermdi $xc3,$xc0,$xc2,0b11
  716. vpermdi $xc0,$xt0,$xt1,0b00
  717. vpermdi $xc2,$xt0,$xt1,0b11
  718. vpermdi $xd1,$xd0,$xd2,0b00
  719. vpermdi $xd3,$xd0,$xd2,0b11
  720. vpermdi $xd0,$xt2,$xt3,0b00
  721. vpermdi $xd2,$xt2,$xt3,0b11
  722. vspltisw $xt0,8
  723. vadduwm $CTR0,$CTR0,$xt0 # next counter value
  724. vxxlor $xv4 ,$CTR0,$CTR0 #CTR+4-> 5
  725. vadduwm $xa0,$xa0,@K[0]
  726. vadduwm $xb0,$xb0,@K[1]
  727. vadduwm $xc0,$xc0,@K[2]
  728. vadduwm $xd0,$xd0,@K[3]
  729. be?vperm $xa0,$xa0,$xa0,$beperm
  730. be?vperm $xb0,$xb0,$xb0,$beperm
  731. be?vperm $xc0,$xc0,$xc0,$beperm
  732. be?vperm $xd0,$xd0,$xd0,$beperm
  733. ${UCMP}i $len,0x40
  734. blt Ltail_vsx_8x
  735. lvx_4w $xt0,$x00,$inp
  736. lvx_4w $xt1,$x10,$inp
  737. lvx_4w $xt2,$x20,$inp
  738. lvx_4w $xt3,$x30,$inp
  739. vxor $xt0,$xt0,$xa0
  740. vxor $xt1,$xt1,$xb0
  741. vxor $xt2,$xt2,$xc0
  742. vxor $xt3,$xt3,$xd0
  743. stvx_4w $xt0,$x00,$out
  744. stvx_4w $xt1,$x10,$out
  745. addi $inp,$inp,0x40
  746. stvx_4w $xt2,$x20,$out
  747. subi $len,$len,0x40
  748. stvx_4w $xt3,$x30,$out
  749. addi $out,$out,0x40
  750. beq Ldone_vsx_8x
  751. vadduwm $xa0,$xa1,@K[0]
  752. vadduwm $xb0,$xb1,@K[1]
  753. vadduwm $xc0,$xc1,@K[2]
  754. vadduwm $xd0,$xd1,@K[3]
  755. be?vperm $xa0,$xa0,$xa0,$beperm
  756. be?vperm $xb0,$xb0,$xb0,$beperm
  757. be?vperm $xc0,$xc0,$xc0,$beperm
  758. be?vperm $xd0,$xd0,$xd0,$beperm
  759. ${UCMP}i $len,0x40
  760. blt Ltail_vsx_8x
  761. lvx_4w $xt0,$x00,$inp
  762. lvx_4w $xt1,$x10,$inp
  763. lvx_4w $xt2,$x20,$inp
  764. lvx_4w $xt3,$x30,$inp
  765. vxor $xt0,$xt0,$xa0
  766. vxor $xt1,$xt1,$xb0
  767. vxor $xt2,$xt2,$xc0
  768. vxor $xt3,$xt3,$xd0
  769. stvx_4w $xt0,$x00,$out
  770. stvx_4w $xt1,$x10,$out
  771. addi $inp,$inp,0x40
  772. stvx_4w $xt2,$x20,$out
  773. subi $len,$len,0x40
  774. stvx_4w $xt3,$x30,$out
  775. addi $out,$out,0x40
  776. beq Ldone_vsx_8x
  777. vadduwm $xa0,$xa2,@K[0]
  778. vadduwm $xb0,$xb2,@K[1]
  779. vadduwm $xc0,$xc2,@K[2]
  780. vadduwm $xd0,$xd2,@K[3]
  781. be?vperm $xa0,$xa0,$xa0,$beperm
  782. be?vperm $xb0,$xb0,$xb0,$beperm
  783. be?vperm $xc0,$xc0,$xc0,$beperm
  784. be?vperm $xd0,$xd0,$xd0,$beperm
  785. ${UCMP}i $len,0x40
  786. blt Ltail_vsx_8x
  787. lvx_4w $xt0,$x00,$inp
  788. lvx_4w $xt1,$x10,$inp
  789. lvx_4w $xt2,$x20,$inp
  790. lvx_4w $xt3,$x30,$inp
  791. vxor $xt0,$xt0,$xa0
  792. vxor $xt1,$xt1,$xb0
  793. vxor $xt2,$xt2,$xc0
  794. vxor $xt3,$xt3,$xd0
  795. stvx_4w $xt0,$x00,$out
  796. stvx_4w $xt1,$x10,$out
  797. addi $inp,$inp,0x40
  798. stvx_4w $xt2,$x20,$out
  799. subi $len,$len,0x40
  800. stvx_4w $xt3,$x30,$out
  801. addi $out,$out,0x40
  802. beq Ldone_vsx_8x
  803. vadduwm $xa0,$xa3,@K[0]
  804. vadduwm $xb0,$xb3,@K[1]
  805. vadduwm $xc0,$xc3,@K[2]
  806. vadduwm $xd0,$xd3,@K[3]
  807. be?vperm $xa0,$xa0,$xa0,$beperm
  808. be?vperm $xb0,$xb0,$xb0,$beperm
  809. be?vperm $xc0,$xc0,$xc0,$beperm
  810. be?vperm $xd0,$xd0,$xd0,$beperm
  811. ${UCMP}i $len,0x40
  812. blt Ltail_vsx_8x
  813. lvx_4w $xt0,$x00,$inp
  814. lvx_4w $xt1,$x10,$inp
  815. lvx_4w $xt2,$x20,$inp
  816. lvx_4w $xt3,$x30,$inp
  817. vxor $xt0,$xt0,$xa0
  818. vxor $xt1,$xt1,$xb0
  819. vxor $xt2,$xt2,$xc0
  820. vxor $xt3,$xt3,$xd0
  821. stvx_4w $xt0,$x00,$out
  822. stvx_4w $xt1,$x10,$out
  823. addi $inp,$inp,0x40
  824. stvx_4w $xt2,$x20,$out
  825. subi $len,$len,0x40
  826. stvx_4w $xt3,$x30,$out
  827. addi $out,$out,0x40
  828. beq Ldone_vsx_8x
  829. # Blocks 4-7: the same logic as above is reused. a4-a7/b4-b7 stay in v16-v23; the parked c4-c7/d4-d7 values are
  830. # reloaded into positions 8-15 (v8-v15), so v24-v31 are free again; v0-v3 hold temporaries and v4 ($xr0) takes the role of $xt0.
  831. vxxlorc $CTR1 ,$xv5,$xv5
  832. vxxlorc $xcn4 ,$xv18,$xv18
  833. vxxlorc $xcn5 ,$xv19,$xv19
  834. vxxlorc $xcn6 ,$xv20,$xv20
  835. vxxlorc $xcn7 ,$xv21,$xv21
  836. vxxlorc $xdn4 ,$xv13,$xv13
  837. vxxlorc $xdn5 ,$xv14,$xv14
  838. vxxlorc $xdn6 ,$xv15,$xv15
  839. vxxlorc $xdn7 ,$xv16,$xv16
  840. vadduwm $xdn4,$xdn4,$CTR1
  841. vxxlorc $xb6 ,$xv6,$xv6
  842. vxxlorc $xb7 ,$xv7,$xv7
  843. # for blocks 4-7, $xr0 (v4) is used where the code above used $xt0
  844. vmrgew $xr0,$xa4,$xa5 # transpose data
  845. vmrgew $xt1,$xa6,$xa7
  846. vmrgow $xa4,$xa4,$xa5
  847. vmrgow $xa6,$xa6,$xa7
  848. vmrgew $xt2,$xb4,$xb5
  849. vmrgew $xt3,$xb6,$xb7
  850. vmrgow $xb4,$xb4,$xb5
  851. vmrgow $xb6,$xb6,$xb7
  852. vpermdi $xa5,$xa4,$xa6,0b00
  853. vpermdi $xa7,$xa4,$xa6,0b11
  854. vpermdi $xa4,$xr0,$xt1,0b00
  855. vpermdi $xa6,$xr0,$xt1,0b11
  856. vpermdi $xb5,$xb4,$xb6,0b00
  857. vpermdi $xb7,$xb4,$xb6,0b11
  858. vpermdi $xb4,$xt2,$xt3,0b00
  859. vpermdi $xb6,$xt2,$xt3,0b11
  860. vmrgew $xr0,$xcn4,$xcn5
  861. vmrgew $xt1,$xcn6,$xcn7
  862. vmrgow $xcn4,$xcn4,$xcn5
  863. vmrgow $xcn6,$xcn6,$xcn7
  864. vmrgew $xt2,$xdn4,$xdn5
  865. vmrgew $xt3,$xdn6,$xdn7
  866. vmrgow $xdn4,$xdn4,$xdn5
  867. vmrgow $xdn6,$xdn6,$xdn7
  868. vpermdi $xcn5,$xcn4,$xcn6,0b00
  869. vpermdi $xcn7,$xcn4,$xcn6,0b11
  870. vpermdi $xcn4,$xr0,$xt1,0b00
  871. vpermdi $xcn6,$xr0,$xt1,0b11
  872. vpermdi $xdn5,$xdn4,$xdn6,0b00
  873. vpermdi $xdn7,$xdn4,$xdn6,0b11
  874. vpermdi $xdn4,$xt2,$xt3,0b00
  875. vpermdi $xdn6,$xt2,$xt3,0b11
  876. vspltisw $xr0,8
  877. vadduwm $CTR1,$CTR1,$xr0 # next counter value
  878. vxxlor $xv5 ,$CTR1,$CTR1 #CTR+4-> 5
  879. vadduwm $xan0,$xa4,@K[0]
  880. vadduwm $xbn0,$xb4,@K[1]
  881. vadduwm $xcn0,$xcn4,@K[2]
  882. vadduwm $xdn0,$xdn4,@K[3]
  883. be?vperm $xan0,$xa4,$xa4,$beperm
  884. be?vperm $xbn0,$xb4,$xb4,$beperm
  885. be?vperm $xcn0,$xcn4,$xcn4,$beperm
  886. be?vperm $xdn0,$xdn4,$xdn4,$beperm
  887. ${UCMP}i $len,0x40
  888. blt Ltail_vsx_8x_1
  889. lvx_4w $xr0,$x00,$inp
  890. lvx_4w $xt1,$x10,$inp
  891. lvx_4w $xt2,$x20,$inp
  892. lvx_4w $xt3,$x30,$inp
  893. vxor $xr0,$xr0,$xan0
  894. vxor $xt1,$xt1,$xbn0
  895. vxor $xt2,$xt2,$xcn0
  896. vxor $xt3,$xt3,$xdn0
  897. stvx_4w $xr0,$x00,$out
  898. stvx_4w $xt1,$x10,$out
  899. addi $inp,$inp,0x40
  900. stvx_4w $xt2,$x20,$out
  901. subi $len,$len,0x40
  902. stvx_4w $xt3,$x30,$out
  903. addi $out,$out,0x40
  904. beq Ldone_vsx_8x
  905. vadduwm $xan0,$xa5,@K[0]
  906. vadduwm $xbn0,$xb5,@K[1]
  907. vadduwm $xcn0,$xcn5,@K[2]
  908. vadduwm $xdn0,$xdn5,@K[3]
  909. be?vperm $xan0,$xan0,$xan0,$beperm
  910. be?vperm $xbn0,$xbn0,$xbn0,$beperm
  911. be?vperm $xcn0,$xcn0,$xcn0,$beperm
  912. be?vperm $xdn0,$xdn0,$xdn0,$beperm
  913. ${UCMP}i $len,0x40
  914. blt Ltail_vsx_8x_1
  915. lvx_4w $xr0,$x00,$inp
  916. lvx_4w $xt1,$x10,$inp
  917. lvx_4w $xt2,$x20,$inp
  918. lvx_4w $xt3,$x30,$inp
  919. vxor $xr0,$xr0,$xan0
  920. vxor $xt1,$xt1,$xbn0
  921. vxor $xt2,$xt2,$xcn0
  922. vxor $xt3,$xt3,$xdn0
  923. stvx_4w $xr0,$x00,$out
  924. stvx_4w $xt1,$x10,$out
  925. addi $inp,$inp,0x40
  926. stvx_4w $xt2,$x20,$out
  927. subi $len,$len,0x40
  928. stvx_4w $xt3,$x30,$out
  929. addi $out,$out,0x40
  930. beq Ldone_vsx_8x
  931. vadduwm $xan0,$xa6,@K[0]
  932. vadduwm $xbn0,$xb6,@K[1]
  933. vadduwm $xcn0,$xcn6,@K[2]
  934. vadduwm $xdn0,$xdn6,@K[3]
  935. be?vperm $xan0,$xan0,$xan0,$beperm
  936. be?vperm $xbn0,$xbn0,$xbn0,$beperm
  937. be?vperm $xcn0,$xcn0,$xcn0,$beperm
  938. be?vperm $xdn0,$xdn0,$xdn0,$beperm
  939. ${UCMP}i $len,0x40
  940. blt Ltail_vsx_8x_1
  941. lvx_4w $xr0,$x00,$inp
  942. lvx_4w $xt1,$x10,$inp
  943. lvx_4w $xt2,$x20,$inp
  944. lvx_4w $xt3,$x30,$inp
  945. vxor $xr0,$xr0,$xan0
  946. vxor $xt1,$xt1,$xbn0
  947. vxor $xt2,$xt2,$xcn0
  948. vxor $xt3,$xt3,$xdn0
  949. stvx_4w $xr0,$x00,$out
  950. stvx_4w $xt1,$x10,$out
  951. addi $inp,$inp,0x40
  952. stvx_4w $xt2,$x20,$out
  953. subi $len,$len,0x40
  954. stvx_4w $xt3,$x30,$out
  955. addi $out,$out,0x40
  956. beq Ldone_vsx_8x
  957. vadduwm $xan0,$xa7,@K[0]
  958. vadduwm $xbn0,$xb7,@K[1]
  959. vadduwm $xcn0,$xcn7,@K[2]
  960. vadduwm $xdn0,$xdn7,@K[3]
  961. be?vperm $xan0,$xan0,$xan0,$beperm
  962. be?vperm $xbn0,$xbn0,$xbn0,$beperm
  963. be?vperm $xcn0,$xcn0,$xcn0,$beperm
  964. be?vperm $xdn0,$xdn0,$xdn0,$beperm
  965. ${UCMP}i $len,0x40
  966. blt Ltail_vsx_8x_1
  967. lvx_4w $xr0,$x00,$inp
  968. lvx_4w $xt1,$x10,$inp
  969. lvx_4w $xt2,$x20,$inp
  970. lvx_4w $xt3,$x30,$inp
  971. vxor $xr0,$xr0,$xan0
  972. vxor $xt1,$xt1,$xbn0
  973. vxor $xt2,$xt2,$xcn0
  974. vxor $xt3,$xt3,$xdn0
  975. stvx_4w $xr0,$x00,$out
  976. stvx_4w $xt1,$x10,$out
  977. addi $inp,$inp,0x40
  978. stvx_4w $xt2,$x20,$out
  979. subi $len,$len,0x40
  980. stvx_4w $xt3,$x30,$out
  981. addi $out,$out,0x40
  982. beq Ldone_vsx_8x
  983. mtctr r0
  984. bne Loop_outer_vsx_8x
  985. Ldone_vsx_8x:
  986. lwz r12,`$FRAME-4`($sp) # pull vrsave
  987. li r10,`15+$LOCALS+64`
  988. li r11,`31+$LOCALS+64`
  989. $POP r0, `$FRAME+$LRSAVE`($sp)
  990. mtspr 256,r12 # restore vrsave
  991. lvx v24,r10,$sp
  992. addi r10,r10,32
  993. lvx v25,r11,$sp
  994. addi r11,r11,32
  995. lvx v26,r10,$sp
  996. addi r10,r10,32
  997. lvx v27,r11,$sp
  998. addi r11,r11,32
  999. lvx v28,r10,$sp
  1000. addi r10,r10,32
  1001. lvx v29,r11,$sp
  1002. addi r11,r11,32
  1003. lvx v30,r10,$sp
  1004. lvx v31,r11,$sp
  1005. mtlr r0
  1006. addi $sp,$sp,$FRAME
  1007. blr
  1008. .align 4
  1009. Ltail_vsx_8x:
  1010. addi r11,$sp,$LOCALS
  1011. mtctr $len
  1012. stvx_4w $xa0,$x00,r11 # offload block to stack
  1013. stvx_4w $xb0,$x10,r11
  1014. stvx_4w $xc0,$x20,r11
  1015. stvx_4w $xd0,$x30,r11
  1016. subi r12,r11,1 # prepare for *++ptr
  1017. subi $inp,$inp,1
  1018. subi $out,$out,1
  1019. bl Loop_tail_vsx_8x
  1020. Ltail_vsx_8x_1:
  1021. addi r11,$sp,$LOCALS
  1022. mtctr $len
  1023. stvx_4w $xan0,$x00,r11 # offload block to stack
  1024. stvx_4w $xbn0,$x10,r11
  1025. stvx_4w $xcn0,$x20,r11
  1026. stvx_4w $xdn0,$x30,r11
  1027. subi r12,r11,1 # prepare for *++ptr
  1028. subi $inp,$inp,1
  1029. subi $out,$out,1
  1030. bl Loop_tail_vsx_8x
  1031. Loop_tail_vsx_8x:
  1032. lbzu r6,1(r12)
  1033. lbzu r7,1($inp)
  1034. xor r6,r6,r7
  1035. stbu r6,1($out)
  1036. bdnz Loop_tail_vsx_8x
  1037. stvx_4w $K[0],$x00,r11 # wipe copy of the block
  1038. stvx_4w $K[0],$x10,r11
  1039. stvx_4w $K[0],$x20,r11
  1040. stvx_4w $K[0],$x30,r11
  1041. b Ldone_vsx_8x
  1042. .long 0
  1043. .byte 0,12,0x04,1,0x80,0,5,0
  1044. .long 0
  1045. .size .ChaCha20_ctr32_vsx_8x,.-.ChaCha20_ctr32_vsx_8x
  1046. ___
  1047. }}}
  1048. $code.=<<___;
  1049. .align 5
  1050. Lconsts:
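# position-independent lookup of the constant pool: bcl sets LR to the next
# instruction, mflr r12 captures that address, and the addi below adds the
# distance from there to Lsigma (64-8 bytes)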
  1051. mflr r0
  1052. bcl 20,31,\$+4
  1053. mflr r12 #vvvvv "distance between . and Lsigma
  1054. addi r12,r12,`64-8`
  1055. mtlr r0
  1056. blr
  1057. .long 0
  1058. .byte 0,12,0x14,0,0,0,0,0
  1059. .space `64-9*4`
  1060. Lsigma:
  1061. .long 0x61707865,0x3320646e,0x79622d32,0x6b206574
  1062. .long 1,0,0,0
  1063. .long 2,0,0,0
  1064. .long 3,0,0,0
  1065. .long 4,0,0,0
  1066. ___
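# Constant pool layout from Lsigma: +0x00 sigma, +0x10..+0x40 the 1..4 rows,
# +0x50 the endian byte-swap tables emitted just below, +0x70 the four splatted
# sigma words loaded as the "[smashed] sigma", +0xb0 the {0,1,2,3} per-lane
# counter offsets fetched via r11=64, and +0xc0 a byte-index pattern.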
  1067. $code.=<<___ if ($LITTLE_ENDIAN);
  1068. .long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
  1069. .long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
  1070. ___
  1071. $code.=<<___ if (!$LITTLE_ENDIAN); # flipped words
  1072. .long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
  1073. .long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
  1074. ___
  1075. $code.=<<___;
  1076. .long 0x61707865,0x61707865,0x61707865,0x61707865
  1077. .long 0x3320646e,0x3320646e,0x3320646e,0x3320646e
  1078. .long 0x79622d32,0x79622d32,0x79622d32,0x79622d32
  1079. .long 0x6b206574,0x6b206574,0x6b206574,0x6b206574
  1080. .long 0,1,2,3
  1081. .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c
  1082. .asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
  1083. .align 2
  1084. ___
  1085. foreach (split("\n",$code)) {
  1086. s/\`([^\`]*)\`/eval $1/ge;
  1087. # instructions prefixed with '?' are endian-specific and need
  1088. # to be adjusted accordingly...
  1089. if ($flavour !~ /le$/) { # big-endian
  1090. s/be\?// or
  1091. s/le\?/#le#/ or
  1092. s/\?lvsr/lvsl/ or
  1093. s/\?lvsl/lvsr/ or
  1094. s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
  1095. s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
  1096. } else { # little-endian
  1097. s/le\?// or
  1098. s/be\?/#be#/ or
  1099. s/\?([a-z]+)/$1/ or
  1100. s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
  1101. }
  1102. print $_,"\n";
  1103. }
  1104. close STDOUT or die "error closing STDOUT: $!";