#! /usr/bin/env perl
# Copyright 2016-2022 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# October 2015
#
# ChaCha20 for PowerPC/AltiVec.
#
# June 2018
#
# Add VSX 2.07 code path. Original 3xAltiVec+1xIALU is well-suited for
# processors that can't issue more than one vector instruction per
# cycle. But POWER8 (and POWER9) can issue a pair, and vector-only 4x
# interleave would perform better. Incidentally PowerISA 2.07 (first
# implemented by POWER8) defined new usable instructions, hence 4xVSX
# code path...
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.x	3xAltiVec+1xIALU	4xVSX
#
# Freescale e300	13.6/+115%	-			-
# PPC74x0/G4e		6.81/+310%	3.81			-
# PPC970/G5		9.29/+160%	?			-
# POWER7		8.62/+61%	3.35			-
# POWER8		8.70/+51%	2.91			2.09
# POWER9		8.80/+29%	4.44(*)			2.45(**)
#
# (*)	this is a trade-off result; it is possible to improve it, but
#	that would negatively affect all others;
# (**)	POWER9 seems to be "allergic" to mixing vector and integer
#	instructions, which is why the switch to vector-only code pays
#	off that much;

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

if ($flavour =~ /64/) {
	$SIZE_T =8;
	$LRSAVE =2*$SIZE_T;
	$STU ="stdu";
	$POP ="ld";
	$PUSH ="std";
	$UCMP ="cmpld";
} elsif ($flavour =~ /32/) {
	$SIZE_T =4;
	$LRSAVE =$SIZE_T;
	$STU ="stwu";
	$POP ="lwz";
	$PUSH ="stw";
	$UCMP ="cmplw";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";

$LOCALS=6*$SIZE_T;
$FRAME=$LOCALS+64+18*$SIZE_T;	# 64 is for local variables

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $code .= "\t$opcode\t".join(',',@_)."\n";
}
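
# For example (illustrative only, not emitted by this file as-is): when one of
# the round subs below yields the string "&vadduwm ('v0','v0','v4')" and it is
# eval'ed, there is no vadduwm sub, so the call lands in AUTOLOAD above and
# appends "\tvadduwm\tv0,v0,v4\n" to $code.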

my $sp = "r1";

my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));

{{{
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
my @K = map("v$_",(16..19));
my $CTR = "v26";
my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
my $beperm = "v31";
my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
my $FRAME=$LOCALS+64+7*16;	# 7*16 is for v26-v31 offload
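
# Reference only (an illustrative sketch, not used by the code generator):
# a plain-Perl ChaCha quarter-round on 32-bit words, documenting the
# operation that VSX_lane_ROUND_4x below performs on four lanes at once.
sub _chacha_quarter_round_ref {
	my ($a,$b,$c,$d) = @_;
	my $rotl = sub { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff };
	$a=($a+$b)&0xffffffff; $d^=$a; $d=$rotl->($d,16);
	$c=($c+$d)&0xffffffff; $b^=$c; $b=$rotl->($b,12);
	$a=($a+$b)&0xffffffff; $d^=$a; $d=$rotl->($d,8);
	$c=($c+$d)&0xffffffff; $b^=$c; $b=$rotl->($b,7);
	return ($a,$b,$c,$d);
}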

sub VSX_lane_ROUND_4x {
my ($a0,$b0,$c0,$d0)=@_;
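# Derive the other three (a,b,c,d) quartets from the first one: ($_&~3)
# keeps the row of the 4x4 state, (($_+1)&3) steps the column, so a call
# with (0,4,8,12) covers all four columns and a call with (0,5,10,15)
# covers all four diagonals.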
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"v$_\"",(0..15));

	(
	"&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
	"&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
	"&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
	"&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
	"&vxor (@x[$d0],@x[$d0],@x[$a0])",
	"&vxor (@x[$d1],@x[$d1],@x[$a1])",
	"&vxor (@x[$d2],@x[$d2],@x[$a2])",
	"&vxor (@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw (@x[$d0],@x[$d0],'$sixteen')",
	"&vrlw (@x[$d1],@x[$d1],'$sixteen')",
	"&vrlw (@x[$d2],@x[$d2],'$sixteen')",
	"&vrlw (@x[$d3],@x[$d3],'$sixteen')",
	"&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
	"&vxor (@x[$b0],@x[$b0],@x[$c0])",
	"&vxor (@x[$b1],@x[$b1],@x[$c1])",
	"&vxor (@x[$b2],@x[$b2],@x[$c2])",
	"&vxor (@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw (@x[$b0],@x[$b0],'$twelve')",
	"&vrlw (@x[$b1],@x[$b1],'$twelve')",
	"&vrlw (@x[$b2],@x[$b2],'$twelve')",
	"&vrlw (@x[$b3],@x[$b3],'$twelve')",
	"&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
	"&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
	"&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
	"&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
	"&vxor (@x[$d0],@x[$d0],@x[$a0])",
	"&vxor (@x[$d1],@x[$d1],@x[$a1])",
	"&vxor (@x[$d2],@x[$d2],@x[$a2])",
	"&vxor (@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw (@x[$d0],@x[$d0],'$eight')",
	"&vrlw (@x[$d1],@x[$d1],'$eight')",
	"&vrlw (@x[$d2],@x[$d2],'$eight')",
	"&vrlw (@x[$d3],@x[$d3],'$eight')",
	"&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
	"&vxor (@x[$b0],@x[$b0],@x[$c0])",
	"&vxor (@x[$b1],@x[$b1],@x[$c1])",
	"&vxor (@x[$b2],@x[$b2],@x[$c2])",
	"&vxor (@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw (@x[$b0],@x[$b0],'$seven')",
	"&vrlw (@x[$b1],@x[$b1],'$seven')",
	"&vrlw (@x[$b2],@x[$b2],'$seven')",
	"&vrlw (@x[$b3],@x[$b3],'$seven')"
	);
}

$code.=<<___;
.globl .ChaCha20_ctr32_vsx_p10
.align 5
.ChaCha20_ctr32_vsx_p10:
	${UCMP}i $len,255
	bgt ChaCha20_ctr32_vsx_8x
	$STU $sp,-$FRAME($sp)
	mflr r0
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	mfspr r12,256
	stvx v26,r10,$sp
	addi r10,r10,32
	stvx v27,r11,$sp
	addi r11,r11,32
	stvx v28,r10,$sp
	addi r10,r10,32
	stvx v29,r11,$sp
	addi r11,r11,32
	stvx v30,r10,$sp
	stvx v31,r11,$sp
	stw r12,`$FRAME-4`($sp) # save vrsave
	li r12,-4096+63
	$PUSH r0, `$FRAME+$LRSAVE`($sp)
	mtspr 256,r12 # preserve 29 AltiVec registers
	bl Lconsts # returns pointer Lsigma in r12
	lvx_4w @K[0],0,r12 # load sigma
	addi r12,r12,0x70
	li $x10,16
	li $x20,32
	li $x30,48
	li r11,64
	lvx_4w @K[1],0,$key # load key
	lvx_4w @K[2],$x10,$key
	lvx_4w @K[3],0,$ctr # load counter
	vxor $xt0,$xt0,$xt0
	lvx_4w $xt1,r11,r12
	vspltw $CTR,@K[3],0
	vsldoi @K[3],@K[3],$xt0,4
	vsldoi @K[3],$xt0,@K[3],12 # clear @K[3].word[0]
	vadduwm $CTR,$CTR,$xt1
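	# the counter register now holds counter+{0,1,2,3}, one 32-bit lane per block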
	be?lvsl $beperm,0,$x10 # 0x00..0f
	be?vspltisb $xt0,3 # 0x03..03
	be?vxor $beperm,$beperm,$xt0 # swap bytes within words
	li r0,10 # inner loop counter
	mtctr r0
	b Loop_outer_vsx

.align 5
Loop_outer_vsx:
	lvx $xa0,$x00,r12 # load [smashed] sigma
	lvx $xa1,$x10,r12
	lvx $xa2,$x20,r12
	lvx $xa3,$x30,r12
	vspltw $xb0,@K[1],0 # smash the key
	vspltw $xb1,@K[1],1
	vspltw $xb2,@K[1],2
	vspltw $xb3,@K[1],3
	vspltw $xc0,@K[2],0
	vspltw $xc1,@K[2],1
	vspltw $xc2,@K[2],2
	vspltw $xc3,@K[2],3
	vmr $xd0,$CTR # smash the counter
	vspltw $xd1,@K[3],1
	vspltw $xd2,@K[3],2
	vspltw $xd3,@K[3],3
	vspltisw $sixteen,-16 # synthesize constants
	vspltisw $twelve,12
	vspltisw $eight,8
	vspltisw $seven,7

Loop_vsx_4x:
___
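
# Each Loop_vsx_4x iteration evaluates one column round followed by one
# diagonal round across the four interleaved blocks; with the count register
# preloaded to 10 this yields the 20 rounds of ChaCha20.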
foreach (&VSX_lane_ROUND_4x(0, 4, 8,12)) { eval; }
foreach (&VSX_lane_ROUND_4x(0, 5,10,15)) { eval; }
$code.=<<___;
	bdnz Loop_vsx_4x
	vadduwm $xd0,$xd0,$CTR
	vmrgew $xt0,$xa0,$xa1 # transpose data
	vmrgew $xt1,$xa2,$xa3
	vmrgow $xa0,$xa0,$xa1
	vmrgow $xa2,$xa2,$xa3
	vmrgew $xt2,$xb0,$xb1
	vmrgew $xt3,$xb2,$xb3
	vpermdi $xa1,$xa0,$xa2,0b00
	vpermdi $xa3,$xa0,$xa2,0b11
	vpermdi $xa0,$xt0,$xt1,0b00
	vpermdi $xa2,$xt0,$xt1,0b11
	vmrgow $xb0,$xb0,$xb1
	vmrgow $xb2,$xb2,$xb3
	vmrgew $xt0,$xc0,$xc1
	vmrgew $xt1,$xc2,$xc3
	vpermdi $xb1,$xb0,$xb2,0b00
	vpermdi $xb3,$xb0,$xb2,0b11
	vpermdi $xb0,$xt2,$xt3,0b00
	vpermdi $xb2,$xt2,$xt3,0b11
	vmrgow $xc0,$xc0,$xc1
	vmrgow $xc2,$xc2,$xc3
	vmrgew $xt2,$xd0,$xd1
	vmrgew $xt3,$xd2,$xd3
	vpermdi $xc1,$xc0,$xc2,0b00
	vpermdi $xc3,$xc0,$xc2,0b11
	vpermdi $xc0,$xt0,$xt1,0b00
	vpermdi $xc2,$xt0,$xt1,0b11
	vmrgow $xd0,$xd0,$xd1
	vmrgow $xd2,$xd2,$xd3
	vspltisw $xt0,4
	vadduwm $CTR,$CTR,$xt0 # next counter value
	vpermdi $xd1,$xd0,$xd2,0b00
	vpermdi $xd3,$xd0,$xd2,0b11
	vpermdi $xd0,$xt2,$xt3,0b00
	vpermdi $xd2,$xt2,$xt3,0b11
	vadduwm $xa0,$xa0,@K[0]
	vadduwm $xb0,$xb0,@K[1]
	vadduwm $xc0,$xc0,@K[2]
	vadduwm $xd0,$xd0,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx
	vadduwm $xa0,$xa1,@K[0]
	vadduwm $xb0,$xb1,@K[1]
	vadduwm $xc0,$xc1,@K[2]
	vadduwm $xd0,$xd1,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx
	vadduwm $xa0,$xa2,@K[0]
	vadduwm $xb0,$xb2,@K[1]
	vadduwm $xc0,$xc2,@K[2]
	vadduwm $xd0,$xd2,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx
	vadduwm $xa0,$xa3,@K[0]
	vadduwm $xb0,$xb3,@K[1]
	vadduwm $xc0,$xc3,@K[2]
	vadduwm $xd0,$xd3,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	mtctr r0
	bne Loop_outer_vsx

Ldone_vsx:
	lwz r12,`$FRAME-4`($sp) # pull vrsave
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	$POP r0, `$FRAME+$LRSAVE`($sp)
	mtspr 256,r12 # restore vrsave
	lvx v26,r10,$sp
	addi r10,r10,32
	lvx v27,r11,$sp
	addi r11,r11,32
	lvx v28,r10,$sp
	addi r10,r10,32
	lvx v29,r11,$sp
	addi r11,r11,32
	lvx v30,r10,$sp
	lvx v31,r11,$sp
	mtlr r0
	addi $sp,$sp,$FRAME
	blr

.align 4
Ltail_vsx:
	addi r11,$sp,$LOCALS
	mtctr $len
	stvx_4w $xa0,$x00,r11 # offload block to stack
	stvx_4w $xb0,$x10,r11
	stvx_4w $xc0,$x20,r11
	stvx_4w $xd0,$x30,r11
	subi r12,r11,1 # prepare for *++ptr
	subi $inp,$inp,1
	subi $out,$out,1

Loop_tail_vsx:
	lbzu r6,1(r12)
	lbzu r7,1($inp)
	xor r6,r6,r7
	stbu r6,1($out)
	bdnz Loop_tail_vsx
	stvx_4w $K[0],$x00,r11 # wipe copy of the block
	stvx_4w $K[0],$x10,r11
	stvx_4w $K[0],$x20,r11
	stvx_4w $K[0],$x30,r11
	b Ldone_vsx
	.long 0
	.byte 0,12,0x04,1,0x80,0,5,0
	.long 0
.size .ChaCha20_ctr32_vsx_p10,.-.ChaCha20_ctr32_vsx_p10
___
}}}

# This is the 8-blocks-in-parallel implementation. The heart of the ChaCha
# round uses vector instructions that can only address VSR[32+X], so holding
# the state of 8 blocks takes all 32 vector registers. A few register values
# therefore have to be parked on the side: VSR[0]-VSR[31] hold those
# intermediate values (rotate constants, sigma, key, counters and spilled
# lanes), which are swapped in and out around the round instructions that
# need the VR slots.
#
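# A minimal sketch of the spill/restore idiom used below (assuming vxxlor and
# vxxlorc are the xxlor-based VR<->VSR copies provided by ppc-xlate.pl):
#
#	vxxlor	$xv9,$xa4,$xa4		# park a rotate constant in a low VSR
#	...				# reuse the VR slot for block state
#	vxxlorc	$xa4,$xv9,$xv9		# fetch it back when needed
#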
{{{
#### ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3,
    $xa4,$xa5,$xa6,$xa7, $xb4,$xb5,$xb6,$xb7,
    $xc4,$xc5,$xc6,$xc7, $xd4,$xd5,$xd6,$xd7) = map("v$_",(0..31));
my ($xcn4,$xcn5,$xcn6,$xcn7, $xdn4,$xdn5,$xdn6,$xdn7) = map("v$_",(8..15));
my ($xan0,$xbn0,$xcn0,$xdn0) = map("v$_",(0..3));
my @K = map("v$_",27,(24..26));
my ($xt0,$xt1,$xt2,$xt3,$xt4) = map("v$_",23,(28..31));
my $xr0 = "v4";
my $CTR0 = "v22";
my $CTR1 = "v5";
my $beperm = "v31";
my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
my ($xv0,$xv1,$xv2,$xv3,$xv4,$xv5,$xv6,$xv7) = map("v$_",(0..7));
my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("v$_",(8..17));
my ($xv18,$xv19,$xv20,$xv21) = map("v$_",(18..21));
my ($xv22,$xv23,$xv24,$xv25,$xv26) = map("v$_",(22..26));
my $FRAME=$LOCALS+64+9*16;	# 8*16 is for v24-v31 offload

sub VSX_lane_ROUND_8x {
my ($a0,$b0,$c0,$d0,$a4,$b4,$c4,$d4)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($a5,$b5,$c5,$d5)=map(($_&~3)+(($_+1)&3),($a4,$b4,$c4,$d4));
my ($a6,$b6,$c6,$d6)=map(($_&~3)+(($_+1)&3),($a5,$b5,$c5,$d5));
my ($a7,$b7,$c7,$d7)=map(($_&~3)+(($_+1)&3),($a6,$b6,$c6,$d6));
my ($xv8,$xv9,$xv10,$xv11,$xv12,$xv13,$xv14,$xv15,$xv16,$xv17) = map("\"v$_\"",(8..17));
my @x=map("\"v$_\"",(0..31));

	(
	"&vxxlor ($xv15 ,@x[$c7],@x[$c7])", #copy v30 to v13
	"&vxxlorc (@x[$c7], $xv9,$xv9)",
	"&vadduwm (@x[$a0],@x[$a0],@x[$b0])", # Q1
	"&vadduwm (@x[$a1],@x[$a1],@x[$b1])", # Q2
	"&vadduwm (@x[$a2],@x[$a2],@x[$b2])", # Q3
	"&vadduwm (@x[$a3],@x[$a3],@x[$b3])", # Q4
	"&vadduwm (@x[$a4],@x[$a4],@x[$b4])", # Q1
	"&vadduwm (@x[$a5],@x[$a5],@x[$b5])", # Q2
	"&vadduwm (@x[$a6],@x[$a6],@x[$b6])", # Q3
	"&vadduwm (@x[$a7],@x[$a7],@x[$b7])", # Q4
	"&vxor (@x[$d0],@x[$d0],@x[$a0])",
	"&vxor (@x[$d1],@x[$d1],@x[$a1])",
	"&vxor (@x[$d2],@x[$d2],@x[$a2])",
	"&vxor (@x[$d3],@x[$d3],@x[$a3])",
	"&vxor (@x[$d4],@x[$d4],@x[$a4])",
	"&vxor (@x[$d5],@x[$d5],@x[$a5])",
	"&vxor (@x[$d6],@x[$d6],@x[$a6])",
	"&vxor (@x[$d7],@x[$d7],@x[$a7])",
	"&vrlw (@x[$d0],@x[$d0],@x[$c7])",
	"&vrlw (@x[$d1],@x[$d1],@x[$c7])",
	"&vrlw (@x[$d2],@x[$d2],@x[$c7])",
	"&vrlw (@x[$d3],@x[$d3],@x[$c7])",
	"&vrlw (@x[$d4],@x[$d4],@x[$c7])",
	"&vrlw (@x[$d5],@x[$d5],@x[$c7])",
	"&vrlw (@x[$d6],@x[$d6],@x[$c7])",
	"&vrlw (@x[$d7],@x[$d7],@x[$c7])",
	"&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
	"&vxxlorc (@x[$c7], $xv15,$xv15)",
	"&vxxlorc (@x[$a7], $xv10,$xv10)",
	"&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
	"&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
	"&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
	"&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
	"&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
	"&vxor (@x[$b0],@x[$b0],@x[$c0])",
	"&vxor (@x[$b1],@x[$b1],@x[$c1])",
	"&vxor (@x[$b2],@x[$b2],@x[$c2])",
	"&vxor (@x[$b3],@x[$b3],@x[$c3])",
	"&vxor (@x[$b4],@x[$b4],@x[$c4])",
	"&vxor (@x[$b5],@x[$b5],@x[$c5])",
	"&vxor (@x[$b6],@x[$b6],@x[$c6])",
	"&vxor (@x[$b7],@x[$b7],@x[$c7])",
	"&vrlw (@x[$b0],@x[$b0],@x[$a7])",
	"&vrlw (@x[$b1],@x[$b1],@x[$a7])",
	"&vrlw (@x[$b2],@x[$b2],@x[$a7])",
	"&vrlw (@x[$b3],@x[$b3],@x[$a7])",
	"&vrlw (@x[$b4],@x[$b4],@x[$a7])",
	"&vrlw (@x[$b5],@x[$b5],@x[$a7])",
	"&vrlw (@x[$b6],@x[$b6],@x[$a7])",
	"&vrlw (@x[$b7],@x[$b7],@x[$a7])",
	"&vxxlorc (@x[$a7], $xv13,$xv13)",
	"&vxxlor ($xv15 ,@x[$c7],@x[$c7])",
	"&vxxlorc (@x[$c7], $xv11,$xv11)",
	"&vadduwm (@x[$a0],@x[$a0],@x[$b0])",
	"&vadduwm (@x[$a1],@x[$a1],@x[$b1])",
	"&vadduwm (@x[$a2],@x[$a2],@x[$b2])",
	"&vadduwm (@x[$a3],@x[$a3],@x[$b3])",
	"&vadduwm (@x[$a4],@x[$a4],@x[$b4])",
	"&vadduwm (@x[$a5],@x[$a5],@x[$b5])",
	"&vadduwm (@x[$a6],@x[$a6],@x[$b6])",
	"&vadduwm (@x[$a7],@x[$a7],@x[$b7])",
	"&vxor (@x[$d0],@x[$d0],@x[$a0])",
	"&vxor (@x[$d1],@x[$d1],@x[$a1])",
	"&vxor (@x[$d2],@x[$d2],@x[$a2])",
	"&vxor (@x[$d3],@x[$d3],@x[$a3])",
	"&vxor (@x[$d4],@x[$d4],@x[$a4])",
	"&vxor (@x[$d5],@x[$d5],@x[$a5])",
	"&vxor (@x[$d6],@x[$d6],@x[$a6])",
	"&vxor (@x[$d7],@x[$d7],@x[$a7])",
	"&vrlw (@x[$d0],@x[$d0],@x[$c7])",
	"&vrlw (@x[$d1],@x[$d1],@x[$c7])",
	"&vrlw (@x[$d2],@x[$d2],@x[$c7])",
	"&vrlw (@x[$d3],@x[$d3],@x[$c7])",
	"&vrlw (@x[$d4],@x[$d4],@x[$c7])",
	"&vrlw (@x[$d5],@x[$d5],@x[$c7])",
	"&vrlw (@x[$d6],@x[$d6],@x[$c7])",
	"&vrlw (@x[$d7],@x[$d7],@x[$c7])",
	"&vxxlorc (@x[$c7], $xv15,$xv15)",
	"&vxxlor ($xv13 ,@x[$a7],@x[$a7])",
	"&vxxlorc (@x[$a7], $xv12,$xv12)",
	"&vadduwm (@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm (@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm (@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm (@x[$c3],@x[$c3],@x[$d3])",
	"&vadduwm (@x[$c4],@x[$c4],@x[$d4])",
	"&vadduwm (@x[$c5],@x[$c5],@x[$d5])",
	"&vadduwm (@x[$c6],@x[$c6],@x[$d6])",
	"&vadduwm (@x[$c7],@x[$c7],@x[$d7])",
	"&vxor (@x[$b0],@x[$b0],@x[$c0])",
	"&vxor (@x[$b1],@x[$b1],@x[$c1])",
	"&vxor (@x[$b2],@x[$b2],@x[$c2])",
	"&vxor (@x[$b3],@x[$b3],@x[$c3])",
	"&vxor (@x[$b4],@x[$b4],@x[$c4])",
	"&vxor (@x[$b5],@x[$b5],@x[$c5])",
	"&vxor (@x[$b6],@x[$b6],@x[$c6])",
	"&vxor (@x[$b7],@x[$b7],@x[$c7])",
	"&vrlw (@x[$b0],@x[$b0],@x[$a7])",
	"&vrlw (@x[$b1],@x[$b1],@x[$a7])",
	"&vrlw (@x[$b2],@x[$b2],@x[$a7])",
	"&vrlw (@x[$b3],@x[$b3],@x[$a7])",
	"&vrlw (@x[$b4],@x[$b4],@x[$a7])",
	"&vrlw (@x[$b5],@x[$b5],@x[$a7])",
	"&vrlw (@x[$b6],@x[$b6],@x[$a7])",
	"&vrlw (@x[$b7],@x[$b7],@x[$a7])",
	"&vxxlorc (@x[$a7], $xv13,$xv13)",
	);
}

$code.=<<___;
.globl .ChaCha20_ctr32_vsx_8x
.align 5
.ChaCha20_ctr32_vsx_8x:
	$STU $sp,-$FRAME($sp)
	mflr r0
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	mfspr r12,256
	stvx v24,r10,$sp
	addi r10,r10,32
	stvx v25,r11,$sp
	addi r11,r11,32
	stvx v26,r10,$sp
	addi r10,r10,32
	stvx v27,r11,$sp
	addi r11,r11,32
	stvx v28,r10,$sp
	addi r10,r10,32
	stvx v29,r11,$sp
	addi r11,r11,32
	stvx v30,r10,$sp
	stvx v31,r11,$sp
	stw r12,`$FRAME-4`($sp) # save vrsave
	li r12,-4096+63
	$PUSH r0, `$FRAME+$LRSAVE`($sp)
	mtspr 256,r12 # preserve 29 AltiVec registers
	bl Lconsts # returns pointer Lsigma in r12
	lvx_4w @K[0],0,r12 # load sigma
	addi r12,r12,0x70
	li $x10,16
	li $x20,32
	li $x30,48
	li r11,64
	vspltisw $xa4,-16 # synthesize constants
	vspltisw $xb4,12 # synthesize constants
	vspltisw $xc4,8 # synthesize constants
	vspltisw $xd4,7 # synthesize constants
	lvx $xa0,$x00,r12 # load [smashed] sigma
	lvx $xa1,$x10,r12
	lvx $xa2,$x20,r12
	lvx $xa3,$x30,r12
	vxxlor $xv9 ,$xa4,$xa4 #save shift val in vr9-12
	vxxlor $xv10 ,$xb4,$xb4
	vxxlor $xv11 ,$xc4,$xc4
	vxxlor $xv12 ,$xd4,$xd4
	vxxlor $xv22 ,$xa0,$xa0 #save sigma in vr22-25
	vxxlor $xv23 ,$xa1,$xa1
	vxxlor $xv24 ,$xa2,$xa2
	vxxlor $xv25 ,$xa3,$xa3
	lvx_4w @K[1],0,$key # load key
	lvx_4w @K[2],$x10,$key
	lvx_4w @K[3],0,$ctr # load counter
	vspltisw $xt3,4
	vxor $xt2,$xt2,$xt2
	lvx_4w $xt1,r11,r12
	vspltw $xa2,@K[3],0 # save the original counter word after vspltw
	vsldoi @K[3],@K[3],$xt2,4
	vsldoi @K[3],$xt2,@K[3],12 # clear @K[3].word[0]
	vadduwm $xt1,$xa2,$xt1
	vadduwm $xt3,$xt1,$xt3 # next counter value
	vspltw $xa0,@K[2],2 # splat K[2] word 2; parked in xv8 below
	be?lvsl $beperm,0,$x10 # 0x00..0f
	be?vspltisb $xt0,3 # 0x03..03
	be?vxor $beperm,$beperm,$xt0 # swap bytes within words
	be?vxxlor $xv26 ,$beperm,$beperm
	vxxlor $xv0 ,@K[0],@K[0] # K[0]-K[3] to xv0-xv3
	vxxlor $xv1 ,@K[1],@K[1]
	vxxlor $xv2 ,@K[2],@K[2]
	vxxlor $xv3 ,@K[3],@K[3]
	vxxlor $xv4 ,$xt1,$xt1 #CTR ->4, CTR+4-> 5
	vxxlor $xv5 ,$xt3,$xt3
	vxxlor $xv8 ,$xa0,$xa0
	li r0,10 # inner loop counter
	mtctr r0
	b Loop_outer_vsx_8x

.align 5
Loop_outer_vsx_8x:
	vxxlorc $xa0,$xv22,$xv22 # load [smashed] sigma
	vxxlorc $xa1,$xv23,$xv23
	vxxlorc $xa2,$xv24,$xv24
	vxxlorc $xa3,$xv25,$xv25
	vxxlorc $xa4,$xv22,$xv22
	vxxlorc $xa5,$xv23,$xv23
	vxxlorc $xa6,$xv24,$xv24
	vxxlorc $xa7,$xv25,$xv25
	vspltw $xb0,@K[1],0 # smash the key
	vspltw $xb1,@K[1],1
	vspltw $xb2,@K[1],2
	vspltw $xb3,@K[1],3
	vspltw $xb4,@K[1],0 # smash the key
	vspltw $xb5,@K[1],1
	vspltw $xb6,@K[1],2
	vspltw $xb7,@K[1],3
	vspltw $xc0,@K[2],0
	vspltw $xc1,@K[2],1
	vspltw $xc2,@K[2],2
	vspltw $xc3,@K[2],3
	vspltw $xc4,@K[2],0
	vspltw $xc7,@K[2],3
	vspltw $xc5,@K[2],1
	vxxlorc $xd0,$xv4,$xv4 # smash the counter
	vspltw $xd1,@K[3],1
	vspltw $xd2,@K[3],2
	vspltw $xd3,@K[3],3
	vxxlorc $xd4,$xv5,$xv5 # smash the counter
	vspltw $xd5,@K[3],1
	vspltw $xd6,@K[3],2
	vspltw $xd7,@K[3],3
	vxxlorc $xc6,$xv8,$xv8 # copy of vspltw K[2],2 is kept in xv8; v26 holds K[3], so this waits until K[3] smashing is done

Loop_vsx_8x:
___
foreach (&VSX_lane_ROUND_8x(0,4, 8,12,16,20,24,28)) { eval; }
foreach (&VSX_lane_ROUND_8x(0,5,10,15,16,21,26,31)) { eval; }
$code.=<<___;
	bdnz Loop_vsx_8x
	vxxlor $xv13 ,$xd4,$xd4 # save the register vr24-31
	vxxlor $xv14 ,$xd5,$xd5 #
	vxxlor $xv15 ,$xd6,$xd6 #
	vxxlor $xv16 ,$xd7,$xd7 #
	vxxlor $xv18 ,$xc4,$xc4 #
	vxxlor $xv19 ,$xc5,$xc5 #
	vxxlor $xv20 ,$xc6,$xc6 #
	vxxlor $xv21 ,$xc7,$xc7 #
	vxxlor $xv6 ,$xb6,$xb6 # save vr23, so we get 8 regs
	vxxlor $xv7 ,$xb7,$xb7 # save vr23, so we get 8 regs
	be?vxxlorc $beperm,$xv26,$xv26 # copy back the beperm
	vxxlorc @K[0],$xv0,$xv0 #27
	vxxlorc @K[1],$xv1,$xv1 #24
	vxxlorc @K[2],$xv2,$xv2 #25
	vxxlorc @K[3],$xv3,$xv3 #26
	vxxlorc $CTR0,$xv4,$xv4
###changing to vertical
	vmrgew $xt0,$xa0,$xa1 # transpose data
	vmrgew $xt1,$xa2,$xa3
	vmrgow $xa0,$xa0,$xa1
	vmrgow $xa2,$xa2,$xa3
	vmrgew $xt2,$xb0,$xb1
	vmrgew $xt3,$xb2,$xb3
	vmrgow $xb0,$xb0,$xb1
	vmrgow $xb2,$xb2,$xb3
	vadduwm $xd0,$xd0,$CTR0
	vpermdi $xa1,$xa0,$xa2,0b00
	vpermdi $xa3,$xa0,$xa2,0b11
	vpermdi $xa0,$xt0,$xt1,0b00
	vpermdi $xa2,$xt0,$xt1,0b11
	vpermdi $xb1,$xb0,$xb2,0b00
	vpermdi $xb3,$xb0,$xb2,0b11
	vpermdi $xb0,$xt2,$xt3,0b00
	vpermdi $xb2,$xt2,$xt3,0b11
	vmrgew $xt0,$xc0,$xc1
	vmrgew $xt1,$xc2,$xc3
	vmrgow $xc0,$xc0,$xc1
	vmrgow $xc2,$xc2,$xc3
	vmrgew $xt2,$xd0,$xd1
	vmrgew $xt3,$xd2,$xd3
	vmrgow $xd0,$xd0,$xd1
	vmrgow $xd2,$xd2,$xd3
	vpermdi $xc1,$xc0,$xc2,0b00
	vpermdi $xc3,$xc0,$xc2,0b11
	vpermdi $xc0,$xt0,$xt1,0b00
	vpermdi $xc2,$xt0,$xt1,0b11
	vpermdi $xd1,$xd0,$xd2,0b00
	vpermdi $xd3,$xd0,$xd2,0b11
	vpermdi $xd0,$xt2,$xt3,0b00
	vpermdi $xd2,$xt2,$xt3,0b11
	vspltisw $xt0,8
	vadduwm $CTR0,$CTR0,$xt0 # next counter value
	vxxlor $xv4 ,$CTR0,$CTR0 #CTR+4-> 5
	vadduwm $xa0,$xa0,@K[0]
	vadduwm $xb0,$xb0,@K[1]
	vadduwm $xc0,$xc0,@K[2]
	vadduwm $xd0,$xd0,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xa0,$xa1,@K[0]
	vadduwm $xb0,$xb1,@K[1]
	vadduwm $xc0,$xc1,@K[2]
	vadduwm $xd0,$xd1,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xa0,$xa2,@K[0]
	vadduwm $xb0,$xb2,@K[1]
	vadduwm $xc0,$xc2,@K[2]
	vadduwm $xd0,$xd2,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xa0,$xa3,@K[0]
	vadduwm $xb0,$xb3,@K[1]
	vadduwm $xc0,$xc3,@K[2]
	vadduwm $xd0,$xd3,@K[3]
	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x
	lvx_4w $xt0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xt0,$xt0,$xa0
	vxor $xt1,$xt1,$xb0
	vxor $xt2,$xt2,$xc0
	vxor $xt3,$xt3,$xd0
	stvx_4w $xt0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
# blk4-7: v24-v31 remain the same, so the same logic as above applies; regs a4-b7
# stay put, c4-d7 are loaded into positions 8-15, and vr24-31 can be reused.
# VR0-3 are used to load temp values; vr4 serves as xr0 instead of xt0.
	vxxlorc $CTR1 ,$xv5,$xv5
	vxxlorc $xcn4 ,$xv18,$xv18
	vxxlorc $xcn5 ,$xv19,$xv19
	vxxlorc $xcn6 ,$xv20,$xv20
	vxxlorc $xcn7 ,$xv21,$xv21
	vxxlorc $xdn4 ,$xv13,$xv13
	vxxlorc $xdn5 ,$xv14,$xv14
	vxxlorc $xdn6 ,$xv15,$xv15
	vxxlorc $xdn7 ,$xv16,$xv16
	vadduwm $xdn4,$xdn4,$CTR1
	vxxlorc $xb6 ,$xv6,$xv6
	vxxlorc $xb7 ,$xv7,$xv7
# use xr0 (v4) in place of xt0 for blocks 4-7
	vmrgew $xr0,$xa4,$xa5 # transpose data
	vmrgew $xt1,$xa6,$xa7
	vmrgow $xa4,$xa4,$xa5
	vmrgow $xa6,$xa6,$xa7
	vmrgew $xt2,$xb4,$xb5
	vmrgew $xt3,$xb6,$xb7
	vmrgow $xb4,$xb4,$xb5
	vmrgow $xb6,$xb6,$xb7
	vpermdi $xa5,$xa4,$xa6,0b00
	vpermdi $xa7,$xa4,$xa6,0b11
	vpermdi $xa4,$xr0,$xt1,0b00
	vpermdi $xa6,$xr0,$xt1,0b11
	vpermdi $xb5,$xb4,$xb6,0b00
	vpermdi $xb7,$xb4,$xb6,0b11
	vpermdi $xb4,$xt2,$xt3,0b00
	vpermdi $xb6,$xt2,$xt3,0b11
	vmrgew $xr0,$xcn4,$xcn5
	vmrgew $xt1,$xcn6,$xcn7
	vmrgow $xcn4,$xcn4,$xcn5
	vmrgow $xcn6,$xcn6,$xcn7
	vmrgew $xt2,$xdn4,$xdn5
	vmrgew $xt3,$xdn6,$xdn7
	vmrgow $xdn4,$xdn4,$xdn5
	vmrgow $xdn6,$xdn6,$xdn7
	vpermdi $xcn5,$xcn4,$xcn6,0b00
	vpermdi $xcn7,$xcn4,$xcn6,0b11
	vpermdi $xcn4,$xr0,$xt1,0b00
	vpermdi $xcn6,$xr0,$xt1,0b11
	vpermdi $xdn5,$xdn4,$xdn6,0b00
	vpermdi $xdn7,$xdn4,$xdn6,0b11
	vpermdi $xdn4,$xt2,$xt3,0b00
	vpermdi $xdn6,$xt2,$xt3,0b11
	vspltisw $xr0,8
	vadduwm $CTR1,$CTR1,$xr0 # next counter value
	vxxlor $xv5 ,$CTR1,$CTR1 #CTR+4-> 5
	vadduwm $xan0,$xa4,@K[0]
	vadduwm $xbn0,$xb4,@K[1]
	vadduwm $xcn0,$xcn4,@K[2]
	vadduwm $xdn0,$xdn4,@K[3]
	be?vperm $xan0,$xan0,$xan0,$beperm
	be?vperm $xbn0,$xbn0,$xbn0,$beperm
	be?vperm $xcn0,$xcn0,$xcn0,$beperm
	be?vperm $xdn0,$xdn0,$xdn0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x_1
	lvx_4w $xr0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xr0,$xr0,$xan0
	vxor $xt1,$xt1,$xbn0
	vxor $xt2,$xt2,$xcn0
	vxor $xt3,$xt3,$xdn0
	stvx_4w $xr0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xan0,$xa5,@K[0]
	vadduwm $xbn0,$xb5,@K[1]
	vadduwm $xcn0,$xcn5,@K[2]
	vadduwm $xdn0,$xdn5,@K[3]
	be?vperm $xan0,$xan0,$xan0,$beperm
	be?vperm $xbn0,$xbn0,$xbn0,$beperm
	be?vperm $xcn0,$xcn0,$xcn0,$beperm
	be?vperm $xdn0,$xdn0,$xdn0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x_1
	lvx_4w $xr0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xr0,$xr0,$xan0
	vxor $xt1,$xt1,$xbn0
	vxor $xt2,$xt2,$xcn0
	vxor $xt3,$xt3,$xdn0
	stvx_4w $xr0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xan0,$xa6,@K[0]
	vadduwm $xbn0,$xb6,@K[1]
	vadduwm $xcn0,$xcn6,@K[2]
	vadduwm $xdn0,$xdn6,@K[3]
	be?vperm $xan0,$xan0,$xan0,$beperm
	be?vperm $xbn0,$xbn0,$xbn0,$beperm
	be?vperm $xcn0,$xcn0,$xcn0,$beperm
	be?vperm $xdn0,$xdn0,$xdn0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x_1
	lvx_4w $xr0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xr0,$xr0,$xan0
	vxor $xt1,$xt1,$xbn0
	vxor $xt2,$xt2,$xcn0
	vxor $xt3,$xt3,$xdn0
	stvx_4w $xr0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	vadduwm $xan0,$xa7,@K[0]
	vadduwm $xbn0,$xb7,@K[1]
	vadduwm $xcn0,$xcn7,@K[2]
	vadduwm $xdn0,$xdn7,@K[3]
	be?vperm $xan0,$xan0,$xan0,$beperm
	be?vperm $xbn0,$xbn0,$xbn0,$beperm
	be?vperm $xcn0,$xcn0,$xcn0,$beperm
	be?vperm $xdn0,$xdn0,$xdn0,$beperm
	${UCMP}i $len,0x40
	blt Ltail_vsx_8x_1
	lvx_4w $xr0,$x00,$inp
	lvx_4w $xt1,$x10,$inp
	lvx_4w $xt2,$x20,$inp
	lvx_4w $xt3,$x30,$inp
	vxor $xr0,$xr0,$xan0
	vxor $xt1,$xt1,$xbn0
	vxor $xt2,$xt2,$xcn0
	vxor $xt3,$xt3,$xdn0
	stvx_4w $xr0,$x00,$out
	stvx_4w $xt1,$x10,$out
	addi $inp,$inp,0x40
	stvx_4w $xt2,$x20,$out
	subi $len,$len,0x40
	stvx_4w $xt3,$x30,$out
	addi $out,$out,0x40
	beq Ldone_vsx_8x
	mtctr r0
	bne Loop_outer_vsx_8x

Ldone_vsx_8x:
	lwz r12,`$FRAME-4`($sp) # pull vrsave
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	$POP r0, `$FRAME+$LRSAVE`($sp)
	mtspr 256,r12 # restore vrsave
	lvx v24,r10,$sp
	addi r10,r10,32
	lvx v25,r11,$sp
	addi r11,r11,32
	lvx v26,r10,$sp
	addi r10,r10,32
	lvx v27,r11,$sp
	addi r11,r11,32
	lvx v28,r10,$sp
	addi r10,r10,32
	lvx v29,r11,$sp
	addi r11,r11,32
	lvx v30,r10,$sp
	lvx v31,r11,$sp
	mtlr r0
	addi $sp,$sp,$FRAME
	blr

.align 4
Ltail_vsx_8x:
	addi r11,$sp,$LOCALS
	mtctr $len
	stvx_4w $xa0,$x00,r11 # offload block to stack
	stvx_4w $xb0,$x10,r11
	stvx_4w $xc0,$x20,r11
	stvx_4w $xd0,$x30,r11
	subi r12,r11,1 # prepare for *++ptr
	subi $inp,$inp,1
	subi $out,$out,1
	bl Loop_tail_vsx_8x

Ltail_vsx_8x_1:
	addi r11,$sp,$LOCALS
	mtctr $len
	stvx_4w $xan0,$x00,r11 # offload block to stack
	stvx_4w $xbn0,$x10,r11
	stvx_4w $xcn0,$x20,r11
	stvx_4w $xdn0,$x30,r11
	subi r12,r11,1 # prepare for *++ptr
	subi $inp,$inp,1
	subi $out,$out,1
	bl Loop_tail_vsx_8x

Loop_tail_vsx_8x:
	lbzu r6,1(r12)
	lbzu r7,1($inp)
	xor r6,r6,r7
	stbu r6,1($out)
	bdnz Loop_tail_vsx_8x
	stvx_4w $K[0],$x00,r11 # wipe copy of the block
	stvx_4w $K[0],$x10,r11
	stvx_4w $K[0],$x20,r11
	stvx_4w $K[0],$x30,r11
	b Ldone_vsx_8x
	.long 0
	.byte 0,12,0x04,1,0x80,0,5,0
	.long 0
.size .ChaCha20_ctr32_vsx_8x,.-.ChaCha20_ctr32_vsx_8x
___
}}}

$code.=<<___;
.align 5
Lconsts:
	mflr r0
	bcl 20,31,\$+4
	mflr r12 #vvvvv "distance between . and Lsigma
	addi r12,r12,`64-8`
	mtlr r0
	blr
	.long 0
	.byte 0,12,0x14,0,0,0,0,0
	.space `64-9*4`
Lsigma:
	.long 0x61707865,0x3320646e,0x79622d32,0x6b206574
	.long 1,0,0,0
	.long 2,0,0,0
	.long 3,0,0,0
	.long 4,0,0,0
___
$code.=<<___ if ($LITTLE_ENDIAN);
	.long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
	.long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
___
$code.=<<___ if (!$LITTLE_ENDIAN);	# flipped words
	.long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
	.long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
___
$code.=<<___;
	.long 0x61707865,0x61707865,0x61707865,0x61707865
	.long 0x3320646e,0x3320646e,0x3320646e,0x3320646e
	.long 0x79622d32,0x79622d32,0x79622d32,0x79622d32
	.long 0x6b206574,0x6b206574,0x6b206574,0x6b206574
	.long 0,1,2,3
	.long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c
.asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	# instructions prefixed with '?' are endian-specific and need
	# to be adjusted accordingly...
	if ($flavour !~ /le$/) {	# big-endian
	    s/be\?// or
	    s/le\?/#le#/ or
	    s/\?lvsr/lvsl/ or
	    s/\?lvsl/lvsr/ or
	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
	} else {			# little-endian
	    s/le\?// or
	    s/be\?/#be#/ or
	    s/\?([a-z]+)/$1/ or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
	}

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";