#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# October 2015
#
# ChaCha20 for PowerPC/AltiVec.
#
# June 2018
#
# Add VSX 2.07 code path. Original 3xAltiVec+1xIALU is well-suited for
# processors that can't issue more than one vector instruction per
# cycle. But POWER8 (and POWER9) can issue a pair, and vector-only 4x
# interleave would perform better. Incidentally PowerISA 2.07 (first
# implemented by POWER8) defined new usable instructions, hence 4xVSX
# code path...
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.x	3xAltiVec+1xIALU	4xVSX
#
# Freescale e300	13.6/+115%	-			-
# PPC74x0/G4e		 6.81/+310%	3.81			-
# PPC970/G5		 9.29/+160%	?			-
# POWER7		 8.62/+61%	3.35			-
# POWER8		 8.70/+51%	2.91			2.09
# POWER9		 8.80/+29%	4.44(*)			2.45(**)
#
# (*)	a trade-off result; it is possible to improve it, but doing so
#	would penalize all the other processors;
# (**)	POWER9 seems to be "allergic" to mixing vector and integer
#	instructions, which is why the switch to vector-only code pays
#	off that much.
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
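# A typical invocation therefore passes the flavour first and the output
# file (with an extension) last; "linux64le" here is just an example of an
# OpenSSL-style flavour string:
#
#	perl chacha-ppc.pl linux64le chacha-ppc.s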

if ($flavour =~ /64/) {
	$SIZE_T	=8;
	$LRSAVE	=2*$SIZE_T;
	$STU	="stdu";
	$POP	="ld";
	$PUSH	="std";
	$UCMP	="cmpld";
} elsif ($flavour =~ /32/) {
	$SIZE_T	=4;
	$LRSAVE	=$SIZE_T;
	$STU	="stwu";
	$POP	="lwz";
	$PUSH	="stw";
	$UCMP	="cmplw";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";

$LOCALS=6*$SIZE_T;
$FRAME=$LOCALS+64+18*$SIZE_T;	# 64 is for local variables

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $code .= "\t$opcode\t".join(',',@_)."\n";
}
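# In other words, a call such as &add("r16","r16","r20") appends
# "\tadd\tr16,r16,r20\n" to $code, and an underscore in the method name
# becomes a dot in the mnemonic. The ROUND generators below rely on this
# by returning lists of such calls as strings to be eval'ed later.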
my $sp = "r1";

my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));

my @x=map("r$_",(16..31));
my @d=map("r$_",(11,12,14,15));
my @t=map("r$_",(7..10));

sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

	(
	"&add		(@x[$a0],@x[$a0],@x[$b0])",
	"&add		(@x[$a1],@x[$a1],@x[$b1])",
	"&add		(@x[$a2],@x[$a2],@x[$b2])",
	"&add		(@x[$a3],@x[$a3],@x[$b3])",
	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
	"&xor		(@x[$d1],@x[$d1],@x[$a1])",
	"&xor		(@x[$d2],@x[$d2],@x[$a2])",
	"&xor		(@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi	(@x[$d0],@x[$d0],16)",
	"&rotlwi	(@x[$d1],@x[$d1],16)",
	"&rotlwi	(@x[$d2],@x[$d2],16)",
	"&rotlwi	(@x[$d3],@x[$d3],16)",
	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	"&add		(@x[$c1],@x[$c1],@x[$d1])",
	"&add		(@x[$c2],@x[$c2],@x[$d2])",
	"&add		(@x[$c3],@x[$c3],@x[$d3])",
	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
	"&xor		(@x[$b1],@x[$b1],@x[$c1])",
	"&xor		(@x[$b2],@x[$b2],@x[$c2])",
	"&xor		(@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi	(@x[$b0],@x[$b0],12)",
	"&rotlwi	(@x[$b1],@x[$b1],12)",
	"&rotlwi	(@x[$b2],@x[$b2],12)",
	"&rotlwi	(@x[$b3],@x[$b3],12)",
	"&add		(@x[$a0],@x[$a0],@x[$b0])",
	"&add		(@x[$a1],@x[$a1],@x[$b1])",
	"&add		(@x[$a2],@x[$a2],@x[$b2])",
	"&add		(@x[$a3],@x[$a3],@x[$b3])",
	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
	"&xor		(@x[$d1],@x[$d1],@x[$a1])",
	"&xor		(@x[$d2],@x[$d2],@x[$a2])",
	"&xor		(@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi	(@x[$d0],@x[$d0],8)",
	"&rotlwi	(@x[$d1],@x[$d1],8)",
	"&rotlwi	(@x[$d2],@x[$d2],8)",
	"&rotlwi	(@x[$d3],@x[$d3],8)",
	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	"&add		(@x[$c1],@x[$c1],@x[$d1])",
	"&add		(@x[$c2],@x[$c2],@x[$d2])",
	"&add		(@x[$c3],@x[$c3],@x[$d3])",
	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
	"&xor		(@x[$b1],@x[$b1],@x[$c1])",
	"&xor		(@x[$b2],@x[$b2],@x[$c2])",
	"&xor		(@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi	(@x[$b0],@x[$b0],7)",
	"&rotlwi	(@x[$b1],@x[$b1],7)",
	"&rotlwi	(@x[$b2],@x[$b2],7)",
	"&rotlwi	(@x[$b3],@x[$b3],7)"
	);
}
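# For reference, a minimal pure-Perl sketch of the ChaCha quarter-round
# that each column of the interleaved schedule above performs (rotations
# are done with rotlwi on 32-bit GPRs). It is illustrative only and is
# never called by this module.
sub __quarterround_ref {
my ($a,$b,$c,$d)=@_;			# four 32-bit state words
my $rotl = sub { (($_[0]<<$_[1]) | ($_[0]>>(32-$_[1]))) & 0xffffffff };
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,16);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,12);
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,8);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,7);
	($a,$b,$c,$d);
}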
$code.=<<___;
.machine	"any"
.text

.globl	.ChaCha20_ctr32_int
.align	5
.ChaCha20_ctr32_int:
__ChaCha20_ctr32_int:
	${UCMP}i $len,0
	beqlr-

	$STU	$sp,-$FRAME($sp)
	mflr	r0

	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH	r0,`$FRAME+$LRSAVE`($sp)

	lwz	@d[0],0($ctr)			# load counter
	lwz	@d[1],4($ctr)
	lwz	@d[2],8($ctr)
	lwz	@d[3],12($ctr)

	bl	__ChaCha20_1x

	$POP	r0,`$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,18,5,0
	.long	0
.size	.ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int

.align	5
__ChaCha20_1x:
Loop_outer:
	lis	@x[0],0x6170			# synthesize sigma
	lis	@x[1],0x3320
	lis	@x[2],0x7962
	lis	@x[3],0x6b20
	ori	@x[0],@x[0],0x7865
	ori	@x[1],@x[1],0x646e
	ori	@x[2],@x[2],0x2d32
	ori	@x[3],@x[3],0x6574

	li	r0,10				# inner loop counter
	lwz	@x[4],0($key)			# load key
	lwz	@x[5],4($key)
	lwz	@x[6],8($key)
	lwz	@x[7],12($key)
	lwz	@x[8],16($key)
	mr	@x[12],@d[0]			# copy counter
	lwz	@x[9],20($key)
	mr	@x[13],@d[1]
	lwz	@x[10],24($key)
	mr	@x[14],@d[2]
	lwz	@x[11],28($key)
	mr	@x[15],@d[3]

	mr	@t[0],@x[4]
	mr	@t[1],@x[5]
	mr	@t[2],@x[6]
	mr	@t[3],@x[7]

	mtctr	r0
Loop:
___
foreach (&ROUND(0, 4, 8,12)) { eval; }
foreach (&ROUND(0, 5,10,15)) { eval; }
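# (0,4,8,12) expands to the four column quarter-rounds of the 4x4 state
# matrix and (0,5,10,15) to the four diagonal ones; ten trips around Loop
# thus give ChaCha's 20 rounds.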
$code.=<<___;
	bdnz	Loop

	subic	$len,$len,64			# $len-=64
	addi	@x[0],@x[0],0x7865		# accumulate key block
	addi	@x[1],@x[1],0x646e
	addi	@x[2],@x[2],0x2d32
	addi	@x[3],@x[3],0x6574
	addis	@x[0],@x[0],0x6170
	addis	@x[1],@x[1],0x3320
	addis	@x[2],@x[2],0x7962
	addis	@x[3],@x[3],0x6b20

	subfe.	r0,r0,r0			# borrow?-1:0
	add	@x[4],@x[4],@t[0]
	lwz	@t[0],16($key)
	add	@x[5],@x[5],@t[1]
	lwz	@t[1],20($key)
	add	@x[6],@x[6],@t[2]
	lwz	@t[2],24($key)
	add	@x[7],@x[7],@t[3]
	lwz	@t[3],28($key)
	add	@x[8],@x[8],@t[0]
	add	@x[9],@x[9],@t[1]
	add	@x[10],@x[10],@t[2]
	add	@x[11],@x[11],@t[3]

	add	@x[12],@x[12],@d[0]
	add	@x[13],@x[13],@d[1]
	add	@x[14],@x[14],@d[2]
	add	@x[15],@x[15],@d[3]
	addi	@d[0],@d[0],1			# increment counter
___
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
$code.=<<___;
	mr	@t[$i&3],@x[$i]
	rotlwi	@x[$i],@x[$i],8
	rlwimi	@x[$i],@t[$i&3],24,0,7
	rlwimi	@x[$i],@t[$i&3],24,16,23
___
} }
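# Each mr/rotlwi/rlwimi/rlwimi quartet emitted above is the classic
# PowerPC byte-swap idiom: rotating ABCD left by 8 yields BCDA, and the
# two rlwimi insert bytes 0 and 2 of a rotate-by-24 copy (DABC), giving
# the byte-reversed DCBA.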
$code.=<<___;
	bne	Ltail				# $len-=64 borrowed

	lwz	@t[0],0($inp)			# load input, aligned or not
	lwz	@t[1],4($inp)
	${UCMP}i $len,0				# done already?
	lwz	@t[2],8($inp)
	lwz	@t[3],12($inp)
	xor	@x[0],@x[0],@t[0]		# xor with input
	lwz	@t[0],16($inp)
	xor	@x[1],@x[1],@t[1]
	lwz	@t[1],20($inp)
	xor	@x[2],@x[2],@t[2]
	lwz	@t[2],24($inp)
	xor	@x[3],@x[3],@t[3]
	lwz	@t[3],28($inp)
	xor	@x[4],@x[4],@t[0]
	lwz	@t[0],32($inp)
	xor	@x[5],@x[5],@t[1]
	lwz	@t[1],36($inp)
	xor	@x[6],@x[6],@t[2]
	lwz	@t[2],40($inp)
	xor	@x[7],@x[7],@t[3]
	lwz	@t[3],44($inp)
	xor	@x[8],@x[8],@t[0]
	lwz	@t[0],48($inp)
	xor	@x[9],@x[9],@t[1]
	lwz	@t[1],52($inp)
	xor	@x[10],@x[10],@t[2]
	lwz	@t[2],56($inp)
	xor	@x[11],@x[11],@t[3]
	lwz	@t[3],60($inp)
	xor	@x[12],@x[12],@t[0]
	stw	@x[0],0($out)			# store output, aligned or not
	xor	@x[13],@x[13],@t[1]
	stw	@x[1],4($out)
	xor	@x[14],@x[14],@t[2]
	stw	@x[2],8($out)
	xor	@x[15],@x[15],@t[3]
	stw	@x[3],12($out)
	stw	@x[4],16($out)
	stw	@x[5],20($out)
	stw	@x[6],24($out)
	stw	@x[7],28($out)
	stw	@x[8],32($out)
	stw	@x[9],36($out)
	stw	@x[10],40($out)
	stw	@x[11],44($out)
	stw	@x[12],48($out)
	stw	@x[13],52($out)
	stw	@x[14],56($out)
	addi	$inp,$inp,64
	stw	@x[15],60($out)
	addi	$out,$out,64

	bne	Loop_outer

	blr

.align	4
Ltail:
	addi	$len,$len,64			# restore tail length
	subi	$inp,$inp,1			# prepare for *++ptr
	subi	$out,$out,1
	addi	@t[0],$sp,$LOCALS-1
	mtctr	$len

	stw	@x[0],`$LOCALS+0`($sp)		# save whole block to stack
	stw	@x[1],`$LOCALS+4`($sp)
	stw	@x[2],`$LOCALS+8`($sp)
	stw	@x[3],`$LOCALS+12`($sp)
	stw	@x[4],`$LOCALS+16`($sp)
	stw	@x[5],`$LOCALS+20`($sp)
	stw	@x[6],`$LOCALS+24`($sp)
	stw	@x[7],`$LOCALS+28`($sp)
	stw	@x[8],`$LOCALS+32`($sp)
	stw	@x[9],`$LOCALS+36`($sp)
	stw	@x[10],`$LOCALS+40`($sp)
	stw	@x[11],`$LOCALS+44`($sp)
	stw	@x[12],`$LOCALS+48`($sp)
	stw	@x[13],`$LOCALS+52`($sp)
	stw	@x[14],`$LOCALS+56`($sp)
	stw	@x[15],`$LOCALS+60`($sp)

Loop_tail:					# byte-by-byte loop
	lbzu	@d[0],1($inp)
	lbzu	@x[0],1(@t[0])
	xor	@d[1],@d[0],@x[0]
	stbu	@d[1],1($out)
	bdnz	Loop_tail

	stw	$sp,`$LOCALS+0`($sp)		# wipe block on stack
	stw	$sp,`$LOCALS+4`($sp)
	stw	$sp,`$LOCALS+8`($sp)
	stw	$sp,`$LOCALS+12`($sp)
	stw	$sp,`$LOCALS+16`($sp)
	stw	$sp,`$LOCALS+20`($sp)
	stw	$sp,`$LOCALS+24`($sp)
	stw	$sp,`$LOCALS+28`($sp)
	stw	$sp,`$LOCALS+32`($sp)
	stw	$sp,`$LOCALS+36`($sp)
	stw	$sp,`$LOCALS+40`($sp)
	stw	$sp,`$LOCALS+44`($sp)
	stw	$sp,`$LOCALS+48`($sp)
	stw	$sp,`$LOCALS+52`($sp)
	stw	$sp,`$LOCALS+56`($sp)
	stw	$sp,`$LOCALS+60`($sp)

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
___

{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2)
				= map("v$_",(0..11));
my @K = map("v$_",(12..17));
my ($FOUR,$sixteen,$twenty4) = map("v$_",(18..19,23));
my ($inpperm,$outperm,$outmask) = map("v$_",(24..26));
my @D = map("v$_",(27..31));
my ($twelve,$seven,$T0,$T1) = @D;

my $FRAME=$LOCALS+64+10*16+18*$SIZE_T;	# 10*16 is for v23-v31 offload

sub VMXROUND {
my $odd = pop;
my ($a,$b,$c,$d)=@_;

	(
	"&vadduwm	('$a','$a','$b')",
	"&vxor		('$d','$d','$a')",
	"&vperm		('$d','$d','$d','$sixteen')",
	"&vadduwm	('$c','$c','$d')",
	"&vxor		('$b','$b','$c')",
	"&vrlw		('$b','$b','$twelve')",
	"&vadduwm	('$a','$a','$b')",
	"&vxor		('$d','$d','$a')",
	"&vperm		('$d','$d','$d','$twenty4')",
	"&vadduwm	('$c','$c','$d')",
	"&vxor		('$b','$b','$c')",
	"&vrlw		('$b','$b','$seven')",
	"&vrldoi	('$c','$c',8)",
	"&vrldoi	('$b','$b',$odd?4:12)",
	"&vrldoi	('$d','$d',$odd?12:4)"
	);
}
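# A note on the tail of VMXROUND: instead of transposing data between the
# column and diagonal passes, the three vrldoi rotations shift the $b/$c/$d
# lanes by one, two and three word positions within their registers, and
# the $odd flag selects the inverse shifts that put the lanes back in
# column order; vrldoi itself is rewritten into an endian-appropriate
# vsldoi at the bottom of this file.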
$code.=<<___;

.globl	.ChaCha20_ctr32_vmx
.align	5
.ChaCha20_ctr32_vmx:
	${UCMP}i $len,256
	blt	__ChaCha20_ctr32_int

	$STU	$sp,-$FRAME($sp)
	mflr	r0
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mfspr	r12,256
	stvx	v23,r10,$sp
	addi	r10,r10,32
	stvx	v24,r11,$sp
	addi	r11,r11,32
	stvx	v25,r10,$sp
	addi	r10,r10,32
	stvx	v26,r11,$sp
	addi	r11,r11,32
	stvx	v27,r10,$sp
	addi	r10,r10,32
	stvx	v28,r11,$sp
	addi	r11,r11,32
	stvx	v29,r10,$sp
	addi	r10,r10,32
	stvx	v30,r11,$sp
	stvx	v31,r10,$sp
	stw	r12,`$FRAME-$SIZE_T*18-4`($sp)	# save vrsave
	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	li	r12,-4096+511
	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# preserve 29 AltiVec registers

	bl	Lconsts				# returns pointer Lsigma in r12
	li	@x[0],16
	li	@x[1],32
	li	@x[2],48
	li	@x[3],64
	li	@x[4],31			# 31 is not a typo
	li	@x[5],15			# nor is 15

	lvx	@K[1],0,$key			# load key
	?lvsr	$T0,0,$key			# prepare unaligned load
	lvx	@K[2],@x[0],$key
	lvx	@D[0],@x[4],$key

	lvx	@K[3],0,$ctr			# load counter
	?lvsr	$T1,0,$ctr			# prepare unaligned load
	lvx	@D[1],@x[5],$ctr

	lvx	@K[0],0,r12			# load constants
	lvx	@K[5],@x[0],r12			# one
	lvx	$FOUR,@x[1],r12
	lvx	$sixteen,@x[2],r12
	lvx	$twenty4,@x[3],r12

	?vperm	@K[1],@K[2],@K[1],$T0		# align key
	?vperm	@K[2],@D[0],@K[2],$T0
	?vperm	@K[3],@D[1],@K[3],$T1		# align counter

	lwz	@d[0],0($ctr)			# load counter to GPR
	lwz	@d[1],4($ctr)
	vadduwm	@K[3],@K[3],@K[5]		# adjust AltiVec counter
	lwz	@d[2],8($ctr)
	vadduwm	@K[4],@K[3],@K[5]
	lwz	@d[3],12($ctr)
	vadduwm	@K[5],@K[4],@K[5]

	vxor	$T0,$T0,$T0			# 0x00..00
	vspltisw $outmask,-1			# 0xff..ff
	?lvsr	$inpperm,0,$inp			# prepare for unaligned load
	?lvsl	$outperm,0,$out			# prepare for unaligned store
	?vperm	$outmask,$outmask,$T0,$outperm

	be?lvsl	$T0,0,@x[0]			# 0x00..0f
	be?vspltisb $T1,3			# 0x03..03
	be?vxor	$T0,$T0,$T1			# swap bytes within words
	be?vxor	$outperm,$outperm,$T1
	be?vperm $inpperm,$inpperm,$inpperm,$T0

	li	r0,10				# inner loop counter
	b	Loop_outer_vmx

.align	4
Loop_outer_vmx:
	lis	@x[0],0x6170			# synthesize sigma
	lis	@x[1],0x3320
	 vmr	$A0,@K[0]
	lis	@x[2],0x7962
	lis	@x[3],0x6b20
	 vmr	$A1,@K[0]
	ori	@x[0],@x[0],0x7865
	ori	@x[1],@x[1],0x646e
	 vmr	$A2,@K[0]
	ori	@x[2],@x[2],0x2d32
	ori	@x[3],@x[3],0x6574
	 vmr	$B0,@K[1]

	lwz	@x[4],0($key)			# load key to GPR
	 vmr	$B1,@K[1]
	lwz	@x[5],4($key)
	 vmr	$B2,@K[1]
	lwz	@x[6],8($key)
	 vmr	$C0,@K[2]
	lwz	@x[7],12($key)
	 vmr	$C1,@K[2]
	lwz	@x[8],16($key)
	 vmr	$C2,@K[2]
	mr	@x[12],@d[0]			# copy GPR counter
	lwz	@x[9],20($key)
	 vmr	$D0,@K[3]
	mr	@x[13],@d[1]
	lwz	@x[10],24($key)
	 vmr	$D1,@K[4]
	mr	@x[14],@d[2]
	lwz	@x[11],28($key)
	 vmr	$D2,@K[5]
	mr	@x[15],@d[3]

	mr	@t[0],@x[4]
	mr	@t[1],@x[5]
	mr	@t[2],@x[6]
	mr	@t[3],@x[7]

	vspltisw $twelve,12			# synthesize constants
	vspltisw $seven,7

	mtctr	r0
	nop
Loop_vmx:
___

my @thread0=&VMXROUND($A0,$B0,$C0,$D0,0);
my @thread1=&VMXROUND($A1,$B1,$C1,$D1,0);
my @thread2=&VMXROUND($A2,$B2,$C2,$D2,0);
my @thread3=&ROUND(0,4,8,12);

foreach (@thread0) {
	eval;
	eval(shift(@thread1));
	eval(shift(@thread2));
	eval(shift(@thread3));
	eval(shift(@thread3));
	eval(shift(@thread3));
}
foreach (@thread3) { eval; }

@thread0=&VMXROUND($A0,$B0,$C0,$D0,1);
@thread1=&VMXROUND($A1,$B1,$C1,$D1,1);
@thread2=&VMXROUND($A2,$B2,$C2,$D2,1);
@thread3=&ROUND(0,5,10,15);

foreach (@thread0) {
	eval;
	eval(shift(@thread1));
	eval(shift(@thread2));
	eval(shift(@thread3));
	eval(shift(@thread3));
	eval(shift(@thread3));
}
foreach (@thread3) { eval; }
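# Each &VMXROUND yields 15 vector instructions while &ROUND yields 48
# integer ones, so the loops above dispatch one instruction from each of
# the three vector "threads" per three integer instructions and drain the
# integer leftovers afterwards; this software interleave is what keeps
# both the scalar and vector pipelines of the 3xAltiVec+1xIALU design busy.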
$code.=<<___;
	bdnz	Loop_vmx

	subi	$len,$len,256			# $len-=256
	addi	@x[0],@x[0],0x7865		# accumulate key block
	addi	@x[1],@x[1],0x646e
	addi	@x[2],@x[2],0x2d32
	addi	@x[3],@x[3],0x6574
	addis	@x[0],@x[0],0x6170
	addis	@x[1],@x[1],0x3320
	addis	@x[2],@x[2],0x7962
	addis	@x[3],@x[3],0x6b20

	add	@x[4],@x[4],@t[0]
	lwz	@t[0],16($key)
	add	@x[5],@x[5],@t[1]
	lwz	@t[1],20($key)
	add	@x[6],@x[6],@t[2]
	lwz	@t[2],24($key)
	add	@x[7],@x[7],@t[3]
	lwz	@t[3],28($key)
	add	@x[8],@x[8],@t[0]
	add	@x[9],@x[9],@t[1]
	add	@x[10],@x[10],@t[2]
	add	@x[11],@x[11],@t[3]
	add	@x[12],@x[12],@d[0]
	add	@x[13],@x[13],@d[1]
	add	@x[14],@x[14],@d[2]
	add	@x[15],@x[15],@d[3]

	vadduwm	$A0,$A0,@K[0]			# accumulate key block
	vadduwm	$A1,$A1,@K[0]
	vadduwm	$A2,$A2,@K[0]
	vadduwm	$B0,$B0,@K[1]
	vadduwm	$B1,$B1,@K[1]
	vadduwm	$B2,$B2,@K[1]
	vadduwm	$C0,$C0,@K[2]
	vadduwm	$C1,$C1,@K[2]
	vadduwm	$C2,$C2,@K[2]
	vadduwm	$D0,$D0,@K[3]
	vadduwm	$D1,$D1,@K[4]
	vadduwm	$D2,$D2,@K[5]

	addi	@d[0],@d[0],4			# increment counter
	vadduwm	@K[3],@K[3],$FOUR
	vadduwm	@K[4],@K[4],$FOUR
	vadduwm	@K[5],@K[5],$FOUR
___
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
$code.=<<___;
	mr	@t[$i&3],@x[$i]
	rotlwi	@x[$i],@x[$i],8
	rlwimi	@x[$i],@t[$i&3],24,0,7
	rlwimi	@x[$i],@t[$i&3],24,16,23
___
} }
$code.=<<___;
	lwz	@t[0],0($inp)			# load input, aligned or not
	lwz	@t[1],4($inp)
	lwz	@t[2],8($inp)
	lwz	@t[3],12($inp)
	xor	@x[0],@x[0],@t[0]		# xor with input
	lwz	@t[0],16($inp)
	xor	@x[1],@x[1],@t[1]
	lwz	@t[1],20($inp)
	xor	@x[2],@x[2],@t[2]
	lwz	@t[2],24($inp)
	xor	@x[3],@x[3],@t[3]
	lwz	@t[3],28($inp)
	xor	@x[4],@x[4],@t[0]
	lwz	@t[0],32($inp)
	xor	@x[5],@x[5],@t[1]
	lwz	@t[1],36($inp)
	xor	@x[6],@x[6],@t[2]
	lwz	@t[2],40($inp)
	xor	@x[7],@x[7],@t[3]
	lwz	@t[3],44($inp)
	xor	@x[8],@x[8],@t[0]
	lwz	@t[0],48($inp)
	xor	@x[9],@x[9],@t[1]
	lwz	@t[1],52($inp)
	xor	@x[10],@x[10],@t[2]
	lwz	@t[2],56($inp)
	xor	@x[11],@x[11],@t[3]
	lwz	@t[3],60($inp)
	xor	@x[12],@x[12],@t[0]
	stw	@x[0],0($out)			# store output, aligned or not
	xor	@x[13],@x[13],@t[1]
	stw	@x[1],4($out)
	xor	@x[14],@x[14],@t[2]
	stw	@x[2],8($out)
	xor	@x[15],@x[15],@t[3]
	stw	@x[3],12($out)
	addi	$inp,$inp,64
	stw	@x[4],16($out)
	li	@t[0],16
	stw	@x[5],20($out)
	li	@t[1],32
	stw	@x[6],24($out)
	li	@t[2],48
	stw	@x[7],28($out)
	li	@t[3],64
	stw	@x[8],32($out)
	stw	@x[9],36($out)
	stw	@x[10],40($out)
	stw	@x[11],44($out)
	stw	@x[12],48($out)
	stw	@x[13],52($out)
	stw	@x[14],56($out)
	stw	@x[15],60($out)
	addi	$out,$out,64

	lvx	@D[0],0,$inp			# load input
	lvx	@D[1],@t[0],$inp
	lvx	@D[2],@t[1],$inp
	lvx	@D[3],@t[2],$inp
	lvx	@D[4],@t[3],$inp
	addi	$inp,$inp,64

	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[4],@D[3],$inpperm
	vxor	$A0,$A0,@D[0]			# xor with input
	vxor	$B0,$B0,@D[1]
	lvx	@D[1],@t[0],$inp		# keep loading input
	vxor	$C0,$C0,@D[2]
	lvx	@D[2],@t[1],$inp
	vxor	$D0,$D0,@D[3]
	lvx	@D[3],@t[2],$inp
	lvx	@D[0],@t[3],$inp
	addi	$inp,$inp,64
	li	@t[3],63			# 63 is not a typo
	vperm	$A0,$A0,$A0,$outperm		# pre-misalign output
	vperm	$B0,$B0,$B0,$outperm
	vperm	$C0,$C0,$C0,$outperm
	vperm	$D0,$D0,$D0,$outperm

	?vperm	@D[4],@D[1],@D[4],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[0],@D[3],$inpperm
	vxor	$A1,$A1,@D[4]
	vxor	$B1,$B1,@D[1]
	lvx	@D[1],@t[0],$inp		# keep loading input
	vxor	$C1,$C1,@D[2]
	lvx	@D[2],@t[1],$inp
	vxor	$D1,$D1,@D[3]
	lvx	@D[3],@t[2],$inp
	lvx	@D[4],@t[3],$inp		# redundant in aligned case
	addi	$inp,$inp,64
	vperm	$A1,$A1,$A1,$outperm		# pre-misalign output
	vperm	$B1,$B1,$B1,$outperm
	vperm	$C1,$C1,$C1,$outperm
	vperm	$D1,$D1,$D1,$outperm

	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[4],@D[3],$inpperm
	vxor	$A2,$A2,@D[0]
	vxor	$B2,$B2,@D[1]
	vxor	$C2,$C2,@D[2]
	vxor	$D2,$D2,@D[3]
	vperm	$A2,$A2,$A2,$outperm		# pre-misalign output
	vperm	$B2,$B2,$B2,$outperm
	vperm	$C2,$C2,$C2,$outperm
	vperm	$D2,$D2,$D2,$outperm

	andi.	@x[1],$out,15			# is $out aligned?
	mr	@x[0],$out

	vsel	@D[0],$A0,$B0,$outmask		# collect pre-misaligned output
	vsel	@D[1],$B0,$C0,$outmask
	vsel	@D[2],$C0,$D0,$outmask
	vsel	@D[3],$D0,$A1,$outmask
	vsel	$B0,$A1,$B1,$outmask
	vsel	$C0,$B1,$C1,$outmask
	vsel	$D0,$C1,$D1,$outmask
	vsel	$A1,$D1,$A2,$outmask
	vsel	$B1,$A2,$B2,$outmask
	vsel	$C1,$B2,$C2,$outmask
	vsel	$D1,$C2,$D2,$outmask

	#stvx	$A0,0,$out			# take it easy on the edges
	stvx	@D[0],@t[0],$out		# store output
	stvx	@D[1],@t[1],$out
	stvx	@D[2],@t[2],$out
	addi	$out,$out,64
	stvx	@D[3],0,$out
	stvx	$B0,@t[0],$out
	stvx	$C0,@t[1],$out
	stvx	$D0,@t[2],$out
	addi	$out,$out,64
	stvx	$A1,0,$out
	stvx	$B1,@t[0],$out
	stvx	$C1,@t[1],$out
	stvx	$D1,@t[2],$out
	addi	$out,$out,64

	beq	Laligned_vmx

	sub	@x[2],$out,@x[1]		# in misaligned case edges
	li	@x[3],0				# are written byte-by-byte
Lunaligned_tail_vmx:
	stvebx	$D2,@x[3],@x[2]
	addi	@x[3],@x[3],1
	cmpw	@x[3],@x[1]
	bne	Lunaligned_tail_vmx

	sub	@x[2],@x[0],@x[1]
Lunaligned_head_vmx:
	stvebx	$A0,@x[1],@x[2]
	cmpwi	@x[1],15
	addi	@x[1],@x[1],1
	bne	Lunaligned_head_vmx

	${UCMP}i $len,255			# done with 256-byte blocks yet?
	bgt	Loop_outer_vmx
	b	Ldone_vmx

.align	4
Laligned_vmx:
	stvx	$A0,0,@x[0]			# head hexaword was not stored
	${UCMP}i $len,255			# done with 256-byte blocks yet?
	bgt	Loop_outer_vmx
	nop
Ldone_vmx:
	${UCMP}i $len,0				# done yet?
	bnel	__ChaCha20_1x

	lwz	r12,`$FRAME-$SIZE_T*18-4`($sp)	# pull vrsave
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mtspr	256,r12				# restore vrsave
	lvx	v23,r10,$sp
	addi	r10,r10,32
	lvx	v24,r11,$sp
	addi	r11,r11,32
	lvx	v25,r10,$sp
	addi	r10,r10,32
	lvx	v26,r11,$sp
	addi	r11,r11,32
	lvx	v27,r10,$sp
	addi	r10,r10,32
	lvx	v28,r11,$sp
	addi	r11,r11,32
	lvx	v29,r10,$sp
	addi	r10,r10,32
	lvx	v30,r11,$sp
	lvx	v31,r10,$sp
	$POP	r0, `$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,0x04,1,0x80,18,5,0
	.long	0
.size	.ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
___
}}}
{{{
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
my @K = map("v$_",(16..19));
my $CTR = "v26";
my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
my $beperm = "v31";
my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));

my $FRAME=$LOCALS+64+7*16;	# 7*16 is for v26-v31 offload

sub VSX_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"v$_\"",(0..15));

	(
	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	"&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
	"&vxor		(@x[$d1],@x[$d1],@x[$a1])",
	"&vxor		(@x[$d2],@x[$d2],@x[$a2])",
	"&vxor		(@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw		(@x[$d0],@x[$d0],'$sixteen')",
	"&vrlw		(@x[$d1],@x[$d1],'$sixteen')",
	"&vrlw		(@x[$d2],@x[$d2],'$sixteen')",
	"&vrlw		(@x[$d3],@x[$d3],'$sixteen')",
	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
	"&vxor		(@x[$b1],@x[$b1],@x[$c1])",
	"&vxor		(@x[$b2],@x[$b2],@x[$c2])",
	"&vxor		(@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw		(@x[$b0],@x[$b0],'$twelve')",
	"&vrlw		(@x[$b1],@x[$b1],'$twelve')",
	"&vrlw		(@x[$b2],@x[$b2],'$twelve')",
	"&vrlw		(@x[$b3],@x[$b3],'$twelve')",
	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",
	"&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",
	"&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",
	"&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",
	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
	"&vxor		(@x[$d1],@x[$d1],@x[$a1])",
	"&vxor		(@x[$d2],@x[$d2],@x[$a2])",
	"&vxor		(@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw		(@x[$d0],@x[$d0],'$eight')",
	"&vrlw		(@x[$d1],@x[$d1],'$eight')",
	"&vrlw		(@x[$d2],@x[$d2],'$eight')",
	"&vrlw		(@x[$d3],@x[$d3],'$eight')",
	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
	"&vxor		(@x[$b1],@x[$b1],@x[$c1])",
	"&vxor		(@x[$b2],@x[$b2],@x[$c2])",
	"&vxor		(@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw		(@x[$b0],@x[$b0],'$seven')",
	"&vrlw		(@x[$b1],@x[$b1],'$seven')",
	"&vrlw		(@x[$b2],@x[$b2],'$seven')",
	"&vrlw		(@x[$b3],@x[$b3],'$seven')"
	);
}
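# Note that, unlike VMXROUND, no lane shuffling is needed here: the VSX
# path keeps the state "smashed", with each of v0-v15 holding one of the
# 16 state words replicated across four independent blocks (only the
# counter lanes differ), so both column and diagonal rounds reduce to
# plain lane-parallel arithmetic, and the transpose is paid for just once
# per 256 bytes in the vmrgew/vmrgow/vpermdi sequence past Loop_vsx.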
$code.=<<___;

.globl	.ChaCha20_ctr32_vsx
.align	5
.ChaCha20_ctr32_vsx:
	$STU	$sp,-$FRAME($sp)
	mflr	r0
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mfspr	r12,256
	stvx	v26,r10,$sp
	addi	r10,r10,32
	stvx	v27,r11,$sp
	addi	r11,r11,32
	stvx	v28,r10,$sp
	addi	r10,r10,32
	stvx	v29,r11,$sp
	addi	r11,r11,32
	stvx	v30,r10,$sp
	stvx	v31,r11,$sp
	stw	r12,`$FRAME-4`($sp)		# save vrsave
	li	r12,-4096+63
	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# preserve 26 AltiVec registers
	bl	Lconsts				# returns pointer Lsigma in r12
	lvx_4w	@K[0],0,r12			# load sigma
	addi	r12,r12,0x50
	li	$x10,16
	li	$x20,32
	li	$x30,48
	li	r11,64

	lvx_4w	@K[1],0,$key			# load key
	lvx_4w	@K[2],$x10,$key
	lvx_4w	@K[3],0,$ctr			# load counter

	vxor	$xt0,$xt0,$xt0
	lvx_4w	$xt1,r11,r12
	vspltw	$CTR,@K[3],0
	vsldoi	@K[3],@K[3],$xt0,4
	vsldoi	@K[3],$xt0,@K[3],12		# clear @K[3].word[0]
	vadduwm	$CTR,$CTR,$xt1

	be?lvsl	$beperm,0,$x10			# 0x00..0f
	be?vspltisb $xt0,3			# 0x03..03
	be?vxor	$beperm,$beperm,$xt0		# swap bytes within words

	li	r0,10				# inner loop counter
	mtctr	r0
	b	Loop_outer_vsx

.align	5
Loop_outer_vsx:
	lvx	$xa0,$x00,r12			# load [smashed] sigma
	lvx	$xa1,$x10,r12
	lvx	$xa2,$x20,r12
	lvx	$xa3,$x30,r12

	vspltw	$xb0,@K[1],0			# smash the key
	vspltw	$xb1,@K[1],1
	vspltw	$xb2,@K[1],2
	vspltw	$xb3,@K[1],3

	vspltw	$xc0,@K[2],0
	vspltw	$xc1,@K[2],1
	vspltw	$xc2,@K[2],2
	vspltw	$xc3,@K[2],3

	vmr	$xd0,$CTR			# smash the counter
	vspltw	$xd1,@K[3],1
	vspltw	$xd2,@K[3],2
	vspltw	$xd3,@K[3],3

	vspltisw $sixteen,-16			# synthesize constants
	vspltisw $twelve,12
	vspltisw $eight,8
	vspltisw $seven,7

Loop_vsx:
___
foreach (&VSX_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&VSX_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	bdnz	Loop_vsx

	vadduwm	$xd0,$xd0,$CTR

	vmrgew	$xt0,$xa0,$xa1			# transpose data
	vmrgew	$xt1,$xa2,$xa3
	vmrgow	$xa0,$xa0,$xa1
	vmrgow	$xa2,$xa2,$xa3
	vmrgew	$xt2,$xb0,$xb1
	vmrgew	$xt3,$xb2,$xb3
	vpermdi	$xa1,$xa0,$xa2,0b00
	vpermdi	$xa3,$xa0,$xa2,0b11
	vpermdi	$xa0,$xt0,$xt1,0b00
	vpermdi	$xa2,$xt0,$xt1,0b11

	vmrgow	$xb0,$xb0,$xb1
	vmrgow	$xb2,$xb2,$xb3
	vmrgew	$xt0,$xc0,$xc1
	vmrgew	$xt1,$xc2,$xc3
	vpermdi	$xb1,$xb0,$xb2,0b00
	vpermdi	$xb3,$xb0,$xb2,0b11
	vpermdi	$xb0,$xt2,$xt3,0b00
	vpermdi	$xb2,$xt2,$xt3,0b11

	vmrgow	$xc0,$xc0,$xc1
	vmrgow	$xc2,$xc2,$xc3
	vmrgew	$xt2,$xd0,$xd1
	vmrgew	$xt3,$xd2,$xd3
	vpermdi	$xc1,$xc0,$xc2,0b00
	vpermdi	$xc3,$xc0,$xc2,0b11
	vpermdi	$xc0,$xt0,$xt1,0b00
	vpermdi	$xc2,$xt0,$xt1,0b11

	vmrgow	$xd0,$xd0,$xd1
	vmrgow	$xd2,$xd2,$xd3
	vspltisw $xt0,4
	vadduwm	$CTR,$CTR,$xt0			# next counter value
	vpermdi	$xd1,$xd0,$xd2,0b00
	vpermdi	$xd3,$xd0,$xd2,0b11
	vpermdi	$xd0,$xt2,$xt3,0b00
	vpermdi	$xd2,$xt2,$xt3,0b11

	vadduwm	$xa0,$xa0,@K[0]
	vadduwm	$xb0,$xb0,@K[1]
	vadduwm	$xc0,$xc0,@K[2]
	vadduwm	$xd0,$xd0,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa1,@K[0]
	vadduwm	$xb0,$xb1,@K[1]
	vadduwm	$xc0,$xc1,@K[2]
	vadduwm	$xd0,$xd1,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa2,@K[0]
	vadduwm	$xb0,$xb2,@K[1]
	vadduwm	$xc0,$xc2,@K[2]
	vadduwm	$xd0,$xd2,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa3,@K[0]
	vadduwm	$xb0,$xb3,@K[1]
	vadduwm	$xc0,$xc3,@K[2]
	vadduwm	$xd0,$xd3,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	mtctr	r0
	bne	Loop_outer_vsx

Ldone_vsx:
	lwz	r12,`$FRAME-4`($sp)		# pull vrsave
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	$POP	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# restore vrsave
	lvx	v26,r10,$sp
	addi	r10,r10,32
	lvx	v27,r11,$sp
	addi	r11,r11,32
	lvx	v28,r10,$sp
	addi	r10,r10,32
	lvx	v29,r11,$sp
	addi	r11,r11,32
	lvx	v30,r10,$sp
	lvx	v31,r11,$sp
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr

.align	4
Ltail_vsx:
	addi	r11,$sp,$LOCALS
	mtctr	$len
	stvx_4w	$xa0,$x00,r11			# offload block to stack
	stvx_4w	$xb0,$x10,r11
	stvx_4w	$xc0,$x20,r11
	stvx_4w	$xd0,$x30,r11
	subi	r12,r11,1			# prepare for *++ptr
	subi	$inp,$inp,1
	subi	$out,$out,1

Loop_tail_vsx:
	lbzu	r6,1(r12)
	lbzu	r7,1($inp)
	xor	r6,r6,r7
	stbu	r6,1($out)
	bdnz	Loop_tail_vsx

	stvx_4w	$K[0],$x00,r11			# wipe copy of the block
	stvx_4w	$K[0],$x10,r11
	stvx_4w	$K[0],$x20,r11
	stvx_4w	$K[0],$x30,r11

	b	Ldone_vsx
	.long	0
	.byte	0,12,0x04,1,0x80,0,5,0
	.long	0
.size	.ChaCha20_ctr32_vsx,.-.ChaCha20_ctr32_vsx
___
}}}

$code.=<<___;
.align	5
Lconsts:
	mflr	r0
	bcl	20,31,\$+4
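	# bcl 20,31 to the very next instruction is the customary way to
	# read the program counter: a "branch always" that only sets the
	# link register, special-cased by the hardware so it does not
	# disturb the return-address predictor; Lsigma then lies a fixed
	# 64-8 bytes past the link address picked up below.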
	mflr	r12	#vvvvv "distance between . and Lsigma
	addi	r12,r12,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`
Lsigma:
	.long	0x61707865,0x3320646e,0x79622d32,0x6b206574
	.long	1,0,0,0
	.long	4,0,0,0
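	# Layout relative to Lsigma, as loaded at fixed offsets by the VMX
	# and VSX paths above: +0 sigma, +16 the "one" counter adjustment,
	# +32 FOUR, +48 and +64 the endian-specific rotate-by-16/24
	# permutations, +80 the lane-smashed sigma used by the VSX path,
	# +144 the counter lane offsets 0..3.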
___
$code.=<<___	if ($LITTLE_ENDIAN);
	.long	0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
	.long	0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
___
$code.=<<___	if (!$LITTLE_ENDIAN);	# flipped words
	.long	0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
	.long	0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
___
$code.=<<___;
	.long	0x61707865,0x61707865,0x61707865,0x61707865
	.long	0x3320646e,0x3320646e,0x3320646e,0x3320646e
	.long	0x79622d32,0x79622d32,0x79622d32,0x79622d32
	.long	0x6b206574,0x6b206574,0x6b206574,0x6b206574
	.long	0,1,2,3
.asciz	"ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	# instructions prefixed with '?' are endian-specific and need
	# to be adjusted accordingly...
	if ($flavour !~ /le$/) {	# big-endian
	    s/be\?//		or
	    s/le\?/#le#/	or
	    s/\?lvsr/lvsl/	or
	    s/\?lvsl/lvsr/	or
	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
	} else {			# little-endian
	    s/le\?//		or
	    s/be\?/#be#/	or
	    s/\?([a-z]+)/$1/	or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
	}

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";