#! /usr/bin/env perl
# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# October 2015
#
# ChaCha20 for PowerPC/AltiVec.
#
# June 2018
#
# Add VSX 2.07 code path. The original 3xAltiVec+1xIALU is well-suited
# for processors that can't issue more than one vector instruction per
# cycle, but POWER8 (and POWER9) can issue a pair, so a vector-only 4x
# interleave performs better there. Incidentally, PowerISA 2.07 (first
# implemented by POWER8) defined new usable instructions, hence the
# 4xVSX code path...
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.x	3xAltiVec+1xIALU	4xVSX
#
# Freescale e300	13.6/+115%	-			-
# PPC74x0/G4e		6.81/+310%	3.81			-
# PPC970/G5		9.29/+160%	?			-
# POWER7		8.62/+61%	3.35			-
# POWER8		8.70/+51%	2.91			2.09
# POWER9		8.80/+29%	4.44(*)			2.45(**)
#
# (*)	this is a trade-off result; it's possible to improve it, but
#	doing so would negatively affect all the others;
# (**)	POWER9 seems to be "allergic" to mixing vector and integer
#	instructions, which is why the switch to vector-only code pays
#	off that much;
$flavour = shift;

if ($flavour =~ /64/) {
	$SIZE_T	=8;
	$LRSAVE	=2*$SIZE_T;
	$STU	="stdu";
	$POP	="ld";
	$PUSH	="std";
	$UCMP	="cmpld";
} elsif ($flavour =~ /32/) {
	$SIZE_T	=4;
	$LRSAVE	=$SIZE_T;
	$STU	="stwu";
	$POP	="lwz";
	$PUSH	="stw";
	$UCMP	="cmplw";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
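# $flavour selects the 32- vs 64-bit ABI mnemonics above as well as the
# endianness; everything emitted below is piped through the shared
# ppc-xlate.pl translator located next.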
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";

$LOCALS=6*$SIZE_T;
$FRAME=$LOCALS+64+18*$SIZE_T;	# 64 is for local variables

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $code .= "\t$opcode\t".join(',',@_)."\n";
}
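# Calls to otherwise undefined subs, e.g. &add(...) or &vadduwm(...), fall
# through to AUTOLOAD above and are emitted verbatim as single assembly
# instructions; '_' in the sub name is mapped to '.' (dot-suffixed forms).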
my $sp = "r1";

my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));

my @x=map("r$_",(16..31));
my @d=map("r$_",(11,12,14,15));
my @t=map("r$_",(7..10));
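# ROUND() emits one ChaCha20 quarter-round for each of four independent
# (a,b,c,d) word quadruples held in GPRs @x[0..15]:
#	a+=b; d^=a; d<<<=16;  c+=d; b^=c; b<<<=12;
#	a+=b; d^=a; d<<<=8;   c+=d; b^=c; b<<<=7;
# The four quarter-rounds are interleaved instruction by instruction to
# expose instruction-level parallelism on superscalar cores.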
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

	(
	"&add		(@x[$a0],@x[$a0],@x[$b0])",
	"&add		(@x[$a1],@x[$a1],@x[$b1])",
	"&add		(@x[$a2],@x[$a2],@x[$b2])",
	"&add		(@x[$a3],@x[$a3],@x[$b3])",
	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
	"&xor		(@x[$d1],@x[$d1],@x[$a1])",
	"&xor		(@x[$d2],@x[$d2],@x[$a2])",
	"&xor		(@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi	(@x[$d0],@x[$d0],16)",
	"&rotlwi	(@x[$d1],@x[$d1],16)",
	"&rotlwi	(@x[$d2],@x[$d2],16)",
	"&rotlwi	(@x[$d3],@x[$d3],16)",
	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	"&add		(@x[$c1],@x[$c1],@x[$d1])",
	"&add		(@x[$c2],@x[$c2],@x[$d2])",
	"&add		(@x[$c3],@x[$c3],@x[$d3])",
	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
	"&xor		(@x[$b1],@x[$b1],@x[$c1])",
	"&xor		(@x[$b2],@x[$b2],@x[$c2])",
	"&xor		(@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi	(@x[$b0],@x[$b0],12)",
	"&rotlwi	(@x[$b1],@x[$b1],12)",
	"&rotlwi	(@x[$b2],@x[$b2],12)",
	"&rotlwi	(@x[$b3],@x[$b3],12)",
	"&add		(@x[$a0],@x[$a0],@x[$b0])",
	"&add		(@x[$a1],@x[$a1],@x[$b1])",
	"&add		(@x[$a2],@x[$a2],@x[$b2])",
	"&add		(@x[$a3],@x[$a3],@x[$b3])",
	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
	"&xor		(@x[$d1],@x[$d1],@x[$a1])",
	"&xor		(@x[$d2],@x[$d2],@x[$a2])",
	"&xor		(@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi	(@x[$d0],@x[$d0],8)",
	"&rotlwi	(@x[$d1],@x[$d1],8)",
	"&rotlwi	(@x[$d2],@x[$d2],8)",
	"&rotlwi	(@x[$d3],@x[$d3],8)",
	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	"&add		(@x[$c1],@x[$c1],@x[$d1])",
	"&add		(@x[$c2],@x[$c2],@x[$d2])",
	"&add		(@x[$c3],@x[$c3],@x[$d3])",
	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
	"&xor		(@x[$b1],@x[$b1],@x[$c1])",
	"&xor		(@x[$b2],@x[$b2],@x[$c2])",
	"&xor		(@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi	(@x[$b0],@x[$b0],7)",
	"&rotlwi	(@x[$b1],@x[$b1],7)",
	"&rotlwi	(@x[$b2],@x[$b2],7)",
	"&rotlwi	(@x[$b3],@x[$b3],7)"
	);
}
$code.=<<___;
.machine	"any"
.text

.globl	.ChaCha20_ctr32_int
.align	5
.ChaCha20_ctr32_int:
__ChaCha20_ctr32_int:
	${UCMP}i $len,0
	beqlr-

	$STU	$sp,-$FRAME($sp)
	mflr	r0

	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH	r0,`$FRAME+$LRSAVE`($sp)

	lwz	@d[0],0($ctr)			# load counter
	lwz	@d[1],4($ctr)
	lwz	@d[2],8($ctr)
	lwz	@d[3],12($ctr)

	bl	__ChaCha20_1x

	$POP	r0,`$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,18,5,0
	.long	0
.size	.ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int

.align	5
__ChaCha20_1x:
Loop_outer:
	lis	@x[0],0x6170			# synthesize sigma
	lis	@x[1],0x3320
	lis	@x[2],0x7962
	lis	@x[3],0x6b20
	ori	@x[0],@x[0],0x7865
	ori	@x[1],@x[1],0x646e
	ori	@x[2],@x[2],0x2d32
	ori	@x[3],@x[3],0x6574

	li	r0,10				# inner loop counter
	lwz	@x[4],0($key)			# load key
	lwz	@x[5],4($key)
	lwz	@x[6],8($key)
	lwz	@x[7],12($key)
	lwz	@x[8],16($key)
	mr	@x[12],@d[0]			# copy counter
	lwz	@x[9],20($key)
	mr	@x[13],@d[1]
	lwz	@x[10],24($key)
	mr	@x[14],@d[2]
	lwz	@x[11],28($key)
	mr	@x[15],@d[3]

	mr	@t[0],@x[4]
	mr	@t[1],@x[5]
	mr	@t[2],@x[6]
	mr	@t[3],@x[7]

	mtctr	r0
Loop:
___
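# Each pass through Loop performs one ChaCha double round: ROUND(0,4,8,12)
# covers the four column quarter-rounds and ROUND(0,5,10,15) the four
# diagonal ones; with the counter preset to 10 this yields the 20 rounds
# of ChaCha20.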
foreach (&ROUND(0, 4, 8,12)) { eval; }
foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	bdnz	Loop

	subic	$len,$len,64			# $len-=64
	addi	@x[0],@x[0],0x7865		# accumulate key block
	addi	@x[1],@x[1],0x646e
	addi	@x[2],@x[2],0x2d32
	addi	@x[3],@x[3],0x6574
	addis	@x[0],@x[0],0x6170
	addis	@x[1],@x[1],0x3320
	addis	@x[2],@x[2],0x7962
	addis	@x[3],@x[3],0x6b20

	subfe.	r0,r0,r0			# borrow?-1:0
	add	@x[4],@x[4],@t[0]
	lwz	@t[0],16($key)
	add	@x[5],@x[5],@t[1]
	lwz	@t[1],20($key)
	add	@x[6],@x[6],@t[2]
	lwz	@t[2],24($key)
	add	@x[7],@x[7],@t[3]
	lwz	@t[3],28($key)
	add	@x[8],@x[8],@t[0]
	add	@x[9],@x[9],@t[1]
	add	@x[10],@x[10],@t[2]
	add	@x[11],@x[11],@t[3]

	add	@x[12],@x[12],@d[0]
	add	@x[13],@x[13],@d[1]
	add	@x[14],@x[14],@d[2]
	add	@x[15],@x[15],@d[3]
	addi	@d[0],@d[0],1			# increment counter
___
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
$code.=<<___;
	mr	@t[$i&3],@x[$i]
	rotlwi	@x[$i],@x[$i],8
	rlwimi	@x[$i],@t[$i&3],24,0,7
	rlwimi	@x[$i],@t[$i&3],24,16,23
___
} }
$code.=<<___;
	bne	Ltail				# $len-=64 borrowed

	lwz	@t[0],0($inp)			# load input, aligned or not
	lwz	@t[1],4($inp)
	${UCMP}i $len,0				# done already?
	lwz	@t[2],8($inp)
	lwz	@t[3],12($inp)
	xor	@x[0],@x[0],@t[0]		# xor with input
	lwz	@t[0],16($inp)
	xor	@x[1],@x[1],@t[1]
	lwz	@t[1],20($inp)
	xor	@x[2],@x[2],@t[2]
	lwz	@t[2],24($inp)
	xor	@x[3],@x[3],@t[3]
	lwz	@t[3],28($inp)
	xor	@x[4],@x[4],@t[0]
	lwz	@t[0],32($inp)
	xor	@x[5],@x[5],@t[1]
	lwz	@t[1],36($inp)
	xor	@x[6],@x[6],@t[2]
	lwz	@t[2],40($inp)
	xor	@x[7],@x[7],@t[3]
	lwz	@t[3],44($inp)
	xor	@x[8],@x[8],@t[0]
	lwz	@t[0],48($inp)
	xor	@x[9],@x[9],@t[1]
	lwz	@t[1],52($inp)
	xor	@x[10],@x[10],@t[2]
	lwz	@t[2],56($inp)
	xor	@x[11],@x[11],@t[3]
	lwz	@t[3],60($inp)
	xor	@x[12],@x[12],@t[0]
	stw	@x[0],0($out)			# store output, aligned or not
	xor	@x[13],@x[13],@t[1]
	stw	@x[1],4($out)
	xor	@x[14],@x[14],@t[2]
	stw	@x[2],8($out)
	xor	@x[15],@x[15],@t[3]
	stw	@x[3],12($out)
	stw	@x[4],16($out)
	stw	@x[5],20($out)
	stw	@x[6],24($out)
	stw	@x[7],28($out)
	stw	@x[8],32($out)
	stw	@x[9],36($out)
	stw	@x[10],40($out)
	stw	@x[11],44($out)
	stw	@x[12],48($out)
	stw	@x[13],52($out)
	stw	@x[14],56($out)
	addi	$inp,$inp,64
	stw	@x[15],60($out)
	addi	$out,$out,64

	bne	Loop_outer

	blr

.align	4
Ltail:
	addi	$len,$len,64			# restore tail length
	subi	$inp,$inp,1			# prepare for *++ptr
	subi	$out,$out,1
	addi	@t[0],$sp,$LOCALS-1
	mtctr	$len

	stw	@x[0],`$LOCALS+0`($sp)		# save whole block to stack
	stw	@x[1],`$LOCALS+4`($sp)
	stw	@x[2],`$LOCALS+8`($sp)
	stw	@x[3],`$LOCALS+12`($sp)
	stw	@x[4],`$LOCALS+16`($sp)
	stw	@x[5],`$LOCALS+20`($sp)
	stw	@x[6],`$LOCALS+24`($sp)
	stw	@x[7],`$LOCALS+28`($sp)
	stw	@x[8],`$LOCALS+32`($sp)
	stw	@x[9],`$LOCALS+36`($sp)
	stw	@x[10],`$LOCALS+40`($sp)
	stw	@x[11],`$LOCALS+44`($sp)
	stw	@x[12],`$LOCALS+48`($sp)
	stw	@x[13],`$LOCALS+52`($sp)
	stw	@x[14],`$LOCALS+56`($sp)
	stw	@x[15],`$LOCALS+60`($sp)

Loop_tail:					# byte-by-byte loop
	lbzu	@d[0],1($inp)
	lbzu	@x[0],1(@t[0])
	xor	@d[1],@d[0],@x[0]
	stbu	@d[1],1($out)
	bdnz	Loop_tail

	stw	$sp,`$LOCALS+0`($sp)		# wipe block on stack
	stw	$sp,`$LOCALS+4`($sp)
	stw	$sp,`$LOCALS+8`($sp)
	stw	$sp,`$LOCALS+12`($sp)
	stw	$sp,`$LOCALS+16`($sp)
	stw	$sp,`$LOCALS+20`($sp)
	stw	$sp,`$LOCALS+24`($sp)
	stw	$sp,`$LOCALS+28`($sp)
	stw	$sp,`$LOCALS+32`($sp)
	stw	$sp,`$LOCALS+36`($sp)
	stw	$sp,`$LOCALS+40`($sp)
	stw	$sp,`$LOCALS+44`($sp)
	stw	$sp,`$LOCALS+48`($sp)
	stw	$sp,`$LOCALS+52`($sp)
	stw	$sp,`$LOCALS+56`($sp)
	stw	$sp,`$LOCALS+60`($sp)

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
___

{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2)
				= map("v$_",(0..11));
my @K				= map("v$_",(12..17));
my ($FOUR,$sixteen,$twenty4)	= map("v$_",(18..19,23));
my ($inpperm,$outperm,$outmask)	= map("v$_",(24..26));
my @D				= map("v$_",(27..31));
my ($twelve,$seven,$T0,$T1)	= @D;

my $FRAME=$LOCALS+64+10*16+18*$SIZE_T;	# 10*16 is for v23-v31 offload
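# VMXROUND() is the AltiVec counterpart of ROUND(): each of $a,$b,$c,$d is
# a vector register holding one row of a ChaCha state, so a single call
# applies a quarter-round to all four columns (or diagonals) at once. The
# byte-granular rotates go through vperm, the 12- and 7-bit ones through
# vrlw, and the trailing vrldoi lane rotations move the rows between the
# column ($odd=0) and diagonal ($odd=1) arrangement of the round.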
sub VMXROUND {
my $odd = pop;
my ($a,$b,$c,$d)=@_;

	(
	"&vadduwm	('$a','$a','$b')",
	"&vxor		('$d','$d','$a')",
	"&vperm		('$d','$d','$d','$sixteen')",
	"&vadduwm	('$c','$c','$d')",
	"&vxor		('$b','$b','$c')",
	"&vrlw		('$b','$b','$twelve')",
	"&vadduwm	('$a','$a','$b')",
	"&vxor		('$d','$d','$a')",
	"&vperm		('$d','$d','$d','$twenty4')",
	"&vadduwm	('$c','$c','$d')",
	"&vxor		('$b','$b','$c')",
	"&vrlw		('$b','$b','$seven')",
	"&vrldoi	('$c','$c',8)",
	"&vrldoi	('$b','$b',$odd?4:12)",
	"&vrldoi	('$d','$d',$odd?12:4)"
	);
}

$code.=<<___;
.globl	.ChaCha20_ctr32_vmx
.align	5
.ChaCha20_ctr32_vmx:
	${UCMP}i $len,256
	blt	__ChaCha20_ctr32_int

	$STU	$sp,-$FRAME($sp)
	mflr	r0
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mfspr	r12,256
	stvx	v23,r10,$sp
	addi	r10,r10,32
	stvx	v24,r11,$sp
	addi	r11,r11,32
	stvx	v25,r10,$sp
	addi	r10,r10,32
	stvx	v26,r11,$sp
	addi	r11,r11,32
	stvx	v27,r10,$sp
	addi	r10,r10,32
	stvx	v28,r11,$sp
	addi	r11,r11,32
	stvx	v29,r10,$sp
	addi	r10,r10,32
	stvx	v30,r11,$sp
	stvx	v31,r10,$sp
	stw	r12,`$FRAME-$SIZE_T*18-4`($sp)	# save vrsave
	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	li	r12,-4096+511
	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# preserve 29 AltiVec registers

	bl	Lconsts				# returns pointer Lsigma in r12
	li	@x[0],16
	li	@x[1],32
	li	@x[2],48
	li	@x[3],64
	li	@x[4],31			# 31 is not a typo
	li	@x[5],15			# nor is 15

	lvx	@K[1],0,$key			# load key
	?lvsr	$T0,0,$key			# prepare unaligned load
	lvx	@K[2],@x[0],$key
	lvx	@D[0],@x[4],$key

	lvx	@K[3],0,$ctr			# load counter
	?lvsr	$T1,0,$ctr			# prepare unaligned load
	lvx	@D[1],@x[5],$ctr

	lvx	@K[0],0,r12			# load constants
	lvx	@K[5],@x[0],r12			# one
	lvx	$FOUR,@x[1],r12
	lvx	$sixteen,@x[2],r12
	lvx	$twenty4,@x[3],r12

	?vperm	@K[1],@K[2],@K[1],$T0		# align key
	?vperm	@K[2],@D[0],@K[2],$T0
	?vperm	@K[3],@D[1],@K[3],$T1		# align counter

	lwz	@d[0],0($ctr)			# load counter to GPR
	lwz	@d[1],4($ctr)
	vadduwm	@K[3],@K[3],@K[5]		# adjust AltiVec counter
	lwz	@d[2],8($ctr)
	vadduwm	@K[4],@K[3],@K[5]
	lwz	@d[3],12($ctr)
	vadduwm	@K[5],@K[4],@K[5]

	vxor	$T0,$T0,$T0			# 0x00..00
	vspltisw $outmask,-1			# 0xff..ff
	?lvsr	$inpperm,0,$inp			# prepare for unaligned load
	?lvsl	$outperm,0,$out			# prepare for unaligned store
	?vperm	$outmask,$outmask,$T0,$outperm

	be?lvsl	$T0,0,@x[0]			# 0x00..0f
	be?vspltisb $T1,3			# 0x03..03
	be?vxor	$T0,$T0,$T1			# swap bytes within words
	be?vxor	$outperm,$outperm,$T1
	be?vperm $inpperm,$inpperm,$inpperm,$T0

	li	r0,10				# inner loop counter
	b	Loop_outer_vmx

.align	4
Loop_outer_vmx:
	lis	@x[0],0x6170			# synthesize sigma
	lis	@x[1],0x3320
	vmr	$A0,@K[0]
	lis	@x[2],0x7962
	lis	@x[3],0x6b20
	vmr	$A1,@K[0]
	ori	@x[0],@x[0],0x7865
	ori	@x[1],@x[1],0x646e
	vmr	$A2,@K[0]
	ori	@x[2],@x[2],0x2d32
	ori	@x[3],@x[3],0x6574
	vmr	$B0,@K[1]

	lwz	@x[4],0($key)			# load key to GPR
	vmr	$B1,@K[1]
	lwz	@x[5],4($key)
	vmr	$B2,@K[1]
	lwz	@x[6],8($key)
	vmr	$C0,@K[2]
	lwz	@x[7],12($key)
	vmr	$C1,@K[2]
	lwz	@x[8],16($key)
	vmr	$C2,@K[2]
	mr	@x[12],@d[0]			# copy GPR counter
	lwz	@x[9],20($key)
	vmr	$D0,@K[3]
	mr	@x[13],@d[1]
	lwz	@x[10],24($key)
	vmr	$D1,@K[4]
	mr	@x[14],@d[2]
	lwz	@x[11],28($key)
	vmr	$D2,@K[5]
	mr	@x[15],@d[3]

	mr	@t[0],@x[4]
	mr	@t[1],@x[5]
	mr	@t[2],@x[6]
	mr	@t[3],@x[7]

	vspltisw $twelve,12			# synthesize constants
	vspltisw $seven,7

	mtctr	r0
	nop
Loop_vmx:
___
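# Interleave three AltiVec quarter-round streams (three 64-byte blocks in
# vector registers) with the scalar ROUND() working on a fourth block in
# GPRs: for every triple of vector instructions (one from each stream)
# three scalar instructions are issued, keeping both the vector and the
# integer pipelines busy.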
my @thread0=&VMXROUND($A0,$B0,$C0,$D0,0);
my @thread1=&VMXROUND($A1,$B1,$C1,$D1,0);
my @thread2=&VMXROUND($A2,$B2,$C2,$D2,0);
my @thread3=&ROUND(0,4,8,12);

foreach (@thread0) {
	eval;
	eval(shift(@thread1));
	eval(shift(@thread2));
	eval(shift(@thread3));
	eval(shift(@thread3));
	eval(shift(@thread3));
}
foreach (@thread3) { eval; }

@thread0=&VMXROUND($A0,$B0,$C0,$D0,1);
@thread1=&VMXROUND($A1,$B1,$C1,$D1,1);
@thread2=&VMXROUND($A2,$B2,$C2,$D2,1);
@thread3=&ROUND(0,5,10,15);

foreach (@thread0) {
	eval;
	eval(shift(@thread1));
	eval(shift(@thread2));
	eval(shift(@thread3));
	eval(shift(@thread3));
	eval(shift(@thread3));
}
foreach (@thread3) { eval; }

$code.=<<___;
	bdnz	Loop_vmx

	subi	$len,$len,256			# $len-=256
	addi	@x[0],@x[0],0x7865		# accumulate key block
	addi	@x[1],@x[1],0x646e
	addi	@x[2],@x[2],0x2d32
	addi	@x[3],@x[3],0x6574
	addis	@x[0],@x[0],0x6170
	addis	@x[1],@x[1],0x3320
	addis	@x[2],@x[2],0x7962
	addis	@x[3],@x[3],0x6b20

	add	@x[4],@x[4],@t[0]
	lwz	@t[0],16($key)
	add	@x[5],@x[5],@t[1]
	lwz	@t[1],20($key)
	add	@x[6],@x[6],@t[2]
	lwz	@t[2],24($key)
	add	@x[7],@x[7],@t[3]
	lwz	@t[3],28($key)
	add	@x[8],@x[8],@t[0]
	add	@x[9],@x[9],@t[1]
	add	@x[10],@x[10],@t[2]
	add	@x[11],@x[11],@t[3]
	add	@x[12],@x[12],@d[0]
	add	@x[13],@x[13],@d[1]
	add	@x[14],@x[14],@d[2]
	add	@x[15],@x[15],@d[3]

	vadduwm	$A0,$A0,@K[0]			# accumulate key block
	vadduwm	$A1,$A1,@K[0]
	vadduwm	$A2,$A2,@K[0]
	vadduwm	$B0,$B0,@K[1]
	vadduwm	$B1,$B1,@K[1]
	vadduwm	$B2,$B2,@K[1]
	vadduwm	$C0,$C0,@K[2]
	vadduwm	$C1,$C1,@K[2]
	vadduwm	$C2,$C2,@K[2]
	vadduwm	$D0,$D0,@K[3]
	vadduwm	$D1,$D1,@K[4]
	vadduwm	$D2,$D2,@K[5]

	addi	@d[0],@d[0],4			# increment counter
	vadduwm	@K[3],@K[3],$FOUR
	vadduwm	@K[4],@K[4],$FOUR
	vadduwm	@K[5],@K[5],$FOUR
___
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
$code.=<<___;
	mr	@t[$i&3],@x[$i]
	rotlwi	@x[$i],@x[$i],8
	rlwimi	@x[$i],@t[$i&3],24,0,7
	rlwimi	@x[$i],@t[$i&3],24,16,23
___
} }
$code.=<<___;
	lwz	@t[0],0($inp)			# load input, aligned or not
	lwz	@t[1],4($inp)
	lwz	@t[2],8($inp)
	lwz	@t[3],12($inp)
	xor	@x[0],@x[0],@t[0]		# xor with input
	lwz	@t[0],16($inp)
	xor	@x[1],@x[1],@t[1]
	lwz	@t[1],20($inp)
	xor	@x[2],@x[2],@t[2]
	lwz	@t[2],24($inp)
	xor	@x[3],@x[3],@t[3]
	lwz	@t[3],28($inp)
	xor	@x[4],@x[4],@t[0]
	lwz	@t[0],32($inp)
	xor	@x[5],@x[5],@t[1]
	lwz	@t[1],36($inp)
	xor	@x[6],@x[6],@t[2]
	lwz	@t[2],40($inp)
	xor	@x[7],@x[7],@t[3]
	lwz	@t[3],44($inp)
	xor	@x[8],@x[8],@t[0]
	lwz	@t[0],48($inp)
	xor	@x[9],@x[9],@t[1]
	lwz	@t[1],52($inp)
	xor	@x[10],@x[10],@t[2]
	lwz	@t[2],56($inp)
	xor	@x[11],@x[11],@t[3]
	lwz	@t[3],60($inp)
	xor	@x[12],@x[12],@t[0]
	stw	@x[0],0($out)			# store output, aligned or not
	xor	@x[13],@x[13],@t[1]
	stw	@x[1],4($out)
	xor	@x[14],@x[14],@t[2]
	stw	@x[2],8($out)
	xor	@x[15],@x[15],@t[3]
	stw	@x[3],12($out)
	addi	$inp,$inp,64
	stw	@x[4],16($out)
	li	@t[0],16
	stw	@x[5],20($out)
	li	@t[1],32
	stw	@x[6],24($out)
	li	@t[2],48
	stw	@x[7],28($out)
	li	@t[3],64
	stw	@x[8],32($out)
	stw	@x[9],36($out)
	stw	@x[10],40($out)
	stw	@x[11],44($out)
	stw	@x[12],48($out)
	stw	@x[13],52($out)
	stw	@x[14],56($out)
	stw	@x[15],60($out)
	addi	$out,$out,64

	lvx	@D[0],0,$inp			# load input
	lvx	@D[1],@t[0],$inp
	lvx	@D[2],@t[1],$inp
	lvx	@D[3],@t[2],$inp
	lvx	@D[4],@t[3],$inp
	addi	$inp,$inp,64

	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[4],@D[3],$inpperm
	vxor	$A0,$A0,@D[0]			# xor with input
	vxor	$B0,$B0,@D[1]
	lvx	@D[1],@t[0],$inp		# keep loading input
	vxor	$C0,$C0,@D[2]
	lvx	@D[2],@t[1],$inp
	vxor	$D0,$D0,@D[3]
	lvx	@D[3],@t[2],$inp
	lvx	@D[0],@t[3],$inp
	addi	$inp,$inp,64
	li	@t[3],63			# 63 is not a typo
	vperm	$A0,$A0,$A0,$outperm		# pre-misalign output
	vperm	$B0,$B0,$B0,$outperm
	vperm	$C0,$C0,$C0,$outperm
	vperm	$D0,$D0,$D0,$outperm

	?vperm	@D[4],@D[1],@D[4],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[0],@D[3],$inpperm
	vxor	$A1,$A1,@D[4]
	vxor	$B1,$B1,@D[1]
	lvx	@D[1],@t[0],$inp		# keep loading input
	vxor	$C1,$C1,@D[2]
	lvx	@D[2],@t[1],$inp
	vxor	$D1,$D1,@D[3]
	lvx	@D[3],@t[2],$inp
	lvx	@D[4],@t[3],$inp		# redundant in aligned case
	addi	$inp,$inp,64
	vperm	$A1,$A1,$A1,$outperm		# pre-misalign output
	vperm	$B1,$B1,$B1,$outperm
	vperm	$C1,$C1,$C1,$outperm
	vperm	$D1,$D1,$D1,$outperm

	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
	?vperm	@D[1],@D[2],@D[1],$inpperm
	?vperm	@D[2],@D[3],@D[2],$inpperm
	?vperm	@D[3],@D[4],@D[3],$inpperm
	vxor	$A2,$A2,@D[0]
	vxor	$B2,$B2,@D[1]
	vxor	$C2,$C2,@D[2]
	vxor	$D2,$D2,@D[3]
	vperm	$A2,$A2,$A2,$outperm		# pre-misalign output
	vperm	$B2,$B2,$B2,$outperm
	vperm	$C2,$C2,$C2,$outperm
	vperm	$D2,$D2,$D2,$outperm

	andi.	@x[1],$out,15			# is $out aligned?
	mr	@x[0],$out

	vsel	@D[0],$A0,$B0,$outmask		# collect pre-misaligned output
	vsel	@D[1],$B0,$C0,$outmask
	vsel	@D[2],$C0,$D0,$outmask
	vsel	@D[3],$D0,$A1,$outmask
	vsel	$B0,$A1,$B1,$outmask
	vsel	$C0,$B1,$C1,$outmask
	vsel	$D0,$C1,$D1,$outmask
	vsel	$A1,$D1,$A2,$outmask
	vsel	$B1,$A2,$B2,$outmask
	vsel	$C1,$B2,$C2,$outmask
	vsel	$D1,$C2,$D2,$outmask

	#stvx	$A0,0,$out			# take it easy on the edges
	stvx	@D[0],@t[0],$out		# store output
	stvx	@D[1],@t[1],$out
	stvx	@D[2],@t[2],$out
	addi	$out,$out,64
	stvx	@D[3],0,$out
	stvx	$B0,@t[0],$out
	stvx	$C0,@t[1],$out
	stvx	$D0,@t[2],$out
	addi	$out,$out,64
	stvx	$A1,0,$out
	stvx	$B1,@t[0],$out
	stvx	$C1,@t[1],$out
	stvx	$D1,@t[2],$out
	addi	$out,$out,64

	beq	Laligned_vmx

	sub	@x[2],$out,@x[1]		# in misaligned case edges
	li	@x[3],0				# are written byte-by-byte
Lunaligned_tail_vmx:
	stvebx	$D2,@x[3],@x[2]
	addi	@x[3],@x[3],1
	cmpw	@x[3],@x[1]
	bne	Lunaligned_tail_vmx

	sub	@x[2],@x[0],@x[1]
Lunaligned_head_vmx:
	stvebx	$A0,@x[1],@x[2]
	cmpwi	@x[1],15
	addi	@x[1],@x[1],1
	bne	Lunaligned_head_vmx

	${UCMP}i $len,255			# done with 256-byte blocks yet?
	bgt	Loop_outer_vmx
	b	Ldone_vmx

.align	4
Laligned_vmx:
	stvx	$A0,0,@x[0]			# head hexaword was not stored

	${UCMP}i $len,255			# done with 256-byte blocks yet?
	bgt	Loop_outer_vmx
	nop

Ldone_vmx:
	${UCMP}i $len,0				# done yet?
	bnel	__ChaCha20_1x

	lwz	r12,`$FRAME-$SIZE_T*18-4`($sp)	# pull vrsave
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mtspr	256,r12				# restore vrsave
	lvx	v23,r10,$sp
	addi	r10,r10,32
	lvx	v24,r11,$sp
	addi	r11,r11,32
	lvx	v25,r10,$sp
	addi	r10,r10,32
	lvx	v26,r11,$sp
	addi	r11,r11,32
	lvx	v27,r10,$sp
	addi	r10,r10,32
	lvx	v28,r11,$sp
	addi	r11,r11,32
	lvx	v29,r10,$sp
	addi	r10,r10,32
	lvx	v30,r11,$sp
	lvx	v31,r10,$sp
	$POP	r0, `$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,0x04,1,0x80,18,5,0
	.long	0
.size	.ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
___
}}}

{{{
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
my @K = map("v$_",(16..19));
my $CTR = "v26";
my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
my $beperm = "v31";
my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));

my $FRAME=$LOCALS+64+7*16;	# 7*16 is for v26-v31 offload
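# VSX_lane_ROUND() uses the 4x "lane" layout: every vector register holds
# the same state word from four independent blocks (sigma and key words are
# splatted, the counters differ per lane), so four blocks advance per round
# and no lane shuffling is needed inside the loop; only the indices differ
# between the column (0,4,8,12) and diagonal (0,5,10,15) rounds.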
sub VSX_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"v$_\"",(0..15));

	(
	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	"&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
	"&vxor		(@x[$d1],@x[$d1],@x[$a1])",
	"&vxor		(@x[$d2],@x[$d2],@x[$a2])",
	"&vxor		(@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw		(@x[$d0],@x[$d0],'$sixteen')",
	"&vrlw		(@x[$d1],@x[$d1],'$sixteen')",
	"&vrlw		(@x[$d2],@x[$d2],'$sixteen')",
	"&vrlw		(@x[$d3],@x[$d3],'$sixteen')",
	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
	"&vxor		(@x[$b1],@x[$b1],@x[$c1])",
	"&vxor		(@x[$b2],@x[$b2],@x[$c2])",
	"&vxor		(@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw		(@x[$b0],@x[$b0],'$twelve')",
	"&vrlw		(@x[$b1],@x[$b1],'$twelve')",
	"&vrlw		(@x[$b2],@x[$b2],'$twelve')",
	"&vrlw		(@x[$b3],@x[$b3],'$twelve')",
	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",
	"&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",
	"&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",
	"&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",
	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
	"&vxor		(@x[$d1],@x[$d1],@x[$a1])",
	"&vxor		(@x[$d2],@x[$d2],@x[$a2])",
	"&vxor		(@x[$d3],@x[$d3],@x[$a3])",
	"&vrlw		(@x[$d0],@x[$d0],'$eight')",
	"&vrlw		(@x[$d1],@x[$d1],'$eight')",
	"&vrlw		(@x[$d2],@x[$d2],'$eight')",
	"&vrlw		(@x[$d3],@x[$d3],'$eight')",
	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
	"&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
	"&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
	"&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
	"&vxor		(@x[$b1],@x[$b1],@x[$c1])",
	"&vxor		(@x[$b2],@x[$b2],@x[$c2])",
	"&vxor		(@x[$b3],@x[$b3],@x[$c3])",
	"&vrlw		(@x[$b0],@x[$b0],'$seven')",
	"&vrlw		(@x[$b1],@x[$b1],'$seven')",
	"&vrlw		(@x[$b2],@x[$b2],'$seven')",
	"&vrlw		(@x[$b3],@x[$b3],'$seven')"
	);
}

$code.=<<___;
.globl	.ChaCha20_ctr32_vsx
.align	5
.ChaCha20_ctr32_vsx:
	$STU	$sp,-$FRAME($sp)
	mflr	r0
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	mfspr	r12,256
	stvx	v26,r10,$sp
	addi	r10,r10,32
	stvx	v27,r11,$sp
	addi	r11,r11,32
	stvx	v28,r10,$sp
	addi	r10,r10,32
	stvx	v29,r11,$sp
	addi	r11,r11,32
	stvx	v30,r10,$sp
	stvx	v31,r11,$sp
	stw	r12,`$FRAME-4`($sp)		# save vrsave
	li	r12,-4096+63
	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# preserve 29 AltiVec registers

	bl	Lconsts				# returns pointer Lsigma in r12
	lvx_4w	@K[0],0,r12			# load sigma
	addi	r12,r12,0x50
	li	$x10,16
	li	$x20,32
	li	$x30,48
	li	r11,64

	lvx_4w	@K[1],0,$key			# load key
	lvx_4w	@K[2],$x10,$key
	lvx_4w	@K[3],0,$ctr			# load counter

	vxor	$xt0,$xt0,$xt0
	lvx_4w	$xt1,r11,r12
	vspltw	$CTR,@K[3],0
	vsldoi	@K[3],@K[3],$xt0,4
	vsldoi	@K[3],$xt0,@K[3],12		# clear @K[3].word[0]
	vadduwm	$CTR,$CTR,$xt1

	be?lvsl	$beperm,0,$x10			# 0x00..0f
	be?vspltisb $xt0,3			# 0x03..03
	be?vxor	$beperm,$beperm,$xt0		# swap bytes within words

	li	r0,10				# inner loop counter
	mtctr	r0
	b	Loop_outer_vsx

.align	5
Loop_outer_vsx:
	lvx	$xa0,$x00,r12			# load [smashed] sigma
	lvx	$xa1,$x10,r12
	lvx	$xa2,$x20,r12
	lvx	$xa3,$x30,r12

	vspltw	$xb0,@K[1],0			# smash the key
	vspltw	$xb1,@K[1],1
	vspltw	$xb2,@K[1],2
	vspltw	$xb3,@K[1],3

	vspltw	$xc0,@K[2],0
	vspltw	$xc1,@K[2],1
	vspltw	$xc2,@K[2],2
	vspltw	$xc3,@K[2],3

	vmr	$xd0,$CTR			# smash the counter
	vspltw	$xd1,@K[3],1
	vspltw	$xd2,@K[3],2
	vspltw	$xd3,@K[3],3

	vspltisw $sixteen,-16			# synthesize constants
	vspltisw $twelve,12
	vspltisw $eight,8
	vspltisw $seven,7

Loop_vsx:
___
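# Same double-round structure as the scalar path, just lane-wise: one pass
# through Loop_vsx is a column round followed by a diagonal round, repeated
# CTR=10 times before the blocks are transposed and written out.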
foreach (&VSX_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&VSX_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	bdnz	Loop_vsx

	vadduwm	$xd0,$xd0,$CTR

	vmrgew	$xt0,$xa0,$xa1			# transpose data
	vmrgew	$xt1,$xa2,$xa3
	vmrgow	$xa0,$xa0,$xa1
	vmrgow	$xa2,$xa2,$xa3
	vmrgew	$xt2,$xb0,$xb1
	vmrgew	$xt3,$xb2,$xb3
	vpermdi	$xa1,$xa0,$xa2,0b00
	vpermdi	$xa3,$xa0,$xa2,0b11
	vpermdi	$xa0,$xt0,$xt1,0b00
	vpermdi	$xa2,$xt0,$xt1,0b11

	vmrgow	$xb0,$xb0,$xb1
	vmrgow	$xb2,$xb2,$xb3
	vmrgew	$xt0,$xc0,$xc1
	vmrgew	$xt1,$xc2,$xc3
	vpermdi	$xb1,$xb0,$xb2,0b00
	vpermdi	$xb3,$xb0,$xb2,0b11
	vpermdi	$xb0,$xt2,$xt3,0b00
	vpermdi	$xb2,$xt2,$xt3,0b11

	vmrgow	$xc0,$xc0,$xc1
	vmrgow	$xc2,$xc2,$xc3
	vmrgew	$xt2,$xd0,$xd1
	vmrgew	$xt3,$xd2,$xd3
	vpermdi	$xc1,$xc0,$xc2,0b00
	vpermdi	$xc3,$xc0,$xc2,0b11
	vpermdi	$xc0,$xt0,$xt1,0b00
	vpermdi	$xc2,$xt0,$xt1,0b11

	vmrgow	$xd0,$xd0,$xd1
	vmrgow	$xd2,$xd2,$xd3
	vspltisw $xt0,4
	vadduwm	$CTR,$CTR,$xt0			# next counter value
	vpermdi	$xd1,$xd0,$xd2,0b00
	vpermdi	$xd3,$xd0,$xd2,0b11
	vpermdi	$xd0,$xt2,$xt3,0b00
	vpermdi	$xd2,$xt2,$xt3,0b11

	vadduwm	$xa0,$xa0,@K[0]
	vadduwm	$xb0,$xb0,@K[1]
	vadduwm	$xc0,$xc0,@K[2]
	vadduwm	$xd0,$xd0,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa1,@K[0]
	vadduwm	$xb0,$xb1,@K[1]
	vadduwm	$xc0,$xc1,@K[2]
	vadduwm	$xd0,$xd1,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa2,@K[0]
	vadduwm	$xb0,$xb2,@K[1]
	vadduwm	$xc0,$xc2,@K[2]
	vadduwm	$xd0,$xd2,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	beq	Ldone_vsx

	vadduwm	$xa0,$xa3,@K[0]
	vadduwm	$xb0,$xb3,@K[1]
	vadduwm	$xc0,$xc3,@K[2]
	vadduwm	$xd0,$xd3,@K[3]

	be?vperm $xa0,$xa0,$xa0,$beperm
	be?vperm $xb0,$xb0,$xb0,$beperm
	be?vperm $xc0,$xc0,$xc0,$beperm
	be?vperm $xd0,$xd0,$xd0,$beperm

	${UCMP}i $len,0x40
	blt	Ltail_vsx

	lvx_4w	$xt0,$x00,$inp
	lvx_4w	$xt1,$x10,$inp
	lvx_4w	$xt2,$x20,$inp
	lvx_4w	$xt3,$x30,$inp

	vxor	$xt0,$xt0,$xa0
	vxor	$xt1,$xt1,$xb0
	vxor	$xt2,$xt2,$xc0
	vxor	$xt3,$xt3,$xd0

	stvx_4w	$xt0,$x00,$out
	stvx_4w	$xt1,$x10,$out
	addi	$inp,$inp,0x40
	stvx_4w	$xt2,$x20,$out
	subi	$len,$len,0x40
	stvx_4w	$xt3,$x30,$out
	addi	$out,$out,0x40
	mtctr	r0
	bne	Loop_outer_vsx

Ldone_vsx:
	lwz	r12,`$FRAME-4`($sp)		# pull vrsave
	li	r10,`15+$LOCALS+64`
	li	r11,`31+$LOCALS+64`
	$POP	r0, `$FRAME+$LRSAVE`($sp)
	mtspr	256,r12				# restore vrsave
	lvx	v26,r10,$sp
	addi	r10,r10,32
	lvx	v27,r11,$sp
	addi	r11,r11,32
	lvx	v28,r10,$sp
	addi	r10,r10,32
	lvx	v29,r11,$sp
	addi	r11,r11,32
	lvx	v30,r10,$sp
	lvx	v31,r11,$sp
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr

.align	4
Ltail_vsx:
	addi	r11,$sp,$LOCALS
	mtctr	$len
	stvx_4w	$xa0,$x00,r11			# offload block to stack
	stvx_4w	$xb0,$x10,r11
	stvx_4w	$xc0,$x20,r11
	stvx_4w	$xd0,$x30,r11
	subi	r12,r11,1			# prepare for *++ptr
	subi	$inp,$inp,1
	subi	$out,$out,1

Loop_tail_vsx:
	lbzu	r6,1(r12)
	lbzu	r7,1($inp)
	xor	r6,r6,r7
	stbu	r6,1($out)
	bdnz	Loop_tail_vsx

	stvx_4w	$K[0],$x00,r11			# wipe copy of the block
	stvx_4w	$K[0],$x10,r11
	stvx_4w	$K[0],$x20,r11
	stvx_4w	$K[0],$x30,r11

	b	Ldone_vsx
	.long	0
	.byte	0,12,0x04,1,0x80,0,5,0
	.long	0
.size	.ChaCha20_ctr32_vsx,.-.ChaCha20_ctr32_vsx
___
}}}
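# Constant pool shared by the VMX and VSX code paths. Lconsts computes the
# address of Lsigma position-independently (bcl/mflr); the table holds the
# sigma constant, the counter increments 1 and 4, the two vperm control
# vectors used for the byte-granular rotates in the VMX path, the
# lane-splatted ("smashed") sigma for the VSX path and the 0..3 lane
# counter offsets.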
$code.=<<___;
.align	5
Lconsts:
	mflr	r0
	bcl	20,31,\$+4
	mflr	r12	#vvvvv "distance between . and Lsigma
	addi	r12,r12,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`
Lsigma:
	.long	0x61707865,0x3320646e,0x79622d32,0x6b206574
	.long	1,0,0,0
	.long	4,0,0,0
___
$code.=<<___	if ($LITTLE_ENDIAN);
	.long	0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
	.long	0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
___
$code.=<<___	if (!$LITTLE_ENDIAN);	# flipped words
	.long	0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
	.long	0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
___
$code.=<<___;
	.long	0x61707865,0x61707865,0x61707865,0x61707865
	.long	0x3320646e,0x3320646e,0x3320646e,0x3320646e
	.long	0x79622d32,0x79622d32,0x79622d32,0x79622d32
	.long	0x6b206574,0x6b206574,0x6b206574,0x6b206574
	.long	0,1,2,3
.asciz	"ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	# instructions prefixed with '?' are endian-specific and need
	# to be adjusted accordingly...
	if ($flavour !~ /le$/) {	# big-endian
	    s/be\?//		or
	    s/le\?/#le#/	or
	    s/\?lvsr/lvsl/	or
	    s/\?lvsl/lvsr/	or
	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
	} else {			# little-endian
	    s/le\?//		or
	    s/be\?/#be#/	or
	    s/\?([a-z]+)/$1/	or
	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
	}

	print $_,"\n";
}

close STDOUT;