#! /usr/bin/env perl
# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# I let hardware handle unaligned input, except on page boundaries
# (see below for details). Otherwise it is a straightforward
# implementation with the X vector kept in the register bank.
#
#			sha256		|	sha512
#			-m64	-m32	|	-m64	-m32
# --------------------------------------+-----------------------
# PPC970,gcc-4.0.0	+50%	+38%	|	+40%	+410%(*)
# Power6,xlc-7		+150%	+90%	|	+100%	+430%(*)
#
# (*)	64-bit code in 32-bit application context, which actually is
#	on the TODO list. It should be noted that for safe deployment
#	in a 32-bit *multi-threaded* context asynchronous signals
#	should be blocked upon entry to the SHA512 block routine. This
#	is because the 32-bit signaling procedure invalidates the upper
#	halves of GPRs. The context switch procedure preserves them,
#	but signal delivery does not:-(
#
# This second version is truly multi-thread safe. The trouble with the
# original version was that it used the thread-local storage pointer
# register. It scrupulously preserved that register, but the problem
# would arise the moment an asynchronous signal was delivered and the
# signal handler dereferenced the TLS pointer. While that never happens
# in the openssl application or test suite, we have to respect the
# scenario and not use the TLS pointer register. The alternative would
# be to require the caller to block signals prior to calling this
# routine. For the record, in 32-bit context R2 serves as the TLS
# pointer, while in 64-bit context it is R13.

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$UCMP="cmpld";
	$SHL="sldi";
	$POP="ld";
	$PUSH="std";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$UCMP="cmplw";
	$SHL="slwi";
	$POP="lwz";
	$PUSH="stw";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
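
# On little-endian flavours $LITTLE_ENDIAN is used below as an XOR mask
# on byte offsets, so that the two 32-bit halves of each 64-bit value
# are addressed in the right order; on big-endian flavours it is 0 and
# the offsets are used as-is.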

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";

if ($output =~ /512/) {
	$func="sha512_block_ppc";
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1, 8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$LD="ld";
	$ST="std";
	$ROR="rotrdi";
	$SHR="srdi";
} else {
	$func="sha256_block_ppc";
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$LD="lwz";
	$ST="stw";
	$ROR="rotrwi";
	$SHR="srwi";
}
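
# The arrays above hold the FIPS 180-4 rotation/shift amounts; e.g. for
# SHA-512 they encode Sigma0(x)=ROTR^28(x)^ROTR^34(x)^ROTR^39(x),
# Sigma1(x)=ROTR^14(x)^ROTR^18(x)^ROTR^41(x),
# sigma0(x)=ROTR^1(x)^ROTR^8(x)^SHR^7(x) and
# sigma1(x)=ROTR^19(x)^ROTR^61(x)^SHR^6(x), with the third Sigma rotate
# emitted relative to the second one (see ROUND_00_15 below).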

$FRAME=32*$SIZE_T+16*$SZ;
$LOCALS=6*$SIZE_T;
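
# Stack frame layout: 6*$SIZE_T bytes at the bottom cover the ABI
# linkage area, then at $LOCALS there are 16*$SZ bytes of scratch
# (the aligned block copy and, in the PPC32 SHA-512 path, the
# off-loaded X vector); the saved non-volatile registers and the
# ctx/inp/num/end pointers occupy the top pointer-sized slots.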

$sp ="r1";
$toc="r2";
$ctx="r3";	# zapped by $a0
$inp="r4";	# zapped by $a1
$num="r5";	# zapped by $t0

$T  ="r0";
$a0 ="r3";
$a1 ="r4";
$t0 ="r5";
$t1 ="r6";
$Tbl="r7";

$A  ="r8";
$B  ="r9";
$C  ="r10";
$D  ="r11";
$E  ="r12";
$F  =$t1;	$t1 = "r0";	# stay away from "r13";
$G  ="r14";
$H  ="r15";

@V=($A,$B,$C,$D,$E,$F,$G,$H);
@X=("r16","r17","r18","r19","r20","r21","r22","r23",
    "r24","r25","r26","r27","r28","r29","r30","r31");

$inp="r31" if($SZ==4 || $SIZE_T==8);	# reassigned $inp! aliases with @X[15]
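
# ROUND_00_15 emits one SHA-2 round as per FIPS 180-4:
#   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#   T2 = Sigma0(a) + Maj(a,b,c)
#   d += T1; h = T1 + T2
# Ch(e,f,g) is computed as (e&f)|(~e&g) and Maj(a,b,c) as
# (a&b)^(a&c)^(b&c); K[i] arrives in $t1, loaded at the end of the
# previous round.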
sub ROUND_00_15 {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$code.=<<___;
	$ROR	$a0,$e,$Sigma1[0]
	$ROR	$a1,$e,$Sigma1[1]
	and	$t0,$f,$e
	xor	$a0,$a0,$a1
	add	$h,$h,$t1
	andc	$t1,$g,$e
	$ROR	$a1,$a1,`$Sigma1[2]-$Sigma1[1]`
	or	$t0,$t0,$t1		; Ch(e,f,g)
	add	$h,$h,@X[$i%16]
	xor	$a0,$a0,$a1		; Sigma1(e)
	add	$h,$h,$t0
	add	$h,$h,$a0

	$ROR	$a0,$a,$Sigma0[0]
	$ROR	$a1,$a,$Sigma0[1]
	and	$t0,$a,$b
	and	$t1,$a,$c
	xor	$a0,$a0,$a1
	$ROR	$a1,$a1,`$Sigma0[2]-$Sigma0[1]`
	xor	$t0,$t0,$t1
	and	$t1,$b,$c
	xor	$a0,$a0,$a1		; Sigma0(a)
	add	$d,$d,$h
	xor	$t0,$t0,$t1		; Maj(a,b,c)
___
$code.=<<___ if ($i<15);
	$LD	$t1,`($i+1)*$SZ`($Tbl)
___
$code.=<<___;
	add	$h,$h,$a0
	add	$h,$h,$t0
___
}
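
# Rounds 16 and up first extend the message schedule in place,
#   W[i%16] += sigma0(W[(i+1)%16]) + W[(i+9)%16] + sigma1(W[(i+14)%16]),
# which is the FIPS 180-4 recurrence W[i] = sigma1(W[i-2]) + W[i-7] +
# sigma0(W[i-15]) + W[i-16] with indices reduced mod 16, and then fall
# through to the common round body above.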
sub ROUND_16_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
$i-=16;
$code.=<<___;
	$ROR	$a0,@X[($i+1)%16],$sigma0[0]
	$ROR	$a1,@X[($i+1)%16],$sigma0[1]
	$ROR	$t0,@X[($i+14)%16],$sigma1[0]
	$ROR	$t1,@X[($i+14)%16],$sigma1[1]
	xor	$a0,$a0,$a1
	$SHR	$a1,@X[($i+1)%16],$sigma0[2]
	xor	$t0,$t0,$t1
	$SHR	$t1,@X[($i+14)%16],$sigma1[2]
	add	@X[$i],@X[$i],@X[($i+9)%16]
	xor	$a0,$a0,$a1		; sigma0(X[(i+1)&0x0f])
	xor	$t0,$t0,$t1		; sigma1(X[(i+14)&0x0f])
	$LD	$t1,`$i*$SZ`($Tbl)
	add	@X[$i],@X[$i],$a0
	add	@X[$i],@X[$i],$t0
___
&ROUND_00_15($i+16,$a,$b,$c,$d,$e,$f,$g,$h);
}

$code=<<___;
.machine	"any"
.text

.globl	$func
.align	6
$func:
	$STU	$sp,-$FRAME($sp)
	mflr	r0
	$SHL	$num,$num,`log(16*$SZ)/log(2)`

	$PUSH	$ctx,`$FRAME-$SIZE_T*22`($sp)

	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
___
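
# The $SHL above converts the block count passed in $num into a byte
# length; 16*$SZ is the block size in bytes.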
if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	$LD	$A,`0*$SZ`($ctx)
	mr	$inp,r4				; incarnate $inp
	$LD	$B,`1*$SZ`($ctx)
	$LD	$C,`2*$SZ`($ctx)
	$LD	$D,`3*$SZ`($ctx)
	$LD	$E,`4*$SZ`($ctx)
	$LD	$F,`5*$SZ`($ctx)
	$LD	$G,`6*$SZ`($ctx)
	$LD	$H,`7*$SZ`($ctx)
___
} else {
  for ($i=16;$i<32;$i++) {
    $code.=<<___;
	lwz	r$i,`$LITTLE_ENDIAN^(4*($i-16))`($ctx)
___
  }
}

$code.=<<___;
	bl	LPICmeup
LPICedup:
	andi.	r0,$inp,3
	bne	Lunaligned
Laligned:
	add	$num,$inp,$num
	$PUSH	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	b	Ldone

; The PowerPC specification allows an implementation to be ill-behaved
; upon an unaligned access which crosses a page boundary. The "better
; safe than sorry" principle makes me treat it specially. I don't look
; for the particular offending word, though, but for the input block
; which crosses the boundary. Once found, that block is aligned and
; hashed separately...
.align	4
Lunaligned:
	subfic	$t1,$inp,4096
	andi.	$t1,$t1,`4096-16*$SZ`	; distance to closest page boundary
	beq	Lcross_page
	$UCMP	$num,$t1
	ble	Laligned		; didn't cross the page boundary
	subfc	$num,$t1,$num
	add	$t1,$inp,$t1
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real remaining num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; intermediate end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	; $inp is equal to the intermediate end pointer here
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real remaining num
Lcross_page:
	li	$t1,`16*$SZ/4`
	mtctr	$t1
___
if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
	addi	r20,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r16,0($inp)
	lbz	r17,1($inp)
	lbz	r18,2($inp)
	lbz	r19,3($inp)
	addi	$inp,$inp,4
	stb	r16,0(r20)
	stb	r17,1(r20)
	stb	r18,2(r20)
	stb	r19,3(r20)
	addi	r20,r20,4
	bdnz	Lmemcpy
___
} else {
$code.=<<___;
	addi	r12,$sp,$LOCALS			; aligned spot below the frame
Lmemcpy:
	lbz	r8,0($inp)
	lbz	r9,1($inp)
	lbz	r10,2($inp)
	lbz	r11,3($inp)
	addi	$inp,$inp,4
	stb	r8,0(r12)
	stb	r9,1(r12)
	stb	r10,2(r12)
	stb	r11,3(r12)
	addi	r12,r12,4
	bdnz	Lmemcpy
___
}

$code.=<<___;
	$PUSH	$inp,`$FRAME-$SIZE_T*26`($sp)	; save real inp
	addi	$t1,$sp,`$LOCALS+16*$SZ`	; fictitious end pointer
	addi	$inp,$sp,$LOCALS		; fictitious inp pointer
	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real num
	$PUSH	$t1,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	bl	Lsha2_block_private
	$POP	$inp,`$FRAME-$SIZE_T*26`($sp)	; restore real inp
	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real num
	addic.	$num,$num,`-16*$SZ`		; num--
	bne	Lunaligned

Ldone:
	$POP	r0,`$FRAME+$LRSAVE`($sp)
	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
	mtlr	r0
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,1,0x80,18,3,0
	.long	0
___

if ($SZ==4 || $SIZE_T==8) {
$code.=<<___;
.align	4
Lsha2_block_private:
	$LD	$t1,0($Tbl)
___
for($i=0;$i<16;$i++) {
$code.=<<___ if ($SZ==4 && !$LITTLE_ENDIAN);
	lwz	@X[$i],`$i*$SZ`($inp)
___
$code.=<<___ if ($SZ==4 && $LITTLE_ENDIAN);
	lwz	$a0,`$i*$SZ`($inp)
	rotlwi	@X[$i],$a0,8
	rlwimi	@X[$i],$a0,24,0,7
	rlwimi	@X[$i],$a0,24,16,23
___
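# The rotlwi+2x rlwimi triplet above (and below) is the standard PPC
# 32-bit byte-swap idiom: rotating left by 8 puts two of the four bytes
# in place, and the two rlwimi instructions insert the remaining two,
# converting the little-endian word to the big-endian order the
# algorithm works in.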
# 64-bit loads are split into 2x32-bit ones, as the CPU can't handle
# unaligned 64-bit loads, only 32-bit ones...
$code.=<<___ if ($SZ==8 && !$LITTLE_ENDIAN);
	lwz	$t0,`$i*$SZ`($inp)
	lwz	@X[$i],`$i*$SZ+4`($inp)
	insrdi	@X[$i],$t0,32,0
___
$code.=<<___ if ($SZ==8 && $LITTLE_ENDIAN);
	lwz	$a0,`$i*$SZ`($inp)
	lwz	$a1,`$i*$SZ+4`($inp)
	rotlwi	$t0,$a0,8
	rotlwi	@X[$i],$a1,8
	rlwimi	$t0,$a0,24,0,7
	rlwimi	@X[$i],$a1,24,0,7
	rlwimi	$t0,$a0,24,16,23
	rlwimi	@X[$i],$a1,24,16,23
	insrdi	@X[$i],$t0,32,0
___
	&ROUND_00_15($i,@V);
	unshift(@V,pop(@V));
}
$code.=<<___;
	li	$t0,`$rounds/16-1`
	mtctr	$t0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx($i,@V);
	unshift(@V,pop(@V));
}
$code.=<<___;
	bdnz	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	$LD	r16,`0*$SZ`($ctx)
	$LD	r17,`1*$SZ`($ctx)
	$LD	r18,`2*$SZ`($ctx)
	$LD	r19,`3*$SZ`($ctx)
	$LD	r20,`4*$SZ`($ctx)
	$LD	r21,`5*$SZ`($ctx)
	$LD	r22,`6*$SZ`($ctx)
	addi	$inp,$inp,`16*$SZ`		; advance inp
	$LD	r23,`7*$SZ`($ctx)
	add	$A,$A,r16
	add	$B,$B,r17
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	add	$C,$C,r18
	$ST	$A,`0*$SZ`($ctx)
	add	$D,$D,r19
	$ST	$B,`1*$SZ`($ctx)
	add	$E,$E,r20
	$ST	$C,`2*$SZ`($ctx)
	add	$F,$F,r21
	$ST	$D,`3*$SZ`($ctx)
	add	$G,$G,r22
	$ST	$E,`4*$SZ`($ctx)
	add	$H,$H,r23
	$ST	$F,`5*$SZ`($ctx)
	$ST	$G,`6*$SZ`($ctx)
	$UCMP	$inp,$num
	$ST	$H,`7*$SZ`($ctx)
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.size	$func,.-$func
___
} else {
########################################################################
# SHA512 for PPC32, X vector is off-loaded to stack...
#
#			| sha512
#			| -m32
# ----------------------+-----------------------
# PPC74x0,gcc-4.0.1	| +48%
# POWER6,gcc-4.4.6	| +124%(*)
# POWER7,gcc-4.4.6	| +79%(*)
# e300,gcc-4.1.0	| +167%
#
# (*)	~1/3 of -m64 result [and ~20% better than -m32 code generated
#	by xlc-12.1]

my $XOFF=$LOCALS;
my @V=map("r$_",(16..31));	# A..H

my ($s0,$s1,$t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("r$_",(0,5,6,8..12,14,15));
my ($x0,$x1)=("r3","r4");	# zaps $ctx and $inp
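
# Each 64-bit quantity lives in a hi:lo pair of 32-bit registers here:
# 64-bit additions become addc/adde carry chains, and 64-bit rotates
# are assembled from srwi plus insrwi on the two halves.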
sub ROUND_00_15_ppc32 {
my ($i,	$ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	lwz	$t2,`$SZ*($i%16)+($LITTLE_ENDIAN^4)`($Tbl)
	xor	$a0,$flo,$glo
	lwz	$t3,`$SZ*($i%16)+($LITTLE_ENDIAN^0)`($Tbl)
	xor	$a1,$fhi,$ghi
	addc	$hlo,$hlo,$t0			; h+=x[i]
	stw	$t0,`$XOFF+0+$SZ*($i%16)`($sp)	; save x[i]
	srwi	$s0,$elo,$Sigma1[0]
	srwi	$s1,$ehi,$Sigma1[0]
	and	$a0,$a0,$elo
	adde	$hhi,$hhi,$t1
	and	$a1,$a1,$ehi
	stw	$t1,`$XOFF+4+$SZ*($i%16)`($sp)
	srwi	$t0,$elo,$Sigma1[1]
	srwi	$t1,$ehi,$Sigma1[1]
	addc	$hlo,$hlo,$t2			; h+=K512[i]
	insrwi	$s0,$ehi,$Sigma1[0],0
	insrwi	$s1,$elo,$Sigma1[0],0
	xor	$a0,$a0,$glo			; Ch(e,f,g)
	adde	$hhi,$hhi,$t3
	xor	$a1,$a1,$ghi
	insrwi	$t0,$ehi,$Sigma1[1],0
	insrwi	$t1,$elo,$Sigma1[1],0
	addc	$hlo,$hlo,$a0			; h+=Ch(e,f,g)
	srwi	$t2,$ehi,$Sigma1[2]-32
	srwi	$t3,$elo,$Sigma1[2]-32
	xor	$s0,$s0,$t0
	xor	$s1,$s1,$t1
	insrwi	$t2,$elo,$Sigma1[2]-32,0
	insrwi	$t3,$ehi,$Sigma1[2]-32,0
	xor	$a0,$alo,$blo			; a^b, b^c in next round
	adde	$hhi,$hhi,$a1
	xor	$a1,$ahi,$bhi
	xor	$s0,$s0,$t2			; Sigma1(e)
	xor	$s1,$s1,$t3
	srwi	$t0,$alo,$Sigma0[0]
	and	$a2,$a2,$a0
	addc	$hlo,$hlo,$s0			; h+=Sigma1(e)
	and	$a3,$a3,$a1
	srwi	$t1,$ahi,$Sigma0[0]
	srwi	$s0,$ahi,$Sigma0[1]-32
	adde	$hhi,$hhi,$s1
	srwi	$s1,$alo,$Sigma0[1]-32
	insrwi	$t0,$ahi,$Sigma0[0],0
	insrwi	$t1,$alo,$Sigma0[0],0
	xor	$a2,$a2,$blo			; Maj(a,b,c)
	addc	$dlo,$dlo,$hlo			; d+=h
	xor	$a3,$a3,$bhi
	insrwi	$s0,$alo,$Sigma0[1]-32,0
	insrwi	$s1,$ahi,$Sigma0[1]-32,0
	adde	$dhi,$dhi,$hhi
	srwi	$t2,$ahi,$Sigma0[2]-32
	srwi	$t3,$alo,$Sigma0[2]-32
	xor	$s0,$s0,$t0
	addc	$hlo,$hlo,$a2			; h+=Maj(a,b,c)
	xor	$s1,$s1,$t1
	insrwi	$t2,$alo,$Sigma0[2]-32,0
	insrwi	$t3,$ahi,$Sigma0[2]-32,0
	adde	$hhi,$hhi,$a3
___
$code.=<<___ if ($i>=15);
	lwz	$t0,`$XOFF+0+$SZ*(($i+2)%16)`($sp)
	lwz	$t1,`$XOFF+4+$SZ*(($i+2)%16)`($sp)
___
$code.=<<___ if ($i<15 && !$LITTLE_ENDIAN);
	lwz	$t1,`$SZ*($i+1)+0`($inp)
	lwz	$t0,`$SZ*($i+1)+4`($inp)
___
$code.=<<___ if ($i<15 && $LITTLE_ENDIAN);
	lwz	$a2,`$SZ*($i+1)+0`($inp)
	lwz	$a3,`$SZ*($i+1)+4`($inp)
	rotlwi	$t1,$a2,8
	rotlwi	$t0,$a3,8
	rlwimi	$t1,$a2,24,0,7
	rlwimi	$t0,$a3,24,0,7
	rlwimi	$t1,$a2,24,16,23
	rlwimi	$t0,$a3,24,16,23
___
$code.=<<___;
	xor	$s0,$s0,$t2			; Sigma0(a)
	xor	$s1,$s1,$t3
	addc	$hlo,$hlo,$s0			; h+=Sigma0(a)
	adde	$hhi,$hhi,$s1
___
$code.=<<___ if ($i==15);
	lwz	$x0,`$XOFF+0+$SZ*(($i+1)%16)`($sp)
	lwz	$x1,`$XOFF+4+$SZ*(($i+1)%16)`($sp)
___
}
sub ROUND_16_xx_ppc32 {
my ($i,	$ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo,
	$ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_;

$code.=<<___;
	srwi	$s0,$t0,$sigma0[0]
	srwi	$s1,$t1,$sigma0[0]
	srwi	$t2,$t0,$sigma0[1]
	srwi	$t3,$t1,$sigma0[1]
	insrwi	$s0,$t1,$sigma0[0],0
	insrwi	$s1,$t0,$sigma0[0],0
	srwi	$a0,$t0,$sigma0[2]
	insrwi	$t2,$t1,$sigma0[1],0
	insrwi	$t3,$t0,$sigma0[1],0
	insrwi	$a0,$t1,$sigma0[2],0
	xor	$s0,$s0,$t2
	lwz	$t2,`$XOFF+0+$SZ*(($i+14)%16)`($sp)
	srwi	$a1,$t1,$sigma0[2]
	xor	$s1,$s1,$t3
	lwz	$t3,`$XOFF+4+$SZ*(($i+14)%16)`($sp)
	xor	$a0,$a0,$s0
	srwi	$s0,$t2,$sigma1[0]
	xor	$a1,$a1,$s1
	srwi	$s1,$t3,$sigma1[0]
	addc	$x0,$x0,$a0			; x[i]+=sigma0(x[i+1])
	srwi	$a0,$t3,$sigma1[1]-32
	insrwi	$s0,$t3,$sigma1[0],0
	insrwi	$s1,$t2,$sigma1[0],0
	adde	$x1,$x1,$a1
	srwi	$a1,$t2,$sigma1[1]-32
	insrwi	$a0,$t2,$sigma1[1]-32,0
	srwi	$t2,$t2,$sigma1[2]
	insrwi	$a1,$t3,$sigma1[1]-32,0
	insrwi	$t2,$t3,$sigma1[2],0
	xor	$s0,$s0,$a0
	lwz	$a0,`$XOFF+0+$SZ*(($i+9)%16)`($sp)
	srwi	$t3,$t3,$sigma1[2]
	xor	$s1,$s1,$a1
	lwz	$a1,`$XOFF+4+$SZ*(($i+9)%16)`($sp)
	xor	$s0,$s0,$t2
	addc	$x0,$x0,$a0			; x[i]+=x[i+9]
	xor	$s1,$s1,$t3
	adde	$x1,$x1,$a1
	addc	$x0,$x0,$s0			; x[i]+=sigma1(x[i+14])
	adde	$x1,$x1,$s1
___
	($t0,$t1,$x0,$x1) = ($x0,$x1,$t0,$t1);
	&ROUND_00_15_ppc32(@_);
}

$code.=<<___;
.align	4
Lsha2_block_private:
___
$code.=<<___ if (!$LITTLE_ENDIAN);
	lwz	$t1,0($inp)
	xor	$a2,@V[3],@V[5]		; B^C, magic seed
	lwz	$t0,4($inp)
	xor	$a3,@V[2],@V[4]
___
$code.=<<___ if ($LITTLE_ENDIAN);
	lwz	$a1,0($inp)
	xor	$a2,@V[3],@V[5]		; B^C, magic seed
	lwz	$a0,4($inp)
	xor	$a3,@V[2],@V[4]
	rotlwi	$t1,$a1,8
	rotlwi	$t0,$a0,8
	rlwimi	$t1,$a1,24,0,7
	rlwimi	$t0,$a0,24,0,7
	rlwimi	$t1,$a1,24,16,23
	rlwimi	$t0,$a0,24,16,23
___
for($i=0;$i<16;$i++) {
	&ROUND_00_15_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
	li	$a0,`$rounds/16-1`
	mtctr	$a0
.align	4
Lrounds:
	addi	$Tbl,$Tbl,`16*$SZ`
___
for(;$i<32;$i++) {
	&ROUND_16_xx_ppc32($i,@V);
	unshift(@V,pop(@V));	unshift(@V,pop(@V));
	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
}
$code.=<<___;
	bdnz	Lrounds

	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
	$POP	$num,`$FRAME-$SIZE_T*24`($sp)	; end pointer
	subi	$Tbl,$Tbl,`($rounds-16)*$SZ`	; rewind Tbl

	lwz	$t0,`$LITTLE_ENDIAN^0`($ctx)
	lwz	$t1,`$LITTLE_ENDIAN^4`($ctx)
	lwz	$t2,`$LITTLE_ENDIAN^8`($ctx)
	lwz	$t3,`$LITTLE_ENDIAN^12`($ctx)
	lwz	$a0,`$LITTLE_ENDIAN^16`($ctx)
	lwz	$a1,`$LITTLE_ENDIAN^20`($ctx)
	lwz	$a2,`$LITTLE_ENDIAN^24`($ctx)
	addc	@V[1],@V[1],$t1
	lwz	$a3,`$LITTLE_ENDIAN^28`($ctx)
	adde	@V[0],@V[0],$t0
	lwz	$t0,`$LITTLE_ENDIAN^32`($ctx)
	addc	@V[3],@V[3],$t3
	lwz	$t1,`$LITTLE_ENDIAN^36`($ctx)
	adde	@V[2],@V[2],$t2
	lwz	$t2,`$LITTLE_ENDIAN^40`($ctx)
	addc	@V[5],@V[5],$a1
	lwz	$t3,`$LITTLE_ENDIAN^44`($ctx)
	adde	@V[4],@V[4],$a0
	lwz	$a0,`$LITTLE_ENDIAN^48`($ctx)
	addc	@V[7],@V[7],$a3
	lwz	$a1,`$LITTLE_ENDIAN^52`($ctx)
	adde	@V[6],@V[6],$a2
	lwz	$a2,`$LITTLE_ENDIAN^56`($ctx)
	addc	@V[9],@V[9],$t1
	lwz	$a3,`$LITTLE_ENDIAN^60`($ctx)
	adde	@V[8],@V[8],$t0
	stw	@V[0],`$LITTLE_ENDIAN^0`($ctx)
	stw	@V[1],`$LITTLE_ENDIAN^4`($ctx)
	addc	@V[11],@V[11],$t3
	stw	@V[2],`$LITTLE_ENDIAN^8`($ctx)
	stw	@V[3],`$LITTLE_ENDIAN^12`($ctx)
	adde	@V[10],@V[10],$t2
	stw	@V[4],`$LITTLE_ENDIAN^16`($ctx)
	stw	@V[5],`$LITTLE_ENDIAN^20`($ctx)
	addc	@V[13],@V[13],$a1
	stw	@V[6],`$LITTLE_ENDIAN^24`($ctx)
	stw	@V[7],`$LITTLE_ENDIAN^28`($ctx)
	adde	@V[12],@V[12],$a0
	stw	@V[8],`$LITTLE_ENDIAN^32`($ctx)
	stw	@V[9],`$LITTLE_ENDIAN^36`($ctx)
	addc	@V[15],@V[15],$a3
	stw	@V[10],`$LITTLE_ENDIAN^40`($ctx)
	stw	@V[11],`$LITTLE_ENDIAN^44`($ctx)
	adde	@V[14],@V[14],$a2
	stw	@V[12],`$LITTLE_ENDIAN^48`($ctx)
	stw	@V[13],`$LITTLE_ENDIAN^52`($ctx)
	stw	@V[14],`$LITTLE_ENDIAN^56`($ctx)
	stw	@V[15],`$LITTLE_ENDIAN^60`($ctx)

	addi	$inp,$inp,`16*$SZ`		; advance inp
	$PUSH	$inp,`$FRAME-$SIZE_T*23`($sp)
	$UCMP	$inp,$num
	bne	Lsha2_block_private
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.size	$func,.-$func
___
}

# This is an ugly hack, because PPC assembler syntax seems to vary too
# much from platform to platform...
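#
# LPICmeup locates the constant table that follows it in a position-
# independent way: bcl 20,31,$+4 is a "branch always" with LK=1, so it
# merely deposits the address of the next instruction in LR, from which
# the start of the table (64 bytes into this cache-aligned block) is
# computed.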
$code.=<<___;
.align	6
LPICmeup:
	mflr	r0
	bcl	20,31,\$+4
	mflr	$Tbl	; vvvvvv "distance" between . and 1st data entry
	addi	$Tbl,$Tbl,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`
___
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
___
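
# Fold all `...` expressions (offsets, immediates, loop counts) by
# evaluating them as Perl before the code is emitted.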
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT or die "error closing STDOUT: $!";