# keccak1600p8-ppc.pl
#!/usr/bin/env perl
# Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Keccak-1600 for PowerISA 2.07.
#
# June 2017.
#
# This is straightforward KECCAK_1X_ALT SIMD implementation, but with
# disjoint Rho and Pi. The module is ABI-bitness- and endian-neutral.
# POWER8 processor spends 9.8 cycles to process byte out of large
# buffer for r=1088, which matches SHA3-256. This is 17% better than
# scalar PPC64 code. It probably should be noted that if POWER8's
# successor can achieve higher scalar instruction issue rate, then
# this module will lose... And it does on POWER9 with 12.0 vs. 9.4.
  27. $flavour = shift;
  28. if ($flavour =~ /64/) {
  29. $SIZE_T =8;
  30. $LRSAVE =2*$SIZE_T;
  31. $UCMP ="cmpld";
  32. $STU ="stdu";
  33. $POP ="ld";
  34. $PUSH ="std";
  35. } elsif ($flavour =~ /32/) {
  36. $SIZE_T =4;
  37. $LRSAVE =$SIZE_T;
  38. $STU ="stwu";
  39. $POP ="lwz";
  40. $PUSH ="stw";
  41. $UCMP ="cmplw";
  42. } else { die "nonsense $flavour"; }
  43. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  44. ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
  45. ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
  46. die "can't locate ppc-xlate.pl";
  47. open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
  48. $FRAME=6*$SIZE_T+13*16; # 13*16 is for v20-v31 offload
  49. my $sp ="r1";
  50. my $iotas = "r12";
########################################################################
# Register layout:
#
#	v0	A[0][0] A[1][0]
#	v1	A[0][1] A[1][1]
#	v2	A[0][2] A[1][2]
#	v3	A[0][3] A[1][3]
#	v4	A[0][4] A[1][4]
#
#	v5	A[2][0] A[3][0]
#	v6	A[2][1] A[3][1]
#	v7	A[2][2] A[3][2]
#	v8	A[2][3] A[3][3]
#	v9	A[2][4] A[3][4]
#
#	v10	A[4][0] A[4][1]
#	v11	A[4][2] A[4][3]
#	v12	A[4][4] A[4][4]
#
#	v13..25	rhotates[][]
#	v26..31	volatile
#
# KeccakF1600_int: the 24-round permutation core.  Expects the state in
# v0-v12 as laid out above, rotation constants in v13-v25, and $iotas
# (r12) pointing at the round-constant table; r0 is used as the running
# iota byte offset.  KeccakF1600: ABI-visible wrapper that saves v20-v31
# and vrsave, loads the state from r3, runs the core, and stores it back.
$code.=<<___;
.machine	"any"
.text

.type	KeccakF1600_int,\@function
.align	5
KeccakF1600_int:
	li	r0,24
	mtctr	r0
	li	r0,0
	b	.Loop

.align	4
.Loop:
	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Theta
	vxor	v26,v0, v5		; A[0..1][0]^A[2..3][0]
	vxor	v27,v1, v6		; A[0..1][1]^A[2..3][1]
	vxor	v28,v2, v7		; A[0..1][2]^A[2..3][2]
	vxor	v29,v3, v8		; A[0..1][3]^A[2..3][3]
	vxor	v30,v4, v9		; A[0..1][4]^A[2..3][4]
	vpermdi	v31,v26,v27,0b00	; A[0][0..1]^A[2][0..1]
	vpermdi	v26,v26,v27,0b11	; A[1][0..1]^A[3][0..1]
	vpermdi	v27,v28,v29,0b00	; A[0][2..3]^A[2][2..3]
	vpermdi	v28,v28,v29,0b11	; A[1][2..3]^A[3][2..3]
	vpermdi	v29,v30,v30,0b10	; A[1..0][4]^A[3..2][4]
	vxor	v26,v26,v31		; C[0..1]
	vxor	v27,v27,v28		; C[2..3]
	vxor	v28,v29,v30		; C[4..4]

	vspltisb v31,1
	vxor	v26,v26,v10		; C[0..1] ^= A[4][0..1]
	vxor	v27,v27,v11		; C[2..3] ^= A[4][2..3]
	vxor	v28,v28,v12		; C[4..4] ^= A[4][4..4], low!

	vrld	v29,v26,v31		; ROL64(C[0..1],1)
	vrld	v30,v27,v31		; ROL64(C[2..3],1)
	vrld	v31,v28,v31		; ROL64(C[4..4],1)
	vpermdi	v31,v31,v29,0b10
	vxor	v26,v26,v30		; C[0..1] ^= ROL64(C[2..3],1)
	vxor	v27,v27,v31		; C[2..3] ^= ROL64(C[4..0],1)
	vxor	v28,v28,v29		; C[4..4] ^= ROL64(C[0..1],1), low!

	vpermdi	v29,v26,v26,0b00	; C[0..0]
	vpermdi	v30,v28,v26,0b10	; C[4..0]
	vpermdi	v31,v28,v28,0b11	; C[4..4]
	vxor	v1, v1, v29		; A[0..1][1] ^= C[0..0]
	vxor	v6, v6, v29		; A[2..3][1] ^= C[0..0]
	vxor	v10,v10,v30		; A[4][0..1] ^= C[4..0]
	vxor	v0, v0, v31		; A[0..1][0] ^= C[4..4]
	vxor	v5, v5, v31		; A[2..3][0] ^= C[4..4]

	vpermdi	v29,v27,v27,0b00	; C[2..2]
	vpermdi	v30,v26,v26,0b11	; C[1..1]
	vpermdi	v31,v26,v27,0b10	; C[1..2]
	vxor	v3, v3, v29		; A[0..1][3] ^= C[2..2]
	vxor	v8, v8, v29		; A[2..3][3] ^= C[2..2]
	vxor	v2, v2, v30		; A[0..1][2] ^= C[1..1]
	vxor	v7, v7, v30		; A[2..3][2] ^= C[1..1]
	vxor	v11,v11,v31		; A[4][2..3] ^= C[1..2]

	vpermdi	v29,v27,v27,0b11	; C[3..3]
	vxor	v4, v4, v29		; A[0..1][4] ^= C[3..3]
	vxor	v9, v9, v29		; A[2..3][4] ^= C[3..3]
	vxor	v12,v12,v29		; A[4..4][4] ^= C[3..3]

	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Rho
	vrld	v26,v0, v13		; v0
	vrld	v1, v1, v14
	vrld	v27,v2, v15		; v2
	vrld	v28,v3, v16		; v3
	vrld	v4, v4, v17
	vrld	v5, v5, v18
	vrld	v6, v6, v19
	vrld	v29,v7, v20		; v7
	vrld	v8, v8, v21
	vrld	v9, v9, v22
	vrld	v10,v10,v23
	vrld	v30,v11,v24		; v11
	vrld	v12,v12,v25

	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Pi
	vpermdi	v0, v26,v28,0b00	; [0][0] [1][0] < [0][0] [0][3]
	vpermdi	v2, v29,v5, 0b00	; [0][2] [1][2] < [2][2] [2][0]
	vpermdi	v11,v9, v5, 0b01	; [4][2] [4][3] < [2][4] [3][0]
	vpermdi	v5, v1, v4, 0b00	; [2][0] [3][0] < [0][1] [0][4]
	vpermdi	v1, v1, v4, 0b11	; [0][1] [1][1] < [1][1] [1][4]
	vpermdi	v3, v8, v6, 0b11	; [0][3] [1][3] < [3][3] [3][1]
	vpermdi	v4, v12,v30,0b10	; [0][4] [1][4] < [4][4] [4][2]
	vpermdi	v7, v8, v6, 0b00	; [2][2] [3][2] < [2][3] [2][1]
	vpermdi	v6, v27,v26,0b11	; [2][1] [3][1] < [1][2] [1][0]
	vpermdi	v8, v9, v29,0b11	; [2][3] [3][3] < [3][4] [3][2]
	vpermdi	v12,v10,v10,0b11	; [4][4] [4][4] < [4][1] [4][1]
	vpermdi	v9, v10,v30,0b01	; [2][4] [3][4] < [4][0] [4][3]
	vpermdi	v10,v27,v28,0b01	; [4][0] [4][1] < [0][2] [1][3]

	;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Chi + Iota
	lvx_u	v31,$iotas,r0		; iotas[index]
	addic	r0,r0,16		; index++

	vandc	v26,v2, v1		; (~A[0..1][1] & A[0..1][2])
	vandc	v27,v3, v2		; (~A[0..1][2] & A[0..1][3])
	vandc	v28,v4, v3		; (~A[0..1][3] & A[0..1][4])
	vandc	v29,v0, v4		; (~A[0..1][4] & A[0..1][0])
	vandc	v30,v1, v0		; (~A[0..1][0] & A[0..1][1])
	vxor	v0, v0, v26		; A[0..1][0] ^= (~A[0..1][1] & A[0..1][2])
	vxor	v1, v1, v27		; A[0..1][1] ^= (~A[0..1][2] & A[0..1][3])
	vxor	v2, v2, v28		; A[0..1][2] ^= (~A[0..1][3] & A[0..1][4])
	vxor	v3, v3, v29		; A[0..1][3] ^= (~A[0..1][4] & A[0..1][0])
	vxor	v4, v4, v30		; A[0..1][4] ^= (~A[0..1][0] & A[0..1][1])

	vandc	v26,v7, v6		; (~A[2..3][1] & A[2..3][2])
	vandc	v27,v8, v7		; (~A[2..3][2] & A[2..3][3])
	vandc	v28,v9, v8		; (~A[2..3][3] & A[2..3][4])
	vandc	v29,v5, v9		; (~A[2..3][4] & A[2..3][0])
	vandc	v30,v6, v5		; (~A[2..3][0] & A[2..3][1])
	vxor	v5, v5, v26		; A[2..3][0] ^= (~A[2..3][1] & A[2..3][2])
	vxor	v6, v6, v27		; A[2..3][1] ^= (~A[2..3][2] & A[2..3][3])
	vxor	v7, v7, v28		; A[2..3][2] ^= (~A[2..3][3] & A[2..3][4])
	vxor	v8, v8, v29		; A[2..3][3] ^= (~A[2..3][4] & A[2..3][0])
	vxor	v9, v9, v30		; A[2..3][4] ^= (~A[2..3][0] & A[2..3][1])

	vxor	v0, v0, v31		; A[0][0] ^= iotas[index++]

	vpermdi	v26,v10,v11,0b10	; A[4][1..2]
	vpermdi	v27,v12,v10,0b00	; A[4][4..0]
	vpermdi	v28,v11,v12,0b10	; A[4][3..4]
	vpermdi	v29,v10,v10,0b10	; A[4][1..0]
	vandc	v26,v11,v26		; (~A[4][1..2] & A[4][2..3])
	vandc	v27,v27,v28		; (~A[4][3..4] & A[4][4..0])
	vandc	v28,v10,v29		; (~A[4][1..0] & A[4][0..1])
	vxor	v10,v10,v26		; A[4][0..1] ^= (~A[4][1..2] & A[4][2..3])
	vxor	v11,v11,v27		; A[4][2..3] ^= (~A[4][3..4] & A[4][4..0])
	vxor	v12,v12,v28		; A[4][4..4] ^= (~A[4][0..1] & A[4][1..0])

	bdnz	.Loop

	vpermdi	v12,v12,v12,0b11	; broadcast A[4][4]
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
.size	KeccakF1600_int,.-KeccakF1600_int

.type	KeccakF1600,\@function
.align	5
KeccakF1600:
	$STU	$sp,-$FRAME($sp)
	li	r10,`15+6*$SIZE_T`
	li	r11,`31+6*$SIZE_T`
	mflr	r8
	mfspr	r7, 256			; save vrsave
	stvx	v20,r10,$sp
	addi	r10,r10,32
	stvx	v21,r11,$sp
	addi	r11,r11,32
	stvx	v22,r10,$sp
	addi	r10,r10,32
	stvx	v23,r11,$sp
	addi	r11,r11,32
	stvx	v24,r10,$sp
	addi	r10,r10,32
	stvx	v25,r11,$sp
	addi	r11,r11,32
	stvx	v26,r10,$sp
	addi	r10,r10,32
	stvx	v27,r11,$sp
	addi	r11,r11,32
	stvx	v28,r10,$sp
	addi	r10,r10,32
	stvx	v29,r11,$sp
	addi	r11,r11,32
	stvx	v30,r10,$sp
	stvx	v31,r11,$sp
	stw	r7,`$FRAME-4`($sp)	; save vrsave
	li	r0, -1
	$PUSH	r8,`$FRAME+$LRSAVE`($sp)
	mtspr	256, r0			; preserve all AltiVec registers

	li	r11,16
	lvx_4w	v0,0,r3			; load A[5][5]
	li	r10,32
	lvx_4w	v1,r11,r3
	addi	r11,r11,32
	lvx_4w	v2,r10,r3
	addi	r10,r10,32
	lvx_4w	v3,r11,r3
	addi	r11,r11,32
	lvx_4w	v4,r10,r3
	addi	r10,r10,32
	lvx_4w	v5,r11,r3
	addi	r11,r11,32
	lvx_4w	v6,r10,r3
	addi	r10,r10,32
	lvx_4w	v7,r11,r3
	addi	r11,r11,32
	lvx_4w	v8,r10,r3
	addi	r10,r10,32
	lvx_4w	v9,r11,r3
	addi	r11,r11,32
	lvx_4w	v10,r10,r3
	addi	r10,r10,32
	lvx_4w	v11,r11,r3
	lvx_splt v12,r10,r3

	bl	PICmeup

	li	r11,16
	lvx_u	v13,0,r12		; load rhotates
	li	r10,32
	lvx_u	v14,r11,r12
	addi	r11,r11,32
	lvx_u	v15,r10,r12
	addi	r10,r10,32
	lvx_u	v16,r11,r12
	addi	r11,r11,32
	lvx_u	v17,r10,r12
	addi	r10,r10,32
	lvx_u	v18,r11,r12
	addi	r11,r11,32
	lvx_u	v19,r10,r12
	addi	r10,r10,32
	lvx_u	v20,r11,r12
	addi	r11,r11,32
	lvx_u	v21,r10,r12
	addi	r10,r10,32
	lvx_u	v22,r11,r12
	addi	r11,r11,32
	lvx_u	v23,r10,r12
	addi	r10,r10,32
	lvx_u	v24,r11,r12
	lvx_u	v25,r10,r12
	addi	r12,r12,`16*16`		; points at iotas

	bl	KeccakF1600_int

	li	r11,16
	stvx_4w	v0,0,r3			; return A[5][5]
	li	r10,32
	stvx_4w	v1,r11,r3
	addi	r11,r11,32
	stvx_4w	v2,r10,r3
	addi	r10,r10,32
	stvx_4w	v3,r11,r3
	addi	r11,r11,32
	stvx_4w	v4,r10,r3
	addi	r10,r10,32
	stvx_4w	v5,r11,r3
	addi	r11,r11,32
	stvx_4w	v6,r10,r3
	addi	r10,r10,32
	stvx_4w	v7,r11,r3
	addi	r11,r11,32
	stvx_4w	v8,r10,r3
	addi	r10,r10,32
	stvx_4w	v9,r11,r3
	addi	r11,r11,32
	stvx_4w	v10,r10,r3
	addi	r10,r10,32
	stvx_4w	v11,r11,r3
	stvdx_u	v12,r10,r3

	li	r10,`15+6*$SIZE_T`
	li	r11,`31+6*$SIZE_T`
	mtlr	r8
	mtspr	256, r7			; restore vrsave
	lvx	v20,r10,$sp
	addi	r10,r10,32
	lvx	v21,r11,$sp
	addi	r11,r11,32
	lvx	v22,r10,$sp
	addi	r10,r10,32
	lvx	v23,r11,$sp
	addi	r11,r11,32
	lvx	v24,r10,$sp
	addi	r10,r10,32
	lvx	v25,r11,$sp
	addi	r11,r11,32
	lvx	v26,r10,$sp
	addi	r10,r10,32
	lvx	v27,r11,$sp
	addi	r11,r11,32
	lvx	v28,r10,$sp
	addi	r10,r10,32
	lvx	v29,r11,$sp
	addi	r11,r11,32
	lvx	v30,r10,$sp
	lvx	v31,r11,$sp
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,0x04,1,0x80,0,1,0
	.long	0
.size	KeccakF1600,.-KeccakF1600
___
{
# SHA3_absorb(A_jagged, inp, len, bsz): absorb full blocks of inp into the
# state, permuting after each bsz-byte block; returns the residual byte
# count (< bsz) in r3.  The input is consumed 8 bytes at a time; v30/v31
# hold lane-placement permutation masks (byte-swapped on big-endian via
# the '?'-prefixed instructions, which xlate keeps or drops per flavour),
# and the unrolled 25-lane chain exits early through bdz when the block
# boundary is reached.
my ($A_jagged,$inp,$len,$bsz) = map("r$_",(3..6));

$code.=<<___;
.globl	SHA3_absorb
.type	SHA3_absorb,\@function
.align	5
SHA3_absorb:
	$STU	$sp,-$FRAME($sp)
	li	r10,`15+6*$SIZE_T`
	li	r11,`31+6*$SIZE_T`
	mflr	r8
	mfspr	r7, 256			; save vrsave
	stvx	v20,r10,$sp
	addi	r10,r10,32
	stvx	v21,r11,$sp
	addi	r11,r11,32
	stvx	v22,r10,$sp
	addi	r10,r10,32
	stvx	v23,r11,$sp
	addi	r11,r11,32
	stvx	v24,r10,$sp
	addi	r10,r10,32
	stvx	v25,r11,$sp
	addi	r11,r11,32
	stvx	v26,r10,$sp
	addi	r10,r10,32
	stvx	v27,r11,$sp
	addi	r11,r11,32
	stvx	v28,r10,$sp
	addi	r10,r10,32
	stvx	v29,r11,$sp
	addi	r11,r11,32
	stvx	v30,r10,$sp
	stvx	v31,r11,$sp
	stw	r7,`$FRAME-4`($sp)	; save vrsave
	li	r0, -1
	$PUSH	r8,`$FRAME+$LRSAVE`($sp)
	mtspr	256, r0			; preserve all AltiVec registers

	li	r11,16
	lvx_4w	v0,0,$A_jagged		; load A[5][5]
	li	r10,32
	lvx_4w	v1,r11,$A_jagged
	addi	r11,r11,32
	lvx_4w	v2,r10,$A_jagged
	addi	r10,r10,32
	lvx_4w	v3,r11,$A_jagged
	addi	r11,r11,32
	lvx_4w	v4,r10,$A_jagged
	addi	r10,r10,32
	lvx_4w	v5,r11,$A_jagged
	addi	r11,r11,32
	lvx_4w	v6,r10,$A_jagged
	addi	r10,r10,32
	lvx_4w	v7,r11,$A_jagged
	addi	r11,r11,32
	lvx_4w	v8,r10,$A_jagged
	addi	r10,r10,32
	lvx_4w	v9,r11,$A_jagged
	addi	r11,r11,32
	lvx_4w	v10,r10,$A_jagged
	addi	r10,r10,32
	lvx_4w	v11,r11,$A_jagged
	lvx_splt v12,r10,$A_jagged

	bl	PICmeup

	li	r11,16
	lvx_u	v13,0,r12		; load rhotates
	li	r10,32
	lvx_u	v14,r11,r12
	addi	r11,r11,32
	lvx_u	v15,r10,r12
	addi	r10,r10,32
	lvx_u	v16,r11,r12
	addi	r11,r11,32
	lvx_u	v17,r10,r12
	addi	r10,r10,32
	lvx_u	v18,r11,r12
	addi	r11,r11,32
	lvx_u	v19,r10,r12
	addi	r10,r10,32
	lvx_u	v20,r11,r12
	addi	r11,r11,32
	lvx_u	v21,r10,r12
	addi	r10,r10,32
	lvx_u	v22,r11,r12
	addi	r11,r11,32
	lvx_u	v23,r10,r12
	addi	r10,r10,32
	lvx_u	v24,r11,r12
	lvx_u	v25,r10,r12
	li	r10,-32
	li	r11,-16
	addi	r12,r12,`16*16`		; points at iotas
	b	.Loop_absorb

.align	4
.Loop_absorb:
	$UCMP	$len,$bsz		; len < bsz?
	blt	.Labsorbed

	sub	$len,$len,$bsz		; len -= bsz
	srwi	r0,$bsz,3
	mtctr	r0
	lvx_u	v30,r10,r12		; permutation masks
	lvx_u	v31,r11,r12
	?vspltisb v27,7			; prepare masks for byte swap
	?vxor	v30,v30,v27		; on big-endian
	?vxor	v31,v31,v27

	vxor	v27,v27,v27		; zero
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v0, v0, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v1, v1, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v2, v2, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v3, v3, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v4, v4, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v0, v0, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v1, v1, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v2, v2, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v3, v3, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v4, v4, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v5, v5, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v6, v6, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v7, v7, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v8, v8, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v9, v9, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v5, v5, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v6, v6, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v7, v7, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v8, v8, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v9, v9, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v10, v10, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v10, v10, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v30
	vxor	v11, v11, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v11, v11, v26
	bdz	.Lprocess_block
	lvdx_u	v26,0,$inp
	addi	$inp,$inp,8
	vperm	v26,v26,v27,v31
	vxor	v12, v12, v26

.Lprocess_block:
	bl	KeccakF1600_int

	b	.Loop_absorb

.align	4
.Labsorbed:
	li	r11,16
	stvx_4w	v0,0,$A_jagged		; return A[5][5]
	li	r10,32
	stvx_4w	v1,r11,$A_jagged
	addi	r11,r11,32
	stvx_4w	v2,r10,$A_jagged
	addi	r10,r10,32
	stvx_4w	v3,r11,$A_jagged
	addi	r11,r11,32
	stvx_4w	v4,r10,$A_jagged
	addi	r10,r10,32
	stvx_4w	v5,r11,$A_jagged
	addi	r11,r11,32
	stvx_4w	v6,r10,$A_jagged
	addi	r10,r10,32
	stvx_4w	v7,r11,$A_jagged
	addi	r11,r11,32
	stvx_4w	v8,r10,$A_jagged
	addi	r10,r10,32
	stvx_4w	v9,r11,$A_jagged
	addi	r11,r11,32
	stvx_4w	v10,r10,$A_jagged
	addi	r10,r10,32
	stvx_4w	v11,r11,$A_jagged
	stvdx_u	v12,r10,$A_jagged

	mr	r3,$len			; return value
	li	r10,`15+6*$SIZE_T`
	li	r11,`31+6*$SIZE_T`
	mtlr	r8
	mtspr	256, r7			; restore vrsave
	lvx	v20,r10,$sp
	addi	r10,r10,32
	lvx	v21,r11,$sp
	addi	r11,r11,32
	lvx	v22,r10,$sp
	addi	r10,r10,32
	lvx	v23,r11,$sp
	addi	r11,r11,32
	lvx	v24,r10,$sp
	addi	r10,r10,32
	lvx	v25,r11,$sp
	addi	r11,r11,32
	lvx	v26,r10,$sp
	addi	r10,r10,32
	lvx	v27,r11,$sp
	addi	r11,r11,32
	lvx	v28,r10,$sp
	addi	r10,r10,32
	lvx	v29,r11,$sp
	addi	r11,r11,32
	lvx	v30,r10,$sp
	lvx	v31,r11,$sp
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,0x04,1,0x80,0,4,0
	.long	0
.size	SHA3_absorb,.-SHA3_absorb
___
}
{
# SHA3_squeeze(A_jagged, out, len, bsz): emit len output bytes from the
# state, re-permuting (via KeccakF1600) whenever a full rate block has
# been consumed.  Lanes are read as two 32-bit words (lo at A_jagged+4+
# r11, hi at A_jagged+r11) and written out byte by byte with stbu; the
# cmplwi/subi/addi dance after each lane computes the next "jagged"
# 16-byte-aligned lane offset within the state layout.
my ($A_jagged,$out,$len,$bsz) = map("r$_",(3..6));

$code.=<<___;
.globl	SHA3_squeeze
.type	SHA3_squeeze,\@function
.align	5
SHA3_squeeze:
	mflr	r9			; r9 is not touched by KeccakF1600
	subi	$out,$out,1		; prepare for stbu
	addi	r8,$A_jagged,4		; prepare volatiles
	mr	r10,$bsz
	li	r11,0
	b	.Loop_squeeze

.align	4
.Loop_squeeze:
	lwzx	r7,r11,r8		; lo
	lwzx	r0,r11,$A_jagged	; hi
	${UCMP}i $len,8
	blt	.Lsqueeze_tail

	stbu	r7,1($out)		; write lo
	srwi	r7,r7,8
	stbu	r7,1($out)
	srwi	r7,r7,8
	stbu	r7,1($out)
	srwi	r7,r7,8
	stbu	r7,1($out)
	stbu	r0,1($out)		; write hi
	srwi	r0,r0,8
	stbu	r0,1($out)
	srwi	r0,r0,8
	stbu	r0,1($out)
	srwi	r0,r0,8
	stbu	r0,1($out)

	subic.	$len,$len,8
	beqlr				; return if done
	subic.	r10,r10,8
	ble	.Loutput_expand

	addi	r11,r11,16		; calculate jagged index
	cmplwi	r11,`16*5`
	blt	.Loop_squeeze
	subi	r11,r11,72
	beq	.Loop_squeeze
	addi	r11,r11,72
	cmplwi	r11,`16*5+8`
	subi	r11,r11,8
	beq	.Loop_squeeze
	addi	r11,r11,8
	cmplwi	r11,`16*10`
	subi	r11,r11,72
	beq	.Loop_squeeze
	addi	r11,r11,72
	blt	.Loop_squeeze
	subi	r11,r11,8
	b	.Loop_squeeze

.align	4
.Loutput_expand:
	bl	KeccakF1600
	mtlr	r9

	addi	r8,$A_jagged,4		; restore volatiles
	mr	r10,$bsz
	li	r11,0
	b	.Loop_squeeze

.align	4
.Lsqueeze_tail:
	mtctr	$len
	subic.	$len,$len,4
	ble	.Loop_tail_lo
	li	r8,4
	mtctr	r8
.Loop_tail_lo:
	stbu	r7,1($out)
	srdi	r7,r7,8
	bdnz	.Loop_tail_lo
	ble	.Lsqueeze_done
	mtctr	$len
.Loop_tail_hi:
	stbu	r0,1($out)
	srdi	r0,r0,8
	bdnz	.Loop_tail_hi
.Lsqueeze_done:
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
.size	SHA3_squeeze,.-SHA3_squeeze
___
}
# PICmeup: position-independent helper that leaves the address of the
# rhotates table in r12 (the bcl/mflr pair captures the current PC, and
# `64-8` is the distance from that point to the first data entry; the
# .space pad keeps the tables at the expected offset).  It is followed by
# the rotation-amount table, the absorb permutation masks, and the 24
# Keccak round constants (iotas).
$code.=<<___;
.align	6
PICmeup:
	mflr	r0
	bcl	20,31,\$+4
	mflr	r12	; vvvvvv "distance" between . and 1st data entry
	addi	r12,r12,`64-8`
	mtlr	r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,0,0
	.space	`64-9*4`

.type	rhotates,\@object
.align	6
rhotates:
	.quad	0, 36
	.quad	1, 44
	.quad	62, 6
	.quad	28, 55
	.quad	27, 20
	.quad	3, 41
	.quad	10, 45
	.quad	43, 15
	.quad	25, 21
	.quad	39, 8
	.quad	18, 2
	.quad	61, 56
	.quad	14, 14
.size	rhotates,.-rhotates

	.quad	0,0
	.quad	0x0001020304050607,0x1011121314151617
	.quad	0x1011121314151617,0x0001020304050607

.type	iotas,\@object
iotas:
	.quad	0x0000000000000001,0
	.quad	0x0000000000008082,0
	.quad	0x800000000000808a,0
	.quad	0x8000000080008000,0
	.quad	0x000000000000808b,0
	.quad	0x0000000080000001,0
	.quad	0x8000000080008081,0
	.quad	0x8000000000008009,0
	.quad	0x000000000000008a,0
	.quad	0x0000000000000088,0
	.quad	0x0000000080008009,0
	.quad	0x000000008000000a,0
	.quad	0x000000008000808b,0
	.quad	0x800000000000008b,0
	.quad	0x8000000000008089,0
	.quad	0x8000000000008003,0
	.quad	0x8000000000008002,0
	.quad	0x8000000000000080,0
	.quad	0x000000000000800a,0
	.quad	0x800000008000000a,0
	.quad	0x8000000080008081,0
	.quad	0x8000000000008080,0
	.quad	0x0000000080000001,0
	.quad	0x8000000080008008,0
.size	iotas,.-iotas

.asciz	"Keccak-1600 absorb and squeeze for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
___
  786. foreach (split("\n",$code)) {
  787. s/\`([^\`]*)\`/eval $1/ge;
  788. if ($flavour =~ /le$/) { # little-endian
  789. s/\?([a-z]+)/;$1/;
  790. } else { # big-endian
  791. s/\?([a-z]+)/$1/;
  792. }
  793. print $_,"\n";
  794. }
  795. close STDOUT;