ghashp8-ppc.pl

#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for PowerISA v2.07.
#
# July 2014
#
# Accurate performance measurements are problematic, because it's
# always a virtualized setup with a possibly throttled processor.
# Relative comparison is therefore more informative. This initial
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
# faster than "4-bit" integer-only compiler-generated 64-bit code.
# "Initial version" means that there is room for further improvement.
#
# May 2016
#
# 2x aggregated reduction improves performance by 50% (resulting
# performance on POWER8 is 1 cycle per processed byte), and 4x
# aggregated reduction - by 170% or 2.7x (resulting in 0.55 cpb).
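#
# A sketch of why aggregation helps (standard GHASH algebra, not
# spelled out in the original): with per-block processing each block
# costs a multiplication and a reduction,
#
#	Xi+1 = (Xi ^ Ci)·H
#
# while unrolling twice and substituting gives
#
#	Xi+2 = ((Xi ^ Ci)·H ^ Ci+1)·H = (Xi ^ Ci)·H^2 ^ Ci+1·H
#
# i.e. two independent multiplications, by pre-computed H^2 and H,
# followed by a single shared reduction; the 4x path below does the
# same with H^4, H^3, H^2 and H.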

$flavour=shift;
$output =shift;

if ($flavour =~ /64/) {
	$SIZE_T=8;
	$LRSAVE=2*$SIZE_T;
	$STU="stdu";
	$POP="ld";
	$PUSH="std";
	$UCMP="cmpld";
	$SHRI="srdi";
} elsif ($flavour =~ /32/) {
	$SIZE_T=4;
	$LRSAVE=$SIZE_T;
	$STU="stwu";
	$POP="lwz";
	$PUSH="stw";
	$UCMP="cmplw";
	$SHRI="srwi";
} else { die "nonsense $flavour"; }
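
# Usage sketch (my assumption from CRYPTOGAMS convention, not stated in
# this file): the script is invoked as
#
#	perl ghashp8-ppc.pl <flavour> <output>
#
# e.g. "perl ghashp8-ppc.pl linux64le ghashp8-ppc.s"; any flavour
# matching /64/ selects the 64-bit glue above, /32/ the 32-bit glue,
# and anything else dies.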

$sp="r1";
$FRAME=6*$SIZE_T+13*16;	# 13*16 is for v20-v31 offload

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";

my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6));	# argument block

my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
my $vrsave="r12";

$code=<<___;
.machine	"any"

.text

.globl	.gcm_init_p8
.align	5
.gcm_init_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$H,0,r4			# load H

	vspltisb	$xC2,-16		# 0xf0
	vspltisb	$t0,1			# one
	vaddubm		$xC2,$xC2,$xC2		# 0xe0
	vxor		$zero,$zero,$zero
	vor		$xC2,$xC2,$t0		# 0xe1
	vsldoi		$xC2,$xC2,$zero,15	# 0xe1...
	vsldoi		$t1,$zero,$t0,1		# ...1
	vaddubm		$xC2,$xC2,$xC2		# 0xc2...
	vspltisb	$t2,7
	vor		$xC2,$xC2,$t1		# 0xc2....01
	vspltb		$t1,$H,0		# most significant byte
	vsl		$H,$H,$t0		# H<<=1
	vsrab		$t1,$t1,$t2		# broadcast carry bit
	vand		$t1,$t1,$xC2
	vxor		$IN,$H,$t1		# twisted H

	vsldoi		$H,$IN,$IN,8		# twist even more ...
	vsldoi		$xC2,$zero,$xC2,8	# 0xc2.0
	vsldoi		$Hl,$zero,$H,8		# ... and split
	vsldoi		$Hh,$H,$zero,8

	stvx_u		$xC2,0,r3		# save pre-computed table
	stvx_u		$Hl,r8,r3
	li		r8,0x40
	stvx_u		$H, r9,r3
	li		r9,0x50
	stvx_u		$Hh,r10,r3
	li		r10,0x60

	vpmsumd		$Xl,$IN,$Hl		# H.lo·H.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·H.lo+H.lo·H.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·H.hi
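	# What follows is the usual vpmsumd reduction (gloss mine, not a
	# comment from the original): GHASH arithmetic is modulo the
	# polynomial x^128+x^7+x^2+x+1, which in the bit-reflected
	# representation appears as the 0xc2... constant built above, so
	# each "reduction phase" is one carry-less multiply by that
	# constant plus a shuffle and xors.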
	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$IN1,$Xl,$t1

	vsldoi		$H2,$IN1,$IN1,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$H2l,r8,r3		# save H^2
	li		r8,0x70
	stvx_u		$H2,r9,r3
	li		r9,0x80
	stvx_u		$H2h,r10,r3
	li		r10,0x90
___
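
# Layout of the pre-computed table at r3, as the stores above and below
# arrange it (summary mine, offsets from the code):
#
#	0x00			0xc2... reduction constant
#	0x10,0x20,0x30		H.lo,   H,   H.hi	(twisted H)
#	0x40,0x50,0x60		H^2.lo, H^2, H^2.hi
#	0x70,0x80,0x90		H^3.lo, H^3, H^3.hi
#	0xa0,0xb0,0xc0		H^4.lo, H^4, H^4.hi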
{
my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
$code.=<<___;
	vpmsumd		$Xl,$IN,$H2l		# H.lo·H^2.lo
	vpmsumd		$Xl1,$IN1,$H2l		# H^2.lo·H^2.lo
	vpmsumd		$Xm,$IN,$H2		# H.hi·H^2.lo+H.lo·H^2.hi
	vpmsumd		$Xm1,$IN1,$H2		# H^2.hi·H^2.lo+H^2.lo·H^2.hi
	vpmsumd		$Xh,$IN,$H2h		# H.hi·H^2.hi
	vpmsumd		$Xh1,$IN1,$H2h		# H^2.hi·H^2.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$t6,$Xl1,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vsldoi		$t4,$Xm1,$zero,8
	vsldoi		$t5,$zero,$Xm1,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1
	vxor		$Xl1,$Xl1,$t4
	vxor		$Xh1,$Xh1,$t5

	vsldoi		$Xl,$Xl,$Xl,8
	vsldoi		$Xl1,$Xl1,$Xl1,8
	vxor		$Xl,$Xl,$t2
	vxor		$Xl1,$Xl1,$t6

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vsldoi		$t5,$Xl1,$Xl1,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vpmsumd		$Xl1,$Xl1,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$t5,$t5,$Xh1
	vxor		$Xl,$Xl,$t1
	vxor		$Xl1,$Xl1,$t5

	vsldoi		$H,$Xl,$Xl,8
	vsldoi		$H2,$Xl1,$Xl1,8
	vsldoi		$Hl,$zero,$H,8
	vsldoi		$Hh,$H,$zero,8
	vsldoi		$H2l,$zero,$H2,8
	vsldoi		$H2h,$H2,$zero,8

	stvx_u		$Hl,r8,r3		# save H^3
	li		r8,0xa0
	stvx_u		$H,r9,r3
	li		r9,0xb0
	stvx_u		$Hh,r10,r3
	li		r10,0xc0
	stvx_u		$H2l,r8,r3		# save H^4
	stvx_u		$H2,r9,r3
	stvx_u		$H2h,r10,r3

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_init_p8,.-.gcm_init_p8
___
}

$code.=<<___;
.globl	.gcm_gmult_p8
.align	5
.gcm_gmult_p8:
	lis		r0,0xfff8
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$IN,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	le?vxor		$lemask,$lemask,$t0
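	# On little-endian, lemask as built above is a byte-swap permute
	# (gloss mine): lvsl with a 16-byte-aligned sum returns the
	# identity pattern 0x00..0x0f, and xor-ing each byte with 7
	# reverses byte order within each doubleword; vperm then applies
	# it to every vector loaded from or stored to memory.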
	lvx_u		$xC2,0,$Htbl
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$zero,$zero,$zero

	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_gmult_p8,.-.gcm_gmult_p8

.globl	.gcm_ghash_p8
.align	5
.gcm_ghash_p8:
	li		r0,-4096
	li		r8,0x10
	mfspr		$vrsave,256
	li		r9,0x20
	mtspr		256,r0
	li		r10,0x30
	lvx_u		$Xl,0,$Xip		# load Xi

	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
	li		r8,0x40
	le?lvsl		$lemask,r0,r0
	lvx_u		$H, r9,$Htbl
	li		r9,0x50
	le?vspltisb	$t0,0x07
	lvx_u		$Hh,r10,$Htbl
	li		r10,0x60
	le?vxor		$lemask,$lemask,$t0
	lvx_u		$xC2,0,$Htbl
	le?vperm	$Xl,$Xl,$Xl,$lemask
	vxor		$zero,$zero,$zero

	${UCMP}i	$len,64
	bge		Lgcm_ghash_p8_4x

	lvx_u		$IN,0,$inp
	addi		$inp,$inp,16
	subic.		$len,$len,16
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$IN,$IN,$Xl
	beq		Lshort

	lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,16
	lvx_u		$H2, r9,$Htbl
	add		r9,$inp,$len		# end of input
	lvx_u		$H2h,r10,$Htbl
	be?b		Loop_2x

.align	5
Loop_2x:
	lvx_u		$IN1,0,$inp
	le?vperm	$IN1,$IN1,$IN1,$lemask

	subic		$len,$len,32
	vpmsumd		$Xl,$IN,$H2l		# H^2.lo·Xi.lo
	vpmsumd		$Xl1,$IN1,$Hl		# H.lo·Xi+1.lo
	subfe		r0,r0,r0		# borrow?-1:0
	vpmsumd		$Xm,$IN,$H2		# H^2.hi·Xi.lo+H^2.lo·Xi.hi
	vpmsumd		$Xm1,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+1.hi
	and		r0,r0,$len
	vpmsumd		$Xh,$IN,$H2h		# H^2.hi·Xi.hi
	vpmsumd		$Xh1,$IN1,$Hh		# H.hi·Xi+1.hi
	add		$inp,$inp,r0
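	# Borrow trick, as I read it (the original only annotates the
	# subfe): subic sets CA when at least 32 bytes remained, making
	# r0 zero then and all-ones otherwise; and-ing with the now
	# negative len and adding to inp folds the overshoot back, so the
	# lookahead lvx_u below never reads past the end of the input.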
	vxor		$Xl,$Xl,$Xl1
	vxor		$Xm,$Xm,$Xm1

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh1
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2
	lvx_u		$IN,r8,$inp
	addi		$inp,$inp,32

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	le?vperm	$IN,$IN,$IN,$lemask
	vxor		$t1,$t1,$Xh
	vxor		$IN,$IN,$t1
	vxor		$IN,$IN,$Xl
	$UCMP		r9,$inp
	bgt		Loop_2x			# done yet?

	cmplwi		$len,0
	bne		Leven

Lshort:
	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh

Leven:
	vxor		$Xl,$Xl,$t1
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	mtspr		256,$vrsave
	blr
	.long		0
	.byte		0,12,0x14,0,0,0,4,0
	.long		0
___
{
my ($Xl3,$Xm2,$IN2,$H3l,$H3,$H3h,
    $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
my $IN0=$IN;
my ($H21l,$H21h,$loperm,$hiperm) = ($Hl,$Hh,$H2l,$H2h);

$code.=<<___;
.align	5
.gcm_ghash_p8_4x:
Lgcm_ghash_p8_4x:
	$STU		$sp,-$FRAME($sp)
	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	stvx		v20,r10,$sp
	addi		r10,r10,32
	stvx		v21,r11,$sp
	addi		r11,r11,32
	stvx		v22,r10,$sp
	addi		r10,r10,32
	stvx		v23,r11,$sp
	addi		r11,r11,32
	stvx		v24,r10,$sp
	addi		r10,r10,32
	stvx		v25,r11,$sp
	addi		r11,r11,32
	stvx		v26,r10,$sp
	addi		r10,r10,32
	stvx		v27,r11,$sp
	addi		r11,r11,32
	stvx		v28,r10,$sp
	addi		r10,r10,32
	stvx		v29,r11,$sp
	addi		r11,r11,32
	stvx		v30,r10,$sp
	li		r10,0x60
	stvx		v31,r11,$sp
	li		r0,-1
	stw		$vrsave,`$FRAME-4`($sp)	# save vrsave
	mtspr		256,r0			# preserve all AltiVec registers

	lvsl		$t0,0,r8		# 0x0001..0e0f
	#lvx_u		$H2l,r8,$Htbl		# load H^2
	li		r8,0x70
	lvx_u		$H2, r9,$Htbl
	li		r9,0x80
	vspltisb	$t1,8			# 0x0808..0808
	#lvx_u		$H2h,r10,$Htbl
	li		r10,0x90
	lvx_u		$H3l,r8,$Htbl		# load H^3
	li		r8,0xa0
	lvx_u		$H3, r9,$Htbl
	li		r9,0xb0
	lvx_u		$H3h,r10,$Htbl
	li		r10,0xc0
	lvx_u		$H4l,r8,$Htbl		# load H^4
	li		r8,0x10
	lvx_u		$H4, r9,$Htbl
	li		r9,0x20
	lvx_u		$H4h,r10,$Htbl
	li		r10,0x30

	vsldoi		$t2,$zero,$t1,8		# 0x0000..0808
	vaddubm		$hiperm,$t0,$t2		# 0x0001..1617
	vaddubm		$loperm,$t1,$hiperm	# 0x0809..1e1f
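	# The two masks select, respectively, the high and the low
	# doublewords of a register pair (byte patterns in the comments
	# above; interpretation mine): permuting H^2 with H, and Xi+2
	# with Xi+3, packs two operands into one register, so a single
	# vpmsumd below accumulates partial products for two blocks.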
	$SHRI		$len,$len,4		# this allows us to use the
						# sign bit as carry
	lvx_u		$IN0,0,$inp		# load input
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,8
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask

	vxor		$Xh,$IN0,$Xl

	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vperm		$H21l,$H2,$H,$hiperm
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$H21h,$H2,$H,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm
	vpmsumd		$Xm2,$IN2,$H2		# H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+2.lo+H.lo·Xi+3.lo
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+2.hi+H.hi·Xi+3.hi

	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh3,$Xh3,$Xh1

	blt		Ltail_4x

Loop_4x:
	lvx_u		$IN0,0,$inp
	lvx_u		$IN1,r8,$inp
	subic.		$len,$len,4
	lvx_u		$IN2,r9,$inp
	lvx_u		$IN3,r10,$inp
	addi		$inp,$inp,0x40
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask
	le?vperm	$IN3,$IN3,$IN3,$lemask
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi
	vpmsumd		$Xl1,$IN1,$H3l
	vpmsumd		$Xm1,$IN1,$H3
	vpmsumd		$Xh1,$IN1,$H3h

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3
	vxor		$Xh,$Xh,$Xh3
	vperm		$t0,$IN2,$IN3,$loperm
	vperm		$t1,$IN2,$IN3,$hiperm

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase
	vpmsumd		$Xl3,$t0,$H21l		# H.lo·Xi+3.lo  +H^2.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H.hi·Xi+3.hi  +H^2.hi·Xi+2.hi

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xm2,$IN2,$H2		# H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi
	vpmsumd		$Xm3,$IN3,$H		# H.hi·Xi+3.lo  +H.lo·Xi+3.hi
	vpmsumd		$Xl,$Xl,$xC2

	vxor		$Xl3,$Xl3,$Xl1
	vxor		$Xh3,$Xh3,$Xh1
	vxor		$Xh,$Xh,$IN0
	vxor		$Xm2,$Xm2,$Xm1
	vxor		$Xh,$Xh,$t1
	vxor		$Xm3,$Xm3,$Xm2
	vxor		$Xh,$Xh,$Xl
	bge		Loop_4x
Ltail_4x:
	vpmsumd		$Xl,$Xh,$H4l		# H^4.lo·Xi.lo
	vpmsumd		$Xm,$Xh,$H4		# H^4.hi·Xi.lo+H^4.lo·Xi.hi
	vpmsumd		$Xh,$Xh,$H4h		# H^4.hi·Xi.hi

	vxor		$Xl,$Xl,$Xl3
	vxor		$Xm,$Xm,$Xm3

	vpmsumd		$t2,$Xl,$xC2		# 1st reduction phase

	vsldoi		$t0,$Xm,$zero,8
	vsldoi		$t1,$zero,$Xm,8
	vxor		$Xh,$Xh,$Xh3
	vxor		$Xl,$Xl,$t0
	vxor		$Xh,$Xh,$t1

	vsldoi		$Xl,$Xl,$Xl,8
	vxor		$Xl,$Xl,$t2

	vsldoi		$t1,$Xl,$Xl,8		# 2nd reduction phase
	vpmsumd		$Xl,$Xl,$xC2
	vxor		$t1,$t1,$Xh
	vxor		$Xl,$Xl,$t1

	addic.		$len,$len,4
	beq		Ldone_4x

	lvx_u		$IN0,0,$inp
	${UCMP}i	$len,2
	li		$len,-4
	blt		Lone
	lvx_u		$IN1,r8,$inp
	beq		Ltwo

Lthree:
	lvx_u		$IN2,r9,$inp
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask
	le?vperm	$IN2,$IN2,$IN2,$lemask

	vxor		$Xh,$IN0,$Xl
	vmr		$H4l,$H3l
	vmr		$H4, $H3
	vmr		$H4h,$H3h

	vperm		$t0,$IN1,$IN2,$loperm
	vperm		$t1,$IN1,$IN2,$hiperm
	vpmsumd		$Xm2,$IN1,$H2		# H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo
	vpmsumd		$Xm3,$IN2,$H		# H.hi·Xi+2.lo  +H.lo·Xi+2.hi
	vpmsumd		$Xl3,$t0,$H21l		# H^2.lo·Xi+1.lo+H.lo·Xi+2.lo
	vpmsumd		$Xh3,$t1,$H21h		# H^2.hi·Xi+1.hi+H.hi·Xi+2.hi

	vxor		$Xm3,$Xm3,$Xm2
	b		Ltail_4x

.align	4
Ltwo:
	le?vperm	$IN0,$IN0,$IN0,$lemask
	le?vperm	$IN1,$IN1,$IN1,$lemask

	vxor		$Xh,$IN0,$Xl
	vperm		$t0,$zero,$IN1,$loperm
	vperm		$t1,$zero,$IN1,$hiperm

	vsldoi		$H4l,$zero,$H2,8
	vmr		$H4, $H2
	vsldoi		$H4h,$H2,$zero,8

	vpmsumd		$Xl3,$t0, $H21l		# H.lo·Xi+1.lo
	vpmsumd		$Xm3,$IN1,$H		# H.hi·Xi+1.lo+H.lo·Xi+1.hi
	vpmsumd		$Xh3,$t1, $H21h		# H.hi·Xi+1.hi

	b		Ltail_4x

.align	4
Lone:
	le?vperm	$IN0,$IN0,$IN0,$lemask

	vsldoi		$H4l,$zero,$H,8
	vmr		$H4, $H
	vsldoi		$H4h,$H,$zero,8

	vxor		$Xh,$IN0,$Xl
	vxor		$Xl3,$Xl3,$Xl3
	vxor		$Xm3,$Xm3,$Xm3
	vxor		$Xh3,$Xh3,$Xh3

	b		Ltail_4x
Ldone_4x:
	le?vperm	$Xl,$Xl,$Xl,$lemask
	stvx_u		$Xl,0,$Xip		# write out Xi

	li		r10,`15+6*$SIZE_T`
	li		r11,`31+6*$SIZE_T`
	mtspr		256,$vrsave
	lvx		v20,r10,$sp
	addi		r10,r10,32
	lvx		v21,r11,$sp
	addi		r11,r11,32
	lvx		v22,r10,$sp
	addi		r10,r10,32
	lvx		v23,r11,$sp
	addi		r11,r11,32
	lvx		v24,r10,$sp
	addi		r10,r10,32
	lvx		v25,r11,$sp
	addi		r11,r11,32
	lvx		v26,r10,$sp
	addi		r10,r10,32
	lvx		v27,r11,$sp
	addi		r11,r11,32
	lvx		v28,r10,$sp
	addi		r10,r10,32
	lvx		v29,r11,$sp
	addi		r11,r11,32
	lvx		v30,r10,$sp
	lvx		v31,r11,$sp
	addi		$sp,$sp,$FRAME
	blr
	.long		0
	.byte		0,12,0x04,0,0x80,0,4,0
	.long		0
___
}
$code.=<<___;
.size	.gcm_ghash_p8,.-.gcm_ghash_p8

.asciz	"GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	if ($flavour =~ /le$/o) {	# little-endian
	    s/le\?//o		or
	    s/be\?/#be#/o;
	} else {
	    s/le\?/#le#/o	or
	    s/be\?//o;
	}
	print $_,"\n";
}

close STDOUT;		# enforce flush