chacha-loongarch64.pl

#! /usr/bin/env perl
# Author: Min Zhou <zhoumin@loongson.cn>
# Copyright 2023-2024 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

use strict;

my $code;

# Here is the scalar register layout for LoongArch.
my ($zero,$ra,$tp,$sp,$fp)=map("\$r$_",(0..3,22));
my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$r$_",(4..11));
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$x)=map("\$r$_",(12..21));
my ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8)=map("\$r$_",(23..31));

# The saved floating-point registers in the LP64D ABI. In LoongArch
# with vector extension, the low 64 bits of a vector register alias with
# the corresponding FPR. So we must save and restore the corresponding
# FPR if we'll write into a vector register. The ABI only requires
# saving and restoring the FPR (i.e. 64 bits of the corresponding vector
# register), not the entire vector register.
my ($fs0,$fs1,$fs2,$fs3,$fs4,$fs5,$fs6,$fs7)=map("\$f$_",(24..31));

# Here is the 128-bit vector register layout for LSX extension.
my ($vr0,$vr1,$vr2,$vr3,$vr4,$vr5,$vr6,$vr7,$vr8,$vr9,$vr10,
    $vr11,$vr12,$vr13,$vr14,$vr15,$vr16,$vr17,$vr18,$vr19,
    $vr20,$vr21,$vr22,$vr23,$vr24,$vr25,$vr26,$vr27,$vr28,
    $vr29,$vr30,$vr31)=map("\$vr$_",(0..31));

# Here is the 256-bit vector register layout for LASX extension.
my ($xr0,$xr1,$xr2,$xr3,$xr4,$xr5,$xr6,$xr7,$xr8,$xr9,$xr10,
    $xr11,$xr12,$xr13,$xr14,$xr15,$xr16,$xr17,$xr18,$xr19,
    $xr20,$xr21,$xr22,$xr23,$xr24,$xr25,$xr26,$xr27,$xr28,
    $xr29,$xr30,$xr31)=map("\$xr$_",(0..31));

my $output;
for (@ARGV) { $output=$_ if (/\w[\w\-]*\.\w+$/); }
open STDOUT,">$output";

# Input parameter block
my ($out, $inp, $len, $key, $counter) = ($a0, $a1, $a2, $a3, $a4);
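# The generated routine implements the usual OpenSSL ChaCha entry point;
# its C prototype is roughly the following (a sketch for orientation only,
# see the OpenSSL chacha headers for the authoritative declaration):
#   void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                       size_t len, const unsigned int key[8],
#                       const unsigned int counter[4]);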
  38. $code .= <<EOF;
  39. #include "loongarch_arch.h"
  40. .text
  41. .extern OPENSSL_loongarch_hwcap_P
  42. .align 6
  43. .Lsigma:
  44. .ascii "expand 32-byte k"
  45. .Linc8x:
  46. .long 0,1,2,3,4,5,6,7
  47. .Linc4x:
  48. .long 0,1,2,3
  49. .globl ChaCha20_ctr32
  50. .type ChaCha20_ctr32 function
  51. .align 6
  52. ChaCha20_ctr32:
  53. # $a0 = arg #1 (out pointer)
  54. # $a1 = arg #2 (inp pointer)
  55. # $a2 = arg #3 (len)
  56. # $a3 = arg #4 (key array)
  57. # $a4 = arg #5 (counter array)
  58. beqz $len,.Lno_data
  59. ori $t3,$zero,64
  60. la.pcrel $t0,OPENSSL_loongarch_hwcap_P
  61. ld.w $t0,$t0,0
  62. bleu $len,$t3,.LChaCha20_1x # goto 1x when len <= 64
  63. andi $t0,$t0,LOONGARCH_HWCAP_LASX | LOONGARCH_HWCAP_LSX
  64. beqz $t0,.LChaCha20_1x
  65. addi.d $sp,$sp,-64
  66. fst.d $fs0,$sp,0
  67. fst.d $fs1,$sp,8
  68. fst.d $fs2,$sp,16
  69. fst.d $fs3,$sp,24
  70. fst.d $fs4,$sp,32
  71. fst.d $fs5,$sp,40
  72. fst.d $fs6,$sp,48
  73. fst.d $fs7,$sp,56
  74. andi $t1,$t0,LOONGARCH_HWCAP_LASX
  75. bnez $t1,.LChaCha20_8x
  76. b .LChaCha20_4x
  77. EOF
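# Dispatch summary: requests of at most 64 bytes, or CPUs without LSX/LASX
# support (per OPENSSL_loongarch_hwcap_P), take the scalar 1x path; otherwise
# the callee-saved FPRs are spilled and control goes to the 8x LASX path when
# LASX is available, or to the 4x LSX path.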
########################################################################
# Scalar code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly
my @x = ($t0, $t1, $t2, $t3, $t4, $t5, $t6, $t7,
         $s0, $s1, $s2, $s3, $s4, $s5, $s6, $s7);

sub ROUND {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
add.w @x[$a0],@x[$a0],@x[$b0]
xor @x[$d0],@x[$d0],@x[$a0]
rotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
add.w @x[$a1],@x[$a1],@x[$b1]
xor @x[$d1],@x[$d1],@x[$a1]
rotri.w @x[$d1],@x[$d1],16
add.w @x[$c0],@x[$c0],@x[$d0]
xor @x[$b0],@x[$b0],@x[$c0]
rotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
add.w @x[$c1],@x[$c1],@x[$d1]
xor @x[$b1],@x[$b1],@x[$c1]
rotri.w @x[$b1],@x[$b1],20
add.w @x[$a0],@x[$a0],@x[$b0]
xor @x[$d0],@x[$d0],@x[$a0]
rotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
add.w @x[$a1],@x[$a1],@x[$b1]
xor @x[$d1],@x[$d1],@x[$a1]
rotri.w @x[$d1],@x[$d1],24
add.w @x[$c0],@x[$c0],@x[$d0]
xor @x[$b0],@x[$b0],@x[$c0]
rotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
add.w @x[$c1],@x[$c1],@x[$d1]
xor @x[$b1],@x[$b1],@x[$c1]
rotri.w @x[$b1],@x[$b1],25
add.w @x[$a2],@x[$a2],@x[$b2]
xor @x[$d2],@x[$d2],@x[$a2]
rotri.w @x[$d2],@x[$d2],16
add.w @x[$a3],@x[$a3],@x[$b3]
xor @x[$d3],@x[$d3],@x[$a3]
rotri.w @x[$d3],@x[$d3],16
add.w @x[$c2],@x[$c2],@x[$d2]
xor @x[$b2],@x[$b2],@x[$c2]
rotri.w @x[$b2],@x[$b2],20
add.w @x[$c3],@x[$c3],@x[$d3]
xor @x[$b3],@x[$b3],@x[$c3]
rotri.w @x[$b3],@x[$b3],20
add.w @x[$a2],@x[$a2],@x[$b2]
xor @x[$d2],@x[$d2],@x[$a2]
rotri.w @x[$d2],@x[$d2],24
add.w @x[$a3],@x[$a3],@x[$b3]
xor @x[$d3],@x[$d3],@x[$a3]
rotri.w @x[$d3],@x[$d3],24
add.w @x[$c2],@x[$c2],@x[$d2]
xor @x[$b2],@x[$b2],@x[$c2]
rotri.w @x[$b2],@x[$b2],25
add.w @x[$c3],@x[$c3],@x[$d3]
xor @x[$b3],@x[$b3],@x[$c3]
rotri.w @x[$b3],@x[$b3],25
EOF
}
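# For reference only (not used by the generator): a plain-Perl sketch of one
# ChaCha quarter-round on 32-bit words. It illustrates why ROUND's rotri.w
# amounts of 16/20/24/25 (rotate right) match ChaCha's left rotations by
# 16/12/8/7, since a right rotation by 32-n equals a left rotation by n.
sub quarter_round_ref {
    my ($a, $b, $c, $d) = @_;
    # Rotate a 32-bit value left by $n bits (1 <= $n <= 31).
    my $rotl = sub { my ($v, $n) = @_; (($v << $n) | ($v >> (32 - $n))) & 0xffffffff; };
    $a = ($a + $b) & 0xffffffff; $d = $rotl->($d ^ $a, 16);
    $c = ($c + $d) & 0xffffffff; $b = $rotl->($b ^ $c, 12);
    $a = ($a + $b) & 0xffffffff; $d = $rotl->($d ^ $a, 8);
    $c = ($c + $d) & 0xffffffff; $b = $rotl->($b ^ $c, 7);
    return ($a, $b, $c, $d);
}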
$code .= <<EOF;
.align 6
.LChaCha20_1x:
addi.d $sp,$sp,-256
st.d $s0,$sp,0
st.d $s1,$sp,8
st.d $s2,$sp,16
st.d $s3,$sp,24
st.d $s4,$sp,32
st.d $s5,$sp,40
st.d $s6,$sp,48
st.d $s7,$sp,56
st.d $s8,$sp,64
# Save the initial block counter in $s8
ld.w $s8,$counter,0
b .Loop_outer_1x
.align 5
.Loop_outer_1x:
# Load constants
la.local $t8,.Lsigma
ld.w @x[0],$t8,0 # 'expa'
ld.w @x[1],$t8,4 # 'nd 3'
ld.w @x[2],$t8,8 # '2-by'
ld.w @x[3],$t8,12 # 'te k'
# Load key
ld.w @x[4],$key,4*0
ld.w @x[5],$key,4*1
ld.w @x[6],$key,4*2
ld.w @x[7],$key,4*3
ld.w @x[8],$key,4*4
ld.w @x[9],$key,4*5
ld.w @x[10],$key,4*6
ld.w @x[11],$key,4*7
# Load block counter
move @x[12],$s8
# Load nonce
ld.w @x[13],$counter,4*1
ld.w @x[14],$counter,4*2
ld.w @x[15],$counter,4*3
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_1x
.align 5
.Loop_1x:
EOF
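# The two ROUND calls below emit the loop body: (0, 4, 8, 12) generates the
# four column quarter-rounds and (0, 5, 10, 15) the four diagonal ones, so a
# single pass of .Loop_1x is one ChaCha double round and the 10 iterations
# give the full 20 rounds. The 4x and 8x vector paths follow the same pattern.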
&ROUND (0, 4, 8, 12);
&ROUND (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_1x
# Get the final states by adding the initial states
la.local $t8,.Lsigma
ld.w $a7,$t8,4*0
ld.w $a6,$t8,4*1
ld.w $a5,$t8,4*2
add.w @x[0],@x[0],$a7
add.w @x[1],@x[1],$a6
add.w @x[2],@x[2],$a5
ld.w $a7,$t8,4*3
add.w @x[3],@x[3],$a7
ld.w $t8,$key,4*0
ld.w $a7,$key,4*1
ld.w $a6,$key,4*2
ld.w $a5,$key,4*3
add.w @x[4],@x[4],$t8
add.w @x[5],@x[5],$a7
add.w @x[6],@x[6],$a6
add.w @x[7],@x[7],$a5
ld.w $t8,$key,4*4
ld.w $a7,$key,4*5
ld.w $a6,$key,4*6
ld.w $a5,$key,4*7
add.w @x[8],@x[8],$t8
add.w @x[9],@x[9],$a7
add.w @x[10],@x[10],$a6
add.w @x[11],@x[11],$a5
add.w @x[12],@x[12],$s8
ld.w $t8,$counter,4*1
ld.w $a7,$counter,4*2
ld.w $a6,$counter,4*3
add.w @x[13],@x[13],$t8
add.w @x[14],@x[14],$a7
add.w @x[15],@x[15],$a6
ori $t8,$zero,64
bltu $len,$t8,.Ltail_1x
# Get the encrypted message by xor states with plaintext
ld.w $t8,$inp,4*0
ld.w $a7,$inp,4*1
ld.w $a6,$inp,4*2
ld.w $a5,$inp,4*3
xor $t8,$t8,@x[0]
xor $a7,$a7,@x[1]
xor $a6,$a6,@x[2]
xor $a5,$a5,@x[3]
st.w $t8,$out,4*0
st.w $a7,$out,4*1
st.w $a6,$out,4*2
st.w $a5,$out,4*3
ld.w $t8,$inp,4*4
ld.w $a7,$inp,4*5
ld.w $a6,$inp,4*6
ld.w $a5,$inp,4*7
xor $t8,$t8,@x[4]
xor $a7,$a7,@x[5]
xor $a6,$a6,@x[6]
xor $a5,$a5,@x[7]
st.w $t8,$out,4*4
st.w $a7,$out,4*5
st.w $a6,$out,4*6
st.w $a5,$out,4*7
ld.w $t8,$inp,4*8
ld.w $a7,$inp,4*9
ld.w $a6,$inp,4*10
ld.w $a5,$inp,4*11
xor $t8,$t8,@x[8]
xor $a7,$a7,@x[9]
xor $a6,$a6,@x[10]
xor $a5,$a5,@x[11]
st.w $t8,$out,4*8
st.w $a7,$out,4*9
st.w $a6,$out,4*10
st.w $a5,$out,4*11
ld.w $t8,$inp,4*12
ld.w $a7,$inp,4*13
ld.w $a6,$inp,4*14
ld.w $a5,$inp,4*15
xor $t8,$t8,@x[12]
xor $a7,$a7,@x[13]
xor $a6,$a6,@x[14]
xor $a5,$a5,@x[15]
st.w $t8,$out,4*12
st.w $a7,$out,4*13
st.w $a6,$out,4*14
st.w $a5,$out,4*15
addi.d $len,$len,-64
beqz $len,.Ldone_1x
addi.d $inp,$inp,64
addi.d $out,$out,64
addi.w $s8,$s8,1
b .Loop_outer_1x
.align 4
.Ltail_1x:
# Handle the tail for 1x (1 <= tail_len <= 63)
addi.d $a7,$sp,72
st.w @x[0],$a7,4*0
st.w @x[1],$a7,4*1
st.w @x[2],$a7,4*2
st.w @x[3],$a7,4*3
st.w @x[4],$a7,4*4
st.w @x[5],$a7,4*5
st.w @x[6],$a7,4*6
st.w @x[7],$a7,4*7
st.w @x[8],$a7,4*8
st.w @x[9],$a7,4*9
st.w @x[10],$a7,4*10
st.w @x[11],$a7,4*11
st.w @x[12],$a7,4*12
st.w @x[13],$a7,4*13
st.w @x[14],$a7,4*14
st.w @x[15],$a7,4*15
move $t8,$zero
.Loop_tail_1x:
# Xor input with states byte by byte
ldx.bu $a6,$inp,$t8
ldx.bu $a5,$a7,$t8
xor $a6,$a6,$a5
stx.b $a6,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_1x
b .Ldone_1x
.Ldone_1x:
ld.d $s0,$sp,0
ld.d $s1,$sp,8
ld.d $s2,$sp,16
ld.d $s3,$sp,24
ld.d $s4,$sp,32
ld.d $s5,$sp,40
ld.d $s6,$sp,48
ld.d $s7,$sp,56
ld.d $s8,$sp,64
addi.d $sp,$sp,256
b .Lend
EOF
}

########################################################################
# 128-bit LSX code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly.
my @x = ($vr0, $vr1, $vr2, $vr3, $vr4, $vr5, $vr6, $vr7,
         $vr8, $vr9, $vr10, $vr11, $vr12, $vr13, $vr14, $vr15);
# Save the initial states in array @y[*]
my @y = ($vr16, $vr17, $vr18, $vr19, $vr20, $vr21, $vr22, $vr23,
         $vr24, $vr25, $vr26, $vr27, $vr28, $vr29, $vr30, $vr31);

sub ROUND_4x {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
vadd.w @x[$a0],@x[$a0],@x[$b0]
vxor.v @x[$d0],@x[$d0],@x[$a0]
vrotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
vadd.w @x[$a1],@x[$a1],@x[$b1]
vxor.v @x[$d1],@x[$d1],@x[$a1]
vrotri.w @x[$d1],@x[$d1],16
vadd.w @x[$c0],@x[$c0],@x[$d0]
vxor.v @x[$b0],@x[$b0],@x[$c0]
vrotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
vadd.w @x[$c1],@x[$c1],@x[$d1]
vxor.v @x[$b1],@x[$b1],@x[$c1]
vrotri.w @x[$b1],@x[$b1],20
vadd.w @x[$a0],@x[$a0],@x[$b0]
vxor.v @x[$d0],@x[$d0],@x[$a0]
vrotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
vadd.w @x[$a1],@x[$a1],@x[$b1]
vxor.v @x[$d1],@x[$d1],@x[$a1]
vrotri.w @x[$d1],@x[$d1],24
vadd.w @x[$c0],@x[$c0],@x[$d0]
vxor.v @x[$b0],@x[$b0],@x[$c0]
vrotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
vadd.w @x[$c1],@x[$c1],@x[$d1]
vxor.v @x[$b1],@x[$b1],@x[$c1]
vrotri.w @x[$b1],@x[$b1],25
vadd.w @x[$a2],@x[$a2],@x[$b2]
vxor.v @x[$d2],@x[$d2],@x[$a2]
vrotri.w @x[$d2],@x[$d2],16
vadd.w @x[$a3],@x[$a3],@x[$b3]
vxor.v @x[$d3],@x[$d3],@x[$a3]
vrotri.w @x[$d3],@x[$d3],16
vadd.w @x[$c2],@x[$c2],@x[$d2]
vxor.v @x[$b2],@x[$b2],@x[$c2]
vrotri.w @x[$b2],@x[$b2],20
vadd.w @x[$c3],@x[$c3],@x[$d3]
vxor.v @x[$b3],@x[$b3],@x[$c3]
vrotri.w @x[$b3],@x[$b3],20
vadd.w @x[$a2],@x[$a2],@x[$b2]
vxor.v @x[$d2],@x[$d2],@x[$a2]
vrotri.w @x[$d2],@x[$d2],24
vadd.w @x[$a3],@x[$a3],@x[$b3]
vxor.v @x[$d3],@x[$d3],@x[$a3]
vrotri.w @x[$d3],@x[$d3],24
vadd.w @x[$c2],@x[$c2],@x[$d2]
vxor.v @x[$b2],@x[$b2],@x[$c2]
vrotri.w @x[$b2],@x[$b2],25
vadd.w @x[$c3],@x[$c3],@x[$d3]
vxor.v @x[$b3],@x[$b3],@x[$c3]
vrotri.w @x[$b3],@x[$b3],25
EOF
}

$code .= <<EOF;
.align 6
.LChaCha20_4x:
addi.d $sp,$sp,-128
# Save the initial block counter in $t4
ld.w $t4,$counter,0
b .Loop_outer_4x
.align 5
.Loop_outer_4x:
# Load constant
la.local $t8,.Lsigma
vldrepl.w @x[0],$t8,4*0 # 'expa'
vldrepl.w @x[1],$t8,4*1 # 'nd 3'
vldrepl.w @x[2],$t8,4*2 # '2-by'
vldrepl.w @x[3],$t8,4*3 # 'te k'
# Load key
vldrepl.w @x[4],$key,4*0
vldrepl.w @x[5],$key,4*1
vldrepl.w @x[6],$key,4*2
vldrepl.w @x[7],$key,4*3
vldrepl.w @x[8],$key,4*4
vldrepl.w @x[9],$key,4*5
vldrepl.w @x[10],$key,4*6
vldrepl.w @x[11],$key,4*7
# Load block counter
vreplgr2vr.w @x[12],$t4
# Load nonce
vldrepl.w @x[13],$counter,4*1
vldrepl.w @x[14],$counter,4*2
vldrepl.w @x[15],$counter,4*3
# Get the correct block counter for each block
la.local $t8,.Linc4x
vld @y[0],$t8,0
vadd.w @x[12],@x[12],@y[0]
# Copy the initial states from \@x[*] to \@y[*]
vori.b @y[0],@x[0],0
vori.b @y[1],@x[1],0
vori.b @y[2],@x[2],0
vori.b @y[3],@x[3],0
vori.b @y[4],@x[4],0
vori.b @y[5],@x[5],0
vori.b @y[6],@x[6],0
vori.b @y[7],@x[7],0
vori.b @y[8],@x[8],0
vori.b @y[9],@x[9],0
vori.b @y[10],@x[10],0
vori.b @y[11],@x[11],0
vori.b @y[12],@x[12],0
vori.b @y[13],@x[13],0
vori.b @y[14],@x[14],0
vori.b @y[15],@x[15],0
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_4x
.align 5
.Loop_4x:
EOF
&ROUND_4x (0, 4, 8, 12);
&ROUND_4x (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_4x
# Get the final states by adding the initial states
vadd.w @x[0],@x[0],@y[0]
vadd.w @x[1],@x[1],@y[1]
vadd.w @x[2],@x[2],@y[2]
vadd.w @x[3],@x[3],@y[3]
vadd.w @x[4],@x[4],@y[4]
vadd.w @x[5],@x[5],@y[5]
vadd.w @x[6],@x[6],@y[6]
vadd.w @x[7],@x[7],@y[7]
vadd.w @x[8],@x[8],@y[8]
vadd.w @x[9],@x[9],@y[9]
vadd.w @x[10],@x[10],@y[10]
vadd.w @x[11],@x[11],@y[11]
vadd.w @x[12],@x[12],@y[12]
vadd.w @x[13],@x[13],@y[13]
vadd.w @x[14],@x[14],@y[14]
vadd.w @x[15],@x[15],@y[15]
# Get the transpose of \@x[*] and save them in \@x[*]
vilvl.w @y[0],@x[1],@x[0]
vilvh.w @y[1],@x[1],@x[0]
vilvl.w @y[2],@x[3],@x[2]
vilvh.w @y[3],@x[3],@x[2]
vilvl.w @y[4],@x[5],@x[4]
vilvh.w @y[5],@x[5],@x[4]
vilvl.w @y[6],@x[7],@x[6]
vilvh.w @y[7],@x[7],@x[6]
vilvl.w @y[8],@x[9],@x[8]
vilvh.w @y[9],@x[9],@x[8]
vilvl.w @y[10],@x[11],@x[10]
vilvh.w @y[11],@x[11],@x[10]
vilvl.w @y[12],@x[13],@x[12]
vilvh.w @y[13],@x[13],@x[12]
vilvl.w @y[14],@x[15],@x[14]
vilvh.w @y[15],@x[15],@x[14]
vilvl.d @x[0],@y[2],@y[0]
vilvh.d @x[1],@y[2],@y[0]
vilvl.d @x[2],@y[3],@y[1]
vilvh.d @x[3],@y[3],@y[1]
vilvl.d @x[4],@y[6],@y[4]
vilvh.d @x[5],@y[6],@y[4]
vilvl.d @x[6],@y[7],@y[5]
vilvh.d @x[7],@y[7],@y[5]
vilvl.d @x[8],@y[10],@y[8]
vilvh.d @x[9],@y[10],@y[8]
vilvl.d @x[10],@y[11],@y[9]
vilvh.d @x[11],@y[11],@y[9]
vilvl.d @x[12],@y[14],@y[12]
vilvh.d @x[13],@y[14],@y[12]
vilvl.d @x[14],@y[15],@y[13]
vilvh.d @x[15],@y[15],@y[13]
EOF

# Adjust the order of elements in @x[*] for ease of use.
@x = (@x[0],@x[4],@x[8],@x[12],@x[1],@x[5],@x[9],@x[13],
      @x[2],@x[6],@x[10],@x[14],@x[3],@x[7],@x[11],@x[15]);
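# After the word/doubleword interleaves above and this reordering, @x[0..3]
# hold the full 64-byte keystream for the first block, @x[4..7] for the
# second, and so on, which is what lets the code below xor and store four
# contiguous 64-byte blocks with plain sequential loads.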
$code .= <<EOF;
ori $t8,$zero,64*4
bltu $len,$t8,.Ltail_4x
# Get the encrypted message by xor states with plaintext
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
vld @y[0],$inp,16*8
vld @y[1],$inp,16*9
vld @y[2],$inp,16*10
vld @y[3],$inp,16*11
vxor.v @y[0],@y[0],@x[8]
vxor.v @y[1],@y[1],@x[9]
vxor.v @y[2],@y[2],@x[10]
vxor.v @y[3],@y[3],@x[11]
vst @y[0],$out,16*8
vst @y[1],$out,16*9
vst @y[2],$out,16*10
vst @y[3],$out,16*11
vld @y[0],$inp,16*12
vld @y[1],$inp,16*13
vld @y[2],$inp,16*14
vld @y[3],$inp,16*15
vxor.v @y[0],@y[0],@x[12]
vxor.v @y[1],@y[1],@x[13]
vxor.v @y[2],@y[2],@x[14]
vxor.v @y[3],@y[3],@x[15]
vst @y[0],$out,16*12
vst @y[1],$out,16*13
vst @y[2],$out,16*14
vst @y[3],$out,16*15
addi.d $len,$len,-64*4
beqz $len,.Ldone_4x
addi.d $inp,$inp,64*4
addi.d $out,$out,64*4
addi.w $t4,$t4,4
b .Loop_outer_4x
.Ltail_4x:
# Handle the tail for 4x (1 <= tail_len <= 255)
ori $t8,$zero,192
bgeu $len,$t8,.L192_or_more4x
ori $t8,$zero,128
bgeu $len,$t8,.L128_or_more4x
ori $t8,$zero,64
bgeu $len,$t8,.L64_or_more4x
vst @x[0],$sp,16*0
vst @x[1],$sp,16*1
vst @x[2],$sp,16*2
vst @x[3],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L64_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
addi.d $len,$len,-64
beqz $len,.Ldone_4x
addi.d $inp,$inp,64
addi.d $out,$out,64
vst @x[4],$sp,16*0
vst @x[5],$sp,16*1
vst @x[6],$sp,16*2
vst @x[7],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L128_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
addi.d $len,$len,-128
beqz $len,.Ldone_4x
addi.d $inp,$inp,128
addi.d $out,$out,128
vst @x[8],$sp,16*0
vst @x[9],$sp,16*1
vst @x[10],$sp,16*2
vst @x[11],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L192_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
vld @y[0],$inp,16*8
vld @y[1],$inp,16*9
vld @y[2],$inp,16*10
vld @y[3],$inp,16*11
vxor.v @y[0],@y[0],@x[8]
vxor.v @y[1],@y[1],@x[9]
vxor.v @y[2],@y[2],@x[10]
vxor.v @y[3],@y[3],@x[11]
vst @y[0],$out,16*8
vst @y[1],$out,16*9
vst @y[2],$out,16*10
vst @y[3],$out,16*11
addi.d $len,$len,-192
beqz $len,.Ldone_4x
addi.d $inp,$inp,192
addi.d $out,$out,192
vst @x[12],$sp,16*0
vst @x[13],$sp,16*1
vst @x[14],$sp,16*2
vst @x[15],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.Loop_tail_4x:
# Xor input with states byte by byte
ldx.bu $t5,$inp,$t8
ldx.bu $t6,$sp,$t8
xor $t5,$t5,$t6
stx.b $t5,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_4x
b .Ldone_4x
.Ldone_4x:
addi.d $sp,$sp,128
b .Lrestore_saved_fpr
EOF
}

########################################################################
# 256-bit LASX code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly.
my @x = ($xr0, $xr1, $xr2, $xr3, $xr4, $xr5, $xr6, $xr7,
         $xr8, $xr9, $xr10, $xr11, $xr12, $xr13, $xr14, $xr15);
# Save the initial states in array @y[*]
my @y = ($xr16, $xr17, $xr18, $xr19, $xr20, $xr21, $xr22, $xr23,
         $xr24, $xr25, $xr26, $xr27, $xr28, $xr29, $xr30, $xr31);

sub ROUND_8x {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
xvadd.w @x[$a0],@x[$a0],@x[$b0]
xvxor.v @x[$d0],@x[$d0],@x[$a0]
xvrotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
xvadd.w @x[$a1],@x[$a1],@x[$b1]
xvxor.v @x[$d1],@x[$d1],@x[$a1]
xvrotri.w @x[$d1],@x[$d1],16
xvadd.w @x[$c0],@x[$c0],@x[$d0]
xvxor.v @x[$b0],@x[$b0],@x[$c0]
xvrotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
xvadd.w @x[$c1],@x[$c1],@x[$d1]
xvxor.v @x[$b1],@x[$b1],@x[$c1]
xvrotri.w @x[$b1],@x[$b1],20
xvadd.w @x[$a0],@x[$a0],@x[$b0]
xvxor.v @x[$d0],@x[$d0],@x[$a0]
xvrotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
xvadd.w @x[$a1],@x[$a1],@x[$b1]
xvxor.v @x[$d1],@x[$d1],@x[$a1]
xvrotri.w @x[$d1],@x[$d1],24
xvadd.w @x[$c0],@x[$c0],@x[$d0]
xvxor.v @x[$b0],@x[$b0],@x[$c0]
xvrotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
xvadd.w @x[$c1],@x[$c1],@x[$d1]
xvxor.v @x[$b1],@x[$b1],@x[$c1]
xvrotri.w @x[$b1],@x[$b1],25
xvadd.w @x[$a2],@x[$a2],@x[$b2]
xvxor.v @x[$d2],@x[$d2],@x[$a2]
xvrotri.w @x[$d2],@x[$d2],16
xvadd.w @x[$a3],@x[$a3],@x[$b3]
xvxor.v @x[$d3],@x[$d3],@x[$a3]
xvrotri.w @x[$d3],@x[$d3],16
xvadd.w @x[$c2],@x[$c2],@x[$d2]
xvxor.v @x[$b2],@x[$b2],@x[$c2]
xvrotri.w @x[$b2],@x[$b2],20
xvadd.w @x[$c3],@x[$c3],@x[$d3]
xvxor.v @x[$b3],@x[$b3],@x[$c3]
xvrotri.w @x[$b3],@x[$b3],20
xvadd.w @x[$a2],@x[$a2],@x[$b2]
xvxor.v @x[$d2],@x[$d2],@x[$a2]
xvrotri.w @x[$d2],@x[$d2],24
xvadd.w @x[$a3],@x[$a3],@x[$b3]
xvxor.v @x[$d3],@x[$d3],@x[$a3]
xvrotri.w @x[$d3],@x[$d3],24
xvadd.w @x[$c2],@x[$c2],@x[$d2]
xvxor.v @x[$b2],@x[$b2],@x[$c2]
xvrotri.w @x[$b2],@x[$b2],25
xvadd.w @x[$c3],@x[$c3],@x[$d3]
xvxor.v @x[$b3],@x[$b3],@x[$c3]
xvrotri.w @x[$b3],@x[$b3],25
EOF
}

$code .= <<EOF;
.align 6
.LChaCha20_8x:
addi.d $sp,$sp,-128
# Save the initial block counter in $t4
ld.w $t4,$counter,0
b .Loop_outer_8x
.align 5
.Loop_outer_8x:
# Load constant
la.local $t8,.Lsigma
xvldrepl.w @x[0],$t8,4*0 # 'expa'
xvldrepl.w @x[1],$t8,4*1 # 'nd 3'
xvldrepl.w @x[2],$t8,4*2 # '2-by'
xvldrepl.w @x[3],$t8,4*3 # 'te k'
# Load key
xvldrepl.w @x[4],$key,4*0
xvldrepl.w @x[5],$key,4*1
xvldrepl.w @x[6],$key,4*2
xvldrepl.w @x[7],$key,4*3
xvldrepl.w @x[8],$key,4*4
xvldrepl.w @x[9],$key,4*5
xvldrepl.w @x[10],$key,4*6
xvldrepl.w @x[11],$key,4*7
# Load block counter
xvreplgr2vr.w @x[12],$t4
# Load nonce
xvldrepl.w @x[13],$counter,4*1
xvldrepl.w @x[14],$counter,4*2
xvldrepl.w @x[15],$counter,4*3
# Get the correct block counter for each block
la.local $t8,.Linc8x
xvld @y[0],$t8,0
xvadd.w @x[12],@x[12],@y[0]
# Copy the initial states from \@x[*] to \@y[*]
xvori.b @y[0],@x[0],0
xvori.b @y[1],@x[1],0
xvori.b @y[2],@x[2],0
xvori.b @y[3],@x[3],0
xvori.b @y[4],@x[4],0
xvori.b @y[5],@x[5],0
xvori.b @y[6],@x[6],0
xvori.b @y[7],@x[7],0
xvori.b @y[8],@x[8],0
xvori.b @y[9],@x[9],0
xvori.b @y[10],@x[10],0
xvori.b @y[11],@x[11],0
xvori.b @y[12],@x[12],0
xvori.b @y[13],@x[13],0
xvori.b @y[14],@x[14],0
xvori.b @y[15],@x[15],0
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_8x
.align 5
.Loop_8x:
EOF
&ROUND_8x (0, 4, 8, 12);
&ROUND_8x (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_8x
# Get the final states by adding the initial states
xvadd.w @x[0],@x[0],@y[0]
xvadd.w @x[1],@x[1],@y[1]
xvadd.w @x[2],@x[2],@y[2]
xvadd.w @x[3],@x[3],@y[3]
xvadd.w @x[4],@x[4],@y[4]
xvadd.w @x[5],@x[5],@y[5]
xvadd.w @x[6],@x[6],@y[6]
xvadd.w @x[7],@x[7],@y[7]
xvadd.w @x[8],@x[8],@y[8]
xvadd.w @x[9],@x[9],@y[9]
xvadd.w @x[10],@x[10],@y[10]
xvadd.w @x[11],@x[11],@y[11]
xvadd.w @x[12],@x[12],@y[12]
xvadd.w @x[13],@x[13],@y[13]
xvadd.w @x[14],@x[14],@y[14]
xvadd.w @x[15],@x[15],@y[15]
# Get the transpose of \@x[*] and save them in \@y[*]
xvilvl.w @y[0],@x[1],@x[0]
xvilvh.w @y[1],@x[1],@x[0]
xvilvl.w @y[2],@x[3],@x[2]
xvilvh.w @y[3],@x[3],@x[2]
xvilvl.w @y[4],@x[5],@x[4]
xvilvh.w @y[5],@x[5],@x[4]
xvilvl.w @y[6],@x[7],@x[6]
xvilvh.w @y[7],@x[7],@x[6]
xvilvl.w @y[8],@x[9],@x[8]
xvilvh.w @y[9],@x[9],@x[8]
xvilvl.w @y[10],@x[11],@x[10]
xvilvh.w @y[11],@x[11],@x[10]
xvilvl.w @y[12],@x[13],@x[12]
xvilvh.w @y[13],@x[13],@x[12]
xvilvl.w @y[14],@x[15],@x[14]
xvilvh.w @y[15],@x[15],@x[14]
xvilvl.d @x[0],@y[2],@y[0]
xvilvh.d @x[1],@y[2],@y[0]
xvilvl.d @x[2],@y[3],@y[1]
xvilvh.d @x[3],@y[3],@y[1]
xvilvl.d @x[4],@y[6],@y[4]
xvilvh.d @x[5],@y[6],@y[4]
xvilvl.d @x[6],@y[7],@y[5]
xvilvh.d @x[7],@y[7],@y[5]
xvilvl.d @x[8],@y[10],@y[8]
xvilvh.d @x[9],@y[10],@y[8]
xvilvl.d @x[10],@y[11],@y[9]
xvilvh.d @x[11],@y[11],@y[9]
xvilvl.d @x[12],@y[14],@y[12]
xvilvh.d @x[13],@y[14],@y[12]
xvilvl.d @x[14],@y[15],@y[13]
xvilvh.d @x[15],@y[15],@y[13]
xvori.b @y[0],@x[4],0
xvpermi.q @y[0],@x[0],0x20
xvori.b @y[1],@x[5],0
xvpermi.q @y[1],@x[1],0x20
xvori.b @y[2],@x[6],0
xvpermi.q @y[2],@x[2],0x20
xvori.b @y[3],@x[7],0
xvpermi.q @y[3],@x[3],0x20
xvori.b @y[4],@x[4],0
xvpermi.q @y[4],@x[0],0x31
xvori.b @y[5],@x[5],0
xvpermi.q @y[5],@x[1],0x31
xvori.b @y[6],@x[6],0
xvpermi.q @y[6],@x[2],0x31
xvori.b @y[7],@x[7],0
xvpermi.q @y[7],@x[3],0x31
xvori.b @y[8],@x[12],0
xvpermi.q @y[8],@x[8],0x20
xvori.b @y[9],@x[13],0
xvpermi.q @y[9],@x[9],0x20
xvori.b @y[10],@x[14],0
xvpermi.q @y[10],@x[10],0x20
xvori.b @y[11],@x[15],0
xvpermi.q @y[11],@x[11],0x20
xvori.b @y[12],@x[12],0
xvpermi.q @y[12],@x[8],0x31
xvori.b @y[13],@x[13],0
xvpermi.q @y[13],@x[9],0x31
xvori.b @y[14],@x[14],0
xvpermi.q @y[14],@x[10],0x31
xvori.b @y[15],@x[15],0
xvpermi.q @y[15],@x[11],0x31
EOF

# Adjust the order of elements in @y[*] for ease of use.
@y = (@y[0],@y[8],@y[1],@y[9],@y[2],@y[10],@y[3],@y[11],
      @y[4],@y[12],@y[5],@y[13],@y[6],@y[14],@y[7],@y[15]);
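# After the xvpermi.q recombination above and this reordering, each pair
# @y[2*i], @y[2*i+1] holds the 64-byte keystream for block i (blocks 0..7 in
# order), so the code below can xor and store eight contiguous 64-byte blocks
# with plain sequential 32-byte loads.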
$code .= <<EOF;
ori $t8,$zero,64*8
bltu $len,$t8,.Ltail_8x
# Get the encrypted message by xor states with plaintext
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
xvld @x[0],$inp,32*12
xvld @x[1],$inp,32*13
xvld @x[2],$inp,32*14
xvld @x[3],$inp,32*15
xvxor.v @x[0],@x[0],@y[12]
xvxor.v @x[1],@x[1],@y[13]
xvxor.v @x[2],@x[2],@y[14]
xvxor.v @x[3],@x[3],@y[15]
xvst @x[0],$out,32*12
xvst @x[1],$out,32*13
xvst @x[2],$out,32*14
xvst @x[3],$out,32*15
addi.d $len,$len,-64*8
beqz $len,.Ldone_8x
addi.d $inp,$inp,64*8
addi.d $out,$out,64*8
addi.w $t4,$t4,8
b .Loop_outer_8x
.Ltail_8x:
# Handle the tail for 8x (1 <= tail_len <= 511)
ori $t8,$zero,448
bgeu $len,$t8,.L448_or_more8x
ori $t8,$zero,384
bgeu $len,$t8,.L384_or_more8x
ori $t8,$zero,320
bgeu $len,$t8,.L320_or_more8x
ori $t8,$zero,256
bgeu $len,$t8,.L256_or_more8x
ori $t8,$zero,192
bgeu $len,$t8,.L192_or_more8x
ori $t8,$zero,128
bgeu $len,$t8,.L128_or_more8x
ori $t8,$zero,64
bgeu $len,$t8,.L64_or_more8x
xvst @y[0],$sp,32*0
xvst @y[1],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L64_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
addi.d $len,$len,-64
beqz $len,.Ldone_8x
addi.d $inp,$inp,64
addi.d $out,$out,64
xvst @y[2],$sp,32*0
xvst @y[3],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L128_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
addi.d $len,$len,-128
beqz $len,.Ldone_8x
addi.d $inp,$inp,128
addi.d $out,$out,128
xvst @y[4],$sp,32*0
xvst @y[5],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L192_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
addi.d $len,$len,-192
beqz $len,.Ldone_8x
addi.d $inp,$inp,192
addi.d $out,$out,192
xvst @y[6],$sp,32*0
xvst @y[7],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L256_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
addi.d $len,$len,-256
beqz $len,.Ldone_8x
addi.d $inp,$inp,256
addi.d $out,$out,256
xvst @y[8],$sp,32*0
xvst @y[9],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L320_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
addi.d $len,$len,-320
beqz $len,.Ldone_8x
addi.d $inp,$inp,320
addi.d $out,$out,320
xvst @y[10],$sp,32*0
xvst @y[11],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L384_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
addi.d $len,$len,-384
beqz $len,.Ldone_8x
addi.d $inp,$inp,384
addi.d $out,$out,384
xvst @y[12],$sp,32*0
xvst @y[13],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L448_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
xvld @x[0],$inp,32*12
xvld @x[1],$inp,32*13
xvxor.v @x[0],@x[0],@y[12]
xvxor.v @x[1],@x[1],@y[13]
xvst @x[0],$out,32*12
xvst @x[1],$out,32*13
addi.d $len,$len,-448
beqz $len,.Ldone_8x
addi.d $inp,$inp,448
addi.d $out,$out,448
xvst @y[14],$sp,32*0
xvst @y[15],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.Loop_tail_8x:
# Xor input with states byte by byte
ldx.bu $t5,$inp,$t8
ldx.bu $t6,$sp,$t8
xor $t5,$t5,$t6
stx.b $t5,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_8x
b .Ldone_8x
.Ldone_8x:
addi.d $sp,$sp,128
b .Lrestore_saved_fpr
EOF
}

$code .= <<EOF;
.Lrestore_saved_fpr:
fld.d $fs0,$sp,0
fld.d $fs1,$sp,8
fld.d $fs2,$sp,16
fld.d $fs3,$sp,24
fld.d $fs4,$sp,32
fld.d $fs5,$sp,40
fld.d $fs6,$sp,48
fld.d $fs7,$sp,56
addi.d $sp,$sp,64
.Lno_data:
.Lend:
jr $ra
.size ChaCha20_ctr32,.-ChaCha20_ctr32
EOF

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;