
#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023-2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Basic Bit-manipulation extension ('Zbb')
# - RISC-V Zicclsm (main memory supports misaligned loads/stores)
# Optional:
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
use strict;
use warnings;

use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

my $use_zvkb = $flavour && $flavour =~ /zvkb/i ? 1 : 0;
my $isaext = "_v_zbb" . ( $use_zvkb ? "_zvkb" : "" );

$output and open STDOUT, ">$output";
my $code = <<___;
.text
___

# void ChaCha20_ctr32@{[$isaext]}(unsigned char *out, const unsigned char *inp,
#                                 size_t len, const unsigned int key[8],
#                                 const unsigned int counter[4]);
################################################################################
my ( $OUTPUT, $INPUT, $LEN, $KEY, $COUNTER ) = ( "a0", "a1", "a2", "a3", "a4" );
my ( $CONST_DATA0, $CONST_DATA1, $CONST_DATA2, $CONST_DATA3 ) = ( "a5", "a6",
    "a7", "s0" );
my ( $KEY0, $KEY1, $KEY2, $KEY3, $KEY4, $KEY5, $KEY6, $KEY7, $COUNTER0,
    $COUNTER1, $NONCE0, $NONCE1 ) = ( "s1", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t0" );
my ( $STATE0, $STATE1, $STATE2, $STATE3,
     $STATE4, $STATE5, $STATE6, $STATE7,
     $STATE8, $STATE9, $STATE10, $STATE11,
     $STATE12, $STATE13, $STATE14, $STATE15 ) = (
     $CONST_DATA0, $CONST_DATA1, $CONST_DATA2, $CONST_DATA3,
     $KEY0, $KEY1, $KEY2, $KEY3,
     $KEY4, $KEY5, $KEY6, $KEY7,
     $COUNTER0, $COUNTER1, $NONCE0, $NONCE1 );
my ( $VL ) = ( "t1" );
my ( $CURRENT_COUNTER ) = ( "t2" );
my ( $T0 ) = ( "t3" );
my ( $T1 ) = ( "t4" );
my ( $T2 ) = ( "t5" );
my ( $T3 ) = ( "t6" );
my (
    $V0,  $V1,  $V2,  $V3,  $V4,  $V5,  $V6,  $V7,  $V8,  $V9,  $V10,
    $V11, $V12, $V13, $V14, $V15, $V16, $V17, $V18, $V19, $V20, $V21,
    $V22, $V23, $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
) = map( "v$_", ( 0 .. 31 ) );
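
# The generated code runs the ChaCha20 block function two ways at once: each
# lane of the vector registers holds the state of one block (one pass produces
# VL blocks), while the scalar registers $STATE0~$STATE15 carry one extra
# block through the same rounds. The scalar block's keystream is spilled to
# the stack and applied in .Lscalar_data_loop, which also covers a partial
# tail block.
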
sub chacha_sub_round {
    my (
        $A0, $B0, $C0,
        $A1, $B1, $C1,
        $A2, $B2, $C2,
        $A3, $B3, $C3,
        $S_A0, $S_B0, $S_C0,
        $S_A1, $S_B1, $S_C1,
        $S_A2, $S_B2, $S_C2,
        $S_A3, $S_B3, $S_C3,
        $ROL_SHIFT,
        $V_T0, $V_T1, $V_T2, $V_T3,
    ) = @_;
    # a += b; c ^= a;
    my $code = <<___;
    @{[vadd_vv $A0, $A0, $B0]}
    add $S_A0, $S_A0, $S_B0
    @{[vadd_vv $A1, $A1, $B1]}
    add $S_A1, $S_A1, $S_B1
    @{[vadd_vv $A2, $A2, $B2]}
    add $S_A2, $S_A2, $S_B2
    @{[vadd_vv $A3, $A3, $B3]}
    add $S_A3, $S_A3, $S_B3
    @{[vxor_vv $C0, $C0, $A0]}
    xor $S_C0, $S_C0, $S_A0
    @{[vxor_vv $C1, $C1, $A1]}
    xor $S_C1, $S_C1, $S_A1
    @{[vxor_vv $C2, $C2, $A2]}
    xor $S_C2, $S_C2, $S_A2
    @{[vxor_vv $C3, $C3, $A3]}
    xor $S_C3, $S_C3, $S_A3
___

    # c <<<= $ROL_SHIFT;
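    # In both variants the scalar block rotates with the Zbb roriw, expressing
    # rotate-left by s as rotate-right by 32 - s. With Zvkb, the vector lanes
    # rotate directly with vror.vi; without it, the vector rotate is built from
    # a shift-left, a shift-right and an OR via the spare registers
    # $V_T0~$V_T3.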
    if ($use_zvkb) {
        my $ror_part = <<___;
    @{[vror_vi $C0, $C0, 32 - $ROL_SHIFT]}
    @{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
    @{[vror_vi $C1, $C1, 32 - $ROL_SHIFT]}
    @{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
    @{[vror_vi $C2, $C2, 32 - $ROL_SHIFT]}
    @{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
    @{[vror_vi $C3, $C3, 32 - $ROL_SHIFT]}
    @{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
        $code .= $ror_part;
    } else {
        my $ror_part = <<___;
    @{[vsll_vi $V_T0, $C0, $ROL_SHIFT]}
    @{[vsll_vi $V_T1, $C1, $ROL_SHIFT]}
    @{[vsll_vi $V_T2, $C2, $ROL_SHIFT]}
    @{[vsll_vi $V_T3, $C3, $ROL_SHIFT]}
    @{[vsrl_vi $C0, $C0, 32 - $ROL_SHIFT]}
    @{[vsrl_vi $C1, $C1, 32 - $ROL_SHIFT]}
    @{[vsrl_vi $C2, $C2, 32 - $ROL_SHIFT]}
    @{[vsrl_vi $C3, $C3, 32 - $ROL_SHIFT]}
    @{[vor_vv $C0, $C0, $V_T0]}
    @{[roriw $S_C0, $S_C0, 32 - $ROL_SHIFT]}
    @{[vor_vv $C1, $C1, $V_T1]}
    @{[roriw $S_C1, $S_C1, 32 - $ROL_SHIFT]}
    @{[vor_vv $C2, $C2, $V_T2]}
    @{[roriw $S_C2, $S_C2, 32 - $ROL_SHIFT]}
    @{[vor_vv $C3, $C3, $V_T3]}
    @{[roriw $S_C3, $S_C3, 32 - $ROL_SHIFT]}
___
        $code .= $ror_part;
    }

    return $code;
}
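
# chacha_quad_round_group emits one full ChaCha round: the four quarter-round
# steps (add/xor/rotate by 16, 12, 8, 7) applied to the four (a, b, c, d)
# tuples passed in, for both the vector lanes and the scalar block. Calling it
# once with the column indices and once with the diagonal indices gives one
# double round.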
sub chacha_quad_round_group {
    my (
        $A0, $B0, $C0, $D0,
        $A1, $B1, $C1, $D1,
        $A2, $B2, $C2, $D2,
        $A3, $B3, $C3, $D3,
        $S_A0, $S_B0, $S_C0, $S_D0,
        $S_A1, $S_B1, $S_C1, $S_D1,
        $S_A2, $S_B2, $S_C2, $S_D2,
        $S_A3, $S_B3, $S_C3, $S_D3,
        $V_T0, $V_T1, $V_T2, $V_T3,
    ) = @_;
    my $code = <<___;
    # a += b; d ^= a; d <<<= 16;
    @{[chacha_sub_round
      $A0, $B0, $D0,
      $A1, $B1, $D1,
      $A2, $B2, $D2,
      $A3, $B3, $D3,
      $S_A0, $S_B0, $S_D0,
      $S_A1, $S_B1, $S_D1,
      $S_A2, $S_B2, $S_D2,
      $S_A3, $S_B3, $S_D3,
      16,
      $V_T0, $V_T1, $V_T2, $V_T3]}
    # c += d; b ^= c; b <<<= 12;
    @{[chacha_sub_round
      $C0, $D0, $B0,
      $C1, $D1, $B1,
      $C2, $D2, $B2,
      $C3, $D3, $B3,
      $S_C0, $S_D0, $S_B0,
      $S_C1, $S_D1, $S_B1,
      $S_C2, $S_D2, $S_B2,
      $S_C3, $S_D3, $S_B3,
      12,
      $V_T0, $V_T1, $V_T2, $V_T3]}
    # a += b; d ^= a; d <<<= 8;
    @{[chacha_sub_round
      $A0, $B0, $D0,
      $A1, $B1, $D1,
      $A2, $B2, $D2,
      $A3, $B3, $D3,
      $S_A0, $S_B0, $S_D0,
      $S_A1, $S_B1, $S_D1,
      $S_A2, $S_B2, $S_D2,
      $S_A3, $S_B3, $S_D3,
      8,
      $V_T0, $V_T1, $V_T2, $V_T3]}
    # c += d; b ^= c; b <<<= 7;
    @{[chacha_sub_round
      $C0, $D0, $B0,
      $C1, $D1, $B1,
      $C2, $D2, $B2,
      $C3, $D3, $B3,
      $S_C0, $S_D0, $S_B0,
      $S_C1, $S_D1, $S_B1,
      $S_C2, $S_D2, $S_B2,
      $S_C3, $S_D3, $S_B3,
      7,
      $V_T0, $V_T1, $V_T2, $V_T3]}
___

    return $code;
}

$code .= <<___;
.p2align 3
.globl ChaCha20_ctr32@{[$isaext]}
.type ChaCha20_ctr32@{[$isaext]},\@function
ChaCha20_ctr32@{[$isaext]}:
    addi sp, sp, -96
    sd s0, 0(sp)
    sd s1, 8(sp)
    sd s2, 16(sp)
    sd s3, 24(sp)
    sd s4, 32(sp)
    sd s5, 40(sp)
    sd s6, 48(sp)
    sd s7, 56(sp)
    sd s8, 64(sp)
    sd s9, 72(sp)
    sd s10, 80(sp)
    sd s11, 88(sp)
    addi sp, sp, -64
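    # The extra 64 bytes below sp hold the keystream of the scalar-computed
    # block; .Lscalar_data_loop later XORs it with the input.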
    lw $CURRENT_COUNTER, 0($COUNTER)

.Lblock_loop:
    # The scalar ALU processes one ChaCha block alongside the vector blocks.
    srli $T0, $LEN, 6
    @{[vsetvli $VL, $T0, "e32", "m1", "ta", "ma"]}
    slli $T1, $VL, 6
    bltu $T1, $LEN, 1f
    # The vector ALU would already cover all remaining data, so carve one
    # block out of it and leave that block to the scalar ALU.
    addi $T1, $VL, -1
    @{[vsetvli $VL, $T1, "e32", "m1", "ta", "ma"]}
1:
    #### chacha block data
    # init chacha const states into $V0~$V3
    # "expa" little endian
    li $CONST_DATA0, 0x61707865
    @{[vmv_v_x $V0, $CONST_DATA0]}
    # "nd 3" little endian
    li $CONST_DATA1, 0x3320646e
    @{[vmv_v_x $V1, $CONST_DATA1]}
    # "2-by" little endian
    li $CONST_DATA2, 0x79622d32
    @{[vmv_v_x $V2, $CONST_DATA2]}
    # "te k" little endian
    li $CONST_DATA3, 0x6b206574
    lw $KEY0, 0($KEY)
    @{[vmv_v_x $V3, $CONST_DATA3]}

    # init chacha key states into $V4~$V11
    lw $KEY1, 4($KEY)
    @{[vmv_v_x $V4, $KEY0]}
    lw $KEY2, 8($KEY)
    @{[vmv_v_x $V5, $KEY1]}
    lw $KEY3, 12($KEY)
    @{[vmv_v_x $V6, $KEY2]}
    lw $KEY4, 16($KEY)
    @{[vmv_v_x $V7, $KEY3]}
    lw $KEY5, 20($KEY)
    @{[vmv_v_x $V8, $KEY4]}
    lw $KEY6, 24($KEY)
    @{[vmv_v_x $V9, $KEY5]}
    lw $KEY7, 28($KEY)
    @{[vmv_v_x $V10, $KEY6]}
    @{[vmv_v_x $V11, $KEY7]}

    # init chacha counter states into $V12~$V13
    lw $COUNTER1, 4($COUNTER)
    @{[vid_v $V12]}
    lw $NONCE0, 8($COUNTER)
    @{[vadd_vx $V12, $V12, $CURRENT_COUNTER]}
    lw $NONCE1, 12($COUNTER)
    @{[vmv_v_x $V13, $COUNTER1]}
    add $COUNTER0, $CURRENT_COUNTER, $VL

    # init chacha nonce states into $V14~$V15
    @{[vmv_v_x $V14, $NONCE0]}
    @{[vmv_v_x $V15, $NONCE1]}
    li $T0, 64
    # load the top-half of input data into $V16~$V23
    @{[vlsseg_nf_e32_v 8, $V16, $INPUT, $T0]}
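    # The strided segment load (8 fields, e32, stride 64 bytes) de-interleaves
    # the blocks: element i of register $V16+j receives 32-bit word j of input
    # block i, so each vector register holds one state word across all blocks.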
    # So far in block_loop we have used:
    # - $V0~$V15 for chacha states.
    # - $V16~$V23 for top-half of input data.
    # - $V24~$V31 haven't been used yet.

    # 10 iterations of the double round = 20 ChaCha rounds
    li $T0, 10
.Lround_loop:
    # we can use $V24~$V31 as temporary registers in round_loop.
    addi $T0, $T0, -1
    @{[chacha_quad_round_group
      $V0, $V4, $V8, $V12,
      $V1, $V5, $V9, $V13,
      $V2, $V6, $V10, $V14,
      $V3, $V7, $V11, $V15,
      $STATE0, $STATE4, $STATE8, $STATE12,
      $STATE1, $STATE5, $STATE9, $STATE13,
      $STATE2, $STATE6, $STATE10, $STATE14,
      $STATE3, $STATE7, $STATE11, $STATE15,
      $V24, $V25, $V26, $V27]}
    @{[chacha_quad_round_group
      $V3, $V4, $V9, $V14,
      $V0, $V5, $V10, $V15,
      $V1, $V6, $V11, $V12,
      $V2, $V7, $V8, $V13,
      $STATE3, $STATE4, $STATE9, $STATE14,
      $STATE0, $STATE5, $STATE10, $STATE15,
      $STATE1, $STATE6, $STATE11, $STATE12,
      $STATE2, $STATE7, $STATE8, $STATE13,
      $V24, $V25, $V26, $V27]}
    bnez $T0, .Lround_loop
    li $T0, 64
    # load the bottom-half of input data into $V24~$V31
    addi $T1, $INPUT, 32
    @{[vlsseg_nf_e32_v 8, $V24, $T1, $T0]}
    # At this point there are no free vector registers left until the output
    # is stored.

    # add chacha top-half initial block states
    # "expa" little endian
    li $T0, 0x61707865
    @{[vadd_vx $V0, $V0, $T0]}
    add $STATE0, $STATE0, $T0
    # "nd 3" little endian
    li $T1, 0x3320646e
    @{[vadd_vx $V1, $V1, $T1]}
    add $STATE1, $STATE1, $T1
    lw $T0, 0($KEY)
    # "2-by" little endian
    li $T2, 0x79622d32
    @{[vadd_vx $V2, $V2, $T2]}
    add $STATE2, $STATE2, $T2
    lw $T1, 4($KEY)
    # "te k" little endian
    li $T3, 0x6b206574
    @{[vadd_vx $V3, $V3, $T3]}
    add $STATE3, $STATE3, $T3
    lw $T2, 8($KEY)
    @{[vadd_vx $V4, $V4, $T0]}
    add $STATE4, $STATE4, $T0
    lw $T3, 12($KEY)
    @{[vadd_vx $V5, $V5, $T1]}
    add $STATE5, $STATE5, $T1
    @{[vadd_vx $V6, $V6, $T2]}
    add $STATE6, $STATE6, $T2
    @{[vadd_vx $V7, $V7, $T3]}
    add $STATE7, $STATE7, $T3

    # xor with the top-half input
    @{[vxor_vv $V16, $V16, $V0]}
    sw $STATE0, 0(sp)
    sw $STATE1, 4(sp)
    @{[vxor_vv $V17, $V17, $V1]}
    sw $STATE2, 8(sp)
    sw $STATE3, 12(sp)
    @{[vxor_vv $V18, $V18, $V2]}
    sw $STATE4, 16(sp)
    sw $STATE5, 20(sp)
    @{[vxor_vv $V19, $V19, $V3]}
    sw $STATE6, 24(sp)
    sw $STATE7, 28(sp)
    @{[vxor_vv $V20, $V20, $V4]}
    lw $T0, 16($KEY)
    @{[vxor_vv $V21, $V21, $V5]}
    lw $T1, 20($KEY)
    @{[vxor_vv $V22, $V22, $V6]}
    lw $T2, 24($KEY)
    @{[vxor_vv $V23, $V23, $V7]}

    # save the top-half of output from $V16~$V23
    li $T3, 64
    @{[vssseg_nf_e32_v 8, $V16, $OUTPUT, $T3]}

    # add chacha bottom-half initial block states
    @{[vadd_vx $V8, $V8, $T0]}
    add $STATE8, $STATE8, $T0
    lw $T3, 28($KEY)
    @{[vadd_vx $V9, $V9, $T1]}
    add $STATE9, $STATE9, $T1
    lw $T0, 4($COUNTER)
    @{[vadd_vx $V10, $V10, $T2]}
    add $STATE10, $STATE10, $T2
    lw $T1, 8($COUNTER)
    @{[vadd_vx $V11, $V11, $T3]}
    add $STATE11, $STATE11, $T3
    lw $T2, 12($COUNTER)
    @{[vid_v $V0]}
    add $STATE12, $STATE12, $CURRENT_COUNTER
    @{[vadd_vx $V12, $V12, $CURRENT_COUNTER]}
    add $STATE12, $STATE12, $VL
    @{[vadd_vx $V13, $V13, $T0]}
    add $STATE13, $STATE13, $T0
    @{[vadd_vx $V14, $V14, $T1]}
    add $STATE14, $STATE14, $T1
    @{[vadd_vx $V15, $V15, $T2]}
    add $STATE15, $STATE15, $T2
    @{[vadd_vv $V12, $V12, $V0]}
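    # Counter feed-forward: lane i of $V12 gets back its initial value
    # CURRENT_COUNTER + i (vid + vadd_vx + vadd_vv above), while the scalar
    # block's counter word gets CURRENT_COUNTER + VL, since the scalar block
    # follows the VL vector blocks.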

    # xor with the bottom-half input
    @{[vxor_vv $V24, $V24, $V8]}
    sw $STATE8, 32(sp)
    @{[vxor_vv $V25, $V25, $V9]}
    sw $STATE9, 36(sp)
    @{[vxor_vv $V26, $V26, $V10]}
    sw $STATE10, 40(sp)
    @{[vxor_vv $V27, $V27, $V11]}
    sw $STATE11, 44(sp)
    @{[vxor_vv $V29, $V29, $V13]}
    sw $STATE12, 48(sp)
    @{[vxor_vv $V28, $V28, $V12]}
    sw $STATE13, 52(sp)
    @{[vxor_vv $V30, $V30, $V14]}
    sw $STATE14, 56(sp)
    @{[vxor_vv $V31, $V31, $V15]}
    sw $STATE15, 60(sp)

    # save the bottom-half of output from $V24~$V31
    li $T0, 64
    addi $T1, $OUTPUT, 32
    @{[vssseg_nf_e32_v 8, $V24, $T1, $T0]}

    # advance past the vector-processed data: `64 * VL` bytes
    slli $T0, $VL, 6
    add $INPUT, $INPUT, $T0
    add $OUTPUT, $OUTPUT, $T0
    sub $LEN, $LEN, $T0
    add $CURRENT_COUNTER, $CURRENT_COUNTER, $VL

    # process the scalar data block (at most 64 bytes, possibly a partial tail)
    addi $CURRENT_COUNTER, $CURRENT_COUNTER, 1
    li $T0, 64
    @{[minu $T1, $LEN, $T0]}
    sub $LEN, $LEN, $T1
    mv $T2, sp
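    # $T2 walks the scalar block's keystream, which was stored at 0(sp)~60(sp)
    # above; $T1 holds the number of bytes this block must cover.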
.Lscalar_data_loop:
    @{[vsetvli $VL, $T1, "e8", "m8", "ta", "ma"]}
    # from here on, vector registers are grouped with LMUL = 8
    @{[vle8_v $V8, $INPUT]}
    @{[vle8_v $V16, $T2]}
    @{[vxor_vv $V8, $V8, $V16]}
    @{[vse8_v $V8, $OUTPUT]}
    add $INPUT, $INPUT, $VL
    add $OUTPUT, $OUTPUT, $VL
    add $T2, $T2, $VL
    sub $T1, $T1, $VL
    bnez $T1, .Lscalar_data_loop

    bnez $LEN, .Lblock_loop

    addi sp, sp, 64
    ld s0, 0(sp)
    ld s1, 8(sp)
    ld s2, 16(sp)
    ld s3, 24(sp)
    ld s4, 32(sp)
    ld s5, 40(sp)
    ld s6, 48(sp)
    ld s7, 56(sp)
    ld s8, 64(sp)
    ld s9, 72(sp)
    ld s10, 80(sp)
    ld s11, 88(sp)
    addi sp, sp, 96
    ret
.size ChaCha20_ctr32@{[$isaext]},.-ChaCha20_ctr32@{[$isaext]}
___

print $code;

close STDOUT or die "error closing STDOUT: $!";