#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You can obtain
# a copy in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
# Copyright (c) 2023, Phoebe Chen <phoebe.chen@sifive.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Vector AES block cipher extension ('Zvkned')

use strict;
use warnings;

use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$output and open STDOUT,">$output";

my $code=<<___;
.text
___

my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7,
    $V8, $V9, $V10, $V11, $V12, $V13, $V14, $V15,
    $V16, $V17, $V18, $V19, $V20, $V21, $V22, $V23,
    $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
) = map("v$_",(0..31));
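
# Register convention used throughout: round keys stay resident in v1-v15,
# while the data being processed lives in v24 (with LMUL=4 this is the
# register group v24-v27, used by the multi-block ECB paths).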

# Load all 11 round keys to v1-v11 registers.
sub aes_128_load_key {
    my $KEYP = shift;

    my $code=<<___;
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}
    @{[vle32_v $V1, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V2, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V3, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V4, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V5, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V6, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V7, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V8, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V9, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V10, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
___

    return $code;
}

# Load all 13 round keys to v1-v13 registers.
sub aes_192_load_key {
    my $KEYP = shift;

    my $code=<<___;
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}
    @{[vle32_v $V1, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V2, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V3, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V4, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V5, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V6, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V7, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V8, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V9, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V10, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V12, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V13, $KEYP]}
___

    return $code;
}

# Load all 15 round keys to v1-v15 registers.
sub aes_256_load_key {
    my $KEYP = shift;

    my $code=<<___;
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}
    @{[vle32_v $V1, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V2, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V3, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V4, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V5, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V6, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V7, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V8, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V9, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V10, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V12, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V13, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V14, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V15, $KEYP]}
___

    return $code;
}

# aes-128 encryption with round keys v1-v11
sub aes_128_encrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V1]}    # with round key w[ 0, 3]
    @{[vaesem_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesem_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesem_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesem_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesem_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesem_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesem_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesem_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesem_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesef_vs $V24, $V11]}  # with round key w[40,43]
___

    return $code;
}

# aes-128 decryption with round keys v1-v11
sub aes_128_decrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V11]}   # with round key w[40,43]
    @{[vaesdm_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesdm_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesdm_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesdm_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesdm_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesdm_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesdm_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesdm_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesdm_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesdf_vs $V24, $V1]}   # with round key w[ 0, 3]
___

    return $code;
}

# aes-192 encryption with round keys v1-v13
sub aes_192_encrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V1]}    # with round key w[ 0, 3]
    @{[vaesem_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesem_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesem_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesem_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesem_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesem_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesem_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesem_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesem_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesem_vs $V24, $V11]}  # with round key w[40,43]
    @{[vaesem_vs $V24, $V12]}  # with round key w[44,47]
    @{[vaesef_vs $V24, $V13]}  # with round key w[48,51]
___

    return $code;
}

# aes-192 decryption with round keys v1-v13
sub aes_192_decrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V13]}   # with round key w[48,51]
    @{[vaesdm_vs $V24, $V12]}  # with round key w[44,47]
    @{[vaesdm_vs $V24, $V11]}  # with round key w[40,43]
    @{[vaesdm_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesdm_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesdm_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesdm_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesdm_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesdm_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesdm_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesdm_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesdm_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesdf_vs $V24, $V1]}   # with round key w[ 0, 3]
___

    return $code;
}

# aes-256 encryption with round keys v1-v15
sub aes_256_encrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V1]}    # with round key w[ 0, 3]
    @{[vaesem_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesem_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesem_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesem_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesem_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesem_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesem_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesem_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesem_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesem_vs $V24, $V11]}  # with round key w[40,43]
    @{[vaesem_vs $V24, $V12]}  # with round key w[44,47]
    @{[vaesem_vs $V24, $V13]}  # with round key w[48,51]
    @{[vaesem_vs $V24, $V14]}  # with round key w[52,55]
    @{[vaesef_vs $V24, $V15]}  # with round key w[56,59]
___

    return $code;
}

# aes-256 decryption with round keys v1-v15
sub aes_256_decrypt {
    my $code=<<___;
    @{[vaesz_vs $V24, $V15]}   # with round key w[56,59]
    @{[vaesdm_vs $V24, $V14]}  # with round key w[52,55]
    @{[vaesdm_vs $V24, $V13]}  # with round key w[48,51]
    @{[vaesdm_vs $V24, $V12]}  # with round key w[44,47]
    @{[vaesdm_vs $V24, $V11]}  # with round key w[40,43]
    @{[vaesdm_vs $V24, $V10]}  # with round key w[36,39]
    @{[vaesdm_vs $V24, $V9]}   # with round key w[32,35]
    @{[vaesdm_vs $V24, $V8]}   # with round key w[28,31]
    @{[vaesdm_vs $V24, $V7]}   # with round key w[24,27]
    @{[vaesdm_vs $V24, $V6]}   # with round key w[20,23]
    @{[vaesdm_vs $V24, $V5]}   # with round key w[16,19]
    @{[vaesdm_vs $V24, $V4]}   # with round key w[12,15]
    @{[vaesdm_vs $V24, $V3]}   # with round key w[ 8,11]
    @{[vaesdm_vs $V24, $V2]}   # with round key w[ 4, 7]
    @{[vaesdf_vs $V24, $V1]}   # with round key w[ 0, 3]
___

    return $code;
}

{
###############################################################################
# void rv64i_zvkned_cbc_encrypt(const unsigned char *in, unsigned char *out,
#                               size_t length, const AES_KEY *key,
#                               unsigned char *ivec, const int enc);
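# Note: the trailing `enc` argument matches the usual AES_cbc_encrypt-style
# signature but is not inspected here; encryption and decryption have
# separate entry points.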
my ($INP, $OUTP, $LEN, $KEYP, $IVP, $ENC) = ("a0", "a1", "a2", "a3", "a4", "a5");
my ($T0, $T1, $ROUNDS) = ("t0", "t1", "t2");

$code .= <<___;
.p2align 3
.globl rv64i_zvkned_cbc_encrypt
.type rv64i_zvkned_cbc_encrypt,\@function
rv64i_zvkned_cbc_encrypt:
    # check whether the length is a multiple of 16 and >= 16
    li $T1, 16
    blt $LEN, $T1, L_end
    andi $T1, $LEN, 15
    bnez $T1, L_end

    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T0, 10
    beq $ROUNDS, $T0, L_cbc_enc_128

    li $T0, 12
    beq $ROUNDS, $T0, L_cbc_enc_192

    li $T0, 14
    beq $ROUNDS, $T0, L_cbc_enc_256

    ret
.size rv64i_zvkned_cbc_encrypt,.-rv64i_zvkned_cbc_encrypt
___

$code .= <<___;
.p2align 3
L_cbc_enc_128:
    # Load all 11 round keys to v1-v11 registers.
    @{[aes_128_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
    @{[vxor_vv $V24, $V24, $V16]}
    j 2f

1:
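    # Later iterations: the previous ciphertext block is still in $V24;
    # XOR the next plaintext block into it to form the CBC input.
    # (The 192- and 256-bit paths below follow the same pattern.)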
    @{[vle32_v $V17, $INP]}
    @{[vxor_vv $V24, $V24, $V17]}

2:
    # AES body
    @{[aes_128_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    addi $INP, $INP, 16
    addi $OUTP, $OUTP, 16
    addi $LEN, $LEN, -16

    bnez $LEN, 1b

    @{[vse32_v $V24, $IVP]}

    ret
.size L_cbc_enc_128,.-L_cbc_enc_128
___

$code .= <<___;
.p2align 3
L_cbc_enc_192:
    # Load all 13 round keys to v1-v13 registers.
    @{[aes_192_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
    @{[vxor_vv $V24, $V24, $V16]}
    j 2f

1:
    @{[vle32_v $V17, $INP]}
    @{[vxor_vv $V24, $V24, $V17]}

2:
    # AES body
    @{[aes_192_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    addi $INP, $INP, 16
    addi $OUTP, $OUTP, 16
    addi $LEN, $LEN, -16

    bnez $LEN, 1b

    @{[vse32_v $V24, $IVP]}

    ret
.size L_cbc_enc_192,.-L_cbc_enc_192
___

$code .= <<___;
.p2align 3
L_cbc_enc_256:
    # Load all 15 round keys to v1-v15 registers.
    @{[aes_256_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
    @{[vxor_vv $V24, $V24, $V16]}
    j 2f

1:
    @{[vle32_v $V17, $INP]}
    @{[vxor_vv $V24, $V24, $V17]}

2:
    # AES body
    @{[aes_256_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    addi $INP, $INP, 16
    addi $OUTP, $OUTP, 16
    addi $LEN, $LEN, -16

    bnez $LEN, 1b

    @{[vse32_v $V24, $IVP]}

    ret
.size L_cbc_enc_256,.-L_cbc_enc_256
___

###############################################################################
# void rv64i_zvkned_cbc_decrypt(const unsigned char *in, unsigned char *out,
#                               size_t length, const AES_KEY *key,
#                               unsigned char *ivec, const int enc);
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_cbc_decrypt
.type rv64i_zvkned_cbc_decrypt,\@function
rv64i_zvkned_cbc_decrypt:
    # check whether the length is a multiple of 16 and >= 16
    li $T1, 16
    blt $LEN, $T1, L_end
    andi $T1, $LEN, 15
    bnez $T1, L_end

    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T0, 10
    beq $ROUNDS, $T0, L_cbc_dec_128

    li $T0, 12
    beq $ROUNDS, $T0, L_cbc_dec_192

    li $T0, 14
    beq $ROUNDS, $T0, L_cbc_dec_256

    ret
.size rv64i_zvkned_cbc_decrypt,.-rv64i_zvkned_cbc_decrypt
___

$code .= <<___;
.p2align 3
L_cbc_dec_128:
    # Load all 11 round keys to v1-v11 registers.
    @{[aes_128_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
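    # Keep a copy of the ciphertext in $V17: after this block is decrypted,
    # it becomes the chaining value for the next block and, at the end, the
    # new IV. (Same pattern in the 192- and 256-bit paths below.)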
    @{[vmv_v_v $V17, $V24]}
    j 2f

1:
    @{[vle32_v $V24, $INP]}
    @{[vmv_v_v $V17, $V24]}
    addi $OUTP, $OUTP, 16

2:
    # AES body
    @{[aes_128_decrypt]}

    @{[vxor_vv $V24, $V24, $V16]}
    @{[vse32_v $V24, $OUTP]}
    @{[vmv_v_v $V16, $V17]}

    addi $LEN, $LEN, -16
    addi $INP, $INP, 16

    bnez $LEN, 1b

    @{[vse32_v $V16, $IVP]}

    ret
.size L_cbc_dec_128,.-L_cbc_dec_128
___

$code .= <<___;
.p2align 3
L_cbc_dec_192:
    # Load all 13 round keys to v1-v13 registers.
    @{[aes_192_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
    @{[vmv_v_v $V17, $V24]}
    j 2f

1:
    @{[vle32_v $V24, $INP]}
    @{[vmv_v_v $V17, $V24]}
    addi $OUTP, $OUTP, 16

2:
    # AES body
    @{[aes_192_decrypt]}

    @{[vxor_vv $V24, $V24, $V16]}
    @{[vse32_v $V24, $OUTP]}
    @{[vmv_v_v $V16, $V17]}

    addi $LEN, $LEN, -16
    addi $INP, $INP, 16

    bnez $LEN, 1b

    @{[vse32_v $V16, $IVP]}

    ret
.size L_cbc_dec_192,.-L_cbc_dec_192
___

$code .= <<___;
.p2align 3
L_cbc_dec_256:
    # Load all 15 round keys to v1-v15 registers.
    @{[aes_256_load_key $KEYP]}

    # Load IV.
    @{[vle32_v $V16, $IVP]}

    @{[vle32_v $V24, $INP]}
    @{[vmv_v_v $V17, $V24]}
    j 2f

1:
    @{[vle32_v $V24, $INP]}
    @{[vmv_v_v $V17, $V24]}
    addi $OUTP, $OUTP, 16

2:
    # AES body
    @{[aes_256_decrypt]}

    @{[vxor_vv $V24, $V24, $V16]}
    @{[vse32_v $V24, $OUTP]}
    @{[vmv_v_v $V16, $V17]}

    addi $LEN, $LEN, -16
    addi $INP, $INP, 16

    bnez $LEN, 1b

    @{[vse32_v $V16, $IVP]}

    ret
.size L_cbc_dec_256,.-L_cbc_dec_256
___
}

{
###############################################################################
# void rv64i_zvkned_ecb_encrypt(const unsigned char *in, unsigned char *out,
#                               size_t length, const AES_KEY *key,
#                               const int enc);
my ($INP, $OUTP, $LEN, $KEYP, $ENC) = ("a0", "a1", "a2", "a3", "a4");
my ($REMAIN_LEN) = ("a5");
my ($VL) = ("a6");
my ($T0, $T1, $ROUNDS) = ("t0", "t1", "t2");
my ($LEN32) = ("t3");

$code .= <<___;
.p2align 3
.globl rv64i_zvkned_ecb_encrypt
.type rv64i_zvkned_ecb_encrypt,\@function
rv64i_zvkned_ecb_encrypt:
    # Convert LEN from a byte count into a count of 32-bit (e32) elements.
    srli $LEN32, $LEN, 2

    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T0, 10
    beq $ROUNDS, $T0, L_ecb_enc_128

    li $T0, 12
    beq $ROUNDS, $T0, L_ecb_enc_192

    li $T0, 14
    beq $ROUNDS, $T0, L_ecb_enc_256

    ret
.size rv64i_zvkned_ecb_encrypt,.-rv64i_zvkned_ecb_encrypt
___

$code .= <<___;
.p2align 3
L_ecb_enc_128:
    # Load all 11 round keys to v1-v11 registers.
    @{[aes_128_load_key $KEYP]}

1:
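    # vsetvli grants up to VLMAX e32 elements per pass (LMUL=4, so up to
    # four 128-bit blocks when VLEN=128); the byte count advanced below is
    # VL * 4. The other ECB loops are identical in structure.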
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_128_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_enc_128,.-L_ecb_enc_128
___

$code .= <<___;
.p2align 3
L_ecb_enc_192:
    # Load all 13 round keys to v1-v13 registers.
    @{[aes_192_load_key $KEYP]}

1:
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_192_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_enc_192,.-L_ecb_enc_192
___

$code .= <<___;
.p2align 3
L_ecb_enc_256:
    # Load all 15 round keys to v1-v15 registers.
    @{[aes_256_load_key $KEYP]}

1:
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_256_encrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_enc_256,.-L_ecb_enc_256
___

###############################################################################
# void rv64i_zvkned_ecb_decrypt(const unsigned char *in, unsigned char *out,
#                               size_t length, const AES_KEY *key,
#                               const int enc);
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_ecb_decrypt
.type rv64i_zvkned_ecb_decrypt,\@function
rv64i_zvkned_ecb_decrypt:
    # Convert LEN from a byte count into a count of 32-bit (e32) elements.
    srli $LEN32, $LEN, 2

    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T0, 10
    beq $ROUNDS, $T0, L_ecb_dec_128

    li $T0, 12
    beq $ROUNDS, $T0, L_ecb_dec_192

    li $T0, 14
    beq $ROUNDS, $T0, L_ecb_dec_256

    ret
.size rv64i_zvkned_ecb_decrypt,.-rv64i_zvkned_ecb_decrypt
___

$code .= <<___;
.p2align 3
L_ecb_dec_128:
    # Load all 11 round keys to v1-v11 registers.
    @{[aes_128_load_key $KEYP]}

1:
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_128_decrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_dec_128,.-L_ecb_dec_128
___

$code .= <<___;
.p2align 3
L_ecb_dec_192:
    # Load all 13 round keys to v1-v13 registers.
    @{[aes_192_load_key $KEYP]}

1:
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_192_decrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_dec_192,.-L_ecb_dec_192
___

$code .= <<___;
.p2align 3
L_ecb_dec_256:
    # Load all 15 round keys to v1-v15 registers.
    @{[aes_256_load_key $KEYP]}

1:
    @{[vsetvli $VL, $LEN32, "e32", "m4", "ta", "ma"]}
    slli $T0, $VL, 2
    sub $LEN32, $LEN32, $VL

    @{[vle32_v $V24, $INP]}

    # AES body
    @{[aes_256_decrypt]}

    @{[vse32_v $V24, $OUTP]}

    add $INP, $INP, $T0
    add $OUTP, $OUTP, $T0

    bnez $LEN32, 1b

    ret
.size L_ecb_dec_256,.-L_ecb_dec_256
___
}

{
################################################################################
# int rv64i_zvkned_set_encrypt_key(const unsigned char *userKey, const int bits,
#                                  AES_KEY *key)
# int rv64i_zvkned_set_decrypt_key(const unsigned char *userKey, const int bits,
#                                  AES_KEY *key)
my ($UKEY, $BITS, $KEYP) = ("a0", "a1", "a2");
my ($T0, $T1, $T4) = ("t1", "t2", "t4");

$code .= <<___;
.p2align 3
.globl rv64i_zvkned_set_encrypt_key
.type rv64i_zvkned_set_encrypt_key,\@function
rv64i_zvkned_set_encrypt_key:
    beqz $UKEY, L_fail_m1
    beqz $KEYP, L_fail_m1

    # Get proper routine for key size
    li $T0, 256
    beq $BITS, $T0, L_set_key_256
    li $T0, 128
    beq $BITS, $T0, L_set_key_128
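    # AES-192 key expansion is not handled here (Zvkned's vaeskf1/vaeskf2
    # cover only the 128- and 256-bit schedules), so report failure (-2).
    # The same applies to set_decrypt_key below.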
    j L_fail_m2
.size rv64i_zvkned_set_encrypt_key,.-rv64i_zvkned_set_encrypt_key
___

$code .= <<___;
.p2align 3
.globl rv64i_zvkned_set_decrypt_key
.type rv64i_zvkned_set_decrypt_key,\@function
rv64i_zvkned_set_decrypt_key:
    beqz $UKEY, L_fail_m1
    beqz $KEYP, L_fail_m1

    # Get proper routine for key size
    li $T0, 256
    beq $BITS, $T0, L_set_key_256
    li $T0, 128
    beq $BITS, $T0, L_set_key_128
    j L_fail_m2
.size rv64i_zvkned_set_decrypt_key,.-rv64i_zvkned_set_decrypt_key
___

$code .= <<___;
.p2align 3
L_set_key_128:
    # Store the number of rounds
    li $T1, 10
    sw $T1, 240($KEYP)

    @{[vsetivli "zero", 4, "e32", "m1", "tu", "mu"]}

    # Load the key
    @{[vle32_v $V10, ($UKEY)]}
    # Generate keys for rounds 2-11 into registers v11-v20.
    @{[vaeskf1_vi $V11, $V10, 1]}   # v11 <- rk2  (w[ 4, 7])
    @{[vaeskf1_vi $V12, $V11, 2]}   # v12 <- rk3  (w[ 8,11])
    @{[vaeskf1_vi $V13, $V12, 3]}   # v13 <- rk4  (w[12,15])
    @{[vaeskf1_vi $V14, $V13, 4]}   # v14 <- rk5  (w[16,19])
    @{[vaeskf1_vi $V15, $V14, 5]}   # v15 <- rk6  (w[20,23])
    @{[vaeskf1_vi $V16, $V15, 6]}   # v16 <- rk7  (w[24,27])
    @{[vaeskf1_vi $V17, $V16, 7]}   # v17 <- rk8  (w[28,31])
    @{[vaeskf1_vi $V18, $V17, 8]}   # v18 <- rk9  (w[32,35])
    @{[vaeskf1_vi $V19, $V18, 9]}   # v19 <- rk10 (w[36,39])
    @{[vaeskf1_vi $V20, $V19, 10]}  # v20 <- rk11 (w[40,43])

    # Store the round keys
    @{[vse32_v $V10, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V11, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V12, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V13, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V14, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V15, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V16, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V17, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V18, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V19, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V20, $KEYP]}

    li a0, 1
    ret
.size L_set_key_128,.-L_set_key_128
___

$code .= <<___;
.p2align 3
L_set_key_256:
    # Store the number of rounds
    li $T1, 14
    sw $T1, 240($KEYP)

    @{[vsetivli "zero", 4, "e32", "m1", "tu", "mu"]}

    # Load the key
    @{[vle32_v $V10, ($UKEY)]}
    addi $UKEY, $UKEY, 16
    @{[vle32_v $V11, ($UKEY)]}
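
    # Generate keys for rounds 3-15 into v12-v24. Each vaeskf2 derives the
    # next round key from the previous round key (vs2) and the round key
    # from two rounds back, which vmv.v.v seeds into the destination first.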
    @{[vmv_v_v $V12, $V10]}
    @{[vaeskf2_vi $V12, $V11, 2]}
    @{[vmv_v_v $V13, $V11]}
    @{[vaeskf2_vi $V13, $V12, 3]}
    @{[vmv_v_v $V14, $V12]}
    @{[vaeskf2_vi $V14, $V13, 4]}
    @{[vmv_v_v $V15, $V13]}
    @{[vaeskf2_vi $V15, $V14, 5]}
    @{[vmv_v_v $V16, $V14]}
    @{[vaeskf2_vi $V16, $V15, 6]}
    @{[vmv_v_v $V17, $V15]}
    @{[vaeskf2_vi $V17, $V16, 7]}
    @{[vmv_v_v $V18, $V16]}
    @{[vaeskf2_vi $V18, $V17, 8]}
    @{[vmv_v_v $V19, $V17]}
    @{[vaeskf2_vi $V19, $V18, 9]}
    @{[vmv_v_v $V20, $V18]}
    @{[vaeskf2_vi $V20, $V19, 10]}
    @{[vmv_v_v $V21, $V19]}
    @{[vaeskf2_vi $V21, $V20, 11]}
    @{[vmv_v_v $V22, $V20]}
    @{[vaeskf2_vi $V22, $V21, 12]}
    @{[vmv_v_v $V23, $V21]}
    @{[vaeskf2_vi $V23, $V22, 13]}
    @{[vmv_v_v $V24, $V22]}
    @{[vaeskf2_vi $V24, $V23, 14]}
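
    # Store the round keys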
    @{[vse32_v $V10, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V11, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V12, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V13, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V14, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V15, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V16, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V17, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V18, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V19, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V20, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V21, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V22, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V23, $KEYP]}
    addi $KEYP, $KEYP, 16
    @{[vse32_v $V24, $KEYP]}

    li a0, 1
    ret
.size L_set_key_256,.-L_set_key_256
___
}

{
################################################################################
# void rv64i_zvkned_encrypt(const unsigned char *in, unsigned char *out,
#                           const AES_KEY *key);
my ($INP, $OUTP, $KEYP) = ("a0", "a1", "a2");
my ($T0, $T1, $ROUNDS, $T6) = ("a3", "a4", "t5", "t6");

$code .= <<___;
.p2align 3
.globl rv64i_zvkned_encrypt
.type rv64i_zvkned_encrypt,\@function
rv64i_zvkned_encrypt:
    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T6, 14
    beq $ROUNDS, $T6, L_enc_256
    li $T6, 10
    beq $ROUNDS, $T6, L_enc_128
    li $T6, 12
    beq $ROUNDS, $T6, L_enc_192

    j L_fail_m2
.size rv64i_zvkned_encrypt,.-rv64i_zvkned_encrypt
___

$code .= <<___;
.p2align 3
L_enc_128:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    @{[vle32_v $V10, $KEYP]}
    @{[vaesz_vs $V1, $V10]}    # with round key w[ 0, 3]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesem_vs $V1, $V11]}   # with round key w[ 4, 7]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesem_vs $V1, $V12]}   # with round key w[ 8,11]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesem_vs $V1, $V13]}   # with round key w[12,15]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesem_vs $V1, $V14]}   # with round key w[16,19]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesem_vs $V1, $V15]}   # with round key w[20,23]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesem_vs $V1, $V16]}   # with round key w[24,27]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesem_vs $V1, $V17]}   # with round key w[28,31]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesem_vs $V1, $V18]}   # with round key w[32,35]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesem_vs $V1, $V19]}   # with round key w[36,39]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V20, $KEYP]}
    @{[vaesef_vs $V1, $V20]}   # with round key w[40,43]

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_enc_128,.-L_enc_128
___

$code .= <<___;
.p2align 3
L_enc_192:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    @{[vle32_v $V10, $KEYP]}
    @{[vaesz_vs $V1, $V10]}    # with round key w[ 0, 3]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesem_vs $V1, $V11]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesem_vs $V1, $V12]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesem_vs $V1, $V13]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesem_vs $V1, $V14]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesem_vs $V1, $V15]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesem_vs $V1, $V16]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesem_vs $V1, $V17]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesem_vs $V1, $V18]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesem_vs $V1, $V19]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V20, $KEYP]}
    @{[vaesem_vs $V1, $V20]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V21, $KEYP]}
    @{[vaesem_vs $V1, $V21]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V22, $KEYP]}
    @{[vaesef_vs $V1, $V22]}

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_enc_192,.-L_enc_192
___

$code .= <<___;
.p2align 3
L_enc_256:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    @{[vle32_v $V10, $KEYP]}
    @{[vaesz_vs $V1, $V10]}    # with round key w[ 0, 3]
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesem_vs $V1, $V11]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesem_vs $V1, $V12]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesem_vs $V1, $V13]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesem_vs $V1, $V14]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesem_vs $V1, $V15]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesem_vs $V1, $V16]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesem_vs $V1, $V17]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesem_vs $V1, $V18]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesem_vs $V1, $V19]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V20, $KEYP]}
    @{[vaesem_vs $V1, $V20]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V21, $KEYP]}
    @{[vaesem_vs $V1, $V21]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V22, $KEYP]}
    @{[vaesem_vs $V1, $V22]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V23, $KEYP]}
    @{[vaesem_vs $V1, $V23]}
    addi $KEYP, $KEYP, 16
    @{[vle32_v $V24, $KEYP]}
    @{[vaesef_vs $V1, $V24]}

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_enc_256,.-L_enc_256
___

################################################################################
# void rv64i_zvkned_decrypt(const unsigned char *in, unsigned char *out,
#                           const AES_KEY *key);
$code .= <<___;
.p2align 3
.globl rv64i_zvkned_decrypt
.type rv64i_zvkned_decrypt,\@function
rv64i_zvkned_decrypt:
    # Load number of rounds
    lwu $ROUNDS, 240($KEYP)

    # Get proper routine for key size
    li $T6, 14
    beq $ROUNDS, $T6, L_dec_256
    li $T6, 10
    beq $ROUNDS, $T6, L_dec_128
    li $T6, 12
    beq $ROUNDS, $T6, L_dec_192

    j L_fail_m2
.size rv64i_zvkned_decrypt,.-rv64i_zvkned_decrypt
___

$code .= <<___;
.p2align 3
L_dec_128:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    addi $KEYP, $KEYP, 160
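    # $KEYP now points at the last round key (10 rounds * 16 bytes);
    # the key schedule is walked backwards from here.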
    @{[vle32_v $V20, $KEYP]}
    @{[vaesz_vs $V1, $V20]}    # with round key w[40,43]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesdm_vs $V1, $V19]}   # with round key w[36,39]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesdm_vs $V1, $V18]}   # with round key w[32,35]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesdm_vs $V1, $V17]}   # with round key w[28,31]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesdm_vs $V1, $V16]}   # with round key w[24,27]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesdm_vs $V1, $V15]}   # with round key w[20,23]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesdm_vs $V1, $V14]}   # with round key w[16,19]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesdm_vs $V1, $V13]}   # with round key w[12,15]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesdm_vs $V1, $V12]}   # with round key w[ 8,11]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesdm_vs $V1, $V11]}   # with round key w[ 4, 7]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V10, $KEYP]}
    @{[vaesdf_vs $V1, $V10]}   # with round key w[ 0, 3]

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_dec_128,.-L_dec_128
___

$code .= <<___;
.p2align 3
L_dec_192:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    addi $KEYP, $KEYP, 192
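    # $KEYP now points at the last round key (12 rounds * 16 bytes).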
    @{[vle32_v $V22, $KEYP]}
    @{[vaesz_vs $V1, $V22]}    # with round key w[48,51]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V21, $KEYP]}
    @{[vaesdm_vs $V1, $V21]}   # with round key w[44,47]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V20, $KEYP]}
    @{[vaesdm_vs $V1, $V20]}   # with round key w[40,43]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesdm_vs $V1, $V19]}   # with round key w[36,39]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesdm_vs $V1, $V18]}   # with round key w[32,35]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesdm_vs $V1, $V17]}   # with round key w[28,31]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesdm_vs $V1, $V16]}   # with round key w[24,27]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesdm_vs $V1, $V15]}   # with round key w[20,23]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesdm_vs $V1, $V14]}   # with round key w[16,19]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesdm_vs $V1, $V13]}   # with round key w[12,15]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesdm_vs $V1, $V12]}   # with round key w[ 8,11]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesdm_vs $V1, $V11]}   # with round key w[ 4, 7]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V10, $KEYP]}
    @{[vaesdf_vs $V1, $V10]}   # with round key w[ 0, 3]

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_dec_192,.-L_dec_192
___

$code .= <<___;
.p2align 3
L_dec_256:
    @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]}

    @{[vle32_v $V1, $INP]}

    addi $KEYP, $KEYP, 224
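    # $KEYP now points at the last round key (14 rounds * 16 bytes).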
    @{[vle32_v $V24, $KEYP]}
    @{[vaesz_vs $V1, $V24]}    # with round key w[56,59]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V23, $KEYP]}
    @{[vaesdm_vs $V1, $V23]}   # with round key w[52,55]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V22, $KEYP]}
    @{[vaesdm_vs $V1, $V22]}   # with round key w[48,51]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V21, $KEYP]}
    @{[vaesdm_vs $V1, $V21]}   # with round key w[44,47]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V20, $KEYP]}
    @{[vaesdm_vs $V1, $V20]}   # with round key w[40,43]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V19, $KEYP]}
    @{[vaesdm_vs $V1, $V19]}   # with round key w[36,39]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V18, $KEYP]}
    @{[vaesdm_vs $V1, $V18]}   # with round key w[32,35]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V17, $KEYP]}
    @{[vaesdm_vs $V1, $V17]}   # with round key w[28,31]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V16, $KEYP]}
    @{[vaesdm_vs $V1, $V16]}   # with round key w[24,27]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V15, $KEYP]}
    @{[vaesdm_vs $V1, $V15]}   # with round key w[20,23]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V14, $KEYP]}
    @{[vaesdm_vs $V1, $V14]}   # with round key w[16,19]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V13, $KEYP]}
    @{[vaesdm_vs $V1, $V13]}   # with round key w[12,15]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V12, $KEYP]}
    @{[vaesdm_vs $V1, $V12]}   # with round key w[ 8,11]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V11, $KEYP]}
    @{[vaesdm_vs $V1, $V11]}   # with round key w[ 4, 7]
    addi $KEYP, $KEYP, -16
    @{[vle32_v $V10, $KEYP]}
    @{[vaesdf_vs $V1, $V10]}   # with round key w[ 0, 3]

    @{[vse32_v $V1, $OUTP]}

    ret
.size L_dec_256,.-L_dec_256
___
}

$code .= <<___;
L_fail_m1:
    li a0, -1
    ret
.size L_fail_m1,.-L_fail_m1

L_fail_m2:
    li a0, -2
    ret
.size L_fail_m2,.-L_fail_m2

L_end:
    ret
.size L_end,.-L_end
___

print $code;

close STDOUT or die "error closing STDOUT: $!";