
riscv: SM4: Provide a Zvksed-based implementation

The upcoming RISC-V vector crypto extensions include the
Zvksed extension, which provides SM4-specific instructions.
This patch provides an implementation that uses this
extension when available.

Tested on QEMU; no regressions were observed.

Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
Reviewed-by: Hugo Landau <hlandau@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/21923)

+ 19 - 0
crypto/perlasm/riscv.pm

@@ -644,4 +644,23 @@ sub vsha2cl_vv {
     return ".word ".($template | ($vs2 << 20)| ($vs1 << 15 )| ($vd << 7));
 }
 
+## Zvksed instructions
+
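+# These helpers emit raw .word encodings so the generated code assembles
+# even with toolchains that do not yet support the Zvksed extension.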
+sub vsm4k_vi {
+    # vsm4k.vi vd, vs2, uimm
+    my $template = 0b1000011_00000_00000_010_00000_1110111;
+    my $vd = read_vreg shift;
+    my $vs2 = read_vreg shift;
+    my $uimm = shift;
+    return ".word ".($template | ($vs2 << 20) | ($uimm << 15) | ($vd << 7));
+}
+
+sub vsm4r_vs {
+    # vsm4r.vs vd, vs2
+    my $template = 0b1010011_00000_10000_010_00000_1110111;
+    my $vd = read_vreg shift;
+    my $vs2 = read_vreg shift;
+    return ".word ".($template | ($vs2 << 20) | ($vd << 7));
+}
+
 1;

+ 298 - 0
crypto/sm4/asm/sm4-riscv64-zvksed.pl

@@ -0,0 +1,298 @@
+#! /usr/bin/env perl
+# This file is dual-licensed, meaning that you can use it under your
+# choice of either of the following two licenses:
+#
+# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the Apache License 2.0 (the "License"). You can obtain
+# a copy in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# or
+#
+# Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The generated code of this file depends on the following RISC-V extensions:
+# - RV64I
+# - RISC-V vector ('V') with VLEN >= 128
+# - Vector Bit-manipulation used in Cryptography ('Zvbb')
+# - Vector ShangMi Suite: SM4 Block Cipher ('Zvksed')
+
+use strict;
+use warnings;
+
+use FindBin qw($Bin);
+use lib "$Bin";
+use lib "$Bin/../../perlasm";
+use riscv;
+
+# $output is the last argument if it looks like a file (it has an extension)
+# $flavour is the first argument if it doesn't look like a file
+my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
+my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
+
+$output and open STDOUT,">$output";
+
+my $code=<<___;
+.text
+___
+
+####
+# int rv64i_zvksed_sm4_set_encrypt_key(const unsigned char *userKey,
+#                                      SM4_KEY *key);
+#
+{
+my ($ukey,$keys,$fk)=("a0","a1","t0");
+my ($vukey,$vfk,$vk0,$vk1,$vk2,$vk3,$vk4,$vk5,$vk6,$vk7)=("v1","v2","v3","v4","v5","v6","v7","v8","v9","v10");
+$code .= <<___;
+.p2align 3
+.globl rv64i_zvksed_sm4_set_encrypt_key
+.type rv64i_zvksed_sm4_set_encrypt_key,\@function
+rv64i_zvksed_sm4_set_encrypt_key:
+    @{[vsetivli__x0_4_e32_m1_tu_mu]}
+
+    # Load the user key
+    @{[vle32_v $vukey, $ukey]}
+    @{[vrev8_v $vukey, $vukey]}
+
+    # Load the FK.
+    la $fk, FK
+    @{[vle32_v $vfk, $fk]}
+
+    # Generate round keys.
+    @{[vxor_vv $vukey, $vukey, $vfk]}
+    @{[vsm4k_vi $vk0, $vukey, 0]} # rk[0:3]
+    @{[vsm4k_vi $vk1, $vk0, 1]} # rk[4:7]
+    @{[vsm4k_vi $vk2, $vk1, 2]} # rk[8:11]
+    @{[vsm4k_vi $vk3, $vk2, 3]} # rk[12:15]
+    @{[vsm4k_vi $vk4, $vk3, 4]} # rk[16:19]
+    @{[vsm4k_vi $vk5, $vk4, 5]} # rk[20:23]
+    @{[vsm4k_vi $vk6, $vk5, 6]} # rk[24:27]
+    @{[vsm4k_vi $vk7, $vk6, 7]} # rk[28:31]
+
+    # Store round keys
+    @{[vse32_v $vk0, $keys]} # rk[0:3]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk1, $keys]} # rk[4:7]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk2, $keys]} # rk[8:11]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk3, $keys]} # rk[12:15]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk4, $keys]} # rk[16:19]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk5, $keys]} # rk[20:23]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk6, $keys]} # rk[24:27]
+    addi $keys, $keys, 16
+    @{[vse32_v $vk7, $keys]} # rk[28:31]
+
+    li a0, 1
+    ret
+.size rv64i_zvksed_sm4_set_encrypt_key,.-rv64i_zvksed_sm4_set_encrypt_key
+___
+}
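+
+# Note: each vsm4k.vi above derives the next four round keys from the
+# previous four. In scalar terms, with T' the SM4 key-schedule substitution
+# and CK[] the standard round constants:
+#   rk[4*g+i] = k[i] ^ T'(k[i+1] ^ k[i+2] ^ k[i+3] ^ CK[4*g+i]), i = 0..3
+# where k[0..3] holds the previous group (the FK-xored user key for g == 0)
+# and the window slides over newly produced keys (k[4+i] = rk[4*g+i]).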
+
+####
+# int rv64i_zvksed_sm4_set_decrypt_key(const unsigned char *userKey,
+#                                      SM4_KEY *key);
+#
+{
+my ($ukey,$keys,$fk,$stride)=("a0","a1","t0","t1");
+my ($vukey,$vfk,$vk0,$vk1,$vk2,$vk3,$vk4,$vk5,$vk6,$vk7)=("v1","v2","v3","v4","v5","v6","v7","v8","v9","v10");
+$code .= <<___;
+.p2align 3
+.globl rv64i_zvksed_sm4_set_decrypt_key
+.type rv64i_zvksed_sm4_set_decrypt_key,\@function
+rv64i_zvksed_sm4_set_decrypt_key:
+    @{[vsetivli__x0_4_e32_m1_tu_mu]}
+
+    # Load the user key
+    @{[vle32_v $vukey, $ukey]}
+    @{[vrev8_v $vukey, $vukey]}
+
+    # Load the FK.
+    la $fk, FK
+    @{[vle32_v $vfk, $fk]}
+
+    # Generate round keys.
+    @{[vxor_vv $vukey, $vukey, $vfk]}
+    @{[vsm4k_vi $vk0, $vukey, 0]} # rk[0:3]
+    @{[vsm4k_vi $vk1, $vk0, 1]} # rk[4:7]
+    @{[vsm4k_vi $vk2, $vk1, 2]} # rk[8:11]
+    @{[vsm4k_vi $vk3, $vk2, 3]} # rk[12:15]
+    @{[vsm4k_vi $vk4, $vk3, 4]} # rk[16:19]
+    @{[vsm4k_vi $vk5, $vk4, 5]} # rk[20:23]
+    @{[vsm4k_vi $vk6, $vk5, 6]} # rk[24:27]
+    @{[vsm4k_vi $vk7, $vk6, 7]} # rk[28:31]
+
+    # Store round keys in reverse order
+    addi $keys, $keys, 12
+    li $stride, -4
+    @{[vsse32_v $vk7, $keys, $stride]} # rk[31:28]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk6, $keys, $stride]} # rk[27:24]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk5, $keys, $stride]} # rk[23:20]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk4, $keys, $stride]} # rk[19:16]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk3, $keys, $stride]} # rk[15:12]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk2, $keys, $stride]} # rk[11:8]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk1, $keys, $stride]} # rk[7:4]
+    addi $keys, $keys, 16
+    @{[vsse32_v $vk0, $keys, $stride]} # rk[3:0]
+
+    li a0, 1
+    ret
+.size rv64i_zvksed_sm4_set_decrypt_key,.-rv64i_zvksed_sm4_set_decrypt_key
+___
+}
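+
+# Note: the strided stores above leave the decryption schedule as the
+# encryption schedule in fully reversed word order, i.e. drk[i] = rk[31-i].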
+
+####
+# void rv64i_zvksed_sm4_encrypt(const unsigned char *in, unsigned char *out,
+#                               const SM4_KEY *key);
+#
+{
+my ($in,$out,$keys,$stride)=("a0","a1","a2","t0");
+my ($vdata,$vk0,$vk1,$vk2,$vk3,$vk4,$vk5,$vk6,$vk7,$vgen)=("v1","v2","v3","v4","v5","v6","v7","v8","v9","v10");
+$code .= <<___;
+.p2align 3
+.globl rv64i_zvksed_sm4_encrypt
+.type rv64i_zvksed_sm4_encrypt,\@function
+rv64i_zvksed_sm4_encrypt:
+    @{[vsetivli__x0_4_e32_m1_tu_mu]}
+
+    # Order of elements was adjusted in set_encrypt_key()
+    @{[vle32_v $vk0, $keys]} # rk[0:3]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk1, $keys]} # rk[4:7]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk2, $keys]} # rk[8:11]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk3, $keys]} # rk[12:15]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk4, $keys]} # rk[16:19]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk5, $keys]} # rk[20:23]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk6, $keys]} # rk[24:27]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk7, $keys]} # rk[28:31]
+
+    # Load input data
+    @{[vle32_v $vdata, $in]}
+    @{[vrev8_v $vdata, $vdata]}
+
+    # Encrypt with all keys
+    @{[vsm4r_vs $vdata, $vk0]}
+    @{[vsm4r_vs $vdata, $vk1]}
+    @{[vsm4r_vs $vdata, $vk2]}
+    @{[vsm4r_vs $vdata, $vk3]}
+    @{[vsm4r_vs $vdata, $vk4]}
+    @{[vsm4r_vs $vdata, $vk5]}
+    @{[vsm4r_vs $vdata, $vk6]}
+    @{[vsm4r_vs $vdata, $vk7]}
+
+    # Save the ciphertext (in reverse element order)
+    @{[vrev8_v $vdata, $vdata]}
+    li $stride, -4
+    addi $out, $out, 12
+    @{[vsse32_v $vdata, $out, $stride]}
+
+    ret
+.size rv64i_zvksed_sm4_encrypt,.-rv64i_zvksed_sm4_encrypt
+___
+}
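+
+# Note: the final vrev8 plus the negative-stride store implement SM4's
+# reverse transform R: with X[32..35] denoting the state words after the
+# 32 rounds, the output is (X35, X34, X33, X32), each written big-endian.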
+
+####
+# void rv64i_zvksed_sm4_decrypt(const unsigned char *in, unsigned char *out,
+#                               const SM4_KEY *key);
+#
+{
+my ($in,$out,$keys,$stride)=("a0","a1","a2","t0");
+my ($vdata,$vk0,$vk1,$vk2,$vk3,$vk4,$vk5,$vk6,$vk7,$vgen)=("v1","v2","v3","v4","v5","v6","v7","v8","v9","v10");
+$code .= <<___;
+.p2align 3
+.globl rv64i_zvksed_sm4_decrypt
+.type rv64i_zvksed_sm4_decrypt,\@function
+rv64i_zvksed_sm4_decrypt:
+    @{[vsetivli__x0_4_e32_m1_tu_mu]}
+
+    # Order of elements was adjusted in set_decrypt_key()
+    @{[vle32_v $vk7, $keys]} # rk[31:28]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk6, $keys]} # rk[27:24]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk5, $keys]} # rk[23:20]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk4, $keys]} # rk[19:16]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk3, $keys]} # rk[15:12]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk2, $keys]} # rk[11:8]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk1, $keys]} # rk[7:4]
+    addi $keys, $keys, 16
+    @{[vle32_v $vk0, $keys]} # rk[3:0]
+
+    # Load input data
+    @{[vle32_v $vdata, $in]}
+    @{[vrev8_v $vdata, $vdata]}
+
+    # Decrypt with all keys
+    @{[vsm4r_vs $vdata, $vk7]}
+    @{[vsm4r_vs $vdata, $vk6]}
+    @{[vsm4r_vs $vdata, $vk5]}
+    @{[vsm4r_vs $vdata, $vk4]}
+    @{[vsm4r_vs $vdata, $vk3]}
+    @{[vsm4r_vs $vdata, $vk2]}
+    @{[vsm4r_vs $vdata, $vk1]}
+    @{[vsm4r_vs $vdata, $vk0]}
+
+    # Save the plaintext (in reverse element order)
+    @{[vrev8_v $vdata, $vdata]}
+    li $stride, -4
+    addi $out, $out, 12
+    @{[vsse32_v $vdata, $out, $stride]}
+
+    ret
+.size rv64i_zvksed_sm4_decrypt,.-rv64i_zvksed_sm4_decrypt
+___
+}
+
+$code .= <<___;
+# Family Key (little-endian 32-bit chunks)
+.p2align 3
+FK:
+    .word 0xA3B1BAC6, 0x56AA3350, 0x677D9197, 0xB27022DC
+.size FK,.-FK
+___
+
+print $code;
+
+close STDOUT or die "error closing STDOUT: $!";

+ 4 - 0
crypto/sm4/build.info

@@ -4,6 +4,9 @@ IF[{- !$disabled{asm} -}]
   $SM4DEF_aarch64=SM4_ASM VPSM4_ASM
   $SM4ASM_aarch64=sm4-armv8.S vpsm4-armv8.S vpsm4_ex-armv8.S
 
+  $SM4DEF_riscv64=SM4_ASM
+  $SM4ASM_riscv64=sm4-riscv64-zvksed.s
+
   # Now that we have defined all the arch specific variables, use the
   # appropriate one, and define the appropriate macros
   IF[$SM4ASM_{- $target{asm_arch} -}]
@@ -34,3 +37,4 @@ GENERATE[vpsm4_ex-armv8.S]=asm/vpsm4_ex-armv8.pl
 INCLUDE[sm4-armv8.o]=..
 INCLUDE[vpsm4-armv8.o]=..
 INCLUDE[vpsm4_ex-armv8.o]=..
+GENERATE[sm4-riscv64-zvksed.s]=asm/sm4-riscv64-zvksed.pl

+ 1 - 0
include/crypto/riscv_arch.def

@@ -39,6 +39,7 @@ RISCV_DEFINE_CAP(ZVKG, 0, 17)
 RISCV_DEFINE_CAP(ZVKNED, 0, 18)
 RISCV_DEFINE_CAP(ZVKNHA, 0, 19)
 RISCV_DEFINE_CAP(ZVKNHB, 0, 20)
+RISCV_DEFINE_CAP(ZVKSED, 0, 21)
 
 /*
  * In the future ...

+ 1 - 0
include/crypto/riscv_arch.h

@@ -62,6 +62,7 @@ static const size_t kRISCVNumCaps =
 #define RISCV_HAS_ZKND_AND_ZKNE() (RISCV_HAS_ZKND() && RISCV_HAS_ZKNE())
 #define RISCV_HAS_ZVBB_AND_ZVKNHA() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHA())
 #define RISCV_HAS_ZVBB_AND_ZVKNHB() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKNHB())
+#define RISCV_HAS_ZVBB_AND_ZVKSED() (RISCV_HAS_ZVBB() && RISCV_HAS_ZVKSED())
 
 /*
  * Get the size of a vector register in bits (VLEN).

+ 13 - 1
include/crypto/sm4_platform.h

@@ -38,7 +38,19 @@ static inline int vpsm4_ex_capable(void)
 #   define HWSM4_cbc_encrypt sm4_v8_cbc_encrypt
 #   define HWSM4_ecb_encrypt sm4_v8_ecb_encrypt
 #   define HWSM4_ctr32_encrypt_blocks sm4_v8_ctr32_encrypt_blocks
-#  endif
+#  elif defined(__riscv) && __riscv_xlen == 64
+/* RV64 support */
+#   include "riscv_arch.h"
+/* Zvksed extension (vector crypto SM4). */
+int rv64i_zvksed_sm4_set_encrypt_key(const unsigned char *userKey,
+                                     SM4_KEY *key);
+int rv64i_zvksed_sm4_set_decrypt_key(const unsigned char *userKey,
+                                     SM4_KEY *key);
+void rv64i_zvksed_sm4_encrypt(const unsigned char *in, unsigned char *out,
+                              const SM4_KEY *key);
+void rv64i_zvksed_sm4_decrypt(const unsigned char *in, unsigned char *out,
+                              const SM4_KEY *key);
+#  endif /* RV64 */
 # endif /* OPENSSL_CPUID_OBJ */
 
 # if defined(HWSM4_CAPABLE)
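
A minimal usage sketch for these entry points, driving a single 16-byte
block (signatures as declared above; SM4_KEY comes from crypto/sm4.h; the
wrapper name is illustrative):

    #include "crypto/sm4.h"

    static void sm4_zvksed_one_block(const unsigned char key[16],
                                     const unsigned char in[16],
                                     unsigned char out[16])
    {
        SM4_KEY ks;

        /* Both set-key routines return 1 on success (see the asm above). */
        rv64i_zvksed_sm4_set_encrypt_key(key, &ks);
        rv64i_zvksed_sm4_encrypt(in, out, &ks);
    }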

+ 1 - 0
providers/implementations/ciphers/cipher_sm4_ccm.h

@@ -10,6 +10,7 @@
 #include "crypto/sm4.h"
 #include "prov/ciphercommon.h"
 #include "prov/ciphercommon_ccm.h"
+#include "crypto/sm4_platform.h"
 
 typedef struct prov_sm4_ccm_ctx_st {
     PROV_CCM_CTX base; /* Must be first */

+ 4 - 0
providers/implementations/ciphers/cipher_sm4_ccm_hw.c

@@ -59,7 +59,11 @@ static const PROV_CCM_HW ccm_sm4 = {
     ossl_ccm_generic_gettag
 };
 
+#if defined(__riscv) && __riscv_xlen == 64
+# include "cipher_sm4_ccm_hw_rv64i.inc"
+#else
 const PROV_CCM_HW *ossl_prov_sm4_hw_ccm(size_t keybits)
 {
     return &ccm_sm4;
 }
+#endif

+ 41 - 0
providers/implementations/ciphers/cipher_sm4_ccm_hw_rv64i.inc

@@ -0,0 +1,41 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*-
+ * RV64I ZVKSED support for SM4 CCM.
+ * This file is included by cipher_sm4_ccm_hw.c
+ */
+
+static int rv64i_zvksed_sm4_ccm_initkey(PROV_CCM_CTX *ctx,
+                                        const unsigned char *key,
+                                        size_t keylen)
+{
+    PROV_SM4_CCM_CTX *actx = (PROV_SM4_CCM_CTX *)ctx;
+
+    SM4_HW_CCM_SET_KEY_FN(rv64i_zvksed_sm4_set_encrypt_key,
+                          rv64i_zvksed_sm4_encrypt, NULL, NULL);
+    return 1;
+}
+
+static const PROV_CCM_HW rv64i_zvksed_sm4_ccm = {
+    rv64i_zvksed_sm4_ccm_initkey,
+    ossl_ccm_generic_setiv,
+    ossl_ccm_generic_setaad,
+    ossl_ccm_generic_auth_encrypt,
+    ossl_ccm_generic_auth_decrypt,
+    ossl_ccm_generic_gettag
+};
+
+const PROV_CCM_HW *ossl_prov_sm4_hw_ccm(size_t keybits)
+{
+    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+        return &rv64i_zvksed_sm4_ccm;
+    else
+        return &ccm_sm4;
+}

+ 4 - 0
providers/implementations/ciphers/cipher_sm4_gcm_hw.c

@@ -89,7 +89,11 @@ static const PROV_GCM_HW sm4_gcm = {
     ossl_gcm_one_shot
 };
 
+#if defined(__riscv) && __riscv_xlen == 64
+# include "cipher_sm4_gcm_hw_rv64i.inc"
+#else
 const PROV_GCM_HW *ossl_prov_sm4_hw_gcm(size_t keybits)
 {
     return &sm4_gcm;
 }
+#endif

+ 42 - 0
providers/implementations/ciphers/cipher_sm4_gcm_hw_rv64i.inc

@@ -0,0 +1,42 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*-
+ * RISC-V 64 ZVKSED support for SM4 GCM.
+ * This file is included by cipher_sm4_gcm_hw.c
+ */
+
+static int rv64i_zvksed_sm4_gcm_initkey(PROV_GCM_CTX *ctx,
+                                        const unsigned char *key,
+                                        size_t keylen)
+{
+    PROV_SM4_GCM_CTX *actx = (PROV_SM4_GCM_CTX *)ctx;
+    SM4_KEY *ks = &actx->ks.ks;
+
+    SM4_GCM_HW_SET_KEY_CTR_FN(ks, rv64i_zvksed_sm4_set_encrypt_key,
+                              rv64i_zvksed_sm4_encrypt, NULL);
+    return 1;
+}
+
+static const PROV_GCM_HW rv64i_zvksed_sm4_gcm = {
+    rv64i_zvksed_sm4_gcm_initkey,
+    ossl_gcm_setiv,
+    ossl_gcm_aad_update,
+    hw_gcm_cipher_update,
+    ossl_gcm_cipher_final,
+    ossl_gcm_one_shot
+};
+
+const PROV_GCM_HW *ossl_prov_sm4_hw_gcm(size_t keybits)
+{
+    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+        return &rv64i_zvksed_sm4_gcm;
+    else
+        return &sm4_gcm;
+}

+ 10 - 0
providers/implementations/ciphers/cipher_sm4_hw.c

@@ -127,11 +127,21 @@ static const PROV_CIPHER_HW sm4_##mode = {                                     \
     ossl_cipher_hw_generic_##mode,                                             \
     cipher_hw_sm4_copyctx                                                      \
 };                                                                             \
+PROV_CIPHER_HW_declare(mode)                                                   \
 const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_##mode(size_t keybits)           \
 {                                                                              \
+    PROV_CIPHER_HW_select(mode)                                                \
     return &sm4_##mode;                                                        \
 }
 
+#if defined(__riscv) && __riscv_xlen == 64
+# include "cipher_sm4_hw_rv64i.inc"
+#else
+/* The generic case */
+# define PROV_CIPHER_HW_declare(mode)
+# define PROV_CIPHER_HW_select(mode)
+#endif
+
 PROV_CIPHER_HW_sm4_mode(cbc)
 PROV_CIPHER_HW_sm4_mode(ecb)
 PROV_CIPHER_HW_sm4_mode(ofb128)
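
For clarity: on RV64 the two hook macros make each per-mode getter expand
roughly as below (sketched for cbc; sm4_cbc is the generic table defined by
the same PROV_CIPHER_HW_sm4_mode macro):

    const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_cbc(size_t keybits)
    {
        /* Prefer the Zvksed implementation when the CPU supports it. */
        if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
            return &rv64i_zvksed_sm4_cbc;
        return &sm4_cbc;
    }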

+ 52 - 0
providers/implementations/ciphers/cipher_sm4_hw_rv64i.inc

@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*-
+ * RV64 ZVKSED support for SM4 modes ecb, cbc, ofb, cfb, ctr.
+ * This file is included by cipher_sm4_hw.c
+ */
+
+#define cipher_hw_rv64i_zvksed_sm4_cbc    ossl_cipher_hw_generic_cbc
+#define cipher_hw_rv64i_zvksed_sm4_ecb    ossl_cipher_hw_generic_ecb
+#define cipher_hw_rv64i_zvksed_sm4_ofb128 ossl_cipher_hw_generic_ofb128
+#define cipher_hw_rv64i_zvksed_sm4_cfb128 ossl_cipher_hw_generic_cfb128
+#define cipher_hw_rv64i_zvksed_sm4_ctr    ossl_cipher_hw_generic_ctr
+
+static int cipher_hw_rv64i_zvksed_sm4_initkey(PROV_CIPHER_CTX *ctx,
+                                              const unsigned char *key,
+                                              size_t keylen)
+{
+    PROV_SM4_CTX *sctx = (PROV_SM4_CTX *)ctx;
+    SM4_KEY *ks = &sctx->ks.ks;
+
+    ctx->ks = ks;
+    if (ctx->enc
+            || (ctx->mode != EVP_CIPH_ECB_MODE
+                && ctx->mode != EVP_CIPH_CBC_MODE)) {
+        rv64i_zvksed_sm4_set_encrypt_key(key, ks);
+        ctx->block = (block128_f) rv64i_zvksed_sm4_encrypt;
+        ctx->stream.cbc = NULL;
+    } else {
+        rv64i_zvksed_sm4_set_decrypt_key(key, ks);
+        ctx->block = (block128_f) rv64i_zvksed_sm4_decrypt;
+        ctx->stream.cbc = NULL;
+    }
+
+    return 1;
+}
+
+#define PROV_CIPHER_HW_declare(mode)                                   \
+static const PROV_CIPHER_HW rv64i_zvksed_sm4_##mode = {                \
+    cipher_hw_rv64i_zvksed_sm4_initkey,                                \
+    cipher_hw_rv64i_zvksed_sm4_##mode,                                 \
+    cipher_hw_sm4_copyctx                                              \
+};
+#define PROV_CIPHER_HW_select(mode)                                    \
+if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)                \
+    return &rv64i_zvksed_sm4_##mode;

+ 5 - 0
providers/implementations/ciphers/cipher_sm4_xts_hw.c

@@ -88,7 +88,12 @@ static const PROV_CIPHER_HW sm4_generic_xts = {
     NULL,
     cipher_hw_sm4_xts_copyctx
 };
+
+#if defined(__riscv) && __riscv_xlen == 64
+# include "cipher_sm4_xts_hw_rv64i.inc"
+#else
 const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_xts(size_t keybits)
 {
     return &sm4_generic_xts;
 }
+#endif

+ 43 - 0
providers/implementations/ciphers/cipher_sm4_xts_hw_rv64i.inc

@@ -0,0 +1,43 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*-
+ * RISC-V 64 ZVKSED support for SM4 XTS.
+ * This file is included by cipher_sm4_xts_hw.c
+ */
+
+static int rv64i_zvksed_sm4_xts_initkey(PROV_CIPHER_CTX *ctx,
+                                        const unsigned char *key,
+                                        size_t keylen)
+{
+    PROV_SM4_XTS_CTX *xctx = (PROV_SM4_XTS_CTX *)ctx;
+    OSSL_xts_stream_fn stream_fn = NULL;
+    OSSL_xts_stream_fn stream_gb_fn = NULL;
+
+    XTS_SET_KEY_FN(rv64i_zvksed_sm4_set_encrypt_key,
+                   rv64i_zvksed_sm4_set_decrypt_key,
+                   rv64i_zvksed_sm4_encrypt,
+                   rv64i_zvksed_sm4_decrypt,
+                   stream_fn, stream_gb_fn);
+    return 1;
+}
+
+static const PROV_CIPHER_HW rv64i_zvksed_sm4_xts = {
+    rv64i_zvksed_sm4_xts_initkey,
+    NULL,
+    cipher_hw_sm4_xts_copyctx
+};
+
+const PROV_CIPHER_HW *ossl_prov_cipher_hw_sm4_xts(size_t keybits)
+{
+    if (RISCV_HAS_ZVBB_AND_ZVKSED() && riscv_vlen() >= 128)
+        return &rv64i_zvksed_sm4_xts;
+    else
+        return &sm4_generic_xts;
+}