/*
 * Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use some deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

/* Required for vmsplice */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <openssl/engine.h>
#include <openssl/async.h>
#include <openssl/err.h>
#include "internal/nelem.h"

#include <sys/socket.h>
#include <linux/version.h>

#define K_MAJ 4
#define K_MIN1 1
#define K_MIN2 0
#if LINUX_VERSION_CODE < KERNEL_VERSION(K_MAJ, K_MIN1, K_MIN2) || \
    !defined(AF_ALG)
# ifndef PEDANTIC
#  warning "AFALG ENGINE requires Kernel Headers >= 4.1.0"
#  warning "Skipping Compilation of AFALG engine"
# endif
void engine_load_afalg_int(void);
void engine_load_afalg_int(void)
{
}
#else

# include <linux/if_alg.h>
# include <fcntl.h>
# include <sys/utsname.h>

# include <linux/aio_abi.h>
# include <sys/syscall.h>
# include <errno.h>

# include "e_afalg.h"
# include "e_afalg_err.c"

# ifndef SOL_ALG
#  define SOL_ALG 279
# endif

# ifdef ALG_ZERO_COPY
#  ifndef SPLICE_F_GIFT
#   define SPLICE_F_GIFT (0x08)
#  endif
# endif

# define ALG_AES_IV_LEN 16
# define ALG_IV_LEN(len) (sizeof(struct af_alg_iv) + (len))
# define ALG_OP_TYPE unsigned int
# define ALG_OP_LEN (sizeof(ALG_OP_TYPE))

# ifdef OPENSSL_NO_DYNAMIC_ENGINE
void engine_load_afalg_int(void);
# endif

/* Local Linkage Functions */
static int afalg_init_aio(afalg_aio *aio);
static int afalg_fin_cipher_aio(afalg_aio *ptr, int sfd,
                                unsigned char *buf, size_t len);
static int afalg_create_sk(afalg_ctx *actx, const char *ciphertype,
                           const char *ciphername);
static int afalg_destroy(ENGINE *e);
static int afalg_init(ENGINE *e);
static int afalg_finish(ENGINE *e);
static const EVP_CIPHER *afalg_aes_cbc(int nid);
static cbc_handles *get_cipher_handle(int nid);
static int afalg_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                         const int **nids, int nid);
static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                             const unsigned char *iv, int enc);
static int afalg_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                           const unsigned char *in, size_t inl);
static int afalg_cipher_cleanup(EVP_CIPHER_CTX *ctx);
static int afalg_chk_platform(void);

/* Engine Id and Name */
static const char *engine_afalg_id = "afalg";
static const char *engine_afalg_name = "AFALG engine support";

static int afalg_cipher_nids[] = {
    NID_aes_128_cbc,
    NID_aes_192_cbc,
    NID_aes_256_cbc,
};

static cbc_handles cbc_handle[] = {{AES_KEY_SIZE_128, NULL},
                                   {AES_KEY_SIZE_192, NULL},
                                   {AES_KEY_SIZE_256, NULL}};

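/*
 * Minimal wrappers for the Linux AIO and eventfd primitives used below.
 * They are invoked directly via syscall(2) rather than through libc
 * wrappers; io_read() submits requests with io_submit.
 */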
static ossl_inline int io_setup(unsigned n, aio_context_t *ctx)
{
    return syscall(__NR_io_setup, n, ctx);
}

static ossl_inline int eventfd(int n)
{
    return syscall(__NR_eventfd2, n, 0);
}

static ossl_inline int io_destroy(aio_context_t ctx)
{
    return syscall(__NR_io_destroy, ctx);
}

static ossl_inline int io_read(aio_context_t ctx, long n, struct iocb **iocb)
{
    return syscall(__NR_io_submit, ctx, n, iocb);
}

/* A version of 'struct timespec' with 32-bit time_t and nanoseconds. */
struct __timespec32
{
    __kernel_long_t tv_sec;
    __kernel_long_t tv_nsec;
};

static ossl_inline int io_getevents(aio_context_t ctx, long min, long max,
                                    struct io_event *events,
                                    struct timespec *timeout)
{
#if defined(__NR_io_pgetevents_time64)
    /* Check if we are a 32-bit architecture with a 64-bit time_t */
    if (sizeof(*timeout) != sizeof(struct __timespec32)) {
        int ret = syscall(__NR_io_pgetevents_time64, ctx, min, max, events,
                          timeout, NULL);
        if (ret == 0 || errno != ENOSYS)
            return ret;
    }
#endif

#if defined(__NR_io_getevents)
    if (sizeof(*timeout) == sizeof(struct __timespec32))
        /*
         * time_t matches our architecture length, we can just use
         * __NR_io_getevents
         */
        return syscall(__NR_io_getevents, ctx, min, max, events, timeout);
    else {
        /*
         * We don't have __NR_io_pgetevents_time64, but we are using a
         * 64-bit time_t on a 32-bit architecture. If we can fit the
         * timeout value in a 32-bit time_t, then let's do that
         * and then use the __NR_io_getevents syscall.
         */
        if (timeout && timeout->tv_sec == (long)timeout->tv_sec) {
            struct __timespec32 ts32;

            ts32.tv_sec = (__kernel_long_t) timeout->tv_sec;
            ts32.tv_nsec = (__kernel_long_t) timeout->tv_nsec;
            return syscall(__NR_io_getevents, ctx, min, max, events, &ts32);
        } else {
            return syscall(__NR_io_getevents, ctx, min, max, events, NULL);
        }
    }
#endif

    errno = ENOSYS;
    return -1;
}

static void afalg_waitfd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key,
                                 OSSL_ASYNC_FD waitfd, void *custom)
{
    close(waitfd);
}

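/*
 * Set up completion notification for AIO requests. When running inside an
 * ASYNC_JOB, an eventfd is created (or reused) and registered with the
 * job's ASYNC_WAIT_CTX; otherwise a plain eventfd is used in sync mode.
 */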
static int afalg_setup_async_event_notification(afalg_aio *aio)
{
    ASYNC_JOB *job;
    ASYNC_WAIT_CTX *waitctx;
    void *custom = NULL;
    int ret;

    if ((job = ASYNC_get_current_job()) != NULL) {
        /* Async mode */
        waitctx = ASYNC_get_wait_ctx(job);
        if (waitctx == NULL) {
            ALG_WARN("%s(%d): ASYNC_get_wait_ctx error", __FILE__, __LINE__);
            return 0;
        }
        /* Get waitfd from ASYNC_WAIT_CTX if it is already set */
        ret = ASYNC_WAIT_CTX_get_fd(waitctx, engine_afalg_id,
                                    &aio->efd, &custom);
        if (ret == 0) {
            /*
             * waitfd is not set in ASYNC_WAIT_CTX, create a new one
             * and set it. efd will be signaled when AIO operation completes
             */
            aio->efd = eventfd(0);
            if (aio->efd == -1) {
                ALG_PERR("%s(%d): Failed to get eventfd : ", __FILE__,
                         __LINE__);
                AFALGerr(AFALG_F_AFALG_SETUP_ASYNC_EVENT_NOTIFICATION,
                         AFALG_R_EVENTFD_FAILED);
                return 0;
            }
            ret = ASYNC_WAIT_CTX_set_wait_fd(waitctx, engine_afalg_id,
                                             aio->efd, custom,
                                             afalg_waitfd_cleanup);
            if (ret == 0) {
                ALG_WARN("%s(%d): Failed to set wait fd", __FILE__, __LINE__);
                close(aio->efd);
                return 0;
            }
            /* make fd non-blocking in async mode */
            if (fcntl(aio->efd, F_SETFL, O_NONBLOCK) != 0) {
                ALG_WARN("%s(%d): Failed to set event fd as NONBLOCKING",
                         __FILE__, __LINE__);
            }
        }
        aio->mode = MODE_ASYNC;
    } else {
        /* Sync mode */
        aio->efd = eventfd(0);
        if (aio->efd == -1) {
            ALG_PERR("%s(%d): Failed to get eventfd : ", __FILE__, __LINE__);
            AFALGerr(AFALG_F_AFALG_SETUP_ASYNC_EVENT_NOTIFICATION,
                     AFALG_R_EVENTFD_FAILED);
            return 0;
        }
        aio->mode = MODE_SYNC;
    }
    return 1;
}

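/* Create the kernel AIO context (io_setup) and reset the AIO bookkeeping. */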
static int afalg_init_aio(afalg_aio *aio)
{
    int r = -1;

    /* Initialise for AIO */
    aio->aio_ctx = 0;
    r = io_setup(MAX_INFLIGHTS, &aio->aio_ctx);
    if (r < 0) {
        ALG_PERR("%s(%d): io_setup error : ", __FILE__, __LINE__);
        AFALGerr(AFALG_F_AFALG_INIT_AIO, AFALG_R_IO_SETUP_FAILED);
        return 0;
    }

    memset(aio->cbt, 0, sizeof(aio->cbt));
    aio->efd = -1;
    aio->mode = MODE_UNINIT;

    return 1;
}

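/*
 * Submit an AIO read on the AF_ALG operation socket to retrieve the cipher
 * result into buf, then wait for completion via the eventfd and
 * io_getevents, resubmitting up to three times if the kernel returns -EBUSY.
 */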
static int afalg_fin_cipher_aio(afalg_aio *aio, int sfd, unsigned char *buf,
                                size_t len)
{
    int r;
    int retry = 0;
    unsigned int done = 0;
    struct iocb *cb;
    struct timespec timeout;
    struct io_event events[MAX_INFLIGHTS];
    u_int64_t eval = 0;

    timeout.tv_sec = 0;
    timeout.tv_nsec = 0;

    /* if efd has not been initialised yet do it here */
    if (aio->mode == MODE_UNINIT) {
        r = afalg_setup_async_event_notification(aio);
        if (r == 0)
            return 0;
    }

    cb = &(aio->cbt[0 % MAX_INFLIGHTS]);
    memset(cb, '\0', sizeof(*cb));
    cb->aio_fildes = sfd;
    cb->aio_lio_opcode = IOCB_CMD_PREAD;
    /*
     * The pointer has to be converted to unsigned value first to avoid
     * sign extension on cast to 64 bit value in 32-bit builds
     */
    cb->aio_buf = (size_t)buf;
    cb->aio_offset = 0;
    cb->aio_data = 0;
    cb->aio_nbytes = len;
    cb->aio_flags = IOCB_FLAG_RESFD;
    cb->aio_resfd = aio->efd;

    /*
     * Perform AIO read on AFALG socket, this in turn performs an async
     * crypto operation in kernel space
     */
    r = io_read(aio->aio_ctx, 1, &cb);
    if (r < 0) {
        ALG_PWARN("%s(%d): io_read failed : ", __FILE__, __LINE__);
        return 0;
    }

    do {
        /* While AIO read is being performed pause job */
        ASYNC_pause_job();

        /* Check for completion of AIO read */
        r = read(aio->efd, &eval, sizeof(eval));
        if (r < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                continue;
            ALG_PERR("%s(%d): read failed for event fd : ", __FILE__, __LINE__);
            return 0;
        } else if (r == 0 || eval <= 0) {
            ALG_WARN("%s(%d): eventfd read %d bytes, eval = %lu\n", __FILE__,
                     __LINE__, r, eval);
        }
        if (eval > 0) {

#ifdef OSSL_SANITIZE_MEMORY
            /*
             * In a memory sanitiser build, the changes to memory made by the
             * system call aren't reliably detected. By initialising the
             * memory here, the sanitiser is told that they are okay.
             */
            memset(events, 0, sizeof(events));
#endif

            /* Get results of AIO read */
            r = io_getevents(aio->aio_ctx, 1, MAX_INFLIGHTS,
                             events, &timeout);
            if (r > 0) {
                /*
                 * events.res indicates the actual status of the operation.
                 * Handle the error condition first.
                 */
                if (events[0].res < 0) {
                    /*
                     * Underlying operation cannot be completed at the time
                     * of previous submission. Resubmit for the operation.
                     */
                    if (events[0].res == -EBUSY && retry++ < 3) {
                        r = io_read(aio->aio_ctx, 1, &cb);
                        if (r < 0) {
                            ALG_PERR("%s(%d): retry %d for io_read failed : ",
                                     __FILE__, __LINE__, retry);
                            return 0;
                        }
                        continue;
                    } else {
                        /*
                         * Retries exceeded for -EBUSY or unrecoverable error
                         * condition for this instance of operation.
                         */
                        ALG_WARN
                            ("%s(%d): Crypto Operation failed with code %lld\n",
                             __FILE__, __LINE__, events[0].res);
                        return 0;
                    }
                }
                /* Operation successful. */
                done = 1;
            } else if (r < 0) {
                ALG_PERR("%s(%d): io_getevents failed : ", __FILE__, __LINE__);
                return 0;
            } else {
                ALG_WARN("%s(%d): io_getevents read 0 bytes\n", __FILE__,
                         __LINE__);
            }
        }
    } while (!done);

    return 1;
}

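/*
 * Helpers that fill the ancillary-data (cmsg) headers carrying the cipher
 * direction and IV, and set the key on the bound AF_ALG socket.
 */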
static ossl_inline void afalg_set_op_sk(struct cmsghdr *cmsg,
                                        const ALG_OP_TYPE op)
{
    cmsg->cmsg_level = SOL_ALG;
    cmsg->cmsg_type = ALG_SET_OP;
    cmsg->cmsg_len = CMSG_LEN(ALG_OP_LEN);
    memcpy(CMSG_DATA(cmsg), &op, ALG_OP_LEN);
}

static void afalg_set_iv_sk(struct cmsghdr *cmsg, const unsigned char *iv,
                            const unsigned int len)
{
    struct af_alg_iv *aiv;

    cmsg->cmsg_level = SOL_ALG;
    cmsg->cmsg_type = ALG_SET_IV;
    cmsg->cmsg_len = CMSG_LEN(ALG_IV_LEN(len));
    aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
    aiv->ivlen = len;
    memcpy(aiv->iv, iv, len);
}

static ossl_inline int afalg_set_key(afalg_ctx *actx, const unsigned char *key,
                                     const int klen)
{
    int ret;

    ret = setsockopt(actx->bfd, SOL_ALG, ALG_SET_KEY, key, klen);
    if (ret < 0) {
        ALG_PERR("%s(%d): Failed to set socket option : ", __FILE__, __LINE__);
        AFALGerr(AFALG_F_AFALG_SET_KEY, AFALG_R_SOCKET_SET_KEY_FAILED);
        return 0;
    }
    return 1;
}

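/*
 * Create and bind an AF_ALG socket for the given algorithm type/name
 * (e.g. "skcipher"/"cbc(aes)") and accept the operation descriptor on
 * which crypto requests are sent.
 */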
static int afalg_create_sk(afalg_ctx *actx, const char *ciphertype,
                           const char *ciphername)
{
    struct sockaddr_alg sa;
    int r = -1;

    actx->bfd = actx->sfd = -1;

    memset(&sa, 0, sizeof(sa));
    sa.salg_family = AF_ALG;
    OPENSSL_strlcpy((char *) sa.salg_type, ciphertype, sizeof(sa.salg_type));
    OPENSSL_strlcpy((char *) sa.salg_name, ciphername, sizeof(sa.salg_name));

    actx->bfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    if (actx->bfd == -1) {
        ALG_PERR("%s(%d): Failed to open socket : ", __FILE__, __LINE__);
        AFALGerr(AFALG_F_AFALG_CREATE_SK, AFALG_R_SOCKET_CREATE_FAILED);
        goto err;
    }

    r = bind(actx->bfd, (struct sockaddr *)&sa, sizeof(sa));
    if (r < 0) {
        ALG_PERR("%s(%d): Failed to bind socket : ", __FILE__, __LINE__);
        AFALGerr(AFALG_F_AFALG_CREATE_SK, AFALG_R_SOCKET_BIND_FAILED);
        goto err;
    }

    actx->sfd = accept(actx->bfd, NULL, 0);
    if (actx->sfd < 0) {
        ALG_PERR("%s(%d): Socket Accept Failed : ", __FILE__, __LINE__);
        AFALGerr(AFALG_F_AFALG_CREATE_SK, AFALG_R_SOCKET_ACCEPT_FAILED);
        goto err;
    }

    return 1;

 err:
    if (actx->bfd >= 0)
        close(actx->bfd);
    if (actx->sfd >= 0)
        close(actx->sfd);
    actx->bfd = actx->sfd = -1;
    return 0;
}

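/*
 * Send the IV, cipher direction and input data to the kernel over the
 * operation socket. With ALG_ZERO_COPY defined, the input buffer is handed
 * over via vmsplice()/splice() instead of being copied by sendmsg().
 */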
static int afalg_start_cipher_sk(afalg_ctx *actx, const unsigned char *in,
                                 size_t inl, const unsigned char *iv,
                                 unsigned int enc)
{
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct iovec iov;
    ssize_t sbytes;
# ifdef ALG_ZERO_COPY
    int ret;
# endif
    char cbuf[CMSG_SPACE(ALG_IV_LEN(ALG_AES_IV_LEN)) + CMSG_SPACE(ALG_OP_LEN)];

    memset(&msg, 0, sizeof(msg));
    memset(cbuf, 0, sizeof(cbuf));
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);

    /*
     * cipher direction (i.e. encrypt or decrypt) and iv are sent to the
     * kernel as part of sendmsg()'s ancillary data
     */
    cmsg = CMSG_FIRSTHDR(&msg);
    afalg_set_op_sk(cmsg, enc);
    cmsg = CMSG_NXTHDR(&msg, cmsg);
    afalg_set_iv_sk(cmsg, iv, ALG_AES_IV_LEN);

    /* iov that describes input data */
    iov.iov_base = (unsigned char *)in;
    iov.iov_len = inl;

    msg.msg_flags = MSG_MORE;

# ifdef ALG_ZERO_COPY
    /*
     * ZERO_COPY mode
     * Works best when buffer is 4k aligned
     * OPENS: out of place processing (i.e. out != in)
     */

    /* Input data is not sent as part of call to sendmsg() */
    msg.msg_iovlen = 0;
    msg.msg_iov = NULL;

    /* Sendmsg() sends iv and cipher direction to the kernel */
    sbytes = sendmsg(actx->sfd, &msg, 0);
    if (sbytes < 0) {
        ALG_PERR("%s(%d): sendmsg failed for zero copy cipher operation : ",
                 __FILE__, __LINE__);
        return 0;
    }

    /*
     * vmsplice and splice are used to pin the user space input buffer for
     * kernel space processing avoiding copies from user to kernel space
     */
    ret = vmsplice(actx->zc_pipe[1], &iov, 1, SPLICE_F_GIFT);
    if (ret < 0) {
        ALG_PERR("%s(%d): vmsplice failed : ", __FILE__, __LINE__);
        return 0;
    }

    ret = splice(actx->zc_pipe[0], NULL, actx->sfd, NULL, inl, 0);
    if (ret < 0) {
        ALG_PERR("%s(%d): splice failed : ", __FILE__, __LINE__);
        return 0;
    }
# else
    msg.msg_iovlen = 1;
    msg.msg_iov = &iov;

    /* Sendmsg() sends iv, cipher direction and input data to the kernel */
    sbytes = sendmsg(actx->sfd, &msg, 0);
    if (sbytes < 0) {
        ALG_PERR("%s(%d): sendmsg failed for cipher operation : ", __FILE__,
                 __LINE__);
        return 0;
    }

    if (sbytes != (ssize_t) inl) {
        ALG_WARN("Cipher operation send bytes %zd != inlen %zd\n", sbytes,
                 inl);
        return 0;
    }
# endif

    return 1;
}

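/*
 * EVP init callback: validate the cipher/IV parameters, create the AF_ALG
 * socket, set the key and prepare the AIO context used for the actual
 * cipher operations.
 */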
static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                             const unsigned char *iv, int enc)
{
    int ciphertype;
    int ret, len;
    afalg_ctx *actx;
    const char *ciphername;

    if (ctx == NULL || key == NULL) {
        ALG_WARN("%s(%d): Null Parameter\n", __FILE__, __LINE__);
        return 0;
    }

    if (EVP_CIPHER_CTX_get0_cipher(ctx) == NULL) {
        ALG_WARN("%s(%d): Cipher object NULL\n", __FILE__, __LINE__);
        return 0;
    }

    actx = EVP_CIPHER_CTX_get_cipher_data(ctx);
    if (actx == NULL) {
        ALG_WARN("%s(%d): Cipher data NULL\n", __FILE__, __LINE__);
        return 0;
    }

    ciphertype = EVP_CIPHER_CTX_get_nid(ctx);
    switch (ciphertype) {
    case NID_aes_128_cbc:
    case NID_aes_192_cbc:
    case NID_aes_256_cbc:
        ciphername = "cbc(aes)";
        break;
    default:
        ALG_WARN("%s(%d): Unsupported Cipher type %d\n", __FILE__, __LINE__,
                 ciphertype);
        return 0;
    }

    if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_get_iv_length(ctx)) {
        ALG_WARN("%s(%d): Unsupported IV length :%d\n", __FILE__, __LINE__,
                 EVP_CIPHER_CTX_get_iv_length(ctx));
        return 0;
    }

    /* Setup AFALG socket for crypto processing */
    ret = afalg_create_sk(actx, "skcipher", ciphername);
    if (ret < 1)
        return 0;

    if ((len = EVP_CIPHER_CTX_get_key_length(ctx)) <= 0)
        goto err;
    ret = afalg_set_key(actx, key, len);
    if (ret < 1)
        goto err;

    /* Setup AIO ctx to allow async AFALG crypto processing */
    if (afalg_init_aio(&actx->aio) == 0)
        goto err;

# ifdef ALG_ZERO_COPY
    pipe(actx->zc_pipe);
# endif

    actx->init_done = MAGIC_INIT_NUM;

    return 1;

 err:
    close(actx->sfd);
    close(actx->bfd);
    return 0;
}

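/*
 * EVP do_cipher callback: push the input to the kernel, collect the result
 * via AIO and update the EVP IV so that successive calls chain correctly
 * in CBC mode.
 */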
static int afalg_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                           const unsigned char *in, size_t inl)
{
    afalg_ctx *actx;
    int ret;
    char nxtiv[ALG_AES_IV_LEN] = { 0 };

    if (ctx == NULL || out == NULL || in == NULL) {
        ALG_WARN("NULL parameter passed to function %s(%d)\n", __FILE__,
                 __LINE__);
        return 0;
    }

    actx = (afalg_ctx *) EVP_CIPHER_CTX_get_cipher_data(ctx);
    if (actx == NULL || actx->init_done != MAGIC_INIT_NUM) {
        ALG_WARN("%s afalg ctx passed\n",
                 ctx == NULL ? "NULL" : "Uninitialised");
        return 0;
    }

    /*
     * set iv now for decrypt operation as the input buffer can be
     * overwritten for inplace operation where in = out.
     */
    if (EVP_CIPHER_CTX_is_encrypting(ctx) == 0) {
        memcpy(nxtiv, in + (inl - ALG_AES_IV_LEN), ALG_AES_IV_LEN);
    }

    /* Send input data to kernel space */
    ret = afalg_start_cipher_sk(actx, (unsigned char *)in, inl,
                                EVP_CIPHER_CTX_iv(ctx),
                                EVP_CIPHER_CTX_is_encrypting(ctx));
    if (ret < 1) {
        return 0;
    }

    /* Perform async crypto operation in kernel space */
    ret = afalg_fin_cipher_aio(&actx->aio, actx->sfd, out, inl);
    if (ret < 1)
        return 0;

    if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), out + (inl - ALG_AES_IV_LEN),
               ALG_AES_IV_LEN);
    } else {
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), nxtiv, ALG_AES_IV_LEN);
    }

    return 1;
}

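/*
 * EVP cleanup callback: close the AF_ALG sockets (and zero-copy pipe, if
 * used), close the sync-mode eventfd and tear down the AIO context.
 */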
static int afalg_cipher_cleanup(EVP_CIPHER_CTX *ctx)
{
    afalg_ctx *actx;

    if (ctx == NULL) {
        ALG_WARN("NULL parameter passed to function %s(%d)\n", __FILE__,
                 __LINE__);
        return 0;
    }

    actx = (afalg_ctx *) EVP_CIPHER_CTX_get_cipher_data(ctx);
    if (actx == NULL || actx->init_done != MAGIC_INIT_NUM)
        return 1;

    close(actx->sfd);
    close(actx->bfd);
# ifdef ALG_ZERO_COPY
    close(actx->zc_pipe[0]);
    close(actx->zc_pipe[1]);
# endif
    /* close efd in sync mode, async mode is closed in afalg_waitfd_cleanup() */
    if (actx->aio.mode == MODE_SYNC)
        close(actx->aio.efd);
    io_destroy(actx->aio.aio_ctx);

    return 1;
}

static cbc_handles *get_cipher_handle(int nid)
{
    switch (nid) {
    case NID_aes_128_cbc:
        return &cbc_handle[AES_CBC_128];
    case NID_aes_192_cbc:
        return &cbc_handle[AES_CBC_192];
    case NID_aes_256_cbc:
        return &cbc_handle[AES_CBC_256];
    default:
        return NULL;
    }
}

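/*
 * Lazily create (and cache) the EVP_CIPHER method for the requested
 * AES-CBC NID, wiring it to the afalg_* callbacks above.
 */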
static const EVP_CIPHER *afalg_aes_cbc(int nid)
{
    cbc_handles *cipher_handle = get_cipher_handle(nid);

    if (cipher_handle == NULL)
        return NULL;
    if (cipher_handle->_hidden == NULL
        && ((cipher_handle->_hidden =
             EVP_CIPHER_meth_new(nid,
                                 AES_BLOCK_SIZE,
                                 cipher_handle->key_size)) == NULL
            || !EVP_CIPHER_meth_set_iv_length(cipher_handle->_hidden,
                                              AES_IV_LEN)
            || !EVP_CIPHER_meth_set_flags(cipher_handle->_hidden,
                                          EVP_CIPH_CBC_MODE |
                                          EVP_CIPH_FLAG_DEFAULT_ASN1)
            || !EVP_CIPHER_meth_set_init(cipher_handle->_hidden,
                                         afalg_cipher_init)
            || !EVP_CIPHER_meth_set_do_cipher(cipher_handle->_hidden,
                                              afalg_do_cipher)
            || !EVP_CIPHER_meth_set_cleanup(cipher_handle->_hidden,
                                            afalg_cipher_cleanup)
            || !EVP_CIPHER_meth_set_impl_ctx_size(cipher_handle->_hidden,
                                                  sizeof(afalg_ctx)))) {
        EVP_CIPHER_meth_free(cipher_handle->_hidden);
        cipher_handle->_hidden = NULL;
    }
    return cipher_handle->_hidden;
}

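/*
 * ENGINE ciphers callback: list the supported NIDs when cipher is NULL,
 * otherwise return the EVP_CIPHER for the requested NID.
 */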
static int afalg_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                         const int **nids, int nid)
{
    int r = 1;

    if (cipher == NULL) {
        *nids = afalg_cipher_nids;
        return (sizeof(afalg_cipher_nids) / sizeof(afalg_cipher_nids[0]));
    }

    switch (nid) {
    case NID_aes_128_cbc:
    case NID_aes_192_cbc:
    case NID_aes_256_cbc:
        *cipher = afalg_aes_cbc(nid);
        break;
    default:
        *cipher = NULL;
        r = 0;
    }
    return r;
}

static int bind_afalg(ENGINE *e)
{
    /* Ensure the afalg error handling is set up */
    unsigned short i;

    ERR_load_AFALG_strings();

    if (!ENGINE_set_id(e, engine_afalg_id)
        || !ENGINE_set_name(e, engine_afalg_name)
        || !ENGINE_set_destroy_function(e, afalg_destroy)
        || !ENGINE_set_init_function(e, afalg_init)
        || !ENGINE_set_finish_function(e, afalg_finish)) {
        AFALGerr(AFALG_F_BIND_AFALG, AFALG_R_INIT_FAILED);
        return 0;
    }

    /*
     * Create _hidden_aes_xxx_cbc by calling afalg_aes_xxx_cbc
     * now, as bind_afalg can only be called by one thread at a
     * time.
     */
    for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
        if (afalg_aes_cbc(afalg_cipher_nids[i]) == NULL) {
            AFALGerr(AFALG_F_BIND_AFALG, AFALG_R_INIT_FAILED);
            return 0;
        }
    }

    if (!ENGINE_set_ciphers(e, afalg_ciphers)) {
        AFALGerr(AFALG_F_BIND_AFALG, AFALG_R_INIT_FAILED);
        return 0;
    }
    return 1;
}

# ifndef OPENSSL_NO_DYNAMIC_ENGINE
static int bind_helper(ENGINE *e, const char *id)
{
    if (id && (strcmp(id, engine_afalg_id) != 0))
        return 0;

    if (!afalg_chk_platform())
        return 0;

    if (!bind_afalg(e))
        return 0;
    return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(bind_helper)
# endif

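/*
 * Check that the running kernel is at least 4.1.0 (parsed from uname) and
 * that an AF_ALG socket can actually be created.
 */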
static int afalg_chk_platform(void)
{
    int ret;
    int i;
    int kver[3] = { -1, -1, -1 };
    int sock;
    char *str;
    struct utsname ut;

    ret = uname(&ut);
    if (ret != 0) {
        AFALGerr(AFALG_F_AFALG_CHK_PLATFORM,
                 AFALG_R_FAILED_TO_GET_PLATFORM_INFO);
        return 0;
    }

    str = strtok(ut.release, ".");
    for (i = 0; i < 3 && str != NULL; i++) {
        kver[i] = atoi(str);
        str = strtok(NULL, ".");
    }

    if (KERNEL_VERSION(kver[0], kver[1], kver[2])
        < KERNEL_VERSION(K_MAJ, K_MIN1, K_MIN2)) {
        ALG_ERR("ASYNC AFALG not supported on this kernel (%d.%d.%d)\n",
                kver[0], kver[1], kver[2]);
        ALG_ERR("ASYNC AFALG requires kernel version %d.%d.%d or later\n",
                K_MAJ, K_MIN1, K_MIN2);
        AFALGerr(AFALG_F_AFALG_CHK_PLATFORM,
                 AFALG_R_KERNEL_DOES_NOT_SUPPORT_ASYNC_AFALG);
        return 0;
    }

    /* Test if we can actually create an AF_ALG socket */
    sock = socket(AF_ALG, SOCK_SEQPACKET, 0);
    if (sock == -1) {
        AFALGerr(AFALG_F_AFALG_CHK_PLATFORM, AFALG_R_SOCKET_CREATE_FAILED);
        return 0;
    }
    close(sock);

    return 1;
}

# ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *engine_afalg(void)
{
    ENGINE *ret = ENGINE_new();
    if (ret == NULL)
        return NULL;
    if (!bind_afalg(ret)) {
        ENGINE_free(ret);
        return NULL;
    }
    return ret;
}

void engine_load_afalg_int(void)
{
    ENGINE *toadd;

    if (!afalg_chk_platform())
        return;

    toadd = engine_afalg();
    if (toadd == NULL)
        return;
    ERR_set_mark();
    ENGINE_add(toadd);
    /*
     * If the "add" worked, it gets a structural reference. So either way, we
     * release our just-created reference.
     */
    ENGINE_free(toadd);
    /*
     * If the "add" didn't work, it was probably a conflict because it was
     * already added (eg. someone calling ENGINE_load_blah then calling
     * ENGINE_load_builtin_engines() perhaps).
     */
    ERR_pop_to_mark();
}
# endif

static int afalg_init(ENGINE *e)
{
    return 1;
}

static int afalg_finish(ENGINE *e)
{
    return 1;
}

static int free_cbc(void)
{
    short unsigned int i;

    for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
        EVP_CIPHER_meth_free(cbc_handle[i]._hidden);
        cbc_handle[i]._hidden = NULL;
    }
    return 1;
}

static int afalg_destroy(ENGINE *e)
{
    ERR_unload_AFALG_strings();
    free_cbc();
    return 1;
}

#endif                          /* KERNEL VERSION */