/* e_padlock.c - VIA PadLock engine for OpenSSL */
  1. /*
  2. * Copyright 2004-2023 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. */
  9. /*
  10. * This file uses the low level AES and engine functions (which are deprecated
  11. * for non-internal use) in order to implement the padlock engine AES ciphers.
  12. */
  13. #define OPENSSL_SUPPRESS_DEPRECATED
  14. #include <stdio.h>
  15. #include <string.h>
  16. #include <openssl/opensslconf.h>
  17. #include <openssl/crypto.h>
  18. #include <openssl/engine.h>
  19. #include <openssl/evp.h>
  20. #include <openssl/aes.h>
  21. #include <openssl/rand.h>
  22. #include <openssl/err.h>
  23. #include <openssl/modes.h>
  24. #ifndef OPENSSL_NO_PADLOCKENG
  25. /*
  26. * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it
  27. * doesn't exist elsewhere, but it even can't be compiled on other platforms!
  28. */
  29. # undef COMPILE_PADLOCKENG
  30. # if defined(PADLOCK_ASM)
  31. # define COMPILE_PADLOCKENG
  32. # ifdef OPENSSL_NO_DYNAMIC_ENGINE
  33. static ENGINE *ENGINE_padlock(void);
  34. # endif
  35. # endif
  36. # ifdef OPENSSL_NO_DYNAMIC_ENGINE
  37. void engine_load_padlock_int(void);
  38. void engine_load_padlock_int(void)
  39. {
  40. /* On non-x86 CPUs it just returns. */
  41. # ifdef COMPILE_PADLOCKENG
  42. ENGINE *toadd = ENGINE_padlock();
  43. if (!toadd)
  44. return;
  45. ERR_set_mark();
  46. ENGINE_add(toadd);
  47. /*
  48. * If the "add" worked, it gets a structural reference. So either way, we
  49. * release our just-created reference.
  50. */
  51. ENGINE_free(toadd);
  52. /*
  53. * If the "add" didn't work, it was probably a conflict because it was
  54. * already added (eg. someone calling ENGINE_load_blah then calling
  55. * ENGINE_load_builtin_engines() perhaps).
  56. */
  57. ERR_pop_to_mark();
  58. # endif
  59. }
  60. # endif
  61. # ifdef COMPILE_PADLOCKENG
  62. /* Function for ENGINE detection and control */
  63. static int padlock_available(void);
  64. static int padlock_init(ENGINE *e);
  65. /* RNG Stuff */
  66. static RAND_METHOD padlock_rand;
  67. /* Cipher Stuff */
  68. static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
  69. const int **nids, int nid);
  70. /* Engine names */
  71. static const char *padlock_id = "padlock";
  72. static char padlock_name[100];
  73. /* Available features */
  74. static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
  75. static int padlock_use_rng = 0; /* Random Number Generator */
  76. /* ===== Engine "management" functions ===== */
  77. /* Prepare the ENGINE structure for registration */
  78. static int padlock_bind_helper(ENGINE *e)
  79. {
  80. /* Check available features */
  81. padlock_available();
  82. /*
  83. * RNG is currently disabled for reasons discussed in commentary just
  84. * before padlock_rand_bytes function.
  85. */
  86. padlock_use_rng = 0;
  87. /* Generate a nice engine name with available features */
  88. BIO_snprintf(padlock_name, sizeof(padlock_name),
  89. "VIA PadLock (%s, %s)",
  90. padlock_use_rng ? "RNG" : "no-RNG",
  91. padlock_use_ace ? "ACE" : "no-ACE");
  92. /* Register everything or return with an error */
  93. if (!ENGINE_set_id(e, padlock_id) ||
  94. !ENGINE_set_name(e, padlock_name) ||
  95. !ENGINE_set_init_function(e, padlock_init) ||
  96. (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) ||
  97. (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) {
  98. return 0;
  99. }
  100. /* Everything looks good */
  101. return 1;
  102. }
  103. # ifdef OPENSSL_NO_DYNAMIC_ENGINE
  104. /* Constructor */
  105. static ENGINE *ENGINE_padlock(void)
  106. {
  107. ENGINE *eng = ENGINE_new();
  108. if (eng == NULL) {
  109. return NULL;
  110. }
  111. if (!padlock_bind_helper(eng)) {
  112. ENGINE_free(eng);
  113. return NULL;
  114. }
  115. return eng;
  116. }
  117. # endif
  118. /* Check availability of the engine */
  119. static int padlock_init(ENGINE *e)
  120. {
  121. return (padlock_use_rng || padlock_use_ace);
  122. }
  123. # ifndef AES_ASM
  124. static int padlock_aes_set_encrypt_key(const unsigned char *userKey,
  125. const int bits,
  126. AES_KEY *key);
  127. static int padlock_aes_set_decrypt_key(const unsigned char *userKey,
  128. const int bits,
  129. AES_KEY *key);
  130. # define AES_ASM
  131. # define AES_set_encrypt_key padlock_aes_set_encrypt_key
  132. # define AES_set_decrypt_key padlock_aes_set_decrypt_key
  133. # include "../crypto/aes/aes_core.c"
  134. # endif
  135. /*
  136. * This stuff is needed if this ENGINE is being compiled into a
  137. * self-contained shared-library.
  138. */
  139. # ifndef OPENSSL_NO_DYNAMIC_ENGINE
  140. static int padlock_bind_fn(ENGINE *e, const char *id)
  141. {
  142. if (id && (strcmp(id, padlock_id) != 0)) {
  143. return 0;
  144. }
  145. if (!padlock_bind_helper(e)) {
  146. return 0;
  147. }
  148. return 1;
  149. }
  150. IMPLEMENT_DYNAMIC_CHECK_FN()
  151. IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn)
  152. # endif /* !OPENSSL_NO_DYNAMIC_ENGINE */
  153. /* ===== Here comes the "real" engine ===== */
  154. /* Some AES-related constants */
  155. # define AES_BLOCK_SIZE 16
  156. # define AES_KEY_SIZE_128 16
  157. # define AES_KEY_SIZE_192 24
  158. # define AES_KEY_SIZE_256 32
  159. /*
  160. * Here we store the status information relevant to the current context.
  161. */
  162. /*
  163. * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on
  164. * the order of items in this structure. Don't blindly modify, reorder,
  165. * etc!
  166. */
struct padlock_cipher_data {
    unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
    union {
        unsigned int pad[4];    /* pads the control word out to 16 bytes */
        struct {
            int rounds:4;       /* AES rounds: 10/12/14 (see init_key) */
            int dgst:1;         /* n/a in C3 */
            int align:1;        /* n/a in C3 */
            int ciphr:1;        /* n/a in C3 */
            unsigned int keygen:1; /* 1 = ks holds a software-expanded key */
            int interm:1;
            unsigned int encdec:1; /* 0 = encrypt, 1 = decrypt */
            int ksize:2;        /* key size: 0=128, 1=192, 2=256 bits */
        } b;
    } cword;                    /* Control word */
    AES_KEY ks;                 /* Encryption key */
};
  184. /* Interface to assembler module */
  185. unsigned int padlock_capability(void);
  186. void padlock_key_bswap(AES_KEY *key);
  187. void padlock_verify_context(struct padlock_cipher_data *ctx);
  188. void padlock_reload_key(void);
  189. void padlock_aes_block(void *out, const void *inp,
  190. struct padlock_cipher_data *ctx);
  191. int padlock_ecb_encrypt(void *out, const void *inp,
  192. struct padlock_cipher_data *ctx, size_t len);
  193. int padlock_cbc_encrypt(void *out, const void *inp,
  194. struct padlock_cipher_data *ctx, size_t len);
  195. int padlock_cfb_encrypt(void *out, const void *inp,
  196. struct padlock_cipher_data *ctx, size_t len);
  197. int padlock_ofb_encrypt(void *out, const void *inp,
  198. struct padlock_cipher_data *ctx, size_t len);
  199. int padlock_ctr32_encrypt(void *out, const void *inp,
  200. struct padlock_cipher_data *ctx, size_t len);
  201. int padlock_xstore(void *out, int edx);
  202. void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
  203. void padlock_sha1(void *ctx, const void *inp, size_t len);
  204. void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
  205. void padlock_sha256(void *ctx, const void *inp, size_t len);
  206. /*
  207. * Load supported features of the CPU to see if the PadLock is available.
  208. */
  209. static int padlock_available(void)
  210. {
  211. unsigned int edx = padlock_capability();
  212. /* Fill up some flags */
  213. padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
  214. padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));
  215. return padlock_use_ace + padlock_use_rng;
  216. }
  217. /* ===== AES encryption/decryption ===== */
  218. # if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
  219. # define NID_aes_128_cfb NID_aes_128_cfb128
  220. # endif
  221. # if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
  222. # define NID_aes_128_ofb NID_aes_128_ofb128
  223. # endif
  224. # if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
  225. # define NID_aes_192_cfb NID_aes_192_cfb128
  226. # endif
  227. # if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
  228. # define NID_aes_192_ofb NID_aes_192_ofb128
  229. # endif
  230. # if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
  231. # define NID_aes_256_cfb NID_aes_256_cfb128
  232. # endif
  233. # if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
  234. # define NID_aes_256_ofb NID_aes_256_ofb128
  235. # endif
/* List of supported ciphers (must match the switch in padlock_ciphers). */
static const int padlock_cipher_nids[] = {
    NID_aes_128_ecb,
    NID_aes_128_cbc,
    NID_aes_128_cfb,
    NID_aes_128_ofb,
    NID_aes_128_ctr,

    NID_aes_192_ecb,
    NID_aes_192_cbc,
    NID_aes_192_cfb,
    NID_aes_192_ofb,
    NID_aes_192_ctr,

    NID_aes_256_ecb,
    NID_aes_256_cbc,
    NID_aes_256_cfb,
    NID_aes_256_ofb,
    NID_aes_256_ctr
};

/* Number of entries in padlock_cipher_nids. */
static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) /
                                      sizeof(padlock_cipher_nids[0]));
  256. /* Function prototypes ... */
  257. static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
  258. const unsigned char *iv, int enc);
/*
 * Round "ptr" up to the next 16-byte boundary.  The EVP cipher data is
 * over-allocated by 16 bytes (see DECLARE_AES_EVP's impl_ctx_size) so the
 * padlock_cipher_data inside it can always be aligned this way.
 */
# define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
        ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
# define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
        NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx)))
  263. static int
  264. padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
  265. const unsigned char *in_arg, size_t nbytes)
  266. {
  267. return padlock_ecb_encrypt(out_arg, in_arg,
  268. ALIGNED_CIPHER_DATA(ctx), nbytes);
  269. }
  270. static int
  271. padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
  272. const unsigned char *in_arg, size_t nbytes)
  273. {
  274. struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
  275. int ret;
  276. memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
  277. if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes)))
  278. memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
  279. return ret;
  280. }
/*
 * CFB128 bulk handler.  First drains any partially-used key-stream block
 * recorded in ctx->num, then runs all complete blocks through the PadLock
 * CFB primitive, and finally handles a trailing partial block in software.
 */
static int
padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /* Finish off a partial key-stream block left over from a prior call. */
    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        if (EVP_CIPHER_CTX_is_encrypting(ctx))
            /* Encrypt: the produced ciphertext byte feeds back into ivp. */
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
            } else
            /* Decrypt: the incoming ciphertext byte feeds back into ivp. */
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Bulk-process all complete blocks in hardware. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    /* Trailing partial block: produce one key-stream block in software. */
    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        if (cdata->cword.b.encdec) {
            /*
             * Decrypting: temporarily flip to encryption, since CFB always
             * uses the forward cipher to generate key stream.
             */
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
/*
 * OFB128 bulk handler.  Like CFB: drain any partial key-stream block kept
 * in ctx->num, run whole blocks through the PadLock OFB primitive, then
 * generate one key-stream block in software for a trailing partial block.
 */
static int
padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        /* XOR input against leftover key-stream bytes held in the IV. */
        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    /* Bulk-process all complete blocks in hardware. */
    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    /* Trailing partial block: encrypt the IV once to get key stream. */
    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */
        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}
  383. static void padlock_ctr32_encrypt_glue(const unsigned char *in,
  384. unsigned char *out, size_t blocks,
  385. struct padlock_cipher_data *ctx,
  386. const unsigned char *ivec)
  387. {
  388. memcpy(ctx->iv, ivec, AES_BLOCK_SIZE);
  389. padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks);
  390. }
/*
 * CTR bulk handler: delegates counter/offset bookkeeping to
 * CRYPTO_ctr128_encrypt_ctr32(), feeding it the PadLock 32-bit-counter
 * primitive via padlock_ctr32_encrypt_glue().
 */
static int
padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int n = EVP_CIPHER_CTX_get_num(ctx);
    unsigned int num;

    if (n < 0)
        return 0;               /* invalid key-stream offset */
    num = (unsigned int)n;

    CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
                                cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
                                EVP_CIPHER_CTX_buf_noconst(ctx), &num,
                                (ctr128_f) padlock_ctr32_encrypt_glue);

    EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
    return 1;
}
  408. # define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
  409. # define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
  410. # define EVP_CIPHER_block_size_OFB 1
  411. # define EVP_CIPHER_block_size_CFB 1
  412. # define EVP_CIPHER_block_size_CTR 1
  413. /*
  414. * Declaring so many ciphers by hand would be a pain. Instead introduce a bit
  415. * of preprocessor magic :-)
  416. */
/*
 * DECLARE_AES_EVP(ksize,lmode,umode) emits a lazily-constructed singleton
 * accessor padlock_aes_<ksize>_<lmode>() that returns the EVP_CIPHER for
 * that key size and mode, or NULL if any EVP_CIPHER_meth_set_* step fails.
 * Note the impl ctx is over-allocated by 16 bytes so ALIGNED_CIPHER_DATA()
 * can place the padlock_cipher_data on a 16-byte boundary.
 */
# define DECLARE_AES_EVP(ksize,lmode,umode)      \
static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL; \
static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void) \
{                                                                       \
    if (_hidden_aes_##ksize##_##lmode == NULL                           \
        && ((_hidden_aes_##ksize##_##lmode =                            \
             EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode,             \
                                 EVP_CIPHER_block_size_##umode,         \
                                 AES_KEY_SIZE_##ksize)) == NULL         \
            || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \
                                              AES_BLOCK_SIZE)           \
            || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \
                                          0 | EVP_CIPH_##umode##_MODE)  \
            || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \
                                         padlock_aes_init_key)          \
            || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \
                                              padlock_##lmode##_cipher) \
            || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \
                                                  sizeof(struct padlock_cipher_data) + 16) \
            || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_set_asn1_iv) \
            || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_get_asn1_iv))) { \
        EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode);            \
        _hidden_aes_##ksize##_##lmode = NULL;                           \
    }                                                                   \
    return _hidden_aes_##ksize##_##lmode;                               \
}
/* Instantiate accessors for all 15 supported key-size/mode combinations. */
DECLARE_AES_EVP(128, ecb, ECB)
DECLARE_AES_EVP(128, cbc, CBC)
DECLARE_AES_EVP(128, cfb, CFB)
DECLARE_AES_EVP(128, ofb, OFB)
DECLARE_AES_EVP(128, ctr, CTR)

DECLARE_AES_EVP(192, ecb, ECB)
DECLARE_AES_EVP(192, cbc, CBC)
DECLARE_AES_EVP(192, cfb, CFB)
DECLARE_AES_EVP(192, ofb, OFB)
DECLARE_AES_EVP(192, ctr, CTR)

DECLARE_AES_EVP(256, ecb, ECB)
DECLARE_AES_EVP(256, cbc, CBC)
DECLARE_AES_EVP(256, cfb, CFB)
DECLARE_AES_EVP(256, ofb, OFB)
DECLARE_AES_EVP(256, ctr, CTR)
  460. static int
  461. padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids,
  462. int nid)
  463. {
  464. /* No specific cipher => return a list of supported nids ... */
  465. if (!cipher) {
  466. *nids = padlock_cipher_nids;
  467. return padlock_cipher_nids_num;
  468. }
  469. /* ... or the requested "cipher" otherwise */
  470. switch (nid) {
  471. case NID_aes_128_ecb:
  472. *cipher = padlock_aes_128_ecb();
  473. break;
  474. case NID_aes_128_cbc:
  475. *cipher = padlock_aes_128_cbc();
  476. break;
  477. case NID_aes_128_cfb:
  478. *cipher = padlock_aes_128_cfb();
  479. break;
  480. case NID_aes_128_ofb:
  481. *cipher = padlock_aes_128_ofb();
  482. break;
  483. case NID_aes_128_ctr:
  484. *cipher = padlock_aes_128_ctr();
  485. break;
  486. case NID_aes_192_ecb:
  487. *cipher = padlock_aes_192_ecb();
  488. break;
  489. case NID_aes_192_cbc:
  490. *cipher = padlock_aes_192_cbc();
  491. break;
  492. case NID_aes_192_cfb:
  493. *cipher = padlock_aes_192_cfb();
  494. break;
  495. case NID_aes_192_ofb:
  496. *cipher = padlock_aes_192_ofb();
  497. break;
  498. case NID_aes_192_ctr:
  499. *cipher = padlock_aes_192_ctr();
  500. break;
  501. case NID_aes_256_ecb:
  502. *cipher = padlock_aes_256_ecb();
  503. break;
  504. case NID_aes_256_cbc:
  505. *cipher = padlock_aes_256_cbc();
  506. break;
  507. case NID_aes_256_cfb:
  508. *cipher = padlock_aes_256_cfb();
  509. break;
  510. case NID_aes_256_ofb:
  511. *cipher = padlock_aes_256_ofb();
  512. break;
  513. case NID_aes_256_ctr:
  514. *cipher = padlock_aes_256_ctr();
  515. break;
  516. default:
  517. /* Sorry, we don't support this NID */
  518. *cipher = NULL;
  519. return 0;
  520. }
  521. return 1;
  522. }
  523. /* Prepare the encryption key for PadLock usage */
/*
 * EVP init callback: fill the aligned padlock_cipher_data with the
 * control word and key material.  Returns 1 on success, 0 on error
 * (NULL key or unsupported key length).
 */
static int
padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                     const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_get_key_length(ctx) * 8; /* in bits */
    unsigned long mode = EVP_CIPHER_CTX_get_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        /* OFB/CTR always use the forward cipher, even when decrypting. */
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_is_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32; /* 10/12/14 rounds */
    cdata->cword.b.ksize = (key_len - 128) / 64;       /* 0/1/2 */

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
            && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when user reuses the
     * context for new key. The catch is that if we don't do
     * this, padlock_eas_cipher might proceed with old key...
     */
    padlock_reload_key();

    return 1;
}
  583. /* ===== Random Number Generator ===== */
  584. /*
  585. * This code is not engaged. The reason is that it does not comply
  586. * with recommendations for VIA RNG usage for secure applications
  587. * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
  588. * provide meaningful error control...
  589. */
  590. /*
  591. * Wrapper that provides an interface between the API and the raw PadLock
  592. * RNG
  593. */
/*
 * Fill "output" with "count" bytes from the PadLock hardware RNG via
 * the xstore primitive.  Returns 1 on success, 0 if the RNG is disabled
 * or reports a quality/count failure.  (Not engaged - see comment above.)
 */
static int padlock_rand_bytes(unsigned char *output, int count)
{
    unsigned int eax, buf;

    /* Pull 8 bytes at a time while at least a quad-word is wanted. */
    while (count >= 8) {
        eax = padlock_xstore(output, 0);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 8)
            return 0;           /* fatal failure... */
        output += 8;
        count -= 8;
    }
    /* Remaining tail: fetch one byte at a time through a local buffer. */
    while (count > 0) {
        eax = padlock_xstore(&buf, 3);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 1)
            return 0;           /* fatal failure... */
        *output++ = (unsigned char)buf;
        count--;
    }
    /* Don't leave stray random bits on the stack. */
    OPENSSL_cleanse(&buf, sizeof(buf));
    return 1;
}
  628. /* Dummy but necessary function */
/* RAND status callback: reports "ready" unconditionally. */
static int padlock_rand_status(void)
{
    return 1;
}
  633. /* Prepare structure for registration */
/* RAND_METHOD table for registration via ENGINE_set_RAND(). */
static RAND_METHOD padlock_rand = {
    NULL,                       /* seed - not supported */
    padlock_rand_bytes,         /* bytes */
    NULL,                       /* cleanup - nothing to release */
    NULL,                       /* add - not supported */
    padlock_rand_bytes,         /* pseudorand - same source as bytes */
    padlock_rand_status,        /* rand status */
};
  642. # endif /* COMPILE_PADLOCKENG */
  643. #endif /* !OPENSSL_NO_PADLOCKENG */
  644. #if defined(OPENSSL_NO_PADLOCKENG) || !defined(COMPILE_PADLOCKENG)
  645. # ifndef OPENSSL_NO_DYNAMIC_ENGINE
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
OPENSSL_EXPORT
/*
 * Stub bind function for builds where PadLock support is unavailable:
 * always refuse to bind so the dynamic loader rejects this engine.
 */
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
{
    return 0;
}
  653. IMPLEMENT_DYNAMIC_CHECK_FN()
  654. # endif
  655. #endif