e_padlock.c

/*
 * Copyright 2004-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdio.h>
#include <string.h>

#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/err.h>
#include <openssl/modes.h>

#ifndef OPENSSL_NO_HW
# ifndef OPENSSL_NO_HW_PADLOCK

/*
 * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it
 * doesn't exist elsewhere, but it even can't be compiled on other platforms!
 */

#  undef COMPILE_HW_PADLOCK
#  if !defined(I386_ONLY) && defined(PADLOCK_ASM)
#   define COMPILE_HW_PADLOCK
#   ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *ENGINE_padlock(void);
#   endif
#  endif

#  ifdef OPENSSL_NO_DYNAMIC_ENGINE
void engine_load_padlock_int(void);
void engine_load_padlock_int(void)
{
    /* On non-x86 CPUs it just returns. */
#   ifdef COMPILE_HW_PADLOCK
    ENGINE *toadd = ENGINE_padlock();

    if (!toadd)
        return;
    ENGINE_add(toadd);
    ENGINE_free(toadd);
    ERR_clear_error();
#   endif
}
#  endif

#  ifdef COMPILE_HW_PADLOCK

/* Function for ENGINE detection and control */
static int padlock_available(void);
static int padlock_init(ENGINE *e);

/* RNG Stuff */
static RAND_METHOD padlock_rand;

/* Cipher Stuff */
static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                           const int **nids, int nid);

/* Engine names */
static const char *padlock_id = "padlock";
static char padlock_name[100];

/* Available features */
static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
static int padlock_use_rng = 0; /* Random Number Generator */

/* ===== Engine "management" functions ===== */

/* Prepare the ENGINE structure for registration */
static int padlock_bind_helper(ENGINE *e)
{
    /* Check available features */
    padlock_available();

    /*
     * RNG is currently disabled for reasons discussed in commentary just
     * before padlock_rand_bytes function.
     */
    padlock_use_rng = 0;

    /* Generate a nice engine name with available features */
    BIO_snprintf(padlock_name, sizeof(padlock_name),
                 "VIA PadLock (%s, %s)",
                 padlock_use_rng ? "RNG" : "no-RNG",
                 padlock_use_ace ? "ACE" : "no-ACE");

    /* Register everything or return with an error */
    if (!ENGINE_set_id(e, padlock_id) ||
        !ENGINE_set_name(e, padlock_name) ||
        !ENGINE_set_init_function(e, padlock_init) ||
        (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) ||
        (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) {
        return 0;
    }

    /* Everything looks good */
    return 1;
}

#   ifdef OPENSSL_NO_DYNAMIC_ENGINE
/* Constructor */
static ENGINE *ENGINE_padlock(void)
{
    ENGINE *eng = ENGINE_new();

    if (eng == NULL) {
        return NULL;
    }

    if (!padlock_bind_helper(eng)) {
        ENGINE_free(eng);
        return NULL;
    }

    return eng;
}
#   endif

/* Check availability of the engine */
static int padlock_init(ENGINE *e)
{
    return (padlock_use_rng || padlock_use_ace);
}

/*
 * This stuff is needed if this ENGINE is being compiled into a
 * self-contained shared-library.
 */
#   ifdef DYNAMIC_ENGINE
static int padlock_bind_fn(ENGINE *e, const char *id)
{
    if (id && (strcmp(id, padlock_id) != 0)) {
        return 0;
    }

    if (!padlock_bind_helper(e)) {
        return 0;
    }

    return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn)
#   endif                       /* DYNAMIC_ENGINE */

/* ===== Here comes the "real" engine ===== */

/* Some AES-related constants */
#   define AES_BLOCK_SIZE       16
#   define AES_KEY_SIZE_128     16
#   define AES_KEY_SIZE_192     24
#   define AES_KEY_SIZE_256     32

/*
 * Here we store the status information relevant to the current context.
 */
/*
 * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on
 * the order of items in this structure. Don't blindly modify, reorder,
 * etc!
 */
struct padlock_cipher_data {
    unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
    union {
        unsigned int pad[4];
        struct {
            int rounds:4;
            int dgst:1;         /* n/a in C3 */
            int align:1;        /* n/a in C3 */
            int ciphr:1;        /* n/a in C3 */
            unsigned int keygen:1;
            int interm:1;
            unsigned int encdec:1;
            int ksize:2;
        } b;
    } cword;                    /* Control word */
    AES_KEY ks;                 /* Encryption key */
};
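
/*
 * The cword union overlays the bit-fields above with four 32-bit words
 * (cword.pad), so the same 16 bytes of control word can also be accessed
 * as plain words where that is more convenient.
 */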

/* Interface to assembler module */
unsigned int padlock_capability(void);
void padlock_key_bswap(AES_KEY *key);
void padlock_verify_context(struct padlock_cipher_data *ctx);
void padlock_reload_key(void);
void padlock_aes_block(void *out, const void *inp,
                       struct padlock_cipher_data *ctx);
int padlock_ecb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cbc_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cfb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ofb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ctr32_encrypt(void *out, const void *inp,
                          struct padlock_cipher_data *ctx, size_t len);
int padlock_xstore(void *out, int edx);
void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha1(void *ctx, const void *inp, size_t len);
void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha256(void *ctx, const void *inp, size_t len);

/*
 * Load supported features of the CPU to see if the PadLock is available.
 */
static int padlock_available(void)
{
    unsigned int edx = padlock_capability();

    /* Fill up some flags */
    padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
    padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));

    return padlock_use_ace + padlock_use_rng;
}
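
/*
 * padlock_capability() returns the extended-feature word; each PadLock
 * unit is apparently reported as a pair of bits ("present" and "enabled"),
 * which is why both bits of each mask above must be set: bits 6-7 for ACE
 * and bits 2-3 for the RNG.
 */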

/* ===== AES encryption/decryption ===== */

#   if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
#    define NID_aes_128_cfb NID_aes_128_cfb128
#   endif

#   if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
#    define NID_aes_128_ofb NID_aes_128_ofb128
#   endif

#   if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
#    define NID_aes_192_cfb NID_aes_192_cfb128
#   endif

#   if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
#    define NID_aes_192_ofb NID_aes_192_ofb128
#   endif

#   if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
#    define NID_aes_256_cfb NID_aes_256_cfb128
#   endif

#   if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
#    define NID_aes_256_ofb NID_aes_256_ofb128
#   endif

/* List of supported ciphers. */
static const int padlock_cipher_nids[] = {
    NID_aes_128_ecb,
    NID_aes_128_cbc,
    NID_aes_128_cfb,
    NID_aes_128_ofb,
    NID_aes_128_ctr,

    NID_aes_192_ecb,
    NID_aes_192_cbc,
    NID_aes_192_cfb,
    NID_aes_192_ofb,
    NID_aes_192_ctr,

    NID_aes_256_ecb,
    NID_aes_256_cbc,
    NID_aes_256_cfb,
    NID_aes_256_ofb,
    NID_aes_256_ctr
};

static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) /
                                      sizeof(padlock_cipher_nids[0]));

/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                                const unsigned char *iv, int enc);

#   define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
        ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
#   define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
        NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx)))
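
/*
 * The PadLock instructions expect the cipher data (control word, key and
 * IV) to sit on a 16-byte boundary, while EVP makes no alignment promise
 * for the cipher-data area.  That is why the EVP methods below reserve
 * sizeof(struct padlock_cipher_data) + 16 bytes and NEAREST_ALIGNED
 * rounds the pointer up to the next 16-byte boundary, by at most 15
 * bytes (e.g. ...0x17 -> ...0x20).
 */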

static int
padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    return padlock_ecb_encrypt(out_arg, in_arg,
                               ALIGNED_CIPHER_DATA(ctx), nbytes);
}

static int
padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int ret;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
    if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes)))
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
    return ret;
}

static int
padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        if (EVP_CIPHER_CTX_encrypting(ctx))
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
            } else
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
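        /*
         * CFB always runs the cipher in the encrypt direction to produce
         * the key-stream block, so if this context was keyed for
         * decryption (encdec == 1) the flag is flipped while the IV block
         * is transformed and then restored.
         */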
        if (cdata->cword.b.encdec) {
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

static int
padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */
        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

static void padlock_ctr32_encrypt_glue(const unsigned char *in,
                                       unsigned char *out, size_t blocks,
                                       struct padlock_cipher_data *ctx,
                                       const unsigned char *ivec)
{
    memcpy(ctx->iv, ivec, AES_BLOCK_SIZE);
    padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks);
}
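
/*
 * Glue for CRYPTO_ctr128_encrypt_ctr32(): it adapts padlock_ctr32_encrypt()
 * to the ctr128_f callback signature (in, out, blocks, key, ivec) and works
 * in whole 16-byte blocks; the generic CTR code below handles any partial
 * block and the counter bookkeeping.
 */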
static int
padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    unsigned int num = EVP_CIPHER_CTX_num(ctx);

    CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
                                cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
                                EVP_CIPHER_CTX_buf_noconst(ctx), &num,
                                (ctr128_f) padlock_ctr32_encrypt_glue);

    EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
    return 1;
}

#   define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
#   define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
#   define EVP_CIPHER_block_size_OFB 1
#   define EVP_CIPHER_block_size_CFB 1
#   define EVP_CIPHER_block_size_CTR 1
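
/*
 * ECB and CBC are exposed to EVP with the real 16-byte AES block size,
 * while CFB, OFB and CTR behave as stream ciphers and therefore
 * advertise a block size of 1.
 */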

/*
 * Declaring so many ciphers by hand would be a pain. Instead introduce a bit
 * of preprocessor magic :-)
 */
#   define DECLARE_AES_EVP(ksize,lmode,umode)                           \
static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL;                \
static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void)            \
{                                                                       \
    if (_hidden_aes_##ksize##_##lmode == NULL                           \
        && ((_hidden_aes_##ksize##_##lmode =                            \
             EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode,             \
                                 EVP_CIPHER_block_size_##umode,         \
                                 AES_KEY_SIZE_##ksize)) == NULL         \
            || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \
                                              AES_BLOCK_SIZE)           \
            || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \
                                          0 | EVP_CIPH_##umode##_MODE)  \
            || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \
                                         padlock_aes_init_key)          \
            || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \
                                              padlock_##lmode##_cipher) \
            || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \
                                                  sizeof(struct padlock_cipher_data) + 16) \
            || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_set_asn1_iv) \
            || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_get_asn1_iv))) { \
        EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode);            \
        _hidden_aes_##ksize##_##lmode = NULL;                           \
    }                                                                   \
    return _hidden_aes_##ksize##_##lmode;                               \
}
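
/*
 * For instance, DECLARE_AES_EVP(128, cbc, CBC) defines a lazy constructor
 * padlock_aes_128_cbc() that allocates the method with
 * EVP_CIPHER_meth_new(NID_aes_128_cbc, 16, 16), wires in
 * padlock_aes_init_key() and padlock_cbc_cipher(), and caches the result
 * in _hidden_aes_128_cbc (freeing it again if any setter fails).
 */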

DECLARE_AES_EVP(128, ecb, ECB)
DECLARE_AES_EVP(128, cbc, CBC)
DECLARE_AES_EVP(128, cfb, CFB)
DECLARE_AES_EVP(128, ofb, OFB)
DECLARE_AES_EVP(128, ctr, CTR)

DECLARE_AES_EVP(192, ecb, ECB)
DECLARE_AES_EVP(192, cbc, CBC)
DECLARE_AES_EVP(192, cfb, CFB)
DECLARE_AES_EVP(192, ofb, OFB)
DECLARE_AES_EVP(192, ctr, CTR)

DECLARE_AES_EVP(256, ecb, ECB)
DECLARE_AES_EVP(256, cbc, CBC)
DECLARE_AES_EVP(256, cfb, CFB)
DECLARE_AES_EVP(256, ofb, OFB)
DECLARE_AES_EVP(256, ctr, CTR)

static int
padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids,
                int nid)
{
    /* No specific cipher => return a list of supported nids ... */
    if (!cipher) {
        *nids = padlock_cipher_nids;
        return padlock_cipher_nids_num;
    }

    /* ... or the requested "cipher" otherwise */
    switch (nid) {
    case NID_aes_128_ecb:
        *cipher = padlock_aes_128_ecb();
        break;
    case NID_aes_128_cbc:
        *cipher = padlock_aes_128_cbc();
        break;
    case NID_aes_128_cfb:
        *cipher = padlock_aes_128_cfb();
        break;
    case NID_aes_128_ofb:
        *cipher = padlock_aes_128_ofb();
        break;
    case NID_aes_128_ctr:
        *cipher = padlock_aes_128_ctr();
        break;

    case NID_aes_192_ecb:
        *cipher = padlock_aes_192_ecb();
        break;
    case NID_aes_192_cbc:
        *cipher = padlock_aes_192_cbc();
        break;
    case NID_aes_192_cfb:
        *cipher = padlock_aes_192_cfb();
        break;
    case NID_aes_192_ofb:
        *cipher = padlock_aes_192_ofb();
        break;
    case NID_aes_192_ctr:
        *cipher = padlock_aes_192_ctr();
        break;

    case NID_aes_256_ecb:
        *cipher = padlock_aes_256_ecb();
        break;
    case NID_aes_256_cbc:
        *cipher = padlock_aes_256_cbc();
        break;
    case NID_aes_256_cfb:
        *cipher = padlock_aes_256_cfb();
        break;
    case NID_aes_256_ofb:
        *cipher = padlock_aes_256_ofb();
        break;
    case NID_aes_256_ctr:
        *cipher = padlock_aes_256_ctr();
        break;

    default:
        /* Sorry, we don't support this NID */
        *cipher = NULL;
        return 0;
    }

    return 1;
}

/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                     const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
    unsigned long mode = EVP_CIPHER_CTX_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
    cdata->cword.b.ksize = (key_len - 128) / 64;
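    /*
     * i.e. a 128-bit key gives 10 rounds and ksize 0, a 192-bit key gives
     * 12 rounds and ksize 1, and a 256-bit key gives 14 rounds and ksize 2.
     */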

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
            && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
#   ifndef AES_ASM
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
#   endif
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when a user reuses the context for a
     * new key. The catch is that if we don't do this, the padlock_*_cipher
     * functions might proceed with the old key...
     */
    padlock_reload_key();

    return 1;
}

/* ===== Random Number Generator ===== */
/*
 * This code is not engaged. The reason is that it does not comply
 * with recommendations for VIA RNG usage for secure applications
 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
 * provide meaningful error control...
 */

/*
 * Wrapper that provides an interface between the API and the raw PadLock
 * RNG
 */
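/*
 * Status word (eax) returned by padlock_xstore(), as used below: bit 6
 * set means the RNG is enabled, the low five bits report how many bytes
 * were actually stored, and bits 10..14 flag self-test problems (DC bias,
 * raw bits, string filter), any of which is treated as failure.
 */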
static int padlock_rand_bytes(unsigned char *output, int count)
{
    unsigned int eax, buf;

    while (count >= 8) {
        eax = padlock_xstore(output, 0);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 8)
            return 0;           /* fatal failure... */
        output += 8;
        count -= 8;
    }
    while (count > 0) {
        eax = padlock_xstore(&buf, 3);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 1)
            return 0;           /* fatal failure... */
        *output++ = (unsigned char)buf;
        count--;
    }

    OPENSSL_cleanse(&buf, sizeof(buf));

    return 1;
}

/* Dummy but necessary function */
static int padlock_rand_status(void)
{
    return 1;
}

/* Prepare structure for registration */
static RAND_METHOD padlock_rand = {
    NULL,                       /* seed */
    padlock_rand_bytes,         /* bytes */
    NULL,                       /* cleanup */
    NULL,                       /* add */
    padlock_rand_bytes,         /* pseudorand */
    padlock_rand_status,        /* rand status */
};

#  endif                        /* COMPILE_HW_PADLOCK */
# endif                         /* !OPENSSL_NO_HW_PADLOCK */
#endif                          /* !OPENSSL_NO_HW */

#if defined(OPENSSL_NO_HW) || defined(OPENSSL_NO_HW_PADLOCK) \
        || !defined(COMPILE_HW_PADLOCK)
# ifndef OPENSSL_NO_DYNAMIC_ENGINE
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
{
    return 0;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
# endif
#endif