/*
 * Copyright 2011-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * All low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as is
 * the case here.
 */
#include "internal/deprecated.h"

#include "cipher_aes_cbc_hmac_sha.h"
#if !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE)

int ossl_cipher_capable_aes_cbc_hmac_sha256(void)
{
    return 0;
}

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha256(void)
{
    return NULL;
}

#else
# include <openssl/rand.h>
# include "crypto/evp.h"
# include "internal/constant_time.h"

void sha256_block_data_order(void *c, const void *p, size_t len);
int aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                         const AES_KEY *key, unsigned char iv[16],
                         SHA256_CTX *ctx, const void *in0);

int ossl_cipher_capable_aes_cbc_hmac_sha256(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE
           && aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL);
}
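
/*
 * Set up the AES round keys (encrypt or decrypt schedule, as appropriate)
 * and reset the three SHA-256 states used for HMAC: "head" and "tail" hold
 * the precomputed ipad/opad states once a MAC key has been set (see
 * aesni_cbc_hmac_sha256_set_mac_key below), while "md" is the working
 * state. The removetls* fields tell the provider layer how many trailing
 * MAC-and-padding bytes to strip from decrypted TLS records.
 */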
static int aesni_cbc_hmac_sha256_init_key(PROV_CIPHER_CTX *vctx,
                                          const unsigned char *key,
                                          size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, ctx->base.keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, ctx->base.keylen * 8, &ctx->ks);

    SHA256_Init(&sctx->head);   /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    vctx->removetlspad = 1;
    vctx->removetlsfixed = SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE;

    return ret < 0 ? 0 : 1;
}
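
/*
 * Block-oriented SHA-256 update: whole 64-byte blocks are fed straight to
 * sha256_block_data_order() and the bit counters (Nl/Nh) are advanced by
 * hand, while SHA256_Update() handles only the unaligned head and tail.
 */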
static void sha256_update(SHA256_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA256_CBLOCK - res;
        if (len < res)
            res = len;
        SHA256_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA256_CBLOCK;
    len -= res;

    if (len) {
        sha256_block_data_order(c, ptr, len / SHA256_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA256_Update(c, ptr, res);
}
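
/*
 * The multi-block code below encrypts up to eight TLS records per call by
 * running 4- or 8-way interleaved SHA-256 (sha256_multi_block) and
 * multi-lane AES-CBC (aesni_multi_cbc_encrypt) over per-record descriptors.
 */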
  83. # if !defined(OPENSSL_NO_MULTIBLOCK)
  84. typedef struct {
  85. unsigned int A[8], B[8], C[8], D[8], E[8], F[8], G[8], H[8];
  86. } SHA256_MB_CTX;
  87. typedef struct {
  88. const unsigned char *ptr;
  89. int blocks;
  90. } HASH_DESC;
  91. typedef struct {
  92. const unsigned char *inp;
  93. unsigned char *out;
  94. int blocks;
  95. u64 iv[2];
  96. } CIPH_DESC;
  97. void sha256_multi_block(SHA256_MB_CTX *, const HASH_DESC *, int);
  98. void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);
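
/*
 * Split inp_len bytes of plaintext into x4 = 4 * n4x TLS records, MAC and
 * encrypt them in parallel, and emit ready-to-send records (5-byte header,
 * explicit IV, payload, HMAC, CBC padding) at out. Returns the total
 * number of output bytes, or 0 on failure.
 */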
static size_t tls1_multi_block_encrypt(void *vctx,
                                       unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                                       /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA256_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif

    /* ask for IVs in bulk */
    if (RAND_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4, 0) <= 0)
        return 0;

    mctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */
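
    /*
     * Carve the input into x4 fragments: frag = inp_len / x4, with the
     * final fragment, last, absorbing the remainder. The idea behind the
     * adjustment below: if last's closing SHA-256 block (13-byte header
     * plus 9 bytes of hash finalization included) would hold fewer than
     * x4 - 1 bytes, hand one byte to each of the other fragments instead,
     * saving a compression call on the longest lane.
     */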
    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 32 + 16) & -16);

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h[0];
        mctx->B[i] = sctx->md.h[1];
        mctx->C[i] = sctx->md.h[2];
        mctx->D[i] = sctx->md.h[3];
        mctx->E[i] = sctx->md.h[4];
        mctx->F[i] = sctx->md.h[5];
        mctx->G[i] = sctx->md.h[6];
        mctx->H[i] = sctx->md.h[7];

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha256_multi_block(mctx, edges, n4x);
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if MAXCHUNKSIZE % 64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha256_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;

                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
    sha256_multi_block(mctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
                     off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha256_multi_block(mctx, edges, n4x);
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        blocks[i].d[5] = BSWAP4(mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        blocks[i].d[6] = BSWAP4(mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        blocks[i].d[7] = BSWAP4(mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 32) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h[0];
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h[1];
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h[2];
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h[3];
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h[4];
        PUTU32(blocks[i].c + 20, mctx->F[i]);
        mctx->F[i] = sctx->tail.h[5];
        PUTU32(blocks[i].c + 24, mctx->G[i]);
        mctx->G[i] = sctx->tail.h[6];
        PUTU32(blocks[i].c + 28, mctx->H[i]);
        mctx->H[i] = sctx->tail.h[7];
        blocks[i].c[32] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 32) * 8);
#  endif /* BSWAP */
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
    /* finalize MACs */
    sha256_multi_block(mctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        PUTU32(out + 20, mctx->F[i]);
        PUTU32(out + 24, mctx->G[i]);
        PUTU32(out + 28, mctx->H[i]);
        out += 32;
        len += 32;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* !OPENSSL_NO_MULTIBLOCK */
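
/*
 * Stitched AES-CBC + HMAC-SHA256 cipher operation. When encrypting in TLS
 * mode it hashes the payload, appends HMAC and CBC padding, and encrypts
 * everything in one pass, using the aesni_cbc_sha256_enc() stitch for the
 * bulk of the payload where the CPU supports it. When decrypting a TLS
 * record it decrypts first, then recomputes and verifies the HMAC in
 * constant time to mitigate Lucky Thirteen-style padding oracles.
 */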
static int aesni_cbc_hmac_sha256_cipher(PROV_CIPHER_CTX *vctx,
                                        unsigned char *out,
                                        const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0; /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA256_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA256_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;
        /*
         * The assembly stitch handles AVX-capable processors, but its
         * performance on AMD Jaguar is ~40% worse than the plain path,
         * for unknown reasons. That processor happens to support AVX but
         * not the AMD-specific XOP extension, which lets us identify it
         * and avoid invoking the stitch: once we know the CPU supports
         * AVX, we additionally require that it is either XOP-capable
         * (i.e. Bulldozer-based) or a GenuineIntel part. SHAEXT-capable
         * processors take the stitch unconditionally.
         */
        if (((OPENSSL_ia32cap_P[2] & (1 << 29)) ||         /* SHAEXT? */
             ((OPENSSL_ia32cap_P[1] & (1 << (60 - 32))) && /* AVX? */
              ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32)))   /* XOP? */
               | (OPENSSL_ia32cap_P[0] & (1 << 30))))) &&  /* "Intel CPU"? */
            plen > (sha_off + iv) &&
            (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
            sha256_update(&sctx->md, in + iv, sha_off);

            (void)aesni_cbc_sha256_enc(in, out, blocks, &ctx->ks,
                                       ctx->base.iv,
                                       &sctx->md, in + iv + sha_off);
            blocks *= SHA256_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha256_update(&sctx->md, in + sha_off, plen - sha_off);

        if (plen != len) { /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA256_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, out + plen, SHA256_DIGEST_LENGTH);
            SHA256_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA256_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[64 + SHA256_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64));

        /* decrypt HMAC|padding at once */
        aesni_cbc_encrypt(in, out, len, &ctx->ks,
                          ctx->base.iv, 0);
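
        /*
         * From here on, TLS-mode processing must not branch or index
         * memory based on the (secret) padding value: everything below
         * runs in constant time over the maximum possible padding length.
         */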
        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA256_CBLOCK];
            } *data = (void *)sctx->md.data;

            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION)
                iv = AES_BLOCK_SIZE;

            if (len < (iv + SHA256_DIGEST_LENGTH + 1))
                return 0;

            /* omit explicit iv */
            out += iv;
            len -= iv;

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA256_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;
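
            /*
             * The two lines above clamp maxpad to 255 without branching:
             * for any realistic record length, if len - (digest + 1)
             * exceeds 255 the unsigned underflow of 255 - maxpad puts
             * ones in the top byte, and the OR/AND pair saturates the
             * value at 255, the largest padding TLS permits.
             */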
            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);

            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;
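
            /*
             * The stored AAD carried the wire length of the record; patch
             * in the true payload length (wire length minus MAC and
             * padding) before feeding it into the HMAC computation.
             */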
            /* calculate HMAC */
            sctx->md = sctx->head;
            sha256_update(&sctx->md, ctx->aux.tls_aad, plen);

            /* code with lucky-13 fix */
            len -= SHA256_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA256_CBLOCK)) {
                j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK);
                j += SHA256_CBLOCK - sctx->md.num;
                sha256_update(&sctx->md, out, j);
                out += j;
                len -= j;
                inp_len -= j;
            }

            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
# else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
# endif /* BSWAP */

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            pmac->u[5] = 0;
            pmac->u[6] = 0;
            pmac->u[7] = 0;
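
            /*
             * Lucky-13 core: scan every decrypted byte up to the largest
             * possible payload end. For j < inp_len the real byte is
             * hashed; at j == inp_len a masked 0x80 terminator is
             * substituted, and zeros thereafter, mimicking SHA-256
             * padding. Each time a block fills it is compressed, and the
             * digest state is accumulated into pmac under a mask that is
             * all-ones only for the block that absorbs the length word of
             * the true payload, so both timing and memory access pattern
             * are independent of the secret padding length.
             */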
            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA256_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;
                res = 0;
            }

            for (i = res; i < SHA256_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA256_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h[0] & mask;
                pmac->u[1] |= sctx->md.h[1] & mask;
                pmac->u[2] |= sctx->md.h[2] & mask;
                pmac->u[3] |= sctx->md.h[3] & mask;
                pmac->u[4] |= sctx->md.h[4] & mask;
                pmac->u[5] |= sctx->md.h[5] & mask;
                pmac->u[6] |= sctx->md.h[6] & mask;
                pmac->u[7] |= sctx->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha256_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h[0] & mask;
            pmac->u[1] |= sctx->md.h[1] & mask;
            pmac->u[2] |= sctx->md.h[2] & mask;
            pmac->u[3] |= sctx->md.h[3] & mask;
            pmac->u[4] |= sctx->md.h[4] & mask;
            pmac->u[5] |= sctx->md.h[5] & mask;
            pmac->u[6] |= sctx->md.h[6] & mask;
            pmac->u[7] |= sctx->md.h[7] & mask;
# ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
            pmac->u[5] = BSWAP4(pmac->u[5]);
            pmac->u[6] = BSWAP4(pmac->u[6]);
            pmac->u[7] = BSWAP4(pmac->u[7]);
# else
            for (i = 0; i < 8; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
# endif /* BSWAP */
            len += SHA256_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha256_update(&sctx->md, pmac->c, SHA256_DIGEST_LENGTH);
            SHA256_Final(pmac->c, &sctx->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* code containing lucky-13 fix */
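            /*
             * Compare the computed MAC against the transmitted one and
             * check the padding bytes, scanning a fixed window of
             * maxpad + SHA256_DIGEST_LENGTH bytes regardless of where the
             * MAC actually starts; masks select which comparison applies
             * to each byte.
             */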
            {
                unsigned char *p =
                    out + len - 1 - maxpad - SHA256_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                for (res = 0, i = 0, j = 0;
                     j < maxpad + SHA256_DIGEST_LENGTH;
                     j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA256_DIGEST_LENGTH)) >>
                        (sizeof(int) * 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            sha256_update(&sctx->md, out, len);
        }
    }

    return 1;
}

/* EVP_CTRL_AEAD_SET_MAC_KEY */
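/*
 * Precompute the HMAC key schedule: "head" is SHA-256 seeded with
 * key XOR ipad (0x36 repeated) and "tail" with key XOR opad (0x5c
 * repeated), so each record only pays for the inner and outer
 * finalizations. Keys longer than the 64-byte block are first hashed
 * down, as HMAC requires.
 */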
static void aesni_cbc_hmac_sha256_set_mac_key(void *vctx,
                                              const unsigned char *mackey,
                                              size_t len)
{
    PROV_AES_HMAC_SHA256_CTX *ctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > sizeof(hmac_key)) {
        SHA256_Init(&ctx->head);
        sha256_update(&ctx->head, mackey, len);
        SHA256_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mackey, len);
    }

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36; /* ipad */
    SHA256_Init(&ctx->head);
    sha256_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA256_Init(&ctx->tail);
    sha256_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}

/* EVP_CTRL_AEAD_TLS1_AAD */
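/*
 * Absorb the 13-byte TLS record header. When encrypting, the header is
 * hashed immediately (its length field is first adjusted to exclude the
 * explicit IV for TLS 1.1+) and the required MAC-plus-padding expansion
 * is reported via tls_aad_pad. When decrypting, the header is stashed in
 * aux.tls_aad for use once the true payload length is known.
 */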
static int aesni_cbc_hmac_sha256_set_tls1_aad(void *vctx,
                                              unsigned char *aad_rec, int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];

    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha256_update(&sctx->md, p, aad_len);
        ctx->tls_aad_pad = (int)(((len + SHA256_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    } else {
        memcpy(ctx->aux.tls_aad, p, aad_len);
        ctx->payload_length = aad_len;
        ctx->tls_aad_pad = SHA256_DIGEST_LENGTH;
        return 1;
    }
}

# if !defined(OPENSSL_NO_MULTIBLOCK)

/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
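/*
 * Worst-case size of one multiblock output record: a 5-byte TLS header,
 * a 16-byte explicit IV, and the fragment plus 32-byte MAC plus CBC
 * padding, rounded up to the 16-byte AES block size.
 */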
static int aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize(void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 32 + 16) & -16));
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
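/*
 * Validate the AAD for a multiblock call and pre-size the output: pick
 * the interleave factor (4-way, or 8-way when AVX2 is available and the
 * input is large enough), hash the 13-byte header, and report the packed
 * output length that the subsequent encrypt call will produce.
 */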
static int aesni_cbc_hmac_sha256_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA256_CTX *sctx = (PROV_AES_HMAC_SHA256_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];

    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0; /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2; /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha256_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;

        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

        packlen = 5 + 16 + ((frag + 32 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 32 + 16) & -16);

        param->interleave = x4;
        /* The returned values used by get need to be stored */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1; /* not yet */
}

/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha256_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}
# endif /* !OPENSSL_NO_MULTIBLOCK */
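
/*
 * Dispatch table consumed by the generic AES-CBC-HMAC-SHA provider code:
 * the base init/cipher entry points plus the TLS-specific controls above.
 */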
static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha256 = {
    {
        aesni_cbc_hmac_sha256_init_key,
        aesni_cbc_hmac_sha256_cipher
    },
    aesni_cbc_hmac_sha256_set_mac_key,
    aesni_cbc_hmac_sha256_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha256_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha256_tls1_multiblock_aad,
    aesni_cbc_hmac_sha256_tls1_multiblock_encrypt
# endif
};

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha256(void)
{
    return &cipher_hw_aes_hmac_sha256;
}

#endif /* !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE) */