rsaz_exp.c
  1. /*****************************************************************************
  2. * *
  3. * Copyright (c) 2012, Intel Corporation *
  4. * *
  5. * All rights reserved. *
  6. * *
  7. * Redistribution and use in source and binary forms, with or without *
  8. * modification, are permitted provided that the following conditions are *
  9. * met: *
  10. * *
  11. * * Redistributions of source code must retain the above copyright *
  12. * notice, this list of conditions and the following disclaimer. *
  13. * *
  14. * * Redistributions in binary form must reproduce the above copyright *
  15. * notice, this list of conditions and the following disclaimer in the *
  16. * documentation and/or other materials provided with the *
  17. * distribution. *
  18. * *
  19. * * Neither the name of the Intel Corporation nor the names of its *
  20. * contributors may be used to endorse or promote products derived from *
  21. * this software without specific prior written permission. *
  22. * *
  23. * *
  24. * THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY *
  25. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
  26. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR *
  27. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR *
  28. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
  29. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, *
  30. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
  31. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
  32. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
  33. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
  34. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
  35. * *
  36. ******************************************************************************
  37. * Developers and authors: *
  38. * Shay Gueron (1, 2), and Vlad Krasnov (1) *
  39. * (1) Intel Corporation, Israel Development Center, Haifa, Israel *
  40. * (2) University of Haifa, Israel *
  41. *****************************************************************************/
#include "rsaz_exp.h"

#ifdef RSAZ_ENABLED

/*
 * AVX2 assembler primitives for 1024-bit modular arithmetic.
 * See crypto/bn/asm/rsaz-avx2.pl for further details.
 *
 * norm2red/red2norm convert between the normal 16x64-bit representation
 * and the internal "redundant" representation (40 x 64-bit words) used
 * by the vector code.  |k| is the Montgomery constant passed through to
 * the reduction (presumably -n^-1 mod 2^64 -- defined by the .pl file).
 */
void rsaz_1024_norm2red_avx2(void *red, const void *norm);
void rsaz_1024_mul_avx2(void *ret, const void *a, const void *b,
                        const void *n, BN_ULONG k);
/* |cnt| consecutive Montgomery squarings of |a| into |ret| */
void rsaz_1024_sqr_avx2(void *ret, const void *a, const void *n, BN_ULONG k,
                        int cnt);
/* store/load entry |i| of the 32-entry window table |tbl| */
void rsaz_1024_scatter5_avx2(void *tbl, const void *val, int i);
void rsaz_1024_gather5_avx2(void *val, const void *tbl, int i);
void rsaz_1024_red2norm_avx2(void *norm, const void *red);
/*
 * 64-byte alignment attribute for the constant tables below.  The
 * alignment is only a performance matter (see the comment in the
 * fallback branch), so compilers with no known syntax simply get an
 * empty definition.
 */
#if defined(__GNUC__)
# define ALIGN64 __attribute__((aligned(64)))
#elif defined(_MSC_VER)
# define ALIGN64 __declspec(align(64))
#elif defined(__SUNPRO_C)
# define ALIGN64
/* Sun Studio has no attribute; align the two named objects via pragma */
# pragma align 64(one,two80)
#else
/* not fatal, might hurt performance a little */
# define ALIGN64
#endif
/* The constant 1 in the 40-word redundant representation. */
ALIGN64 static const BN_ULONG one[40] = {
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/*
 * The constant 2^80 in the same representation.  The redundant form
 * holds 29 bits per word, so bit 80 lands in word 2 at bit position
 * 80 - 2*29 = 22, hence the 1 << 22.
 */
ALIGN64 static const BN_ULONG two80[40] = {
    0, 0, 1 << 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*
 * result_norm = base_norm ^ exponent mod m_norm.
 *
 * Fixed 5-bit-window Montgomery exponentiation for 1024-bit operands
 * (16 x 64-bit words in "norm" form), built on the AVX2 primitives
 * above.  All window-table accesses go through scatter5/gather5, which
 * address the table in a fixed pattern independent of the secret
 * window value -- the cache-timing defence of this code.
 *
 * |RR| is the caller's Montgomery R^2 (from BN_MONT_CTX) and |k0| the
 * matching n0 word; |exponent| is secret and is read as raw bytes.
 */
void RSAZ_1024_mod_exp_avx2(BN_ULONG result_norm[16],
                            const BN_ULONG base_norm[16],
                            const BN_ULONG exponent[16],
                            const BN_ULONG m_norm[16], const BN_ULONG RR[16],
                            BN_ULONG k0)
{
    unsigned char storage[320 * 3 + 32 * 9 * 16 + 64]; /* 5.5KB */
    /* round the working area up to the next 64-byte boundary; the
     * trailing +64 in |storage| guarantees there is room to do so */
    unsigned char *p_str = storage + (64 - ((size_t)storage % 64));
    unsigned char *a_inv, *m, *result;
    unsigned char *table_s = p_str + 320 * 3;
    unsigned char *R2 = table_s; /* borrow */
    int index;
    int wvalue;

    /*
     * Lay out the three 320-byte scratch operands so that |m| does not
     * straddle a 4KB page boundary: the test checks whether the first
     * 320 bytes of |p_str| cross a page, and places |m| in whichever
     * slot stays within a single page.
     */
    if ((((size_t)p_str & 4095) + 320) >> 12) {
        result = p_str;
        a_inv = p_str + 320;
        m = p_str + 320 * 2; /* should not cross page */
    } else {
        m = p_str; /* should not cross page */
        result = p_str + 320;
        a_inv = p_str + 320 * 2;
    }

    /* convert the inputs into the redundant representation */
    rsaz_1024_norm2red_avx2(m, m_norm);
    rsaz_1024_norm2red_avx2(a_inv, base_norm);
    rsaz_1024_norm2red_avx2(R2, RR);

    /*
     * NOTE(review): these two steps appear to rescale the caller's RR
     * to the internal Montgomery radix of the AVX2 code (the extra
     * *2^80 compensates for the difference between the 64-bit-limb
     * radix and the 29-bit-limb radix) -- confirm against
     * crypto/bn/asm/rsaz-avx2.pl.
     */
    rsaz_1024_mul_avx2(R2, R2, R2, m, k0);
    rsaz_1024_mul_avx2(R2, R2, two80, m, k0);

    /* table[0] = 1 (in Montgomery form) */
    rsaz_1024_mul_avx2(result, R2, one, m, k0);
    /* table[1] = a_inv^1 (base converted to Montgomery form) */
    rsaz_1024_mul_avx2(a_inv, a_inv, R2, m, k0);

    /* note: storing table[0]/table[1] also clobbers R2 (it aliased
     * table_s), but R2 is no longer needed */
    rsaz_1024_scatter5_avx2(table_s, result, 0);
    rsaz_1024_scatter5_avx2(table_s, a_inv, 1);

    /* table[2] = a_inv^2 */
    rsaz_1024_sqr_avx2(result, a_inv, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 2);
#if 0
    /* this is almost 2x smaller and less than 1% slower */
    for (index = 3; index < 32; index++) {
        rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
        rsaz_1024_scatter5_avx2(table_s, result, index);
    }
#else
    /*
     * Unrolled table construction: entries are produced by chains of
     * squarings (cheap) plus occasional multiplies, instead of 29
     * sequential multiplies as in the loop above.
     */
    /* table[4] = a_inv^4 */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 4);
    /* table[8] = a_inv^8 */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 8);
    /* table[16] = a_inv^16 */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 16);
    /* table[17] = a_inv^17 */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 17);

    /* table[3] = a_inv^3 */
    rsaz_1024_gather5_avx2(result, table_s, 2);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 3);
    /* table[6] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 6);
    /* table[12] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 12);
    /* table[24] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 24);
    /* table[25] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 25);

    /* table[5] */
    rsaz_1024_gather5_avx2(result, table_s, 4);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 5);
    /* table[10] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 10);
    /* table[20] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 20);
    /* table[21] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 21);

    /* table[7] */
    rsaz_1024_gather5_avx2(result, table_s, 6);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 7);
    /* table[14] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 14);
    /* table[28] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 28);
    /* table[29] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 29);

    /* table[9] */
    rsaz_1024_gather5_avx2(result, table_s, 8);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 9);
    /* table[18] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 18);
    /* table[19] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 19);

    /* table[11] */
    rsaz_1024_gather5_avx2(result, table_s, 10);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 11);
    /* table[22] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 22);
    /* table[23] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 23);

    /* table[13] */
    rsaz_1024_gather5_avx2(result, table_s, 12);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 13);
    /* table[26] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 26);
    /* table[27] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 27);

    /* table[15] */
    rsaz_1024_gather5_avx2(result, table_s, 14);
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 15);
    /* table[30] */
    rsaz_1024_sqr_avx2(result, result, m, k0, 1);
    rsaz_1024_scatter5_avx2(table_s, result, 30);
    /* table[31] */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    rsaz_1024_scatter5_avx2(table_s, result, 31);
#endif

    /* load first window: the top 5 bits of the 1024-bit exponent
     * (1024 = 5 + 203*5 + 4, matching the phases below) */
    p_str = (unsigned char *)exponent;
    wvalue = p_str[127] >> 3;
    rsaz_1024_gather5_avx2(result, table_s, wvalue);

    index = 1014;               /* bit offset of the next 5-bit window */

    while (index > -1) { /* the remaining 203 full 5-bit windows */
        rsaz_1024_sqr_avx2(result, result, m, k0, 5);

        /*
         * NOTE(review): unaligned, type-punned 16-bit load of two
         * adjacent exponent bytes; relies on x86 tolerating misaligned
         * access and on little-endian byte order.  Fine on the only
         * targets this AVX2 path runs on, but not portable C.
         */
        wvalue = *((unsigned short *)&p_str[index / 8]);
        wvalue = (wvalue >> (index % 8)) & 31;
        index -= 5;

        rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); /* borrow a_inv */
        rsaz_1024_mul_avx2(result, result, a_inv, m, k0);
    }

    /* square four times and fold in the final 4-bit window (exponent
     * bits 0..3) */
    rsaz_1024_sqr_avx2(result, result, m, k0, 4);
    wvalue = p_str[0] & 15;
    rsaz_1024_gather5_avx2(a_inv, table_s, wvalue); /* borrow a_inv */
    rsaz_1024_mul_avx2(result, result, a_inv, m, k0);

    /* convert back from Montgomery form (multiply by 1) and then to
     * the normal representation */
    rsaz_1024_mul_avx2(result, result, one, m, k0);
    rsaz_1024_red2norm_avx2(result_norm, result);

    /* scrub the secret-dependent scratch (exponent windows, base) */
    OPENSSL_cleanse(storage, sizeof(storage));
}
/*
 * x86_64 assembler primitives for 512-bit Montgomery arithmetic.
 * See crypto/bn/rsaz-x86_64.pl for further details.
 *
 * |k| is the Montgomery constant passed through to the reduction.
 * scatter4/gather4 store/load entry |power| of a 16-entry window
 * table; the *_mul_scatter4 / *_mul_gather4 variants fuse the table
 * access with a Montgomery multiplication.
 */
void rsaz_512_mul(void *ret, const void *a, const void *b, const void *n,
                  BN_ULONG k);
/* ret = a * table[power] scattered into tbl[power] after the multiply */
void rsaz_512_mul_scatter4(void *ret, const void *a, const void *n,
                           BN_ULONG k, const void *tbl, unsigned int power);
void rsaz_512_mul_gather4(void *ret, const void *a, const void *tbl,
                          const void *n, BN_ULONG k, unsigned int power);
/* Montgomery multiply by 1, i.e. convert out of Montgomery form */
void rsaz_512_mul_by_one(void *ret, const void *a, const void *n, BN_ULONG k);
/* |cnt| consecutive Montgomery squarings */
void rsaz_512_sqr(void *ret, const void *a, const void *n, BN_ULONG k,
                  int cnt);
void rsaz_512_scatter4(void *tbl, const BN_ULONG *val, int power);
void rsaz_512_gather4(BN_ULONG *val, const void *tbl, int power);
/*
 * result = base ^ exponent mod m.
 *
 * Fixed 4-bit-window Montgomery exponentiation for 512-bit operands
 * (8 x 64-bit words).  Table accesses go through scatter4/gather4 and
 * the fused mul_gather4/mul_scatter4, which are the cache-timing-safe
 * lookups.  |k0| is the Montgomery n0 word and |RR| the caller's R^2
 * mod m (both presumably from a BN_MONT_CTX -- note the argument
 * order here differs from RSAZ_1024_mod_exp_avx2).
 */
void RSAZ_512_mod_exp(BN_ULONG result[8],
                      const BN_ULONG base[8], const BN_ULONG exponent[8],
                      const BN_ULONG m[8], BN_ULONG k0, const BN_ULONG RR[8])
{
    unsigned char storage[16 * 8 * 8 + 64 * 2 + 64]; /* 1.2KB */
    /* 64-byte-align the window table; the trailing +64 leaves room */
    unsigned char *table = storage + (64 - ((size_t)storage % 64));
    BN_ULONG *a_inv = (BN_ULONG *)(table + 16 * 8 * 8);
    BN_ULONG *temp = (BN_ULONG *)(table + 16 * 8 * 8 + 8 * 8);
    unsigned char *p_str = (unsigned char *)exponent;
    int index;
    unsigned int wvalue;

    /*
     * table[0] = 1 in Montgomery form, i.e. R mod m = 2^512 - m.
     * Since m is odd (required for Montgomery with k0 = -m^-1), the
     * low-word negation produces no carry, so the high words are just
     * the bitwise complements.
     */
    temp[0] = 0 - m[0];
    temp[1] = ~m[1];
    temp[2] = ~m[2];
    temp[3] = ~m[3];
    temp[4] = ~m[4];
    temp[5] = ~m[5];
    temp[6] = ~m[6];
    temp[7] = ~m[7];
    rsaz_512_scatter4(table, temp, 0);

    /* table[1] = a_inv^1: base * RR / R = base in Montgomery form */
    rsaz_512_mul(a_inv, base, RR, m, k0);
    rsaz_512_scatter4(table, a_inv, 1);

    /* table[2] = a_inv^2 */
    rsaz_512_sqr(temp, a_inv, m, k0, 1);
    rsaz_512_scatter4(table, temp, 2);

    /* table[i] = table[i-1] * a_inv, stored as it is computed */
    for (index = 3; index < 16; index++)
        rsaz_512_mul_scatter4(temp, a_inv, m, k0, table, index);

    /* load first window: top byte of the exponent, high nibble first */
    wvalue = p_str[63];

    rsaz_512_gather4(temp, table, wvalue >> 4);
    rsaz_512_sqr(temp, temp, m, k0, 4);
    rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue & 0xf);

    /* remaining 62 exponent bytes, two 4-bit windows per byte */
    for (index = 62; index >= 0; index--) {
        wvalue = p_str[index];

        rsaz_512_sqr(temp, temp, m, k0, 4);
        rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue >> 4);

        rsaz_512_sqr(temp, temp, m, k0, 4);
        rsaz_512_mul_gather4(temp, temp, table, m, k0, wvalue & 0x0f);
    }

    /* convert out of Montgomery form */
    rsaz_512_mul_by_one(result, temp, m, k0);

    /* scrub the secret-dependent scratch */
    OPENSSL_cleanse(storage, sizeof(storage));
}
#else

/*
 * Some compilers complain about a translation unit with no external
 * definitions; give pedantic builds a dummy symbol to chew on.
 */
# if defined(PEDANTIC) || defined(__DECC) || defined(__clang__)
static void *dummy = &dummy;
# endif

#endif