/*
 * Copyright (C) 2017 Denys Vlasenko
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */
#include "tls.h"

/* The file is taken almost verbatim from matrixssl-3-7-2b-open/crypto/math/.
 * Changes are flagged with //bbox
 */

/**
 * @file pstm_montgomery_reduce.c
 * @version 33ef80f (HEAD, tag: MATRIXSSL-3-7-2-OPEN, tag: MATRIXSSL-3-7-2-COMM, origin/master, origin/HEAD, master)
 *
 * Multiprecision Montgomery Reduction.
 */
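
/* Montgomery reduction in brief: for a modulus m of pa digits in base
 * b = 2^DIGIT_BIT and a precomputed digit mp with m->dp[0] * mp == -1
 * (mod b), pstm_montgomery_reduce() computes a * R^-1 (mod m), where
 * R = b^pa, using only digit multiplies and adds, with no division by m.
 * Each round adds a multiple of m chosen to zero the lowest remaining
 * digit; dropping the pa zeroed digits at the end divides by R.
 */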
/*
 * Copyright (c) 2013-2015 INSIDE Secure Corporation
 * Copyright (c) PeerSec Networks, 2002-2011
 * All Rights Reserved
 *
 * The latest version of this code is available at http://www.matrixssl.org
 *
 * This software is open source; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This General Public License does NOT permit incorporating this software
 * into proprietary programs. If you are unable to comply with the GPL, a
 * commercial license for this software may be purchased from INSIDE at
 * http://www.insidesecure.com/eng/Company/Locations
 *
 * This program is distributed WITHOUT ANY WARRANTY; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * http://www.gnu.org/copyleft/gpl.html
 */
/******************************************************************************/
//bbox
//#include "../cryptoApi.h"
#ifndef DISABLE_PSTM

/******************************************************************************/
#if defined(PSTM_X86)
/* x86-32 optimized for 32 bit platforms. For 64 bit mode use X86_64 instead */
#if !defined(__GNUC__) || !defined(__i386__) || !defined(PSTM_32BIT)
#error "PSTM_X86 option requires GCC and 32 bit mode x86 processor"
#endif
//#pragma message ("Using 32 bit x86 Assembly Optimizations")

#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
	mu = c[x] * mp

#if 0
#define INNERMUL \
	asm( \
		"movl %5,%%eax \n\t" \
		"mull %4 \n\t" \
		"addl %1,%%eax \n\t" \
		"adcl $0,%%edx \n\t" \
		"addl %%eax,%0 \n\t" \
		"adcl $0,%%edx \n\t" \
		"movl %%edx,%1 \n\t" \
		:"=g"(_c[LO]), "=r"(cy) \
		:"0"(_c[LO]), "1"(cy), "g"(mu), "g"(*tmpm++) \
		: "%eax", "%edx", "cc")
/*
 * The above generated "error: 'asm' operand has impossible constraints" on Android.
 * Does their ABI reserve a register for something, leaving too few for the asm?
 */
#else
/* Avoid the two explicit "movl"s by telling the compiler to put the input
 * value of *tmpm++ into EAX, and to expect the cy result in EDX:
 */
#define INNERMUL \
	asm( \
		"mull %4 \n\t" \
		"addl %3,%%eax \n\t" \
		"adcl $0,%%edx \n\t" \
		"addl %%eax,%0 \n\t" \
		"adcl $0,%%edx \n\t" \
		:"=g"(_c[LO]), "=&d"(cy) \
		:"0"(_c[LO]), "g"(cy), "g"(mu), "a"(*tmpm++) \
		:"cc")
/* This doesn't tell the compiler that we clobber EAX, but it probably won't
 * need the value of *tmpm anyway, and thus won't try to reuse EAX's contents.
 * TODO: fix it with a dummy "=a"(clobbered_eax) output?
 */
#endif
#define PROPCARRY \
	asm( \
		"addl %1,%0 \n\t" \
		"sbb %1,%1 \n\t" \
		"neg %1 \n\t" \
		:"=g"(_c[LO]), "=r"(cy) \
		:"0"(_c[LO]), "1"(cy) \
		:"cc")
/******************************************************************************/
#elif defined(PSTM_X86_64)
/* x86-64 optimized */
#if !defined(__GNUC__) || !defined(__x86_64__) || !defined(PSTM_64BIT)
#error "PSTM_X86_64 option requires PSTM_64BIT, GCC and 64 bit mode x86 processor"
#endif
//#pragma message ("Using 64 bit x86_64 Assembly Optimizations")

#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
	mu = c[x] * mp

#define INNERMUL \
	asm( \
		"movq %5,%%rax \n\t" \
		"mulq %4 \n\t" \
		"addq %1,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"addq %%rax,%0 \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rdx,%1 \n\t" \
		:"=g"(_c[LO]), "=r"(cy) \
		:"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
		: "%rax", "%rdx", "cc")

#define INNERMUL8 \
	asm( \
		"movq 0(%5),%%rax \n\t" \
		"movq 0(%2),%%r10 \n\t" \
		"movq 0x8(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x8(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x10(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x10(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x8(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x18(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x18(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x10(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x20(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x20(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x18(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x28(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x28(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x20(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x30(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x30(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x28(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"movq 0x38(%5),%%r11 \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq 0x38(%2),%%r10 \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x30(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		"movq %%r11,%%rax \n\t" \
		"mulq %4 \n\t" \
		"addq %%r10,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"addq %3,%%rax \n\t" \
		"adcq $0,%%rdx \n\t" \
		"movq %%rax,0x38(%0) \n\t" \
		"movq %%rdx,%1 \n\t" \
		\
		:"=r"(_c), "=r"(cy) \
		: "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
		: "%rax", "%rdx", "%r10", "%r11", "cc")
#define PROPCARRY \
	asm( \
		"addq %1,%0 \n\t" \
		"setb %%al \n\t" \
		"movzbq %%al,%1 \n\t" \
		:"=g"(_c[LO]), "=r"(cy) \
		:"0"(_c[LO]), "1"(cy) \
		: "%rax", "cc")
/******************************************************************************/
#elif defined(PSTM_ARM)

#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
	mu = c[x] * mp

#ifdef __thumb2__
//#pragma message ("Using 32 bit ARM Thumb2 Assembly Optimizations")
#define INNERMUL \
	asm( \
		" LDR r0,%1 \n\t" \
		" ADDS r0,r0,%0 \n\t" \
		" ITE CS \n\t" \
		" MOVCS %0,#1 \n\t" \
		" MOVCC %0,#0 \n\t" \
		" UMLAL r0,%0,%3,%4 \n\t" \
		" STR r0,%1 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0])\
		:"r0","cc");
#define PROPCARRY \
	asm( \
		" LDR r0,%1 \n\t" \
		" ADDS r0,r0,%0 \n\t" \
		" STR r0,%1 \n\t" \
		" ITE CS \n\t" \
		" MOVCS %0,#1 \n\t" \
		" MOVCC %0,#0 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"0"(cy),"m"(_c[0])\
		:"r0","cc");
#else /* Non-Thumb2 code */
//#pragma message ("Using 32 bit ARM Assembly Optimizations")
#define INNERMUL \
	asm( \
		" LDR r0,%1 \n\t" \
		" ADDS r0,r0,%0 \n\t" \
		" MOVCS %0,#1 \n\t" \
		" MOVCC %0,#0 \n\t" \
		" UMLAL r0,%0,%3,%4 \n\t" \
		" STR r0,%1 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0])\
		:"r0","cc");
#define PROPCARRY \
	asm( \
		" LDR r0,%1 \n\t" \
		" ADDS r0,r0,%0 \n\t" \
		" STR r0,%1 \n\t" \
		" MOVCS %0,#1 \n\t" \
		" MOVCC %0,#0 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"0"(cy),"m"(_c[0])\
		:"r0","cc");
#endif /* __thumb2__ */
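/* Both ARM variants share one trick: ADDS folds the incoming carry into
 * the digit and MOVCS/MOVCC (inside an ITE block on Thumb2) rewrites cy
 * as 0 or 1 from the carry flag, after which UMLAL accumulates
 * mu * *tmpm into the 64-bit pair {cy:r0}. */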
/******************************************************************************/
#elif defined(PSTM_MIPS)
/* MIPS32 */
//#pragma message ("Using 32 bit MIPS Assembly Optimizations")

#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
	mu = c[x] * mp

#define INNERMUL \
	asm( \
		" multu %3,%4 \n\t" \
		" mflo $12 \n\t" \
		" mfhi $13 \n\t" \
		" addu $12,$12,%0 \n\t" \
		" sltu $10,$12,%0 \n\t" \
		" addu $13,$13,$10 \n\t" \
		" lw $10,%1 \n\t" \
		" addu $12,$12,$10 \n\t" \
		" sltu $10,$12,$10 \n\t" \
		" addu %0,$13,$10 \n\t" \
		" sw $12,%1 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"r"(cy),"r"(mu),"r"(tmpm[0]),"r"(_c[0])\
		:"$10","$12","$13")\
	; ++tmpm;
#define PROPCARRY \
	asm( \
		" lw $10,%1 \n\t" \
		" addu $10,$10,%0 \n\t" \
		" sw $10,%1 \n\t" \
		" sltu %0,$10,%0 \n\t" \
		:"=r"(cy),"=m"(_c[0])\
		:"r"(cy),"r"(_c[0])\
		:"$10");
/******************************************************************************/
#else

/* ISO C code */
#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
	mu = c[x] * mp

#define INNERMUL \
	do { pstm_word t; \
		t = ((pstm_word)_c[0] + (pstm_word)cy) + \
			(((pstm_word)mu) * ((pstm_word)*tmpm++)); \
		_c[0] = (pstm_digit)t; \
		cy = (pstm_digit)(t >> DIGIT_BIT); \
	} while (0)
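/* pstm_word is a double-width type, and the sum above cannot overflow it:
 * with b = 2^DIGIT_BIT the worst case is (b-1) + (b-1) + (b-1)*(b-1)
 * = b*b - 1, which still fits. */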
#define PROPCARRY \
	do { pstm_digit t = _c[0] += cy; cy = (t < cy); } while (0)
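/* (t < cy) is the classic unsigned-wraparound carry test: e.g. with
 * 32-bit digits, 0xFFFFFFFF + 1 wraps to 0, and 0 < 1 yields cy = 1. */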
#endif

/******************************************************************************/

#define LO 0

/* computes x/R == x (mod N) via Montgomery Reduction */
int32 FAST_FUNC pstm_montgomery_reduce(psPool_t *pool, pstm_int *a, pstm_int *m,
		pstm_digit mp, pstm_digit *paD, uint32 paDlen)
{
	pstm_digit *c, *_c, *tmpm, mu;
	int32 oldused, x, y;
	int pa; //bbox: was int16

	pa = m->used;
	if (pa > a->alloc) {
		/* Sanity test for bad numbers. This will confirm no buffer overruns */
		return PS_LIMIT_FAIL;
	}

	if (paD && paDlen >= (uint32)2*pa+1) {
		c = paD;
		memset(c, 0x0, paDlen);
	} else {
		c = xzalloc(2*pa+1);//bbox
	}

	/* copy the input */
	oldused = a->used;
	for (x = 0; x < oldused; x++) {
		c[x] = a->dp[x];
	}

	MONT_START;

	for (x = 0; x < pa; x++) {
		pstm_digit cy = 0;
		/* get Mu for this round */
		LOOP_START;
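		/* mu = c[x] * mp (mod b), where b = 2^DIGIT_BIT and mp == -1/m
		 * (mod b), so c[x] + mu * m->dp[0] == 0 (mod b): the inner loop
		 * below adds mu*m into c at offset x, zeroing digit x. */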
		_c = c + x;
		tmpm = m->dp;
		y = 0;
#ifdef PSTM_X86_64
		for (; y < (pa & ~7); y += 8) {
			INNERMUL8;
			_c += 8;
			tmpm += 8;
		}
#endif /* PSTM_X86_64 */
		for (; y < pa; y++) {
			INNERMUL;
			++_c;
		}
		LOOP_END;
		while (cy) {
			PROPCARRY;
			++_c;
		}
	}
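	/* After the pa rounds above, the low pa digits of c are zero; copying
	 * from c + pa below drops them, which is the division by R = b^pa. */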
	/* now copy out */
	_c = c + pa;
	tmpm = a->dp;
	for (x = 0; x < pa+1; x++) {
		*tmpm++ = *_c++;
	}

	for (; x < oldused; x++) {
		*tmpm++ = 0;
	}

	MONT_FINI;

	a->used = pa+1;
	pstm_clamp(a);

	/* reuse x as return code */
	x = PSTM_OKAY;

	/* if A >= m then A = A - m */
	if (pstm_cmp_mag (a, m) != PSTM_LT) {
		if (s_pstm_sub (a, m, a) != PSTM_OKAY) {
			x = PS_MEM_FAIL;
		}
	}
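	/* Montgomery reduction of an input below m*R leaves a value below 2*m,
	 * so the single conditional subtraction above is enough to land the
	 * result in [0, m). */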
	if (paDlen < (uint32)2*pa+1) {
		psFree(c, pool);
	}

	return x;
}

#endif /* !DISABLE_PSTM */

/******************************************************************************/