/* crypto/bn/asm/x86_64-gcc.c */
/*
 * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c"         /* kind of dirty hack for Sun Studio */
#else
/*-
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <appro@openssl.org> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
 *    versions, like 1.0...
 * A. Well, that's because this code is basically a quick-n-dirty
 *    proof-of-concept hack. As you can see it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there might be enough room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 features own ABI which I'm not familiar with. This is
 *    why I decided to let the compiler take care of subroutine
 *    prologue/epilogue as well as register allocation. For reference.
 *    Win64 implements different ABI for AMD64, different from Linux.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *                        sign    verify    sign/s verify/s
 * rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
 * rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
 * rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
 * rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
 *                        sign    verify    sign/s verify/s
 * dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
 * dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
 * dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *                        sign    verify    sign/s verify/s
 * rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
 * rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
 * rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
 * rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
 *                        sign    verify    sign/s verify/s
 * dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
 * dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
 * dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
 *
 *    For the reference. IA-32 assembler implementation performs
 *    very much like 64-bit code compiled with no-asm on the same
 *    machine.
 */
  65. # if defined(_WIN64) || !defined(__LP64__)
  66. # define BN_ULONG unsigned long long
  67. # else
  68. # define BN_ULONG unsigned long
  69. # endif
  70. # undef mul
  71. # undef mul_add
  72. /*-
  73. * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
  74. * "g"(0) let the compiler to decide where does it
  75. * want to keep the value of zero;
  76. */
  77. # define mul_add(r,a,word,carry) do { \
  78. register BN_ULONG high,low; \
  79. asm ("mulq %3" \
  80. : "=a"(low),"=d"(high) \
  81. : "a"(word),"m"(a) \
  82. : "cc"); \
  83. asm ("addq %2,%0; adcq %3,%1" \
  84. : "+r"(carry),"+d"(high)\
  85. : "a"(low),"g"(0) \
  86. : "cc"); \
  87. asm ("addq %2,%0; adcq %3,%1" \
  88. : "+m"(r),"+d"(high) \
  89. : "r"(carry),"g"(0) \
  90. : "cc"); \
  91. carry=high; \
  92. } while (0)
  93. # define mul(r,a,word,carry) do { \
  94. register BN_ULONG high,low; \
  95. asm ("mulq %3" \
  96. : "=a"(low),"=d"(high) \
  97. : "a"(word),"g"(a) \
  98. : "cc"); \
  99. asm ("addq %2,%0; adcq %3,%1" \
  100. : "+r"(carry),"+d"(high)\
  101. : "a"(low),"g"(0) \
  102. : "cc"); \
  103. (r)=carry, carry=high; \
  104. } while (0)
  105. # undef sqr
  106. # define sqr(r0,r1,a) \
  107. asm ("mulq %2" \
  108. : "=a"(r0),"=d"(r1) \
  109. : "a"(a) \
  110. : "cc");
  111. BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
  112. BN_ULONG w)
  113. {
  114. BN_ULONG c1 = 0;
  115. if (num <= 0)
  116. return c1;
  117. while (num & ~3) {
  118. mul_add(rp[0], ap[0], w, c1);
  119. mul_add(rp[1], ap[1], w, c1);
  120. mul_add(rp[2], ap[2], w, c1);
  121. mul_add(rp[3], ap[3], w, c1);
  122. ap += 4;
  123. rp += 4;
  124. num -= 4;
  125. }
  126. if (num) {
  127. mul_add(rp[0], ap[0], w, c1);
  128. if (--num == 0)
  129. return c1;
  130. mul_add(rp[1], ap[1], w, c1);
  131. if (--num == 0)
  132. return c1;
  133. mul_add(rp[2], ap[2], w, c1);
  134. return c1;
  135. }
  136. return c1;
  137. }
  138. BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
  139. {
  140. BN_ULONG c1 = 0;
  141. if (num <= 0)
  142. return c1;
  143. while (num & ~3) {
  144. mul(rp[0], ap[0], w, c1);
  145. mul(rp[1], ap[1], w, c1);
  146. mul(rp[2], ap[2], w, c1);
  147. mul(rp[3], ap[3], w, c1);
  148. ap += 4;
  149. rp += 4;
  150. num -= 4;
  151. }
  152. if (num) {
  153. mul(rp[0], ap[0], w, c1);
  154. if (--num == 0)
  155. return c1;
  156. mul(rp[1], ap[1], w, c1);
  157. if (--num == 0)
  158. return c1;
  159. mul(rp[2], ap[2], w, c1);
  160. }
  161. return c1;
  162. }
  163. void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
  164. {
  165. if (n <= 0)
  166. return;
  167. while (n & ~3) {
  168. sqr(r[0], r[1], a[0]);
  169. sqr(r[2], r[3], a[1]);
  170. sqr(r[4], r[5], a[2]);
  171. sqr(r[6], r[7], a[3]);
  172. a += 4;
  173. r += 8;
  174. n -= 4;
  175. }
  176. if (n) {
  177. sqr(r[0], r[1], a[0]);
  178. if (--n == 0)
  179. return;
  180. sqr(r[2], r[3], a[1]);
  181. if (--n == 0)
  182. return;
  183. sqr(r[4], r[5], a[2]);
  184. }
  185. }
  186. BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
  187. {
  188. BN_ULONG ret, waste;
  189. asm("divq %4":"=a"(ret), "=d"(waste)
  190. : "a"(l), "d"(h), "r"(d)
  191. : "cc");
  192. return ret;
  193. }
  194. BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
  195. int n)
  196. {
  197. BN_ULONG ret;
  198. size_t i = 0;
  199. if (n <= 0)
  200. return 0;
  201. asm volatile (" subq %0,%0 \n" /* clear carry */
  202. " jmp 1f \n"
  203. ".p2align 4 \n"
  204. "1: movq (%4,%2,8),%0 \n"
  205. " adcq (%5,%2,8),%0 \n"
  206. " movq %0,(%3,%2,8) \n"
  207. " lea 1(%2),%2 \n"
  208. " dec %1 \n"
  209. " jnz 1b \n"
  210. " sbbq %0,%0 \n"
  211. :"=&r" (ret), "+c"(n), "+r"(i)
  212. :"r"(rp), "r"(ap), "r"(bp)
  213. :"cc", "memory");
  214. return ret & 1;
  215. }
  216. # ifndef SIMICS
  217. BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
  218. int n)
  219. {
  220. BN_ULONG ret;
  221. size_t i = 0;
  222. if (n <= 0)
  223. return 0;
  224. asm volatile (" subq %0,%0 \n" /* clear borrow */
  225. " jmp 1f \n"
  226. ".p2align 4 \n"
  227. "1: movq (%4,%2,8),%0 \n"
  228. " sbbq (%5,%2,8),%0 \n"
  229. " movq %0,(%3,%2,8) \n"
  230. " lea 1(%2),%2 \n"
  231. " dec %1 \n"
  232. " jnz 1b \n"
  233. " sbbq %0,%0 \n"
  234. :"=&r" (ret), "+c"(n), "+r"(i)
  235. :"r"(rp), "r"(ap), "r"(bp)
  236. :"cc", "memory");
  237. return ret & 1;
  238. }
  239. # else
  240. /* Simics 1.4<7 has buggy sbbq:-( */
  241. # define BN_MASK2 0xffffffffffffffffL
  242. BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
  243. {
  244. BN_ULONG t1, t2;
  245. int c = 0;
  246. if (n <= 0)
  247. return (BN_ULONG)0;
  248. for (;;) {
  249. t1 = a[0];
  250. t2 = b[0];
  251. r[0] = (t1 - t2 - c) & BN_MASK2;
  252. if (t1 != t2)
  253. c = (t1 < t2);
  254. if (--n <= 0)
  255. break;
  256. t1 = a[1];
  257. t2 = b[1];
  258. r[1] = (t1 - t2 - c) & BN_MASK2;
  259. if (t1 != t2)
  260. c = (t1 < t2);
  261. if (--n <= 0)
  262. break;
  263. t1 = a[2];
  264. t2 = b[2];
  265. r[2] = (t1 - t2 - c) & BN_MASK2;
  266. if (t1 != t2)
  267. c = (t1 < t2);
  268. if (--n <= 0)
  269. break;
  270. t1 = a[3];
  271. t2 = b[3];
  272. r[3] = (t1 - t2 - c) & BN_MASK2;
  273. if (t1 != t2)
  274. c = (t1 < t2);
  275. if (--n <= 0)
  276. break;
  277. a += 4;
  278. b += 4;
  279. r += 4;
  280. }
  281. return c;
  282. }
  283. # endif
/* mul_add_c(a,b,c0,c1,c2)  -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)  -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/*
 * sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
 * c=(c2,c1,c0)
 */
/*
 * Keep in mind that carrying into high part of multiplication result
 * can not overflow, because it cannot be all-ones.
 */
  295. # if 0
  296. /* original macros are kept for reference purposes */
  297. # define mul_add_c(a,b,c0,c1,c2) do { \
  298. BN_ULONG ta = (a), tb = (b); \
  299. BN_ULONG lo, hi; \
  300. BN_UMULT_LOHI(lo,hi,ta,tb); \
  301. c0 += lo; hi += (c0<lo)?1:0; \
  302. c1 += hi; c2 += (c1<hi)?1:0; \
  303. } while(0)
  304. # define mul_add_c2(a,b,c0,c1,c2) do { \
  305. BN_ULONG ta = (a), tb = (b); \
  306. BN_ULONG lo, hi, tt; \
  307. BN_UMULT_LOHI(lo,hi,ta,tb); \
  308. c0 += lo; tt = hi+((c0<lo)?1:0); \
  309. c1 += tt; c2 += (c1<tt)?1:0; \
  310. c0 += lo; hi += (c0<lo)?1:0; \
  311. c1 += hi; c2 += (c1<hi)?1:0; \
  312. } while(0)
  313. # define sqr_add_c(a,i,c0,c1,c2) do { \
  314. BN_ULONG ta = (a)[i]; \
  315. BN_ULONG lo, hi; \
  316. BN_UMULT_LOHI(lo,hi,ta,ta); \
  317. c0 += lo; hi += (c0<lo)?1:0; \
  318. c1 += hi; c2 += (c1<hi)?1:0; \
  319. } while(0)
  320. # else
  321. # define mul_add_c(a,b,c0,c1,c2) do { \
  322. BN_ULONG t1,t2; \
  323. asm ("mulq %3" \
  324. : "=a"(t1),"=d"(t2) \
  325. : "a"(a),"m"(b) \
  326. : "cc"); \
  327. asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
  328. : "+r"(c0),"+r"(c1),"+r"(c2) \
  329. : "r"(t1),"r"(t2),"g"(0) \
  330. : "cc"); \
  331. } while (0)
  332. # define sqr_add_c(a,i,c0,c1,c2) do { \
  333. BN_ULONG t1,t2; \
  334. asm ("mulq %2" \
  335. : "=a"(t1),"=d"(t2) \
  336. : "a"(a[i]) \
  337. : "cc"); \
  338. asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
  339. : "+r"(c0),"+r"(c1),"+r"(c2) \
  340. : "r"(t1),"r"(t2),"g"(0) \
  341. : "cc"); \
  342. } while (0)
  343. # define mul_add_c2(a,b,c0,c1,c2) do { \
  344. BN_ULONG t1,t2; \
  345. asm ("mulq %3" \
  346. : "=a"(t1),"=d"(t2) \
  347. : "a"(a),"m"(b) \
  348. : "cc"); \
  349. asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
  350. : "+r"(c0),"+r"(c1),"+r"(c2) \
  351. : "r"(t1),"r"(t2),"g"(0) \
  352. : "cc"); \
  353. asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
  354. : "+r"(c0),"+r"(c1),"+r"(c2) \
  355. : "r"(t1),"r"(t2),"g"(0) \
  356. : "cc"); \
  357. } while (0)
  358. # endif
  359. # define sqr_add_c2(a,i,j,c0,c1,c2) \
  360. mul_add_c2((a)[i],(a)[j],c0,c1,c2)
  361. void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  362. {
  363. BN_ULONG c1, c2, c3;
  364. c1 = 0;
  365. c2 = 0;
  366. c3 = 0;
  367. mul_add_c(a[0], b[0], c1, c2, c3);
  368. r[0] = c1;
  369. c1 = 0;
  370. mul_add_c(a[0], b[1], c2, c3, c1);
  371. mul_add_c(a[1], b[0], c2, c3, c1);
  372. r[1] = c2;
  373. c2 = 0;
  374. mul_add_c(a[2], b[0], c3, c1, c2);
  375. mul_add_c(a[1], b[1], c3, c1, c2);
  376. mul_add_c(a[0], b[2], c3, c1, c2);
  377. r[2] = c3;
  378. c3 = 0;
  379. mul_add_c(a[0], b[3], c1, c2, c3);
  380. mul_add_c(a[1], b[2], c1, c2, c3);
  381. mul_add_c(a[2], b[1], c1, c2, c3);
  382. mul_add_c(a[3], b[0], c1, c2, c3);
  383. r[3] = c1;
  384. c1 = 0;
  385. mul_add_c(a[4], b[0], c2, c3, c1);
  386. mul_add_c(a[3], b[1], c2, c3, c1);
  387. mul_add_c(a[2], b[2], c2, c3, c1);
  388. mul_add_c(a[1], b[3], c2, c3, c1);
  389. mul_add_c(a[0], b[4], c2, c3, c1);
  390. r[4] = c2;
  391. c2 = 0;
  392. mul_add_c(a[0], b[5], c3, c1, c2);
  393. mul_add_c(a[1], b[4], c3, c1, c2);
  394. mul_add_c(a[2], b[3], c3, c1, c2);
  395. mul_add_c(a[3], b[2], c3, c1, c2);
  396. mul_add_c(a[4], b[1], c3, c1, c2);
  397. mul_add_c(a[5], b[0], c3, c1, c2);
  398. r[5] = c3;
  399. c3 = 0;
  400. mul_add_c(a[6], b[0], c1, c2, c3);
  401. mul_add_c(a[5], b[1], c1, c2, c3);
  402. mul_add_c(a[4], b[2], c1, c2, c3);
  403. mul_add_c(a[3], b[3], c1, c2, c3);
  404. mul_add_c(a[2], b[4], c1, c2, c3);
  405. mul_add_c(a[1], b[5], c1, c2, c3);
  406. mul_add_c(a[0], b[6], c1, c2, c3);
  407. r[6] = c1;
  408. c1 = 0;
  409. mul_add_c(a[0], b[7], c2, c3, c1);
  410. mul_add_c(a[1], b[6], c2, c3, c1);
  411. mul_add_c(a[2], b[5], c2, c3, c1);
  412. mul_add_c(a[3], b[4], c2, c3, c1);
  413. mul_add_c(a[4], b[3], c2, c3, c1);
  414. mul_add_c(a[5], b[2], c2, c3, c1);
  415. mul_add_c(a[6], b[1], c2, c3, c1);
  416. mul_add_c(a[7], b[0], c2, c3, c1);
  417. r[7] = c2;
  418. c2 = 0;
  419. mul_add_c(a[7], b[1], c3, c1, c2);
  420. mul_add_c(a[6], b[2], c3, c1, c2);
  421. mul_add_c(a[5], b[3], c3, c1, c2);
  422. mul_add_c(a[4], b[4], c3, c1, c2);
  423. mul_add_c(a[3], b[5], c3, c1, c2);
  424. mul_add_c(a[2], b[6], c3, c1, c2);
  425. mul_add_c(a[1], b[7], c3, c1, c2);
  426. r[8] = c3;
  427. c3 = 0;
  428. mul_add_c(a[2], b[7], c1, c2, c3);
  429. mul_add_c(a[3], b[6], c1, c2, c3);
  430. mul_add_c(a[4], b[5], c1, c2, c3);
  431. mul_add_c(a[5], b[4], c1, c2, c3);
  432. mul_add_c(a[6], b[3], c1, c2, c3);
  433. mul_add_c(a[7], b[2], c1, c2, c3);
  434. r[9] = c1;
  435. c1 = 0;
  436. mul_add_c(a[7], b[3], c2, c3, c1);
  437. mul_add_c(a[6], b[4], c2, c3, c1);
  438. mul_add_c(a[5], b[5], c2, c3, c1);
  439. mul_add_c(a[4], b[6], c2, c3, c1);
  440. mul_add_c(a[3], b[7], c2, c3, c1);
  441. r[10] = c2;
  442. c2 = 0;
  443. mul_add_c(a[4], b[7], c3, c1, c2);
  444. mul_add_c(a[5], b[6], c3, c1, c2);
  445. mul_add_c(a[6], b[5], c3, c1, c2);
  446. mul_add_c(a[7], b[4], c3, c1, c2);
  447. r[11] = c3;
  448. c3 = 0;
  449. mul_add_c(a[7], b[5], c1, c2, c3);
  450. mul_add_c(a[6], b[6], c1, c2, c3);
  451. mul_add_c(a[5], b[7], c1, c2, c3);
  452. r[12] = c1;
  453. c1 = 0;
  454. mul_add_c(a[6], b[7], c2, c3, c1);
  455. mul_add_c(a[7], b[6], c2, c3, c1);
  456. r[13] = c2;
  457. c2 = 0;
  458. mul_add_c(a[7], b[7], c3, c1, c2);
  459. r[14] = c3;
  460. r[15] = c1;
  461. }
  462. void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
  463. {
  464. BN_ULONG c1, c2, c3;
  465. c1 = 0;
  466. c2 = 0;
  467. c3 = 0;
  468. mul_add_c(a[0], b[0], c1, c2, c3);
  469. r[0] = c1;
  470. c1 = 0;
  471. mul_add_c(a[0], b[1], c2, c3, c1);
  472. mul_add_c(a[1], b[0], c2, c3, c1);
  473. r[1] = c2;
  474. c2 = 0;
  475. mul_add_c(a[2], b[0], c3, c1, c2);
  476. mul_add_c(a[1], b[1], c3, c1, c2);
  477. mul_add_c(a[0], b[2], c3, c1, c2);
  478. r[2] = c3;
  479. c3 = 0;
  480. mul_add_c(a[0], b[3], c1, c2, c3);
  481. mul_add_c(a[1], b[2], c1, c2, c3);
  482. mul_add_c(a[2], b[1], c1, c2, c3);
  483. mul_add_c(a[3], b[0], c1, c2, c3);
  484. r[3] = c1;
  485. c1 = 0;
  486. mul_add_c(a[3], b[1], c2, c3, c1);
  487. mul_add_c(a[2], b[2], c2, c3, c1);
  488. mul_add_c(a[1], b[3], c2, c3, c1);
  489. r[4] = c2;
  490. c2 = 0;
  491. mul_add_c(a[2], b[3], c3, c1, c2);
  492. mul_add_c(a[3], b[2], c3, c1, c2);
  493. r[5] = c3;
  494. c3 = 0;
  495. mul_add_c(a[3], b[3], c1, c2, c3);
  496. r[6] = c1;
  497. r[7] = c2;
  498. }
  499. void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
  500. {
  501. BN_ULONG c1, c2, c3;
  502. c1 = 0;
  503. c2 = 0;
  504. c3 = 0;
  505. sqr_add_c(a, 0, c1, c2, c3);
  506. r[0] = c1;
  507. c1 = 0;
  508. sqr_add_c2(a, 1, 0, c2, c3, c1);
  509. r[1] = c2;
  510. c2 = 0;
  511. sqr_add_c(a, 1, c3, c1, c2);
  512. sqr_add_c2(a, 2, 0, c3, c1, c2);
  513. r[2] = c3;
  514. c3 = 0;
  515. sqr_add_c2(a, 3, 0, c1, c2, c3);
  516. sqr_add_c2(a, 2, 1, c1, c2, c3);
  517. r[3] = c1;
  518. c1 = 0;
  519. sqr_add_c(a, 2, c2, c3, c1);
  520. sqr_add_c2(a, 3, 1, c2, c3, c1);
  521. sqr_add_c2(a, 4, 0, c2, c3, c1);
  522. r[4] = c2;
  523. c2 = 0;
  524. sqr_add_c2(a, 5, 0, c3, c1, c2);
  525. sqr_add_c2(a, 4, 1, c3, c1, c2);
  526. sqr_add_c2(a, 3, 2, c3, c1, c2);
  527. r[5] = c3;
  528. c3 = 0;
  529. sqr_add_c(a, 3, c1, c2, c3);
  530. sqr_add_c2(a, 4, 2, c1, c2, c3);
  531. sqr_add_c2(a, 5, 1, c1, c2, c3);
  532. sqr_add_c2(a, 6, 0, c1, c2, c3);
  533. r[6] = c1;
  534. c1 = 0;
  535. sqr_add_c2(a, 7, 0, c2, c3, c1);
  536. sqr_add_c2(a, 6, 1, c2, c3, c1);
  537. sqr_add_c2(a, 5, 2, c2, c3, c1);
  538. sqr_add_c2(a, 4, 3, c2, c3, c1);
  539. r[7] = c2;
  540. c2 = 0;
  541. sqr_add_c(a, 4, c3, c1, c2);
  542. sqr_add_c2(a, 5, 3, c3, c1, c2);
  543. sqr_add_c2(a, 6, 2, c3, c1, c2);
  544. sqr_add_c2(a, 7, 1, c3, c1, c2);
  545. r[8] = c3;
  546. c3 = 0;
  547. sqr_add_c2(a, 7, 2, c1, c2, c3);
  548. sqr_add_c2(a, 6, 3, c1, c2, c3);
  549. sqr_add_c2(a, 5, 4, c1, c2, c3);
  550. r[9] = c1;
  551. c1 = 0;
  552. sqr_add_c(a, 5, c2, c3, c1);
  553. sqr_add_c2(a, 6, 4, c2, c3, c1);
  554. sqr_add_c2(a, 7, 3, c2, c3, c1);
  555. r[10] = c2;
  556. c2 = 0;
  557. sqr_add_c2(a, 7, 4, c3, c1, c2);
  558. sqr_add_c2(a, 6, 5, c3, c1, c2);
  559. r[11] = c3;
  560. c3 = 0;
  561. sqr_add_c(a, 6, c1, c2, c3);
  562. sqr_add_c2(a, 7, 5, c1, c2, c3);
  563. r[12] = c1;
  564. c1 = 0;
  565. sqr_add_c2(a, 7, 6, c2, c3, c1);
  566. r[13] = c2;
  567. c2 = 0;
  568. sqr_add_c(a, 7, c3, c1, c2);
  569. r[14] = c3;
  570. r[15] = c1;
  571. }
  572. void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
  573. {
  574. BN_ULONG c1, c2, c3;
  575. c1 = 0;
  576. c2 = 0;
  577. c3 = 0;
  578. sqr_add_c(a, 0, c1, c2, c3);
  579. r[0] = c1;
  580. c1 = 0;
  581. sqr_add_c2(a, 1, 0, c2, c3, c1);
  582. r[1] = c2;
  583. c2 = 0;
  584. sqr_add_c(a, 1, c3, c1, c2);
  585. sqr_add_c2(a, 2, 0, c3, c1, c2);
  586. r[2] = c3;
  587. c3 = 0;
  588. sqr_add_c2(a, 3, 0, c1, c2, c3);
  589. sqr_add_c2(a, 2, 1, c1, c2, c3);
  590. r[3] = c1;
  591. c1 = 0;
  592. sqr_add_c(a, 2, c2, c3, c1);
  593. sqr_add_c2(a, 3, 1, c2, c3, c1);
  594. r[4] = c2;
  595. c2 = 0;
  596. sqr_add_c2(a, 3, 2, c3, c1, c2);
  597. r[5] = c3;
  598. c3 = 0;
  599. sqr_add_c(a, 3, c1, c2, c3);
  600. r[6] = c1;
  601. r[7] = c2;
  602. }
  603. #endif