tls_sp_c32.c
  1. /*
  2. * Copyright (C) 2021 Denys Vlasenko
  3. *
  4. * Licensed under GPLv2, see file LICENSE in this source tree.
  5. */
  6. #include "tls.h"
  7. #define SP_DEBUG 0
  8. #define FIXED_SECRET 0
  9. #define FIXED_PEER_PUBKEY 0
  10. #define ALLOW_ASM 1
  11. #if SP_DEBUG
  12. # define dbg(...) fprintf(stderr, __VA_ARGS__)
  13. static void dump_hex(const char *fmt, const void *vp, int len)
  14. {
  15. char hexbuf[32 * 1024 + 4];
  16. const uint8_t *p = vp;
  17. bin2hex(hexbuf, (void*)p, len)[0] = '\0';
  18. dbg(fmt, hexbuf);
  19. }
  20. #else
  21. # define dbg(...) ((void)0)
  22. # define dump_hex(...) ((void)0)
  23. #endif
  24. typedef uint32_t sp_digit;
  25. typedef int32_t signed_sp_digit;
  26. /* 64-bit optimizations:
  27. * if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff,
  28. * then loads and stores can be done in 64-bit chunks.
  29. *
  30. * A narrower case is when arch is also little-endian (such as x86_64),
  31. * then "LSW first", uint32[8] and uint64[4] representations are equivalent,
  32. * and arithmetic can be done in 64 bits too.
  33. */
  34. #if defined(__GNUC__) && defined(__x86_64__)
  35. # define UNALIGNED_LE_64BIT 1
  36. #else
  37. # define UNALIGNED_LE_64BIT 0
  38. #endif
  39. /* The code below is taken from parts of
  40. * wolfssl-3.15.3/wolfcrypt/src/sp_c32.c
  41. * and heavily modified.
  42. */
  43. typedef struct sp_point {
  44. sp_digit x[8]
  45. #if ULONG_MAX > 0xffffffff
46. /* Keep sp_point[] array elements from being 64-bit misaligned */
  47. ALIGNED(8)
  48. #endif
  49. ;
  50. sp_digit y[8];
  51. sp_digit z[8];
  52. int infinity;
  53. } sp_point;
  54. /* The modulus (prime) of the curve P256. */
  55. static const sp_digit p256_mod[8] ALIGNED(8) = {
  56. 0xffffffff,0xffffffff,0xffffffff,0x00000000,
  57. 0x00000000,0x00000000,0x00000001,0xffffffff,
  58. };
  59. #define p256_mp_mod ((sp_digit)0x000001)
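/* p256_mp_mod is the Montgomery reduction constant mp = -1/p mod 2^32.
 * The lowest word of p256_mod is 0xffffffff == -1 (mod 2^32), hence mp == 1;
 * sp_512to256_mont_reduce_8() below relies on this.
 */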
  60. /* Write r as big endian to byte array.
  61. * Fixed length number of bytes written: 32
  62. *
  63. * r A single precision integer.
  64. * a Byte array.
  65. */
  66. #if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
  67. static void sp_256_to_bin_8(const sp_digit* rr, uint8_t* a)
  68. {
  69. int i;
  70. const uint64_t* r = (void*)rr;
  71. r += 4;
  72. for (i = 0; i < 4; i++) {
  73. r--;
  74. move_to_unaligned64(a, SWAP_BE64(*r));
  75. a += 8;
  76. }
  77. }
  78. #else
  79. static void sp_256_to_bin_8(const sp_digit* r, uint8_t* a)
  80. {
  81. int i;
  82. r += 8;
  83. for (i = 0; i < 8; i++) {
  84. r--;
  85. move_to_unaligned32(a, SWAP_BE32(*r));
  86. a += 4;
  87. }
  88. }
  89. #endif
  90. /* Read big endian unsigned byte array into r.
  91. *
  92. * r A single precision integer.
  93. * a Byte array.
94. * Fixed length number of bytes read: 32.
  95. */
  96. #if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
  97. static void sp_256_from_bin_8(sp_digit* rr, const uint8_t* a)
  98. {
  99. int i;
  100. uint64_t* r = (void*)rr;
  101. r += 4;
  102. for (i = 0; i < 4; i++) {
  103. uint64_t v;
  104. move_from_unaligned64(v, a);
  105. *--r = SWAP_BE64(v);
  106. a += 8;
  107. }
  108. }
  109. #else
  110. static void sp_256_from_bin_8(sp_digit* r, const uint8_t* a)
  111. {
  112. int i;
  113. r += 8;
  114. for (i = 0; i < 8; i++) {
  115. sp_digit v;
  116. move_from_unaligned32(v, a);
  117. *--r = SWAP_BE32(v);
  118. a += 4;
  119. }
  120. }
  121. #endif
  122. #if SP_DEBUG
  123. static void dump_256(const char *fmt, const sp_digit* r)
  124. {
  125. uint8_t b32[32];
  126. sp_256_to_bin_8(r, b32);
  127. dump_hex(fmt, b32, 32);
  128. }
  129. static void dump_512(const char *fmt, const sp_digit* r)
  130. {
  131. uint8_t b64[64];
  132. sp_256_to_bin_8(r, b64 + 32);
  133. sp_256_to_bin_8(r+8, b64);
  134. dump_hex(fmt, b64, 64);
  135. }
  136. #else
  137. # define dump_256(...) ((void)0)
  138. # define dump_512(...) ((void)0)
  139. #endif
  140. /* Convert a point of big-endian 32-byte x,y pair to type sp_point. */
  141. static void sp_256_point_from_bin2x32(sp_point* p, const uint8_t *bin2x32)
  142. {
  143. memset(p, 0, sizeof(*p));
  144. /*p->infinity = 0;*/
  145. sp_256_from_bin_8(p->x, bin2x32);
  146. sp_256_from_bin_8(p->y, bin2x32 + 32);
  147. p->z[0] = 1; /* p->z = 1 */
  148. }
  149. /* Compare a with b.
  150. *
  151. * return -ve, 0 or +ve if a is less than, equal to or greater than b
  152. * respectively.
  153. */
  154. #if UNALIGNED_LE_64BIT
  155. static signed_sp_digit sp_256_cmp_8(const sp_digit* aa, const sp_digit* bb)
  156. {
  157. const uint64_t* a = (void*)aa;
  158. const uint64_t* b = (void*)bb;
  159. int i;
  160. for (i = 3; i >= 0; i--) {
  161. if (a[i] == b[i])
  162. continue;
  163. return (a[i] > b[i]) * 2 - 1;
  164. }
  165. return 0;
  166. }
  167. #else
  168. static signed_sp_digit sp_256_cmp_8(const sp_digit* a, const sp_digit* b)
  169. {
  170. int i;
  171. for (i = 7; i >= 0; i--) {
  172. /* signed_sp_digit r = a[i] - b[i];
  173. * if (r != 0)
  174. * return r;
  175. * does not work: think about a[i]=0, b[i]=0xffffffff
  176. */
  177. if (a[i] == b[i])
  178. continue;
  179. return (a[i] > b[i]) * 2 - 1;
  180. }
  181. return 0;
  182. }
  183. #endif
  184. /* Compare two numbers to determine if they are equal.
  185. *
  186. * return 1 when equal and 0 otherwise.
  187. */
  188. static int sp_256_cmp_equal_8(const sp_digit* a, const sp_digit* b)
  189. {
  190. return sp_256_cmp_8(a, b) == 0;
  191. }
  192. /* Add b to a into r. (r = a + b). Return !0 on overflow */
  193. static int sp_256_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
  194. {
  195. #if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
  196. sp_digit reg;
  197. asm volatile (
  198. "\n movl (%0), %3"
  199. "\n addl (%1), %3"
  200. "\n movl %3, (%2)"
  201. "\n"
  202. "\n movl 1*4(%0), %3"
  203. "\n adcl 1*4(%1), %3"
  204. "\n movl %3, 1*4(%2)"
  205. "\n"
  206. "\n movl 2*4(%0), %3"
  207. "\n adcl 2*4(%1), %3"
  208. "\n movl %3, 2*4(%2)"
  209. "\n"
  210. "\n movl 3*4(%0), %3"
  211. "\n adcl 3*4(%1), %3"
  212. "\n movl %3, 3*4(%2)"
  213. "\n"
  214. "\n movl 4*4(%0), %3"
  215. "\n adcl 4*4(%1), %3"
  216. "\n movl %3, 4*4(%2)"
  217. "\n"
  218. "\n movl 5*4(%0), %3"
  219. "\n adcl 5*4(%1), %3"
  220. "\n movl %3, 5*4(%2)"
  221. "\n"
  222. "\n movl 6*4(%0), %3"
  223. "\n adcl 6*4(%1), %3"
  224. "\n movl %3, 6*4(%2)"
  225. "\n"
  226. "\n movl 7*4(%0), %3"
  227. "\n adcl 7*4(%1), %3"
  228. "\n movl %3, 7*4(%2)"
  229. "\n"
  230. "\n sbbl %3, %3"
  231. "\n"
  232. : "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
  233. : "0" (a), "1" (b), "2" (r)
  234. : "memory"
  235. );
  236. return reg;
  237. #elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
  238. uint64_t reg;
  239. asm volatile (
  240. "\n movq (%0), %3"
  241. "\n addq (%1), %3"
  242. "\n movq %3, (%2)"
  243. "\n"
  244. "\n movq 1*8(%0), %3"
  245. "\n adcq 1*8(%1), %3"
  246. "\n movq %3, 1*8(%2)"
  247. "\n"
  248. "\n movq 2*8(%0), %3"
  249. "\n adcq 2*8(%1), %3"
  250. "\n movq %3, 2*8(%2)"
  251. "\n"
  252. "\n movq 3*8(%0), %3"
  253. "\n adcq 3*8(%1), %3"
  254. "\n movq %3, 3*8(%2)"
  255. "\n"
  256. "\n sbbq %3, %3"
  257. "\n"
  258. : "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
  259. : "0" (a), "1" (b), "2" (r)
  260. : "memory"
  261. );
  262. return reg;
  263. #else
  264. int i;
  265. sp_digit carry;
  266. carry = 0;
  267. for (i = 0; i < 8; i++) {
  268. sp_digit w, v;
  269. w = b[i] + carry;
  270. v = a[i];
  271. if (w != 0) {
  272. v = a[i] + w;
  273. carry = (v < a[i]);
  274. /* hope compiler detects above as "carry flag set" */
  275. }
  276. /* else: b + carry == 0, two cases:
  277. * b:ffffffff, carry:1
  278. * b:00000000, carry:0
  279. * in either case, r[i] = a[i] and carry remains unchanged
  280. */
  281. r[i] = v;
  282. }
  283. return carry;
  284. #endif
  285. }
  286. /* Sub b from a into r. (r = a - b). Return !0 on underflow */
  287. static int sp_256_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
  288. {
  289. #if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
  290. sp_digit reg;
  291. asm volatile (
  292. "\n movl (%0), %3"
  293. "\n subl (%1), %3"
  294. "\n movl %3, (%2)"
  295. "\n"
  296. "\n movl 1*4(%0), %3"
  297. "\n sbbl 1*4(%1), %3"
  298. "\n movl %3, 1*4(%2)"
  299. "\n"
  300. "\n movl 2*4(%0), %3"
  301. "\n sbbl 2*4(%1), %3"
  302. "\n movl %3, 2*4(%2)"
  303. "\n"
  304. "\n movl 3*4(%0), %3"
  305. "\n sbbl 3*4(%1), %3"
  306. "\n movl %3, 3*4(%2)"
  307. "\n"
  308. "\n movl 4*4(%0), %3"
  309. "\n sbbl 4*4(%1), %3"
  310. "\n movl %3, 4*4(%2)"
  311. "\n"
  312. "\n movl 5*4(%0), %3"
  313. "\n sbbl 5*4(%1), %3"
  314. "\n movl %3, 5*4(%2)"
  315. "\n"
  316. "\n movl 6*4(%0), %3"
  317. "\n sbbl 6*4(%1), %3"
  318. "\n movl %3, 6*4(%2)"
  319. "\n"
  320. "\n movl 7*4(%0), %3"
  321. "\n sbbl 7*4(%1), %3"
  322. "\n movl %3, 7*4(%2)"
  323. "\n"
  324. "\n sbbl %3, %3"
  325. "\n"
  326. : "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
  327. : "0" (a), "1" (b), "2" (r)
  328. : "memory"
  329. );
  330. return reg;
  331. #elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
  332. uint64_t reg;
  333. asm volatile (
  334. "\n movq (%0), %3"
  335. "\n subq (%1), %3"
  336. "\n movq %3, (%2)"
  337. "\n"
  338. "\n movq 1*8(%0), %3"
  339. "\n sbbq 1*8(%1), %3"
  340. "\n movq %3, 1*8(%2)"
  341. "\n"
  342. "\n movq 2*8(%0), %3"
  343. "\n sbbq 2*8(%1), %3"
  344. "\n movq %3, 2*8(%2)"
  345. "\n"
  346. "\n movq 3*8(%0), %3"
  347. "\n sbbq 3*8(%1), %3"
  348. "\n movq %3, 3*8(%2)"
  349. "\n"
  350. "\n sbbq %3, %3"
  351. "\n"
  352. : "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
  353. : "0" (a), "1" (b), "2" (r)
  354. : "memory"
  355. );
  356. return reg;
  357. #else
  358. int i;
  359. sp_digit borrow;
  360. borrow = 0;
  361. for (i = 0; i < 8; i++) {
  362. sp_digit w, v;
  363. w = b[i] + borrow;
  364. v = a[i];
  365. if (w != 0) {
  366. v = a[i] - w;
  367. borrow = (v > a[i]);
  368. /* hope compiler detects above as "carry flag set" */
  369. }
  370. /* else: b + borrow == 0, two cases:
  371. * b:ffffffff, borrow:1
  372. * b:00000000, borrow:0
  373. * in either case, r[i] = a[i] and borrow remains unchanged
  374. */
  375. r[i] = v;
  376. }
  377. return borrow;
  378. #endif
  379. }
  380. /* Sub p256_mod from r. (r = r - p256_mod). */
  381. #if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
  382. static void sp_256_sub_8_p256_mod(sp_digit* r)
  383. {
  384. //p256_mod[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
  385. asm volatile (
  386. "\n subl $0xffffffff, (%0)"
  387. "\n sbbl $0xffffffff, 1*4(%0)"
  388. "\n sbbl $0xffffffff, 2*4(%0)"
  389. "\n sbbl $0, 3*4(%0)"
  390. "\n sbbl $0, 4*4(%0)"
  391. "\n sbbl $0, 5*4(%0)"
  392. "\n sbbl $1, 6*4(%0)"
  393. "\n sbbl $0xffffffff, 7*4(%0)"
  394. "\n"
  395. : "=r" (r)
  396. : "0" (r)
  397. : "memory"
  398. );
  399. }
  400. #elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
  401. static void sp_256_sub_8_p256_mod(sp_digit* r)
  402. {
  403. uint64_t reg;
  404. uint64_t ooff;
  405. //p256_mod[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
  406. asm volatile (
  407. "\n addq $1, (%0)" // adding 1 is the same as subtracting ffffffffffffffff
  408. "\n cmc" // only carry bit needs inverting
  409. "\n"
  410. "\n sbbq %1, 1*8(%0)" // %1 holds 00000000ffffffff
  411. "\n"
  412. "\n sbbq $0, 2*8(%0)"
  413. "\n"
  414. "\n movq 3*8(%0), %2"
  415. "\n sbbq $0, %2" // adding 00000000ffffffff (in %1)
  416. "\n addq %1, %2" // is the same as subtracting ffffffff00000001
  417. "\n movq %2, 3*8(%0)"
  418. "\n"
  419. : "=r" (r), "=r" (ooff), "=r" (reg)
  420. : "0" (r), "1" (0x00000000ffffffff)
  421. : "memory"
  422. );
  423. }
  424. #else
  425. static void sp_256_sub_8_p256_mod(sp_digit* r)
  426. {
  427. sp_256_sub_8(r, r, p256_mod);
  428. }
  429. #endif
  430. /* Multiply a and b into r. (r = a * b)
  431. * r should be [16] array (512 bits), and must not coincide with a or b.
  432. */
  433. static void sp_256to512_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
  434. {
  435. #if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
  436. int k;
  437. uint32_t accl;
  438. uint32_t acch;
  439. acch = accl = 0;
  440. for (k = 0; k < 15; k++) {
  441. int i, j;
  442. uint32_t acc_hi;
  443. i = k - 7;
  444. if (i < 0)
  445. i = 0;
  446. j = k - i;
  447. acc_hi = 0;
  448. do {
  449. ////////////////////////
  450. // uint64_t m = ((uint64_t)a[i]) * b[j];
  451. // acc_hi:acch:accl += m;
  452. asm volatile (
  453. // a[i] is already loaded in %%eax
  454. "\n mull %7"
  455. "\n addl %%eax, %0"
  456. "\n adcl %%edx, %1"
  457. "\n adcl $0, %2"
  458. : "=rm" (accl), "=rm" (acch), "=rm" (acc_hi)
  459. : "0" (accl), "1" (acch), "2" (acc_hi), "a" (a[i]), "m" (b[j])
  460. : "cc", "dx"
  461. );
  462. ////////////////////////
  463. j--;
  464. i++;
  465. } while (i != 8 && i <= k);
  466. r[k] = accl;
  467. accl = acch;
  468. acch = acc_hi;
  469. }
  470. r[15] = accl;
  471. #elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
  472. const uint64_t* aa = (const void*)a;
  473. const uint64_t* bb = (const void*)b;
  474. uint64_t* rr = (void*)r;
  475. int k;
  476. uint64_t accl;
  477. uint64_t acch;
  478. acch = accl = 0;
  479. for (k = 0; k < 7; k++) {
  480. int i, j;
  481. uint64_t acc_hi;
  482. i = k - 3;
  483. if (i < 0)
  484. i = 0;
  485. j = k - i;
  486. acc_hi = 0;
  487. do {
  488. ////////////////////////
  489. // uint128_t m = ((uint128_t)a[i]) * b[j];
  490. // acc_hi:acch:accl += m;
  491. asm volatile (
  492. // aa[i] is already loaded in %%rax
  493. "\n mulq %7"
  494. "\n addq %%rax, %0"
  495. "\n adcq %%rdx, %1"
  496. "\n adcq $0, %2"
  497. : "=rm" (accl), "=rm" (acch), "=rm" (acc_hi)
  498. : "0" (accl), "1" (acch), "2" (acc_hi), "a" (aa[i]), "m" (bb[j])
  499. : "cc", "dx"
  500. );
  501. ////////////////////////
  502. j--;
  503. i++;
  504. } while (i != 4 && i <= k);
  505. rr[k] = accl;
  506. accl = acch;
  507. acch = acc_hi;
  508. }
  509. rr[7] = accl;
  510. #elif 0
  511. //TODO: arm assembly (untested)
  512. asm volatile (
  513. "\n mov r5, #0"
  514. "\n mov r6, #0"
  515. "\n mov r7, #0"
  516. "\n mov r8, #0"
  517. "\n 1:"
  518. "\n subs r3, r5, #28"
  519. "\n movcc r3, #0"
  520. "\n sub r4, r5, r3"
  521. "\n 2:"
  522. "\n ldr r14, [%[a], r3]"
  523. "\n ldr r12, [%[b], r4]"
  524. "\n umull r9, r10, r14, r12"
  525. "\n adds r6, r6, r9"
  526. "\n adcs r7, r7, r10"
  527. "\n adc r8, r8, #0"
  528. "\n add r3, r3, #4"
  529. "\n sub r4, r4, #4"
  530. "\n cmp r3, #32"
  531. "\n beq 3f"
  532. "\n cmp r3, r5"
  533. "\n ble 2b"
  534. "\n 3:"
  535. "\n str r6, [%[r], r5]"
  536. "\n mov r6, r7"
  537. "\n mov r7, r8"
  538. "\n mov r8, #0"
  539. "\n add r5, r5, #4"
  540. "\n cmp r5, #56"
  541. "\n ble 1b"
  542. "\n str r6, [%[r], r5]"
  543. : [r] "r" (r), [a] "r" (a), [b] "r" (b)
  544. : "memory", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14"
  545. );
  546. #else
  547. int i, j, k;
  548. uint64_t acc;
  549. acc = 0;
  550. for (k = 0; k < 15; k++) {
  551. uint32_t acc_hi;
  552. i = k - 7;
  553. if (i < 0)
  554. i = 0;
  555. j = k - i;
  556. acc_hi = 0;
  557. do {
  558. uint64_t m = ((uint64_t)a[i]) * b[j];
  559. acc += m;
  560. if (acc < m)
  561. acc_hi++;
  562. j--;
  563. i++;
  564. } while (i != 8 && i <= k);
  565. r[k] = acc;
  566. acc = (acc >> 32) | ((uint64_t)acc_hi << 32);
  567. }
  568. r[15] = acc;
  569. #endif
  570. }
  571. /* Shift number right one bit. Bottom bit is lost. */
  572. #if UNALIGNED_LE_64BIT
  573. static void sp_256_rshift1_8(sp_digit* rr, uint64_t carry)
  574. {
  575. uint64_t *r = (void*)rr;
  576. int i;
  577. carry = (((uint64_t)!!carry) << 63);
  578. for (i = 3; i >= 0; i--) {
  579. uint64_t c = r[i] << 63;
  580. r[i] = (r[i] >> 1) | carry;
  581. carry = c;
  582. }
  583. }
  584. #else
  585. static void sp_256_rshift1_8(sp_digit* r, sp_digit carry)
  586. {
  587. int i;
  588. carry = (((sp_digit)!!carry) << 31);
  589. for (i = 7; i >= 0; i--) {
  590. sp_digit c = r[i] << 31;
  591. r[i] = (r[i] >> 1) | carry;
  592. carry = c;
  593. }
  594. }
  595. #endif
  596. /* Divide the number by 2 mod the modulus (prime). (r = (r / 2) % m) */
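/* If r is odd, the (odd) modulus is added first: r+m is even and congruent to r
 * mod m, so shifting right by one (with the add's carry as the new top bit)
 * yields r * 2^-1 mod m.
 */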
  597. static void sp_256_div2_8(sp_digit* r /*, const sp_digit* m*/)
  598. {
  599. const sp_digit* m = p256_mod;
  600. int carry = 0;
  601. if (r[0] & 1)
  602. carry = sp_256_add_8(r, r, m);
  603. sp_256_rshift1_8(r, carry);
  604. }
605. /* Add two Montgomery form numbers (r = (a + b) % m) */
  606. static void sp_256_mont_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b
  607. /*, const sp_digit* m*/)
  608. {
  609. // const sp_digit* m = p256_mod;
  610. int carry = sp_256_add_8(r, a, b);
  611. if (carry) {
  612. sp_256_sub_8_p256_mod(r);
  613. }
  614. }
615. /* Subtract two Montgomery form numbers (r = (a - b) % m) */
  616. static void sp_256_mont_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b
  617. /*, const sp_digit* m*/)
  618. {
  619. const sp_digit* m = p256_mod;
  620. int borrow;
  621. borrow = sp_256_sub_8(r, a, b);
  622. if (borrow) {
  623. sp_256_add_8(r, r, m);
  624. }
  625. }
626. /* Double a Montgomery form number (r = (a + a) % m) */
  627. static void sp_256_mont_dbl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
  628. {
  629. // const sp_digit* m = p256_mod;
  630. int carry = sp_256_add_8(r, a, a);
  631. if (carry)
  632. sp_256_sub_8_p256_mod(r);
  633. }
634. /* Triple a Montgomery form number (r = (a + a + a) % m) */
  635. static void sp_256_mont_tpl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
  636. {
  637. // const sp_digit* m = p256_mod;
  638. int carry = sp_256_add_8(r, a, a);
  639. if (carry) {
  640. sp_256_sub_8_p256_mod(r);
  641. }
  642. carry = sp_256_add_8(r, r, a);
  643. if (carry) {
  644. sp_256_sub_8_p256_mod(r);
  645. }
  646. }
  647. /* Shift the result in the high 256 bits down to the bottom. */
  648. static void sp_512to256_mont_shift_8(sp_digit* r, sp_digit* a)
  649. {
  650. memcpy(r, a + 8, sizeof(*r) * 8);
  651. }
  652. #if UNALIGNED_LE_64BIT
  653. /* 64-bit little-endian optimized version.
  654. * See generic 32-bit version below for explanation.
  655. * The benefit of this version is: even though r[3] calculation is atrocious,
  656. * we call sp_256_mul_add_4() four times, not 8.
  657. * Measured run time improvement of curve_P256_compute_pubkey_and_premaster()
  658. * call on x86-64: from ~1500us to ~900us. Code size +32 bytes.
  659. */
  660. static int sp_256_mul_add_4(uint64_t *r /*, const uint64_t* a, uint64_t b*/)
  661. {
  662. uint64_t b = r[0];
  663. # if 0
  664. const uint64_t* a = (const void*)p256_mod;
  665. //a[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
  666. uint128_t t;
  667. int i;
  668. t = 0;
  669. for (i = 0; i < 4; i++) {
  670. uint32_t t_hi;
  671. uint128_t m = ((uint128_t)b * a[i]) + r[i];
  672. t += m;
  673. t_hi = (t < m);
  674. r[i] = (uint64_t)t;
  675. t = (t >> 64) | ((uint128_t)t_hi << 64);
  676. }
  677. r[4] += (uint64_t)t;
  678. return (r[4] < (uint64_t)t); /* 1 if addition overflowed */
  679. # else
  680. // Unroll, then optimize the above loop:
  681. //uint32_t t_hi;
  682. //uint128_t m;
  683. uint64_t t64, t64u;
  684. //m = ((uint128_t)b * a[0]) + r[0];
  685. // Since b is r[0] and a[0] is ffffffffffffffff, the above optimizes to:
686. // m = r[0] * ffffffffffffffff + r[0] = ((r[0] << 64) - r[0]) + r[0] = r[0] << 64;
  687. //t += m;
  688. // t = r[0] << 64 = b << 64;
  689. //t_hi = (t < m);
  690. // t_hi = 0;
  691. //r[0] = (uint64_t)t;
  692. // r[0] = 0;
  693. //the store can be eliminated since caller won't look at lower 256 bits of the result
  694. //t = (t >> 64) | ((uint128_t)t_hi << 64);
  695. // t = b;
  696. //m = ((uint128_t)b * a[1]) + r[1];
  697. // Since a[1] is 00000000ffffffff, the above optimizes to:
  698. // m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
  699. //t += m;
  700. // t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
  701. //t_hi = (t < m);
  702. // t_hi = 0;
  703. //r[1] = (uint64_t)t;
  704. r[1] += (b << 32);
  705. //t = (t >> 64) | ((uint128_t)t_hi << 64);
  706. t64 = (r[1] < (b << 32));
  707. t64 += (b >> 32);
  708. //m = ((uint128_t)b * a[2]) + r[2];
  709. // Since a[2] is 0000000000000000, the above optimizes to:
  710. // m = b * 0 + r[2] = r[2];
  711. //t += m;
  712. // t = t64 + r[2];
  713. //t_hi = (t < m);
  714. // t_hi = 0;
  715. //r[2] = (uint64_t)t;
  716. r[2] += t64;
  717. //t = (t >> 64) | ((uint128_t)t_hi << 64);
  718. t64 = (r[2] < t64);
  719. //m = ((uint128_t)b * a[3]) + r[3];
  720. // Since a[3] is ffffffff00000001, the above optimizes to:
  721. // m = b * ffffffff00000001 + r[3];
  722. // m = b + b*ffffffff00000000 + r[3]
  723. // m = b + (b*ffffffff << 32) + r[3]
  724. // m = b + (((b<<32) - b) << 32) + r[3]
  725. //t += m;
  726. // t = t64 + (uint128_t)b + ((((uint128_t)b << 32) - b) << 32) + r[3];
  727. t64 += b;
  728. t64u = (t64 < b);
  729. t64 += r[3];
  730. t64u += (t64 < r[3]);
  731. { // add ((((uint128_t)b << 32) - b) << 32):
  732. uint64_t lo, hi;
733. //lo = ((b << 32) - b) << 32
  734. //hi = (((uint128_t)b << 32) - b) >> 32
  735. //but without uint128_t:
  736. hi = (b << 32) - b; /* make lower 32 bits of "hi", part 1 */
  737. b = (b >> 32) - (/*borrowed above?*/(b << 32) < b); /* upper 32 bits of "hi" are in b */
  738. lo = hi << 32; /* (use "hi" value to calculate "lo",... */
  739. t64 += lo; /* ...consume... */
  740. t64u += (t64 < lo); /* ..."lo") */
  741. hi >>= 32; /* make lower 32 bits of "hi", part 2 */
  742. hi |= (b << 32); /* combine lower and upper 32 bits */
  743. t64u += hi; /* consume "hi" */
  744. }
  745. //t_hi = (t < m);
  746. // t_hi = 0;
  747. //r[3] = (uint64_t)t;
  748. r[3] = t64;
  749. //t = (t >> 64) | ((uint128_t)t_hi << 64);
  750. // t = t64u;
  751. r[4] += t64u;
  752. return (r[4] < t64u); /* 1 if addition overflowed */
  753. # endif
  754. }
  755. static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* aa/*, const sp_digit* m, sp_digit mp*/)
  756. {
  757. // const sp_digit* m = p256_mod;
  758. int i;
  759. uint64_t *a = (void*)aa;
  760. sp_digit carry = 0;
  761. for (i = 0; i < 4; i++) {
  762. // mu = a[i];
  763. if (sp_256_mul_add_4(a+i /*, m, mu*/)) {
  764. int j = i + 4;
  765. inc_next_word:
  766. if (++j > 7) { /* a[8] array has no more words? */
  767. carry++;
  768. continue;
  769. }
  770. if (++a[j] == 0) /* did this overflow too? */
  771. goto inc_next_word;
  772. }
  773. }
  774. sp_512to256_mont_shift_8(r, aa);
  775. if (carry != 0)
  776. sp_256_sub_8_p256_mod(r);
  777. }
  778. #else /* Generic 32-bit version */
  779. /* Mul a by scalar b and add into r. (r += a * b)
  780. * a = p256_mod
  781. * b = r[0]
  782. */
  783. static int sp_256_mul_add_8(sp_digit* r /*, const sp_digit* a, sp_digit b*/)
  784. {
  785. sp_digit b = r[0];
  786. uint64_t t;
  787. # if 0
  788. const sp_digit* a = p256_mod;
  789. //a[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
  790. int i;
  791. t = 0;
  792. for (i = 0; i < 8; i++) {
  793. uint32_t t_hi;
  794. uint64_t m = ((uint64_t)b * a[i]) + r[i];
  795. t += m;
  796. t_hi = (t < m);
  797. r[i] = (sp_digit)t;
  798. t = (t >> 32) | ((uint64_t)t_hi << 32);
  799. }
  800. r[8] += (sp_digit)t;
  801. return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
  802. # else
  803. // Unroll, then optimize the above loop:
  804. //uint32_t t_hi;
  805. uint64_t m;
  806. uint32_t t32;
  807. //m = ((uint64_t)b * a[0]) + r[0];
  808. // Since b is r[0] and a[0] is ffffffff, the above optimizes to:
  809. // m = r[0] * ffffffff + r[0] = (r[0] * 100000000 - r[0]) + r[0] = r[0] << 32;
  810. //t += m;
  811. // t = r[0] << 32 = b << 32;
  812. //t_hi = (t < m);
  813. // t_hi = 0;
  814. //r[0] = (sp_digit)t;
  815. // r[0] = 0;
  816. //the store can be eliminated since caller won't look at lower 256 bits of the result
  817. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  818. // t = b;
  819. //m = ((uint64_t)b * a[1]) + r[1];
  820. // Since a[1] is ffffffff, the above optimizes to:
  821. // m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
  822. //t += m;
  823. // t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
  824. //t_hi = (t < m);
  825. // t_hi = 0;
  826. //r[1] = (sp_digit)t;
  827. // r[1] = r[1];
  828. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  829. // t = b;
  830. //m = ((uint64_t)b * a[2]) + r[2];
  831. // Since a[2] is ffffffff, the above optimizes to:
  832. // m = b * ffffffff + r[2] = (b * 100000000 - b) + r[2] = (b << 32) - b + r[2];
  833. //t += m;
  834. // t = b + (b << 32) - b + r[2] = (b << 32) + r[2]
  835. //t_hi = (t < m);
  836. // t_hi = 0;
  837. //r[2] = (sp_digit)t;
  838. // r[2] = r[2];
  839. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  840. // t = b;
  841. //m = ((uint64_t)b * a[3]) + r[3];
  842. // Since a[3] is 00000000, the above optimizes to:
  843. // m = b * 0 + r[3] = r[3];
  844. //t += m;
  845. // t = b + r[3];
  846. //t_hi = (t < m);
  847. // t_hi = 0;
  848. //r[3] = (sp_digit)t;
  849. r[3] = r[3] + b;
  850. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  851. t32 = (r[3] < b); // 0 or 1
  852. //m = ((uint64_t)b * a[4]) + r[4];
  853. // Since a[4] is 00000000, the above optimizes to:
  854. // m = b * 0 + r[4] = r[4];
  855. //t += m;
  856. // t = t32 + r[4];
  857. //t_hi = (t < m);
  858. // t_hi = 0;
  859. //r[4] = (sp_digit)t;
  860. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  861. if (t32 != 0) {
  862. r[4]++;
  863. t32 = (r[4] == 0); // 0 or 1
  864. //m = ((uint64_t)b * a[5]) + r[5];
  865. // Since a[5] is 00000000, the above optimizes to:
  866. // m = b * 0 + r[5] = r[5];
  867. //t += m;
  868. // t = t32 + r[5]; (t32 is 0 or 1)
  869. //t_hi = (t < m);
  870. // t_hi = 0;
  871. //r[5] = (sp_digit)t;
  872. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  873. if (t32 != 0) {
  874. r[5]++;
  875. t32 = (r[5] == 0); // 0 or 1
  876. }
  877. }
  878. //m = ((uint64_t)b * a[6]) + r[6];
  879. // Since a[6] is 00000001, the above optimizes to:
  880. // m = (uint64_t)b + r[6]; // 33 bits at most
  881. //t += m;
  882. t = t32 + (uint64_t)b + r[6];
  883. //t_hi = (t < m);
  884. // t_hi = 0;
  885. r[6] = (sp_digit)t;
  886. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  887. t = (t >> 32);
  888. //m = ((uint64_t)b * a[7]) + r[7];
  889. // Since a[7] is ffffffff, the above optimizes to:
  890. // m = b * ffffffff + r[7] = (b * 100000000 - b) + r[7]
  891. m = ((uint64_t)b << 32) - b + r[7];
  892. t += m;
  893. //t_hi = (t < m);
  894. // t_hi in fact is always 0 here (256bit * 32bit can't have more than 32 bits of overflow)
  895. r[7] = (sp_digit)t;
  896. //t = (t >> 32) | ((uint64_t)t_hi << 32);
  897. t = (t >> 32);
  898. r[8] += (sp_digit)t;
  899. return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
  900. # endif
  901. }
  902. /* Reduce the number back to 256 bits using Montgomery reduction.
  903. * Note: the result is NOT guaranteed to be less than p256_mod!
  904. * (it is only guaranteed to fit into 256 bits).
  905. *
  906. * r Result.
  907. * a Double-wide number to reduce. Clobbered.
  908. * m The single precision number representing the modulus.
  909. * mp The digit representing the negative inverse of m mod 2^n.
  910. *
  911. * Montgomery reduction on multiprecision integers:
  912. * Montgomery reduction requires products modulo R.
913. * When R is a power of B [in our case R=2^256, B=2^32], there is a variant
914. * of Montgomery reduction which requires products only of machine-word-sized
915. * integers. T is stored as a little-endian word array a[0..n]. The algorithm
  916. * reduces it one word at a time. First an appropriate multiple of modulus
  917. * is added to make T divisible by B. [In our case, it is p256_mp_mod * a[0].]
  918. * Then a multiple of modulus is added to make T divisible by B^2.
  919. * [In our case, it is (p256_mp_mod * a[1]) << 32.]
  920. * And so on. Eventually T is divisible by R, and after division by R
  921. * the algorithm is in the same place as the usual Montgomery reduction.
  922. */
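/* A minimal sketch of one reduction step, assuming mp == 1 (as it is for P256)
 * and ignoring the final carry handling:
 *
 *   mu = a[i];                    // == a[i] * p256_mp_mod mod 2^32
 *   a[i..i+8] += mu * p256_mod;   // makes a[i] == 0; carry goes into a[i+8]
 *
 * sp_256_mul_add_8() above does the "+= mu * p256_mod" step with p256_mod's
 * words hardcoded (it skips storing the now-zero low word) and returns the
 * carry out of a[i+8].
 */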
  923. static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* a/*, const sp_digit* m, sp_digit mp*/)
  924. {
  925. // const sp_digit* m = p256_mod;
  926. sp_digit mp = p256_mp_mod;
  927. int i;
  928. // sp_digit mu;
  929. if (mp != 1) {
  930. sp_digit word16th = 0;
  931. for (i = 0; i < 8; i++) {
  932. // mu = (sp_digit)(a[i] * mp);
  933. if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
  934. int j = i + 8;
  935. inc_next_word0:
  936. if (++j > 15) { /* a[16] array has no more words? */
  937. word16th++;
  938. continue;
  939. }
  940. if (++a[j] == 0) /* did this overflow too? */
  941. goto inc_next_word0;
  942. }
  943. }
  944. sp_512to256_mont_shift_8(r, a);
  945. if (word16th != 0)
  946. sp_256_sub_8_p256_mod(r);
  947. }
  948. else { /* Same code for explicit mp == 1 (which is always the case for P256) */
  949. sp_digit word16th = 0;
  950. for (i = 0; i < 8; i++) {
  951. // mu = a[i];
  952. if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
  953. int j = i + 8;
  954. inc_next_word:
  955. if (++j > 15) { /* a[16] array has no more words? */
  956. word16th++;
  957. continue;
  958. }
  959. if (++a[j] == 0) /* did this overflow too? */
  960. goto inc_next_word;
  961. }
  962. }
  963. sp_512to256_mont_shift_8(r, a);
  964. if (word16th != 0)
  965. sp_256_sub_8_p256_mod(r);
  966. }
  967. }
  968. #endif
969. /* Multiply two Montgomery form numbers mod the modulus (prime).
970. * (r = a * b mod m)
971. *
972. * r Result of multiplication.
973. * a First number to multiply in Montgomery form.
974. * b Second number to multiply in Montgomery form.
975. * m Modulus (prime).
976. * mp Montgomery multiplier.
  977. */
  978. static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b
  979. /*, const sp_digit* m, sp_digit mp*/)
  980. {
  981. //const sp_digit* m = p256_mod;
  982. //sp_digit mp = p256_mp_mod;
  983. sp_digit t[2 * 8];
  984. sp_256to512_mul_8(t, a, b);
  985. sp_512to256_mont_reduce_8(r, t /*, m, mp*/);
  986. }
  987. /* Square the Montgomery form number. (r = a * a mod m)
  988. *
  989. * r Result of squaring.
990. * a Number to square in Montgomery form.
991. * m Modulus (prime).
992. * mp Montgomery multiplier.
  993. */
  994. static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a
  995. /*, const sp_digit* m, sp_digit mp*/)
  996. {
  997. //const sp_digit* m = p256_mod;
  998. //sp_digit mp = p256_mp_mod;
  999. sp_256_mont_mul_8(r, a, a /*, m, mp*/);
  1000. }
  1001. static NOINLINE void sp_256_mont_mul_and_reduce_8(sp_digit* r,
  1002. const sp_digit* a, const sp_digit* b
  1003. /*, const sp_digit* m, sp_digit mp*/)
  1004. {
  1005. sp_digit rr[2 * 8];
  1006. sp_256_mont_mul_8(rr, a, b /*, p256_mod, p256_mp_mod*/);
  1007. memset(rr + 8, 0, sizeof(rr) / 2);
  1008. sp_512to256_mont_reduce_8(r, rr /*, p256_mod, p256_mp_mod*/);
  1009. }
  1010. /* Invert the number, in Montgomery form, modulo the modulus (prime) of the
  1011. * P256 curve. (r = 1 / a mod m)
  1012. *
  1013. * r Inverse result. Must not coincide with a.
  1014. * a Number to invert.
  1015. */
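/* Implemented via Fermat's little theorem: r = a^(p-2) mod p, computed with a
 * left-to-right square-and-multiply keyed to the bits of p256_mod - 2.
 */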
  1016. static void sp_256_mont_inv_8(sp_digit* r, sp_digit* a)
  1017. {
  1018. int i;
  1019. memcpy(r, a, sizeof(sp_digit) * 8);
  1020. for (i = 254; i >= 0; i--) {
  1021. sp_256_mont_sqr_8(r, r /*, p256_mod, p256_mp_mod*/);
  1022. /* p256_mod - 2:
  1023. * ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff - 2
  1024. * Bit pattern:
  1025. * 2 2 2 2 2 2 2 1...1
  1026. * 5 5 4 3 2 1 0 9...0 9...1
  1027. * 543210987654321098765432109876543210987654321098765432109876543210...09876543210...09876543210
  1028. * 111111111111111111111111111111110000000000000000000000000000000100...00000111111...11111111101
  1029. */
  1030. /*if (p256_mod_minus_2[i / 32] & ((sp_digit)1 << (i % 32)))*/
  1031. if (i >= 224 || i == 192 || (i <= 95 && i != 1))
  1032. sp_256_mont_mul_8(r, r, a /*, p256_mod, p256_mp_mod*/);
  1033. }
  1034. }
1035. /* Multiply a number by the Montgomery normalizer mod the modulus (prime).
  1036. *
  1037. * r The resulting Montgomery form number.
  1038. * a The number to convert.
  1039. */
  1040. static void sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a)
  1041. {
  1042. int64_t t[8];
  1043. int32_t o;
  1044. #define A(n) ((uint64_t)a[n])
  1045. /* 1 1 0 -1 -1 -1 -1 0 */
  1046. t[0] = 0 + A(0) + A(1) - A(3) - A(4) - A(5) - A(6);
  1047. /* 0 1 1 0 -1 -1 -1 -1 */
  1048. t[1] = 0 + A(1) + A(2) - A(4) - A(5) - A(6) - A(7);
  1049. /* 0 0 1 1 0 -1 -1 -1 */
  1050. t[2] = 0 + A(2) + A(3) - A(5) - A(6) - A(7);
  1051. /* -1 -1 0 2 2 1 0 -1 */
  1052. t[3] = 0 - A(0) - A(1) + 2 * A(3) + 2 * A(4) + A(5) - A(7);
  1053. /* 0 -1 -1 0 2 2 1 0 */
  1054. t[4] = 0 - A(1) - A(2) + 2 * A(4) + 2 * A(5) + A(6);
  1055. /* 0 0 -1 -1 0 2 2 1 */
  1056. t[5] = 0 - A(2) - A(3) + 2 * A(5) + 2 * A(6) + A(7);
  1057. /* -1 -1 0 0 0 1 3 2 */
  1058. t[6] = 0 - A(0) - A(1) + A(5) + 3 * A(6) + 2 * A(7);
  1059. /* 1 0 -1 -1 -1 -1 0 3 */
  1060. t[7] = 0 + A(0) - A(2) - A(3) - A(4) - A(5) + 3 * A(7);
  1061. #undef A
  1062. t[1] += t[0] >> 32; t[0] &= 0xffffffff;
  1063. t[2] += t[1] >> 32; t[1] &= 0xffffffff;
  1064. t[3] += t[2] >> 32; t[2] &= 0xffffffff;
  1065. t[4] += t[3] >> 32; t[3] &= 0xffffffff;
  1066. t[5] += t[4] >> 32; t[4] &= 0xffffffff;
  1067. t[6] += t[5] >> 32; t[5] &= 0xffffffff;
  1068. t[7] += t[6] >> 32; t[6] &= 0xffffffff;
  1069. o = t[7] >> 32; //t[7] &= 0xffffffff;
  1070. t[0] += o;
  1071. t[3] -= o;
  1072. t[6] -= o;
  1073. t[7] += o;
  1074. r[0] = (sp_digit)t[0];
  1075. t[1] += t[0] >> 32;
  1076. r[1] = (sp_digit)t[1];
  1077. t[2] += t[1] >> 32;
  1078. r[2] = (sp_digit)t[2];
  1079. t[3] += t[2] >> 32;
  1080. r[3] = (sp_digit)t[3];
  1081. t[4] += t[3] >> 32;
  1082. r[4] = (sp_digit)t[4];
  1083. t[5] += t[4] >> 32;
  1084. r[5] = (sp_digit)t[5];
  1085. t[6] += t[5] >> 32;
  1086. r[6] = (sp_digit)t[6];
  1087. // t[7] += t[6] >> 32;
  1088. // r[7] = (sp_digit)t[7];
  1089. r[7] = (sp_digit)t[7] + (sp_digit)(t[6] >> 32);
  1090. }
  1091. /* Map the Montgomery form projective co-ordinate point to an affine point.
  1092. *
  1093. * r Resulting affine co-ordinate point.
  1094. * p Montgomery form projective co-ordinate point.
  1095. */
  1096. static void sp_256_map_8(sp_point* r, sp_point* p)
  1097. {
  1098. sp_digit t1[8];
  1099. sp_digit t2[8];
  1100. sp_256_mont_inv_8(t1, p->z);
  1101. sp_256_mont_sqr_8(t2, t1 /*, p256_mod, p256_mp_mod*/);
  1102. sp_256_mont_mul_8(t1, t2, t1 /*, p256_mod, p256_mp_mod*/);
  1103. /* x /= z^2 */
  1104. sp_256_mont_mul_and_reduce_8(r->x, p->x, t2 /*, p256_mod, p256_mp_mod*/);
  1105. /* Reduce x to less than modulus */
  1106. if (sp_256_cmp_8(r->x, p256_mod) >= 0)
  1107. sp_256_sub_8_p256_mod(r->x);
  1108. /* y /= z^3 */
  1109. sp_256_mont_mul_and_reduce_8(r->y, p->y, t1 /*, p256_mod, p256_mp_mod*/);
  1110. /* Reduce y to less than modulus */
  1111. if (sp_256_cmp_8(r->y, p256_mod) >= 0)
  1112. sp_256_sub_8_p256_mod(r->y);
  1113. memset(r->z, 0, sizeof(r->z));
  1114. r->z[0] = 1;
  1115. }
  1116. /* Double the Montgomery form projective point p.
  1117. *
  1118. * r Result of doubling point.
  1119. * p Point to double.
  1120. */
  1121. static void sp_256_proj_point_dbl_8(sp_point* r, sp_point* p)
  1122. {
  1123. sp_digit t1[8];
  1124. sp_digit t2[8];
  1125. /* Put point to double into result */
  1126. if (r != p)
  1127. *r = *p; /* struct copy */
  1128. if (r->infinity)
  1129. return;
  1130. /* T1 = Z * Z */
  1131. sp_256_mont_sqr_8(t1, r->z /*, p256_mod, p256_mp_mod*/);
  1132. /* Z = Y * Z */
  1133. sp_256_mont_mul_8(r->z, r->y, r->z /*, p256_mod, p256_mp_mod*/);
  1134. /* Z = 2Z */
  1135. sp_256_mont_dbl_8(r->z, r->z /*, p256_mod*/);
  1136. /* T2 = X - T1 */
  1137. sp_256_mont_sub_8(t2, r->x, t1 /*, p256_mod*/);
  1138. /* T1 = X + T1 */
  1139. sp_256_mont_add_8(t1, r->x, t1 /*, p256_mod*/);
  1140. /* T2 = T1 * T2 */
  1141. sp_256_mont_mul_8(t2, t1, t2 /*, p256_mod, p256_mp_mod*/);
  1142. /* T1 = 3T2 */
  1143. sp_256_mont_tpl_8(t1, t2 /*, p256_mod*/);
  1144. /* Y = 2Y */
  1145. sp_256_mont_dbl_8(r->y, r->y /*, p256_mod*/);
  1146. /* Y = Y * Y */
  1147. sp_256_mont_sqr_8(r->y, r->y /*, p256_mod, p256_mp_mod*/);
  1148. /* T2 = Y * Y */
  1149. sp_256_mont_sqr_8(t2, r->y /*, p256_mod, p256_mp_mod*/);
  1150. /* T2 = T2/2 */
  1151. sp_256_div2_8(t2 /*, p256_mod*/);
  1152. /* Y = Y * X */
  1153. sp_256_mont_mul_8(r->y, r->y, r->x /*, p256_mod, p256_mp_mod*/);
  1154. /* X = T1 * T1 */
  1155. sp_256_mont_mul_8(r->x, t1, t1 /*, p256_mod, p256_mp_mod*/);
  1156. /* X = X - Y */
  1157. sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
  1158. /* X = X - Y */
  1159. sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
  1160. /* Y = Y - X */
  1161. sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
  1162. /* Y = Y * T1 */
  1163. sp_256_mont_mul_8(r->y, r->y, t1 /*, p256_mod, p256_mp_mod*/);
  1164. /* Y = Y - T2 */
  1165. sp_256_mont_sub_8(r->y, r->y, t2 /*, p256_mod*/);
  1166. dump_512("y2 %s\n", r->y);
  1167. }
  1168. /* Add two Montgomery form projective points.
  1169. *
  1170. * r Result of addition.
1171. * p First point to add.
  1172. * q Second point to add.
  1173. */
  1174. static NOINLINE void sp_256_proj_point_add_8(sp_point* r, sp_point* p, sp_point* q)
  1175. {
  1176. sp_digit t1[8];
  1177. sp_digit t2[8];
  1178. sp_digit t3[8];
  1179. sp_digit t4[8];
  1180. sp_digit t5[8];
  1181. /* Ensure only the first point is the same as the result. */
  1182. if (q == r) {
  1183. sp_point* a = p;
  1184. p = q;
  1185. q = a;
  1186. }
  1187. /* Check double */
  1188. sp_256_sub_8(t1, p256_mod, q->y);
  1189. if (sp_256_cmp_equal_8(p->x, q->x)
  1190. && sp_256_cmp_equal_8(p->z, q->z)
  1191. && (sp_256_cmp_equal_8(p->y, q->y) || sp_256_cmp_equal_8(p->y, t1))
  1192. ) {
  1193. sp_256_proj_point_dbl_8(r, p);
  1194. return;
  1195. }
  1196. if (p->infinity || q->infinity) {
  1197. *r = p->infinity ? *q : *p; /* struct copy */
  1198. return;
  1199. }
  1200. /* U1 = X1*Z2^2 */
  1201. sp_256_mont_sqr_8(t1, q->z /*, p256_mod, p256_mp_mod*/);
  1202. sp_256_mont_mul_8(t3, t1, q->z /*, p256_mod, p256_mp_mod*/);
  1203. sp_256_mont_mul_8(t1, t1, r->x /*, p256_mod, p256_mp_mod*/);
  1204. /* U2 = X2*Z1^2 */
  1205. sp_256_mont_sqr_8(t2, r->z /*, p256_mod, p256_mp_mod*/);
  1206. sp_256_mont_mul_8(t4, t2, r->z /*, p256_mod, p256_mp_mod*/);
  1207. sp_256_mont_mul_8(t2, t2, q->x /*, p256_mod, p256_mp_mod*/);
  1208. /* S1 = Y1*Z2^3 */
  1209. sp_256_mont_mul_8(t3, t3, r->y /*, p256_mod, p256_mp_mod*/);
  1210. /* S2 = Y2*Z1^3 */
  1211. sp_256_mont_mul_8(t4, t4, q->y /*, p256_mod, p256_mp_mod*/);
  1212. /* H = U2 - U1 */
  1213. sp_256_mont_sub_8(t2, t2, t1 /*, p256_mod*/);
  1214. /* R = S2 - S1 */
  1215. sp_256_mont_sub_8(t4, t4, t3 /*, p256_mod*/);
  1216. /* Z3 = H*Z1*Z2 */
  1217. sp_256_mont_mul_8(r->z, r->z, q->z /*, p256_mod, p256_mp_mod*/);
  1218. sp_256_mont_mul_8(r->z, r->z, t2 /*, p256_mod, p256_mp_mod*/);
  1219. /* X3 = R^2 - H^3 - 2*U1*H^2 */
  1220. sp_256_mont_sqr_8(r->x, t4 /*, p256_mod, p256_mp_mod*/);
  1221. sp_256_mont_sqr_8(t5, t2 /*, p256_mod, p256_mp_mod*/);
  1222. sp_256_mont_mul_8(r->y, t1, t5 /*, p256_mod, p256_mp_mod*/);
  1223. sp_256_mont_mul_8(t5, t5, t2 /*, p256_mod, p256_mp_mod*/);
  1224. sp_256_mont_sub_8(r->x, r->x, t5 /*, p256_mod*/);
  1225. sp_256_mont_dbl_8(t1, r->y /*, p256_mod*/);
  1226. sp_256_mont_sub_8(r->x, r->x, t1 /*, p256_mod*/);
  1227. /* Y3 = R*(U1*H^2 - X3) - S1*H^3 */
  1228. sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
  1229. sp_256_mont_mul_8(r->y, r->y, t4 /*, p256_mod, p256_mp_mod*/);
  1230. sp_256_mont_mul_8(t5, t5, t3 /*, p256_mod, p256_mp_mod*/);
  1231. sp_256_mont_sub_8(r->y, r->y, t5 /*, p256_mod*/);
  1232. }
  1233. /* Multiply the point by the scalar and return the result.
  1234. * If map is true then convert result to affine co-ordinates.
  1235. *
  1236. * r Resulting point.
  1237. * g Point to multiply.
  1238. * k Scalar to multiply by.
  1239. * map Indicates whether to convert result to affine.
  1240. */
  1241. static void sp_256_ecc_mulmod_8(sp_point* r, const sp_point* g, const sp_digit* k /*, int map*/)
  1242. {
  1243. enum { map = 1 }; /* we always convert result to affine coordinates */
  1244. sp_point t[3];
  1245. sp_digit n = n; /* for compiler */
  1246. int c, y;
  1247. memset(t, 0, sizeof(t));
  1248. /* t[0] = {0, 0, 1} * norm */
  1249. t[0].infinity = 1;
  1250. /* t[1] = {g->x, g->y, g->z} * norm */
  1251. sp_256_mod_mul_norm_8(t[1].x, g->x);
  1252. sp_256_mod_mul_norm_8(t[1].y, g->y);
  1253. sp_256_mod_mul_norm_8(t[1].z, g->z);
  1254. /* For every bit, starting from most significant... */
  1255. k += 7;
  1256. c = 256;
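/* Montgomery-ladder-style double-and-add: t[0] and t[1] always differ by the
 * (normalized) point g. For each scalar bit y, t[y^1] = t[0] + t[1] and then
 * t[y] = 2*t[y], so the same add-then-double work is done for either bit value;
 * the result ends up in t[0].
 */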
  1257. for (;;) {
  1258. if ((c & 0x1f) == 0) {
  1259. if (c == 0)
  1260. break;
  1261. n = *k--;
  1262. }
  1263. y = (n >> 31);
  1264. dbg("y:%d t[%d] = t[0]+t[1]\n", y, y^1);
  1265. sp_256_proj_point_add_8(&t[y^1], &t[0], &t[1]);
  1266. dump_512("t[0].x %s\n", t[0].x);
  1267. dump_512("t[0].y %s\n", t[0].y);
  1268. dump_512("t[0].z %s\n", t[0].z);
  1269. dump_512("t[1].x %s\n", t[1].x);
  1270. dump_512("t[1].y %s\n", t[1].y);
  1271. dump_512("t[1].z %s\n", t[1].z);
  1272. dbg("t[2] = t[%d]\n", y);
  1273. t[2] = t[y]; /* struct copy */
  1274. dbg("t[2] *= 2\n");
  1275. sp_256_proj_point_dbl_8(&t[2], &t[2]);
  1276. dump_512("t[2].x %s\n", t[2].x);
  1277. dump_512("t[2].y %s\n", t[2].y);
  1278. dump_512("t[2].z %s\n", t[2].z);
  1279. t[y] = t[2]; /* struct copy */
  1280. n <<= 1;
  1281. c--;
  1282. }
  1283. if (map)
  1284. sp_256_map_8(r, &t[0]);
  1285. else
  1286. *r = t[0]; /* struct copy */
  1287. memset(t, 0, sizeof(t)); //paranoia
  1288. }
  1289. /* Multiply the base point of P256 by the scalar and return the result.
  1290. * If map is true then convert result to affine co-ordinates.
  1291. *
  1292. * r Resulting point.
  1293. * k Scalar to multiply by.
  1294. * map Indicates whether to convert result to affine.
  1295. */
  1296. static void sp_256_ecc_mulmod_base_8(sp_point* r, sp_digit* k /*, int map*/)
  1297. {
  1298. /* Since this function is called only once, save space:
  1299. * don't have "static const sp_point p256_base = {...}".
  1300. */
  1301. static const uint8_t p256_base_bin[] = {
  1302. /* x (big-endian) */
  1303. 0x6b,0x17,0xd1,0xf2,0xe1,0x2c,0x42,0x47,0xf8,0xbc,0xe6,0xe5,0x63,0xa4,0x40,0xf2,
  1304. 0x77,0x03,0x7d,0x81,0x2d,0xeb,0x33,0xa0,0xf4,0xa1,0x39,0x45,0xd8,0x98,0xc2,0x96,
  1305. /* y */
  1306. 0x4f,0xe3,0x42,0xe2,0xfe,0x1a,0x7f,0x9b,0x8e,0xe7,0xeb,0x4a,0x7c,0x0f,0x9e,0x16,
  1307. 0x2b,0xce,0x33,0x57,0x6b,0x31,0x5e,0xce,0xcb,0xb6,0x40,0x68,0x37,0xbf,0x51,0xf5,
  1308. /* z will be set to 1, infinity flag to "false" */
  1309. };
  1310. sp_point p256_base;
  1311. sp_256_point_from_bin2x32(&p256_base, p256_base_bin);
  1312. sp_256_ecc_mulmod_8(r, &p256_base, k /*, map*/);
  1313. }
  1314. /* Multiply the point by the scalar and serialize the X ordinate.
  1315. * The number is 0 padded to maximum size on output.
  1316. *
  1317. * priv Scalar to multiply the point by.
  1318. * pub2x32 Point to multiply.
  1319. * out32 Buffer to hold X ordinate.
  1320. */
  1321. static void sp_ecc_secret_gen_256(const sp_digit priv[8], const uint8_t *pub2x32, uint8_t* out32)
  1322. {
  1323. sp_point point[1];
  1324. #if FIXED_PEER_PUBKEY
  1325. memset((void*)pub2x32, 0x55, 64);
  1326. #endif
  1327. dump_hex("peerkey %s\n", pub2x32, 32); /* in TLS, this is peer's public key */
  1328. dump_hex(" %s\n", pub2x32 + 32, 32);
  1329. sp_256_point_from_bin2x32(point, pub2x32);
  1330. dump_512("point->x %s\n", point->x);
  1331. dump_512("point->y %s\n", point->y);
  1332. sp_256_ecc_mulmod_8(point, point, priv);
  1333. sp_256_to_bin_8(point->x, out32);
  1334. dump_hex("out32: %s\n", out32, 32);
  1335. }
  1336. /* Generates a random scalar in [1..order-1] range. */
  1337. static void sp_256_ecc_gen_k_8(sp_digit k[8])
  1338. {
  1339. /* Since 32-bit words are "dense", no need to use
  1340. * sp_256_from_bin_8(k, buf) to convert random stream
  1341. * to sp_digit array - just store random bits there directly.
  1342. */
  1343. tls_get_random(k, 8 * sizeof(k[0]));
  1344. #if FIXED_SECRET
  1345. memset(k, 0x77, 8 * sizeof(k[0]));
  1346. #endif
  1347. // If scalar is too large, try again (pseudo-code)
  1348. // if (k >= 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551 - 1) // order of P256
  1349. // goto pick_another_random;
  1350. // k++; // ensure non-zero
1351. /* Simpler alternative, at the cost of never choosing some valid
1352. * random values, and a slightly non-uniform distribution */
  1353. if (k[0] == 0)
  1354. k[0] = 1;
  1355. if (k[7] >= 0xffffffff)
  1356. k[7] = 0xfffffffe;
  1357. }
  1358. /* Makes a random EC key pair. */
  1359. static void sp_ecc_make_key_256(sp_digit privkey[8], uint8_t *pubkey)
  1360. {
  1361. sp_point point[1];
  1362. sp_256_ecc_gen_k_8(privkey);
  1363. dump_256("privkey %s\n", privkey);
  1364. sp_256_ecc_mulmod_base_8(point, privkey);
  1365. dump_512("point->x %s\n", point->x);
  1366. dump_512("point->y %s\n", point->y);
  1367. sp_256_to_bin_8(point->x, pubkey);
  1368. sp_256_to_bin_8(point->y, pubkey + 32);
  1369. memset(point, 0, sizeof(point)); //paranoia
  1370. }
  1371. void FAST_FUNC curve_P256_compute_pubkey_and_premaster(
  1372. uint8_t *pubkey2x32, uint8_t *premaster32,
  1373. const uint8_t *peerkey2x32)
  1374. {
  1375. sp_digit privkey[8];
  1376. dump_hex("peerkey2x32: %s\n", peerkey2x32, 64);
  1377. sp_ecc_make_key_256(privkey, pubkey2x32);
  1378. dump_hex("pubkey: %s\n", pubkey2x32, 32);
  1379. dump_hex(" %s\n", pubkey2x32 + 32, 32);
  1380. /* Combine our privkey and peer's public key to generate premaster */
  1381. sp_ecc_secret_gen_256(privkey, /*x,y:*/peerkey2x32, premaster32);
  1382. dump_hex("premaster: %s\n", premaster32, 32);
  1383. }
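/* Usage sketch (the surrounding TLS handshake logic is assumed, not shown here):
 *
 *   uint8_t our_pub[64];    // X||Y, big-endian, sent to the peer
 *   uint8_t premaster[32];  // ECDHE shared secret: X coordinate of privkey * peer_pub
 *   curve_P256_compute_pubkey_and_premaster(our_pub, premaster, peer_pub_2x32);
 */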