/*
 * Copyright (C) 2021 Denys Vlasenko
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */
#include "tls.h"

#define SP_DEBUG 0
#define FIXED_SECRET 0
#define FIXED_PEER_PUBKEY 0
#define ALLOW_ASM 1

#if SP_DEBUG
# define dbg(...) fprintf(stderr, __VA_ARGS__)
static void dump_hex(const char *fmt, const void *vp, int len)
{
	char hexbuf[32 * 1024 + 4];
	const uint8_t *p = vp;

	bin2hex(hexbuf, (void*)p, len)[0] = '\0';
	dbg(fmt, hexbuf);
}
#else
# define dbg(...) ((void)0)
# define dump_hex(...) ((void)0)
#endif

typedef uint32_t sp_digit;
typedef int32_t signed_sp_digit;

/* 64-bit optimizations:
 * if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff,
 * then loads and stores can be done in 64-bit chunks.
 *
 * A narrower case is when arch is also little-endian (such as x86_64),
 * then "LSW first", uint32[8] and uint64[4] representations are equivalent,
 * and arithmetic can be done in 64 bits too.
 */
#if defined(__GNUC__) && defined(__x86_64__)
# define UNALIGNED_LE_64BIT 1
#else
# define UNALIGNED_LE_64BIT 0
#endif

/* The code below is taken from parts of
 * wolfssl-3.15.3/wolfcrypt/src/sp_c32.c
 * and heavily modified.
 */
typedef struct sp_point {
	sp_digit x[8]
#if ULONG_MAX > 0xffffffff
	/* Keep sp_point[] arrays from being 64-bit misaligned */
	ALIGNED(8)
#endif
	;
	sp_digit y[8];
	sp_digit z[8];
	int infinity;
} sp_point;
/* The modulus (prime) of the curve P256. */
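/* Stored least-significant word first. Read as one big-endian number it is
 * ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff,
 * i.e. 2^256 - 2^224 + 2^192 + 2^96 - 1.
 */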
static const sp_digit p256_mod[8] ALIGNED(8) = {
	0xffffffff,0xffffffff,0xffffffff,0x00000000,
	0x00000000,0x00000000,0x00000001,0xffffffff,
};
#define p256_mp_mod ((sp_digit)0x000001)
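/* p256_mp_mod is the Montgomery multiplier mp = -1/p256_mod mod 2^32.
 * The lowest word of p256_mod is 0xffffffff (p == -1 mod 2^32), so mp == 1.
 */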
/* Write r as big endian to byte array.
 * Fixed length number of bytes written: 32
 *
 * r A single precision integer.
 * a Byte array.
 */
#if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
static void sp_256_to_bin_8(const sp_digit* rr, uint8_t* a)
{
	int i;
	const uint64_t* r = (void*)rr;

	r += 4;
	for (i = 0; i < 4; i++) {
		r--;
		move_to_unaligned64(a, SWAP_BE64(*r));
		a += 8;
	}
}
#else
static void sp_256_to_bin_8(const sp_digit* r, uint8_t* a)
{
	int i;

	r += 8;
	for (i = 0; i < 8; i++) {
		r--;
		move_to_unaligned32(a, SWAP_BE32(*r));
		a += 4;
	}
}
#endif

/* Read big endian unsigned byte array into r.
 *
 * r A single precision integer.
 * a Byte array.
 * n Number of bytes in array to read.
 */
#if BB_UNALIGNED_MEMACCESS_OK && ULONG_MAX > 0xffffffff
static void sp_256_from_bin_8(sp_digit* rr, const uint8_t* a)
{
	int i;
	uint64_t* r = (void*)rr;

	r += 4;
	for (i = 0; i < 4; i++) {
		uint64_t v;
		move_from_unaligned64(v, a);
		*--r = SWAP_BE64(v);
		a += 8;
	}
}
#else
static void sp_256_from_bin_8(sp_digit* r, const uint8_t* a)
{
	int i;

	r += 8;
	for (i = 0; i < 8; i++) {
		sp_digit v;
		move_from_unaligned32(v, a);
		*--r = SWAP_BE32(v);
		a += 4;
	}
}
#endif

#if SP_DEBUG
static void dump_256(const char *fmt, const sp_digit* r)
{
	uint8_t b32[32];
	sp_256_to_bin_8(r, b32);
	dump_hex(fmt, b32, 32);
}
static void dump_512(const char *fmt, const sp_digit* r)
{
	uint8_t b64[64];
	sp_256_to_bin_8(r, b64 + 32);
	sp_256_to_bin_8(r+8, b64);
	dump_hex(fmt, b64, 64);
}
#else
# define dump_256(...) ((void)0)
# define dump_512(...) ((void)0)
#endif

/* Convert a point of big-endian 32-byte x,y pair to type sp_point. */
static void sp_256_point_from_bin2x32(sp_point* p, const uint8_t *bin2x32)
{
	memset(p, 0, sizeof(*p));
	/*p->infinity = 0;*/
	sp_256_from_bin_8(p->x, bin2x32);
	sp_256_from_bin_8(p->y, bin2x32 + 32);
	p->z[0] = 1; /* p->z = 1 */
}

/* Compare a with b.
 *
 * return -ve, 0 or +ve if a is less than, equal to or greater than b
 * respectively.
 */
#if UNALIGNED_LE_64BIT
static signed_sp_digit sp_256_cmp_8(const sp_digit* aa, const sp_digit* bb)
{
	const uint64_t* a = (void*)aa;
	const uint64_t* b = (void*)bb;
	int i;
	for (i = 3; i >= 0; i--) {
		if (a[i] == b[i])
			continue;
		return (a[i] > b[i]) * 2 - 1;
	}
	return 0;
}
#else
static signed_sp_digit sp_256_cmp_8(const sp_digit* a, const sp_digit* b)
{
	int i;
	for (i = 7; i >= 0; i--) {
		/* signed_sp_digit r = a[i] - b[i];
		 * if (r != 0)
		 *	return r;
		 * does not work: think about a[i]=0, b[i]=0xffffffff
		 */
		if (a[i] == b[i])
			continue;
		return (a[i] > b[i]) * 2 - 1;
	}
	return 0;
}
#endif

/* Compare two numbers to determine if they are equal.
 *
 * return 1 when equal and 0 otherwise.
 */
static int sp_256_cmp_equal_8(const sp_digit* a, const sp_digit* b)
{
	return sp_256_cmp_8(a, b) == 0;
}
/* Add b to a into r. (r = a + b). Return !0 on overflow */
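/* (In the asm versions below, the carry is propagated from word to word
 * with adc, and the final "sbb reg,reg" turns the carry flag into the
 * return value: 0 if no overflow, all-ones otherwise.)
 */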
static int sp_256_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	sp_digit reg;
	asm volatile (
		"\n movl (%0), %3"
		"\n addl (%1), %3"
		"\n movl %3, (%2)"
		"\n"
		"\n movl 1*4(%0), %3"
		"\n adcl 1*4(%1), %3"
		"\n movl %3, 1*4(%2)"
		"\n"
		"\n movl 2*4(%0), %3"
		"\n adcl 2*4(%1), %3"
		"\n movl %3, 2*4(%2)"
		"\n"
		"\n movl 3*4(%0), %3"
		"\n adcl 3*4(%1), %3"
		"\n movl %3, 3*4(%2)"
		"\n"
		"\n movl 4*4(%0), %3"
		"\n adcl 4*4(%1), %3"
		"\n movl %3, 4*4(%2)"
		"\n"
		"\n movl 5*4(%0), %3"
		"\n adcl 5*4(%1), %3"
		"\n movl %3, 5*4(%2)"
		"\n"
		"\n movl 6*4(%0), %3"
		"\n adcl 6*4(%1), %3"
		"\n movl %3, 6*4(%2)"
		"\n"
		"\n movl 7*4(%0), %3"
		"\n adcl 7*4(%1), %3"
		"\n movl %3, 7*4(%2)"
		"\n"
		"\n sbbl %3, %3"
		"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	uint64_t reg;
	asm volatile (
		"\n movq (%0), %3"
		"\n addq (%1), %3"
		"\n movq %3, (%2)"
		"\n"
		"\n movq 1*8(%0), %3"
		"\n adcq 1*8(%1), %3"
		"\n movq %3, 1*8(%2)"
		"\n"
		"\n movq 2*8(%0), %3"
		"\n adcq 2*8(%1), %3"
		"\n movq %3, 2*8(%2)"
		"\n"
		"\n movq 3*8(%0), %3"
		"\n adcq 3*8(%1), %3"
		"\n movq %3, 3*8(%2)"
		"\n"
		"\n sbbq %3, %3"
		"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#else
	int i;
	sp_digit carry;

	carry = 0;
	for (i = 0; i < 8; i++) {
		sp_digit w, v;

		w = b[i] + carry;
		v = a[i];
		if (w != 0) {
			v = a[i] + w;
			carry = (v < a[i]);
			/* hope compiler detects above as "carry flag set" */
		}
		/* else: b + carry == 0, two cases:
		 * b:ffffffff, carry:1
		 * b:00000000, carry:0
		 * in either case, r[i] = a[i] and carry remains unchanged
		 */
		r[i] = v;
	}
	return carry;
#endif
}
/* Sub b from a into r. (r = a - b). Return !0 on underflow */
static int sp_256_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	sp_digit reg;
	asm volatile (
		"\n movl (%0), %3"
		"\n subl (%1), %3"
		"\n movl %3, (%2)"
		"\n"
		"\n movl 1*4(%0), %3"
		"\n sbbl 1*4(%1), %3"
		"\n movl %3, 1*4(%2)"
		"\n"
		"\n movl 2*4(%0), %3"
		"\n sbbl 2*4(%1), %3"
		"\n movl %3, 2*4(%2)"
		"\n"
		"\n movl 3*4(%0), %3"
		"\n sbbl 3*4(%1), %3"
		"\n movl %3, 3*4(%2)"
		"\n"
		"\n movl 4*4(%0), %3"
		"\n sbbl 4*4(%1), %3"
		"\n movl %3, 4*4(%2)"
		"\n"
		"\n movl 5*4(%0), %3"
		"\n sbbl 5*4(%1), %3"
		"\n movl %3, 5*4(%2)"
		"\n"
		"\n movl 6*4(%0), %3"
		"\n sbbl 6*4(%1), %3"
		"\n movl %3, 6*4(%2)"
		"\n"
		"\n movl 7*4(%0), %3"
		"\n sbbl 7*4(%1), %3"
		"\n movl %3, 7*4(%2)"
		"\n"
		"\n sbbl %3, %3"
		"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	uint64_t reg;
	asm volatile (
		"\n movq (%0), %3"
		"\n subq (%1), %3"
		"\n movq %3, (%2)"
		"\n"
		"\n movq 1*8(%0), %3"
		"\n sbbq 1*8(%1), %3"
		"\n movq %3, 1*8(%2)"
		"\n"
		"\n movq 2*8(%0), %3"
		"\n sbbq 2*8(%1), %3"
		"\n movq %3, 2*8(%2)"
		"\n"
		"\n movq 3*8(%0), %3"
		"\n sbbq 3*8(%1), %3"
		"\n movq %3, 3*8(%2)"
		"\n"
		"\n sbbq %3, %3"
		"\n"
		: "=r" (a), "=r" (b), "=r" (r), "=r" (reg)
		: "0" (a), "1" (b), "2" (r)
		: "memory"
	);
	return reg;
#else
	int i;
	sp_digit borrow;

	borrow = 0;
	for (i = 0; i < 8; i++) {
		sp_digit w, v;

		w = b[i] + borrow;
		v = a[i];
		if (w != 0) {
			v = a[i] - w;
			borrow = (v > a[i]);
			/* hope compiler detects above as "carry flag set" */
		}
		/* else: b + borrow == 0, two cases:
		 * b:ffffffff, borrow:1
		 * b:00000000, borrow:0
		 * in either case, r[i] = a[i] and borrow remains unchanged
		 */
		r[i] = v;
	}
	return borrow;
#endif
}
/* Sub p256_mod from r. (r = r - p256_mod). */
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
	//p256_mod[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
	asm volatile (
		"\n subl $0xffffffff, (%0)"
		"\n sbbl $0xffffffff, 1*4(%0)"
		"\n sbbl $0xffffffff, 2*4(%0)"
		"\n sbbl $0x00000000, 3*4(%0)"
		"\n sbbl $0x00000000, 4*4(%0)"
		"\n sbbl $0x00000000, 5*4(%0)"
		"\n sbbl $0x00000001, 6*4(%0)"
		"\n sbbl $0xffffffff, 7*4(%0)"
		"\n"
		: "=r" (r)
		: "0" (r)
		: "memory"
	);
}
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
	//p256_mod[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
# if 0
	// gcc -Oz bug (?) https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115875
	// uses buggy "push $-1; pop %rax" insns to load 00000000ffffffff
	uint64_t reg;
	uint64_t ooff;
	asm volatile (
		"\n subq $0xffffffffffffffff, (%0)"
		"\n sbbq %1, 1*8(%0)" // %1 = 00000000ffffffff
		"\n sbbq $0x0000000000000000, 2*8(%0)"
		"\n movq 3*8(%0), %2"
		"\n sbbq $0x0, %2" // subtract carry
		"\n addq %1, %2" // adding 00000000ffffffff (in %1)
		"\n" // is the same as subtracting ffffffff00000001
		"\n movq %2, 3*8(%0)"
		"\n"
		: "=r" (r), "=r" (ooff), "=r" (reg)
		: "0" (r), "1" (0x00000000ffffffffUL) /* UL is important! */
		: "memory"
	);
# else // let's do it by hand:
	uint64_t reg;
	uint64_t rax;
	asm volatile (
		"\n orl $0xffffffff, %%eax" // %1 (rax) = 00000000ffffffff
		"\n subq $0xffffffffffffffff, (%0)"
		"\n sbbq %1, 1*8(%0)"
		"\n sbbq $0x0000000000000000, 2*8(%0)"
		"\n movq 3*8(%0), %2"
		"\n sbbq $0x0, %2" // subtract carry
		"\n addq %1, %2" // adding 00000000ffffffff (in %1)
		"\n" // is the same as subtracting ffffffff00000001
		"\n movq %2, 3*8(%0)"
		"\n"
		: "=r" (r), "=&a" (rax), "=r" (reg)
		: "0" (r)
		: "memory"
	);
# endif
}
#else
static void sp_256_sub_8_p256_mod(sp_digit* r)
{
	sp_256_sub_8(r, r, p256_mod);
}
#endif
/* Multiply a and b into r. (r = a * b)
 * r should be [16] array (512 bits), and must not coincide with a or b.
 */
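/* All implementations below use product-scanning ("comba") order:
 * for each result word k, every partial product a[i]*b[j] with i+j == k
 * is summed into a three-word accumulator, the low word is stored as r[k],
 * and the upper words carry over to the next k.
 */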
static void sp_256to512_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b)
{
#if ALLOW_ASM && defined(__GNUC__) && defined(__i386__)
	int k;
	uint32_t accl;
	uint32_t acch;

	acch = accl = 0;
	for (k = 0; k < 15; k++) {
		int i, j;
		uint32_t acc_hi;
		i = k - 7;
		if (i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
			////////////////////////
			// uint64_t m = ((uint64_t)a[i]) * b[j];
			// acc_hi:acch:accl += m;
			long eax_clobbered;
			asm volatile (
				// a[i] is already loaded in %%eax
				"\n mull %8"
				"\n addl %%eax, %0"
				"\n adcl %%edx, %1"
				"\n adcl $0x0, %2"
				: "=rm" (accl), "=rm" (acch), "=rm" (acc_hi), "=a" (eax_clobbered)
				: "0" (accl), "1" (acch), "2" (acc_hi), "3" (a[i]), "m" (b[j])
				: "cc", "dx"
				// What is "eax_clobbered"? gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html:
				// "Do not modify the contents of input-only operands (except for inputs tied
				// to outputs). The compiler assumes that on exit from the asm statement these
				// operands contain the same values as they had before executing the statement.
				// It is not possible to use clobbers to inform the compiler that the values
				// in these inputs are changing. One common work-around is to tie the changing
				// input variable to an output variable that never gets used."
			);
			////////////////////////
			j--;
			i++;
		} while (i != 8 && i <= k);
		r[k] = accl;
		accl = acch;
		acch = acc_hi;
	}
	r[15] = accl;
#elif ALLOW_ASM && defined(__GNUC__) && defined(__x86_64__)
	const uint64_t* aa = (const void*)a;
	const uint64_t* bb = (const void*)b;
	uint64_t* rr = (void*)r;
	int k;
	register uint64_t accl asm("r8");
	register uint64_t acch asm("r9");
	/* ^^^ ask gcc to not use rax/rdx/input arg regs for accumulator variables */
	/* (or else it may generate lots of silly mov's and even xchg's!) */

	acch = accl = 0;
	for (k = 0; k < 7; k++) {
		unsigned i, j;
		/* ^^^^^ not signed "int",
		 * or gcc can use a temp register to sign-extend i,j for aa[i],bb[j] */
		register uint64_t acc_hi asm("r10");
		/* ^^^ ask gcc to not use rax/rdx/input arg regs for accumulators */
		i = k - 3;
		if ((int)i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
			////////////////////////
			// uint128_t m = ((uint128_t)a[i]) * b[j];
			// acc_hi:acch:accl += m;
			long rax_clobbered;
			asm volatile (
				// aa[i] is already loaded in %%rax
				"\n mulq %8"
				"\n addq %%rax, %0"
				"\n adcq %%rdx, %1"
				"\n adcq $0x0, %2"
				: "=rm" (accl), "=rm" (acch), "=rm" (acc_hi), "=a" (rax_clobbered)
				: "0" (accl), "1" (acch), "2" (acc_hi), "3" (aa[i]), "m" (bb[j])
				: "cc", "dx"
			);
			////////////////////////
			j--;
			i++;
		} while (i != 4 && i <= k);
		rr[k] = accl;
		accl = acch;
		acch = acc_hi;
	}
	rr[7] = accl;
#elif 0
	//TODO: arm assembly (untested)
	asm volatile (
		"\n mov r5, #0"
		"\n mov r6, #0"
		"\n mov r7, #0"
		"\n mov r8, #0"
		"\n 1:"
		"\n subs r3, r5, #28"
		"\n movcc r3, #0"
		"\n sub r4, r5, r3"
		"\n 2:"
		"\n ldr r14, [%[a], r3]"
		"\n ldr r12, [%[b], r4]"
		"\n umull r9, r10, r14, r12"
		"\n adds r6, r6, r9"
		"\n adcs r7, r7, r10"
		"\n adc r8, r8, #0"
		"\n add r3, r3, #4"
		"\n sub r4, r4, #4"
		"\n cmp r3, #32"
		"\n beq 3f"
		"\n cmp r3, r5"
		"\n ble 2b"
		"\n 3:"
		"\n str r6, [%[r], r5]"
		"\n mov r6, r7"
		"\n mov r7, r8"
		"\n mov r8, #0"
		"\n add r5, r5, #4"
		"\n cmp r5, #56"
		"\n ble 1b"
		"\n str r6, [%[r], r5]"
		: [r] "r" (r), [a] "r" (a), [b] "r" (b)
		: "memory", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14"
	);
#else
	int i, j, k;
	uint64_t acc;

	acc = 0;
	for (k = 0; k < 15; k++) {
		uint32_t acc_hi;
		i = k - 7;
		if (i < 0)
			i = 0;
		j = k - i;
		acc_hi = 0;
		do {
			uint64_t m = ((uint64_t)a[i]) * b[j];
			acc += m;
			if (acc < m)
				acc_hi++;
			j--;
			i++;
		} while (i != 8 && i <= k);
		r[k] = acc;
		acc = (acc >> 32) | ((uint64_t)acc_hi << 32);
	}
	r[15] = acc;
#endif
}
/* Shift number right one bit. Bottom bit is lost. */
#if UNALIGNED_LE_64BIT
static void sp_256_rshift1_8(sp_digit* rr, uint64_t carry)
{
	uint64_t *r = (void*)rr;
	int i;

	carry = (((uint64_t)!!carry) << 63);
	for (i = 3; i >= 0; i--) {
		uint64_t c = r[i] << 63;
		r[i] = (r[i] >> 1) | carry;
		carry = c;
	}
}
#else
static void sp_256_rshift1_8(sp_digit* r, sp_digit carry)
{
	int i;

	carry = (((sp_digit)!!carry) << 31);
	for (i = 7; i >= 0; i--) {
		sp_digit c = r[i] << 31;
		r[i] = (r[i] >> 1) | carry;
		carry = c;
	}
}
#endif
/* Divide the number by 2 mod the modulus (prime). (r = (r / 2) % m) */
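/* (The modulus is odd: if r is odd, adding the modulus makes the sum even,
 * so the right shift below drops no bits and the result stays congruent
 * to r/2 mod m.)
 */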
static void sp_256_div2_8(sp_digit* r /*, const sp_digit* m*/)
{
	const sp_digit* m = p256_mod;

	int carry = 0;
	if (r[0] & 1)
		carry = sp_256_add_8(r, r, m);
	sp_256_rshift1_8(r, carry);
}
/* Add two Montgomery form numbers (r = a + b % m) */
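/* (Reduction in this and the dbl/tpl helpers below is lazy: the modulus is
 * subtracted only when the 256-bit sum overflows, so the result is a correct
 * residue but may still be >= p256_mod. Final reduction to canonical form
 * happens in sp_256_map_8.)
 */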
static void sp_256_mont_add_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m*/)
{
	// const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, b);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
	}
}

/* Subtract two Montgomery form numbers (r = a - b % m) */
static void sp_256_mont_sub_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m*/)
{
	const sp_digit* m = p256_mod;

	int borrow;
	borrow = sp_256_sub_8(r, a, b);
	if (borrow) {
		sp_256_add_8(r, r, m);
	}
}

/* Double a Montgomery form number (r = a + a % m) */
static void sp_256_mont_dbl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
{
	// const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, a);
	if (carry)
		sp_256_sub_8_p256_mod(r);
}

/* Triple a Montgomery form number (r = a + a + a % m) */
static void sp_256_mont_tpl_8(sp_digit* r, const sp_digit* a /*, const sp_digit* m*/)
{
	// const sp_digit* m = p256_mod;

	int carry = sp_256_add_8(r, a, a);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
	}
	carry = sp_256_add_8(r, r, a);
	if (carry) {
		sp_256_sub_8_p256_mod(r);
	}
}

/* Shift the result in the high 256 bits down to the bottom. */
static void sp_512to256_mont_shift_8(sp_digit* r, sp_digit* a)
{
	memcpy(r, a + 8, sizeof(*r) * 8);
}
#if UNALIGNED_LE_64BIT
/* 64-bit little-endian optimized version.
 * See generic 32-bit version below for explanation.
 * The benefit of this version is: even though r[3] calculation is atrocious,
 * we call sp_256_mul_add_4() four times, not 8.
 * Measured run time improvement of curve_P256_compute_pubkey_and_premaster()
 * call on x86-64: from ~1500us to ~900us. Code size +32 bytes.
 */
static int sp_256_mul_add_4(uint64_t *r /*, const uint64_t* a, uint64_t b*/)
{
	uint64_t b = r[0];

# if 0
	const uint64_t* a = (const void*)p256_mod;
	//a[3..0] = ffffffff00000001 0000000000000000 00000000ffffffff ffffffffffffffff
	uint128_t t;
	int i;
	t = 0;
	for (i = 0; i < 4; i++) {
		uint32_t t_hi;
		uint128_t m = ((uint128_t)b * a[i]) + r[i];
		t += m;
		t_hi = (t < m);
		r[i] = (uint64_t)t;
		t = (t >> 64) | ((uint128_t)t_hi << 64);
	}
	r[4] += (uint64_t)t;
	return (r[4] < (uint64_t)t); /* 1 if addition overflowed */
# else
	// Unroll, then optimize the above loop:
	//uint32_t t_hi;
	//uint128_t m;
	uint64_t t64, t64u;

	//m = ((uint128_t)b * a[0]) + r[0];
	// Since b is r[0] and a[0] is ffffffffffffffff, the above optimizes to:
	// m = r[0] * ffffffffffffffff + r[0] = (r[0] << 64 - r[0]) + r[0] = r[0] << 64;
	//t += m;
	// t = r[0] << 64 = b << 64;
	//t_hi = (t < m);
	// t_hi = 0;
	//r[0] = (uint64_t)t;
	// r[0] = 0;
	//the store can be eliminated since caller won't look at lower 256 bits of the result
	//t = (t >> 64) | ((uint128_t)t_hi << 64);
	// t = b;

	//m = ((uint128_t)b * a[1]) + r[1];
	// Since a[1] is 00000000ffffffff, the above optimizes to:
	// m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
	//t += m;
	// t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
	//t_hi = (t < m);
	// t_hi = 0;
	//r[1] = (uint64_t)t;
	r[1] += (b << 32);
	//t = (t >> 64) | ((uint128_t)t_hi << 64);
	t64 = (r[1] < (b << 32));
	t64 += (b >> 32);

	//m = ((uint128_t)b * a[2]) + r[2];
	// Since a[2] is 0000000000000000, the above optimizes to:
	// m = b * 0 + r[2] = r[2];
	//t += m;
	// t = t64 + r[2];
	//t_hi = (t < m);
	// t_hi = 0;
	//r[2] = (uint64_t)t;
	r[2] += t64;
	//t = (t >> 64) | ((uint128_t)t_hi << 64);
	t64 = (r[2] < t64);

	//m = ((uint128_t)b * a[3]) + r[3];
	// Since a[3] is ffffffff00000001, the above optimizes to:
	// m = b * ffffffff00000001 + r[3];
	// m = b + b*ffffffff00000000 + r[3]
	// m = b + (b*ffffffff << 32) + r[3]
	// m = b + (((b<<32) - b) << 32) + r[3]
	//t += m;
	// t = t64 + (uint128_t)b + ((((uint128_t)b << 32) - b) << 32) + r[3];
	t64 += b;
	t64u = (t64 < b);
	t64 += r[3];
	t64u += (t64 < r[3]);
	{ // add ((((uint128_t)b << 32) - b) << 32):
		uint64_t lo, hi;
		//lo = (((b << 32) - b) << 32
		//hi = (((uint128_t)b << 32) - b) >> 32
		//but without uint128_t:
		hi = (b << 32) - b; /* make lower 32 bits of "hi", part 1 */
		b = (b >> 32) - (/*borrowed above?*/(b << 32) < b); /* upper 32 bits of "hi" are in b */
		lo = hi << 32; /* (use "hi" value to calculate "lo",... */
		t64 += lo; /* ...consume... */
		t64u += (t64 < lo); /* ..."lo") */
		hi >>= 32; /* make lower 32 bits of "hi", part 2 */
		hi |= (b << 32); /* combine lower and upper 32 bits */
		t64u += hi; /* consume "hi" */
	}
	//t_hi = (t < m);
	// t_hi = 0;
	//r[3] = (uint64_t)t;
	r[3] = t64;
	//t = (t >> 64) | ((uint128_t)t_hi << 64);
	// t = t64u;
	r[4] += t64u;
	return (r[4] < t64u); /* 1 if addition overflowed */
# endif
}
static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* aa/*, const sp_digit* m, sp_digit mp*/)
{
	// const sp_digit* m = p256_mod;
	int i;
	uint64_t *a = (void*)aa;

	sp_digit carry = 0;
	for (i = 0; i < 4; i++) {
		// mu = a[i];
		if (sp_256_mul_add_4(a+i /*, m, mu*/)) {
			int j = i + 4;
 inc_next_word:
			if (++j > 7) { /* a[8] array has no more words? */
				carry++;
				continue;
			}
			if (++a[j] == 0) /* did this overflow too? */
				goto inc_next_word;
		}
	}
	sp_512to256_mont_shift_8(r, aa);
	if (carry != 0)
		sp_256_sub_8_p256_mod(r);
}
#else /* Generic 32-bit version */

/* Mul a by scalar b and add into r. (r += a * b)
 * a = p256_mod
 * b = r[0]
 */
static int sp_256_mul_add_8(sp_digit* r /*, const sp_digit* a, sp_digit b*/)
{
	sp_digit b = r[0];
	uint64_t t;

# if 0
	const sp_digit* a = p256_mod;
	//a[7..0] = ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff
	int i;
	t = 0;
	for (i = 0; i < 8; i++) {
		uint32_t t_hi;
		uint64_t m = ((uint64_t)b * a[i]) + r[i];
		t += m;
		t_hi = (t < m);
		r[i] = (sp_digit)t;
		t = (t >> 32) | ((uint64_t)t_hi << 32);
	}
	r[8] += (sp_digit)t;
	return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
# else
	// Unroll, then optimize the above loop:
	//uint32_t t_hi;
	uint64_t m;
	uint32_t t32;

	//m = ((uint64_t)b * a[0]) + r[0];
	// Since b is r[0] and a[0] is ffffffff, the above optimizes to:
	// m = r[0] * ffffffff + r[0] = (r[0] * 100000000 - r[0]) + r[0] = r[0] << 32;
	//t += m;
	// t = r[0] << 32 = b << 32;
	//t_hi = (t < m);
	// t_hi = 0;
	//r[0] = (sp_digit)t;
	// r[0] = 0;
	//the store can be eliminated since caller won't look at lower 256 bits of the result
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	// t = b;

	//m = ((uint64_t)b * a[1]) + r[1];
	// Since a[1] is ffffffff, the above optimizes to:
	// m = b * ffffffff + r[1] = (b * 100000000 - b) + r[1] = (b << 32) - b + r[1];
	//t += m;
	// t = b + (b << 32) - b + r[1] = (b << 32) + r[1];
	//t_hi = (t < m);
	// t_hi = 0;
	//r[1] = (sp_digit)t;
	// r[1] = r[1];
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	// t = b;

	//m = ((uint64_t)b * a[2]) + r[2];
	// Since a[2] is ffffffff, the above optimizes to:
	// m = b * ffffffff + r[2] = (b * 100000000 - b) + r[2] = (b << 32) - b + r[2];
	//t += m;
	// t = b + (b << 32) - b + r[2] = (b << 32) + r[2]
	//t_hi = (t < m);
	// t_hi = 0;
	//r[2] = (sp_digit)t;
	// r[2] = r[2];
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	// t = b;

	//m = ((uint64_t)b * a[3]) + r[3];
	// Since a[3] is 00000000, the above optimizes to:
	// m = b * 0 + r[3] = r[3];
	//t += m;
	// t = b + r[3];
	//t_hi = (t < m);
	// t_hi = 0;
	//r[3] = (sp_digit)t;
	r[3] = r[3] + b;
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	t32 = (r[3] < b); // 0 or 1

	//m = ((uint64_t)b * a[4]) + r[4];
	// Since a[4] is 00000000, the above optimizes to:
	// m = b * 0 + r[4] = r[4];
	//t += m;
	// t = t32 + r[4];
	//t_hi = (t < m);
	// t_hi = 0;
	//r[4] = (sp_digit)t;
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	if (t32 != 0) {
		r[4]++;
		t32 = (r[4] == 0); // 0 or 1

		//m = ((uint64_t)b * a[5]) + r[5];
		// Since a[5] is 00000000, the above optimizes to:
		// m = b * 0 + r[5] = r[5];
		//t += m;
		// t = t32 + r[5]; (t32 is 0 or 1)
		//t_hi = (t < m);
		// t_hi = 0;
		//r[5] = (sp_digit)t;
		//t = (t >> 32) | ((uint64_t)t_hi << 32);
		if (t32 != 0) {
			r[5]++;
			t32 = (r[5] == 0); // 0 or 1
		}
	}

	//m = ((uint64_t)b * a[6]) + r[6];
	// Since a[6] is 00000001, the above optimizes to:
	// m = (uint64_t)b + r[6]; // 33 bits at most
	//t += m;
	t = t32 + (uint64_t)b + r[6];
	//t_hi = (t < m);
	// t_hi = 0;
	r[6] = (sp_digit)t;
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	t = (t >> 32);

	//m = ((uint64_t)b * a[7]) + r[7];
	// Since a[7] is ffffffff, the above optimizes to:
	// m = b * ffffffff + r[7] = (b * 100000000 - b) + r[7]
	m = ((uint64_t)b << 32) - b + r[7];
	t += m;
	//t_hi = (t < m);
	// t_hi in fact is always 0 here (256bit * 32bit can't have more than 32 bits of overflow)
	r[7] = (sp_digit)t;
	//t = (t >> 32) | ((uint64_t)t_hi << 32);
	t = (t >> 32);

	r[8] += (sp_digit)t;
	return (r[8] < (sp_digit)t); /* 1 if addition overflowed */
# endif
}
/* Reduce the number back to 256 bits using Montgomery reduction.
 * Note: the result is NOT guaranteed to be less than p256_mod!
 * (it is only guaranteed to fit into 256 bits).
 *
 * r Result.
 * a Double-wide number to reduce. Clobbered.
 * m The single precision number representing the modulus.
 * mp The digit representing the negative inverse of m mod 2^n.
 *
 * Montgomery reduction on multiprecision integers:
 * Montgomery reduction requires products modulo R.
 * When R is a power of B [in our case R=2^256, B=2^32], there is a variant
 * of Montgomery reduction which requires products only of machine word sized
 * integers. T is stored as a little-endian word array a[0..n]. The algorithm
 * reduces it one word at a time. First an appropriate multiple of modulus
 * is added to make T divisible by B. [In our case, it is p256_mp_mod * a[0].]
 * Then a multiple of modulus is added to make T divisible by B^2.
 * [In our case, it is (p256_mp_mod * a[1]) << 32.]
 * And so on. Eventually T is divisible by R, and after division by R
 * the algorithm is in the same place as the usual Montgomery reduction.
 */
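/* Illustration of the first step (mp == 1, so mu == a[0]):
 * adding p256_mod * a[0] to T zeroes T's lowest word, because the lowest
 * word of p256_mod is 0xffffffff == -1 mod 2^32, and so
 * a[0] + 0xffffffff * a[0] == a[0] * 2^32 == 0 (mod 2^32).
 * sp_256_mul_add_8() performs exactly this "multiply the modulus by a[i]
 * and add it in" step.
 */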
static void sp_512to256_mont_reduce_8(sp_digit* r, sp_digit* a/*, const sp_digit* m, sp_digit mp*/)
{
	// const sp_digit* m = p256_mod;
	sp_digit mp = p256_mp_mod;

	int i;
	// sp_digit mu;

	if (mp != 1) {
		sp_digit word16th = 0;
		for (i = 0; i < 8; i++) {
			// mu = (sp_digit)(a[i] * mp);
			if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
				int j = i + 8;
 inc_next_word0:
				if (++j > 15) { /* a[16] array has no more words? */
					word16th++;
					continue;
				}
				if (++a[j] == 0) /* did this overflow too? */
					goto inc_next_word0;
			}
		}
		sp_512to256_mont_shift_8(r, a);
		if (word16th != 0)
			sp_256_sub_8_p256_mod(r);
	}
	else { /* Same code for explicit mp == 1 (which is always the case for P256) */
		sp_digit word16th = 0;
		for (i = 0; i < 8; i++) {
			// mu = a[i];
			if (sp_256_mul_add_8(a+i /*, m, mu*/)) {
				int j = i + 8;
 inc_next_word:
				if (++j > 15) { /* a[16] array has no more words? */
					word16th++;
					continue;
				}
				if (++a[j] == 0) /* did this overflow too? */
					goto inc_next_word;
			}
		}
		sp_512to256_mont_shift_8(r, a);
		if (word16th != 0)
			sp_256_sub_8_p256_mod(r);
	}
}
#endif
/* Multiply two Montgomery form numbers mod the modulus (prime).
 * (r = a * b mod m)
 *
 * r Result of multiplication.
 * a First number to multiply in Montgomery form.
 * b Second number to multiply in Montgomery form.
 * m Modulus (prime).
 * mp Montgomery multiplier.
 */
static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m, sp_digit mp*/)
{
	//const sp_digit* m = p256_mod;
	//sp_digit mp = p256_mp_mod;
	sp_digit t[2 * 8];
	sp_256to512_mul_8(t, a, b);
	sp_512to256_mont_reduce_8(r, t /*, m, mp*/);
}

/* Square the Montgomery form number. (r = a * a mod m)
 *
 * r Result of squaring.
 * a Number to square in Montgomery form.
 * m Modulus (prime).
 * mp Montgomery multiplier.
 */
static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a
		/*, const sp_digit* m, sp_digit mp*/)
{
	//const sp_digit* m = p256_mod;
	//sp_digit mp = p256_mp_mod;
	sp_256_mont_mul_8(r, a, a /*, m, mp*/);
}
static NOINLINE void sp_256_mont_mul_and_reduce_8(sp_digit* r,
		const sp_digit* a, const sp_digit* b
		/*, const sp_digit* m, sp_digit mp*/)
{
	sp_digit rr[2 * 8];

	sp_256_mont_mul_8(rr, a, b /*, p256_mod, p256_mp_mod*/);
	memset(rr + 8, 0, sizeof(rr) / 2);
	sp_512to256_mont_reduce_8(r, rr /*, p256_mod, p256_mp_mod*/);
}
/* Invert the number, in Montgomery form, modulo the modulus (prime) of the
 * P256 curve. (r = 1 / a mod m)
 *
 * r Inverse result. Must not coincide with a.
 * a Number to invert.
 */
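/* Inversion uses Fermat's little theorem: r = a^(p-2) mod p, computed by
 * square-and-multiply. The condition in the loop below reproduces the bit
 * pattern of p256_mod - 2, so a multiply is done exactly for the set bits
 * of the exponent.
 */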
static void sp_256_mont_inv_8(sp_digit* r, sp_digit* a)
{
	int i;

	memcpy(r, a, sizeof(sp_digit) * 8);
	for (i = 254; i >= 0; i--) {
		sp_256_mont_sqr_8(r, r /*, p256_mod, p256_mp_mod*/);
		/* p256_mod - 2:
		 * ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff ffffffff - 2
		 * Bit pattern:
		 * 2 2 2 2 2 2 2 1...1
		 * 5 5 4 3 2 1 0 9...0 9...1
		 * 543210987654321098765432109876543210987654321098765432109876543210...09876543210...09876543210
		 * 111111111111111111111111111111110000000000000000000000000000000100...00000111111...11111111101
		 */
		/*if (p256_mod_minus_2[i / 32] & ((sp_digit)1 << (i % 32)))*/
		if (i >= 224 || i == 192 || (i <= 95 && i != 1))
			sp_256_mont_mul_8(r, r, a /*, p256_mod, p256_mp_mod*/);
	}
}
/* Multiply a number by Montgomery normalizer mod modulus (prime).
 *
 * r The resulting Montgomery form number.
 * a The number to convert.
 */
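/* In other words, this converts a into Montgomery form: r = a * 2^256 mod p
 * (2^256 is the Montgomery normalizer for eight 32-bit words). The rows of
 * signed per-word coefficients below encode that product reduced using the
 * special form of the P256 prime; the pass that follows propagates carries
 * and folds the 64-bit signed intermediate sums back into 32-bit words.
 */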
static void sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a)
{
	int64_t t[8];
	int32_t o;

#define A(n) ((uint64_t)a[n])
	/*  1  1  0 -1 -1 -1 -1  0 */
	t[0] = 0 + A(0) + A(1) - A(3) - A(4) - A(5) - A(6);
	/*  0  1  1  0 -1 -1 -1 -1 */
	t[1] = 0 + A(1) + A(2) - A(4) - A(5) - A(6) - A(7);
	/*  0  0  1  1  0 -1 -1 -1 */
	t[2] = 0 + A(2) + A(3) - A(5) - A(6) - A(7);
	/* -1 -1  0  2  2  1  0 -1 */
	t[3] = 0 - A(0) - A(1) + 2 * A(3) + 2 * A(4) + A(5) - A(7);
	/*  0 -1 -1  0  2  2  1  0 */
	t[4] = 0 - A(1) - A(2) + 2 * A(4) + 2 * A(5) + A(6);
	/*  0  0 -1 -1  0  2  2  1 */
	t[5] = 0 - A(2) - A(3) + 2 * A(5) + 2 * A(6) + A(7);
	/* -1 -1  0  0  0  1  3  2 */
	t[6] = 0 - A(0) - A(1) + A(5) + 3 * A(6) + 2 * A(7);
	/*  1  0 -1 -1 -1 -1  0  3 */
	t[7] = 0 + A(0) - A(2) - A(3) - A(4) - A(5) + 3 * A(7);
#undef A

	t[1] += t[0] >> 32; t[0] &= 0xffffffff;
	t[2] += t[1] >> 32; t[1] &= 0xffffffff;
	t[3] += t[2] >> 32; t[2] &= 0xffffffff;
	t[4] += t[3] >> 32; t[3] &= 0xffffffff;
	t[5] += t[4] >> 32; t[4] &= 0xffffffff;
	t[6] += t[5] >> 32; t[5] &= 0xffffffff;
	t[7] += t[6] >> 32; t[6] &= 0xffffffff;
	o     = t[7] >> 32; //t[7] &= 0xffffffff;
	t[0] += o;
	t[3] -= o;
	t[6] -= o;
	t[7] += o;
	r[0] = (sp_digit)t[0];
	t[1] += t[0] >> 32;
	r[1] = (sp_digit)t[1];
	t[2] += t[1] >> 32;
	r[2] = (sp_digit)t[2];
	t[3] += t[2] >> 32;
	r[3] = (sp_digit)t[3];
	t[4] += t[3] >> 32;
	r[4] = (sp_digit)t[4];
	t[5] += t[4] >> 32;
	r[5] = (sp_digit)t[5];
	t[6] += t[5] >> 32;
	r[6] = (sp_digit)t[6];
	// t[7] += t[6] >> 32;
	// r[7] = (sp_digit)t[7];
	r[7] = (sp_digit)t[7] + (sp_digit)(t[6] >> 32);
}
/* Map the Montgomery form projective co-ordinate point to an affine point.
 *
 * r Resulting affine co-ordinate point.
 * p Montgomery form projective co-ordinate point.
 */
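/* Points are kept in Jacobian projective coordinates: affine x = X/Z^2 and
 * y = Y/Z^3. Mapping back therefore needs one modular inversion of Z plus
 * a few Montgomery multiplications, as done below.
 */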
static void sp_256_map_8(sp_point* r, sp_point* p)
{
	sp_digit t1[8];
	sp_digit t2[8];

	sp_256_mont_inv_8(t1, p->z);

	sp_256_mont_sqr_8(t2, t1 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t1, t2, t1 /*, p256_mod, p256_mp_mod*/);

	/* x /= z^2 */
	sp_256_mont_mul_and_reduce_8(r->x, p->x, t2 /*, p256_mod, p256_mp_mod*/);
	/* Reduce x to less than modulus */
	if (sp_256_cmp_8(r->x, p256_mod) >= 0)
		sp_256_sub_8_p256_mod(r->x);

	/* y /= z^3 */
	sp_256_mont_mul_and_reduce_8(r->y, p->y, t1 /*, p256_mod, p256_mp_mod*/);
	/* Reduce y to less than modulus */
	if (sp_256_cmp_8(r->y, p256_mod) >= 0)
		sp_256_sub_8_p256_mod(r->y);

	memset(r->z, 0, sizeof(r->z));
	r->z[0] = 1;
}
/* Double the Montgomery form projective point p.
 *
 * r Result of doubling point.
 * p Point to double.
 */
static void sp_256_proj_point_dbl_8(sp_point* r, sp_point* p)
{
	sp_digit t1[8];
	sp_digit t2[8];

	/* Put point to double into result */
	if (r != p)
		*r = *p; /* struct copy */

	if (r->infinity)
		return;

	/* T1 = Z * Z */
	sp_256_mont_sqr_8(t1, r->z /*, p256_mod, p256_mp_mod*/);
	/* Z = Y * Z */
	sp_256_mont_mul_8(r->z, r->y, r->z /*, p256_mod, p256_mp_mod*/);
	/* Z = 2Z */
	sp_256_mont_dbl_8(r->z, r->z /*, p256_mod*/);
	/* T2 = X - T1 */
	sp_256_mont_sub_8(t2, r->x, t1 /*, p256_mod*/);
	/* T1 = X + T1 */
	sp_256_mont_add_8(t1, r->x, t1 /*, p256_mod*/);
	/* T2 = T1 * T2 */
	sp_256_mont_mul_8(t2, t1, t2 /*, p256_mod, p256_mp_mod*/);
	/* T1 = 3T2 */
	sp_256_mont_tpl_8(t1, t2 /*, p256_mod*/);
	/* Y = 2Y */
	sp_256_mont_dbl_8(r->y, r->y /*, p256_mod*/);
	/* Y = Y * Y */
	sp_256_mont_sqr_8(r->y, r->y /*, p256_mod, p256_mp_mod*/);
	/* T2 = Y * Y */
	sp_256_mont_sqr_8(t2, r->y /*, p256_mod, p256_mp_mod*/);
	/* T2 = T2/2 */
	sp_256_div2_8(t2 /*, p256_mod*/);
	/* Y = Y * X */
	sp_256_mont_mul_8(r->y, r->y, r->x /*, p256_mod, p256_mp_mod*/);
	/* X = T1 * T1 */
	sp_256_mont_mul_8(r->x, t1, t1 /*, p256_mod, p256_mp_mod*/);
	/* X = X - Y */
	sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
	/* X = X - Y */
	sp_256_mont_sub_8(r->x, r->x, r->y /*, p256_mod*/);
	/* Y = Y - X */
	sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
	/* Y = Y * T1 */
	sp_256_mont_mul_8(r->y, r->y, t1 /*, p256_mod, p256_mp_mod*/);
	/* Y = Y - T2 */
	sp_256_mont_sub_8(r->y, r->y, t2 /*, p256_mod*/);
	dump_512("y2 %s\n", r->y);
}
/* Add two Montgomery form projective points.
 *
 * r Result of addition.
 * p First point to add.
 * q Second point to add.
 */
static NOINLINE void sp_256_proj_point_add_8(sp_point* r, sp_point* p, sp_point* q)
{
	sp_digit t1[8];
	sp_digit t2[8];
	sp_digit t3[8];
	sp_digit t4[8];
	sp_digit t5[8];

	/* Ensure only the first point is the same as the result. */
	if (q == r) {
		sp_point* a = p;
		p = q;
		q = a;
	}

	/* Check double */
	sp_256_sub_8(t1, p256_mod, q->y);
	if (sp_256_cmp_equal_8(p->x, q->x)
	 && sp_256_cmp_equal_8(p->z, q->z)
	 && (sp_256_cmp_equal_8(p->y, q->y) || sp_256_cmp_equal_8(p->y, t1))
	) {
		sp_256_proj_point_dbl_8(r, p);
		return;
	}

	if (p->infinity || q->infinity) {
		*r = p->infinity ? *q : *p; /* struct copy */
		return;
	}

	/* U1 = X1*Z2^2 */
	sp_256_mont_sqr_8(t1, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t3, t1, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t1, t1, r->x /*, p256_mod, p256_mp_mod*/);
	/* U2 = X2*Z1^2 */
	sp_256_mont_sqr_8(t2, r->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t4, t2, r->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t2, t2, q->x /*, p256_mod, p256_mp_mod*/);
	/* S1 = Y1*Z2^3 */
	sp_256_mont_mul_8(t3, t3, r->y /*, p256_mod, p256_mp_mod*/);
	/* S2 = Y2*Z1^3 */
	sp_256_mont_mul_8(t4, t4, q->y /*, p256_mod, p256_mp_mod*/);
	/* H = U2 - U1 */
	sp_256_mont_sub_8(t2, t2, t1 /*, p256_mod*/);
	/* R = S2 - S1 */
	sp_256_mont_sub_8(t4, t4, t3 /*, p256_mod*/);
	/* Z3 = H*Z1*Z2 */
	sp_256_mont_mul_8(r->z, r->z, q->z /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(r->z, r->z, t2 /*, p256_mod, p256_mp_mod*/);
	/* X3 = R^2 - H^3 - 2*U1*H^2 */
	sp_256_mont_sqr_8(r->x, t4 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sqr_8(t5, t2 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(r->y, t1, t5 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t5, t5, t2 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sub_8(r->x, r->x, t5 /*, p256_mod*/);
	sp_256_mont_dbl_8(t1, r->y /*, p256_mod*/);
	sp_256_mont_sub_8(r->x, r->x, t1 /*, p256_mod*/);
	/* Y3 = R*(U1*H^2 - X3) - S1*H^3 */
	sp_256_mont_sub_8(r->y, r->y, r->x /*, p256_mod*/);
	sp_256_mont_mul_8(r->y, r->y, t4 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_mul_8(t5, t5, t3 /*, p256_mod, p256_mp_mod*/);
	sp_256_mont_sub_8(r->y, r->y, t5 /*, p256_mod*/);
}
/* Multiply the point by the scalar and return the result.
 * If map is true then convert result to affine co-ordinates.
 *
 * r Resulting point.
 * g Point to multiply.
 * k Scalar to multiply by.
 * map Indicates whether to convert result to affine.
 */
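/* The loop below is a Montgomery-ladder-style double-and-add: t[0] and t[1]
 * always differ by g, and every scalar bit costs one point addition plus one
 * point doubling regardless of the bit's value.
 */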
static void sp_256_ecc_mulmod_8(sp_point* r, const sp_point* g, const sp_digit* k /*, int map*/)
{
	enum { map = 1 }; /* we always convert result to affine coordinates */
	sp_point t[3];
	sp_digit n = n; /* for compiler */
	int c, y;

	memset(t, 0, sizeof(t));

	/* t[0] = {0, 0, 1} * norm */
	t[0].infinity = 1;
	/* t[1] = {g->x, g->y, g->z} * norm */
	sp_256_mod_mul_norm_8(t[1].x, g->x);
	sp_256_mod_mul_norm_8(t[1].y, g->y);
	sp_256_mod_mul_norm_8(t[1].z, g->z);

	/* For every bit, starting from most significant... */
	k += 7;
	c = 256;
	for (;;) {
		if ((c & 0x1f) == 0) {
			if (c == 0)
				break;
			n = *k--;
		}

		y = (n >> 31);
		dbg("y:%d t[%d] = t[0]+t[1]\n", y, y^1);
		sp_256_proj_point_add_8(&t[y^1], &t[0], &t[1]);
		dump_512("t[0].x %s\n", t[0].x);
		dump_512("t[0].y %s\n", t[0].y);
		dump_512("t[0].z %s\n", t[0].z);
		dump_512("t[1].x %s\n", t[1].x);
		dump_512("t[1].y %s\n", t[1].y);
		dump_512("t[1].z %s\n", t[1].z);
		dbg("t[2] = t[%d]\n", y);
		t[2] = t[y]; /* struct copy */
		dbg("t[2] *= 2\n");
		sp_256_proj_point_dbl_8(&t[2], &t[2]);
		dump_512("t[2].x %s\n", t[2].x);
		dump_512("t[2].y %s\n", t[2].y);
		dump_512("t[2].z %s\n", t[2].z);
		t[y] = t[2]; /* struct copy */

		n <<= 1;
		c--;
	}

	if (map)
		sp_256_map_8(r, &t[0]);
	else
		*r = t[0]; /* struct copy */

	memset(t, 0, sizeof(t)); //paranoia
}
/* Multiply the base point of P256 by the scalar and return the result.
 * If map is true then convert result to affine co-ordinates.
 *
 * r Resulting point.
 * k Scalar to multiply by.
 * map Indicates whether to convert result to affine.
 */
static void sp_256_ecc_mulmod_base_8(sp_point* r, sp_digit* k /*, int map*/)
{
	/* Since this function is called only once, save space:
	 * don't have "static const sp_point p256_base = {...}".
	 */
	static const uint8_t p256_base_bin[] = {
		/* x (big-endian) */
		0x6b,0x17,0xd1,0xf2,0xe1,0x2c,0x42,0x47,0xf8,0xbc,0xe6,0xe5,0x63,0xa4,0x40,0xf2,
		0x77,0x03,0x7d,0x81,0x2d,0xeb,0x33,0xa0,0xf4,0xa1,0x39,0x45,0xd8,0x98,0xc2,0x96,
		/* y */
		0x4f,0xe3,0x42,0xe2,0xfe,0x1a,0x7f,0x9b,0x8e,0xe7,0xeb,0x4a,0x7c,0x0f,0x9e,0x16,
		0x2b,0xce,0x33,0x57,0x6b,0x31,0x5e,0xce,0xcb,0xb6,0x40,0x68,0x37,0xbf,0x51,0xf5,
		/* z will be set to 1, infinity flag to "false" */
	};
	sp_point p256_base;

	sp_256_point_from_bin2x32(&p256_base, p256_base_bin);

	sp_256_ecc_mulmod_8(r, &p256_base, k /*, map*/);
}

/* Multiply the point by the scalar and serialize the X ordinate.
 * The number is 0 padded to maximum size on output.
 *
 * priv Scalar to multiply the point by.
 * pub2x32 Point to multiply.
 * out32 Buffer to hold X ordinate.
 */
static void sp_ecc_secret_gen_256(const sp_digit priv[8], const uint8_t *pub2x32, uint8_t* out32)
{
	sp_point point[1];

#if FIXED_PEER_PUBKEY
	memset((void*)pub2x32, 0x55, 64);
#endif
	dump_hex("peerkey %s\n", pub2x32, 32); /* in TLS, this is peer's public key */
	dump_hex(" %s\n", pub2x32 + 32, 32);

	sp_256_point_from_bin2x32(point, pub2x32);
	dump_512("point->x %s\n", point->x);
	dump_512("point->y %s\n", point->y);

	sp_256_ecc_mulmod_8(point, point, priv);

	sp_256_to_bin_8(point->x, out32);
	dump_hex("out32: %s\n", out32, 32);
}
/* Generates a random scalar in [1..order-1] range. */
static void sp_256_ecc_gen_k_8(sp_digit k[8])
{
	/* Since 32-bit words are "dense", no need to use
	 * sp_256_from_bin_8(k, buf) to convert random stream
	 * to sp_digit array - just store random bits there directly.
	 */
	tls_get_random(k, 8 * sizeof(k[0]));
#if FIXED_SECRET
	memset(k, 0x77, 8 * sizeof(k[0]));
#endif

// If scalar is too large, try again (pseudo-code)
//	if (k >= 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551 - 1) // order of P256
//		goto pick_another_random;
//	k++; // ensure non-zero

	/* Simpler alternative, at the cost of not choosing some valid
	 * random values, and slightly non-uniform distribution */
	if (k[0] == 0)
		k[0] = 1;
	if (k[7] >= 0xffffffff)
		k[7] = 0xfffffffe;
}
/* Makes a random EC key pair. */
static void sp_ecc_make_key_256(sp_digit privkey[8], uint8_t *pubkey)
{
	sp_point point[1];

	sp_256_ecc_gen_k_8(privkey);
	dump_256("privkey %s\n", privkey);
	sp_256_ecc_mulmod_base_8(point, privkey);
	dump_512("point->x %s\n", point->x);
	dump_512("point->y %s\n", point->y);
	sp_256_to_bin_8(point->x, pubkey);
	sp_256_to_bin_8(point->y, pubkey + 32);

	memset(point, 0, sizeof(point)); //paranoia
}
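/* TLS ECDHE with curve P256:
 * generate an ephemeral keypair (privkey, pubkey2x32 = privkey * G),
 * then derive the premaster secret as the X coordinate of
 * privkey * (peer's public point).
 */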
void FAST_FUNC curve_P256_compute_pubkey_and_premaster(
		uint8_t *pubkey2x32, uint8_t *premaster32,
		const uint8_t *peerkey2x32)
{
	sp_digit privkey[8];

	dump_hex("peerkey2x32: %s\n", peerkey2x32, 64);
	sp_ecc_make_key_256(privkey, pubkey2x32);
	dump_hex("pubkey: %s\n", pubkey2x32, 32);
	dump_hex(" %s\n", pubkey2x32 + 32, 32);

	/* Combine our privkey and peer's public key to generate premaster */
	sp_ecc_secret_gen_256(privkey, /*x,y:*/peerkey2x32, premaster32);
	dump_hex("premaster: %s\n", premaster32, 32);
}