/* (removed: web-scrape pagination artifact — file-size banner and concatenated line numbers) */
  1. /* crypto/ec/ecp_nistp256.c */
  2. /*
  3. * Written by Adam Langley (Google) for the OpenSSL project
  4. */
  5. /* Copyright 2011 Google Inc.
  6. *
  7. * Licensed under the Apache License, Version 2.0 (the "License");
  8. *
  9. * you may not use this file except in compliance with the License.
  10. * You may obtain a copy of the License at
  11. *
  12. * http://www.apache.org/licenses/LICENSE-2.0
  13. *
  14. * Unless required by applicable law or agreed to in writing, software
  15. * distributed under the License is distributed on an "AS IS" BASIS,
  16. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17. * See the License for the specific language governing permissions and
  18. * limitations under the License.
  19. */
  20. /*
  21. * A 64-bit implementation of the NIST P-256 elliptic curve point multiplication
  22. *
  23. * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
  24. * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
  25. * work which got its smarts from Daniel J. Bernstein's work on the same.
  26. */
  27. #include <openssl/opensslconf.h>
  28. #ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
  29. #include <stdint.h>
  30. #include <string.h>
  31. #include <openssl/err.h>
  32. #include "ec_lcl.h"
  33. #if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
  34. /* even with gcc, the typedef won't work for 32-bit platforms */
  35. typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit platforms */
  36. typedef __int128_t int128_t;
  37. #else
  38. #error "Need GCC 3.1 or later to define type uint128_t"
  39. #endif
  40. typedef uint8_t u8;
  41. typedef uint32_t u32;
  42. typedef uint64_t u64;
  43. typedef int64_t s64;
  44. /* The underlying field.
  45. *
  46. * P256 operates over GF(2^256-2^224+2^192+2^96-1). We can serialise an element
  47. * of this field into 32 bytes. We call this an felem_bytearray. */
  48. typedef u8 felem_bytearray[32];
  49. /* These are the parameters of P256, taken from FIPS 186-3, page 86. These
  50. * values are big-endian. */
static const felem_bytearray nistp256_curve_params[5] = {
    {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* p */
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
    {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* a = -3 */
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc},
    {0x5a, 0xc6, 0x35, 0xd8, 0xaa, 0x3a, 0x93, 0xe7, /* b */
     0xb3, 0xeb, 0xbd, 0x55, 0x76, 0x98, 0x86, 0xbc,
     0x65, 0x1d, 0x06, 0xb0, 0xcc, 0x53, 0xb0, 0xf6,
     0x3b, 0xce, 0x3c, 0x3e, 0x27, 0xd2, 0x60, 0x4b},
    {0x6b, 0x17, 0xd1, 0xf2, 0xe1, 0x2c, 0x42, 0x47, /* x */
     0xf8, 0xbc, 0xe6, 0xe5, 0x63, 0xa4, 0x40, 0xf2,
     0x77, 0x03, 0x7d, 0x81, 0x2d, 0xeb, 0x33, 0xa0,
     0xf4, 0xa1, 0x39, 0x45, 0xd8, 0x98, 0xc2, 0x96},
    {0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, /* y */
     0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0x0f, 0x9e, 0x16,
     0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce,
     0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5}
};
  73. /* The representation of field elements.
  74. * ------------------------------------
  75. *
  76. * We represent field elements with either four 128-bit values, eight 128-bit
  77. * values, or four 64-bit values. The field element represented is:
  78. * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
  79. * or:
  80. * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
  81. *
  82. * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
  83. * apart, but are 128-bits wide, the most significant bits of each limb overlap
  84. * with the least significant bits of the next.
  85. *
  86. * A field element with four limbs is an 'felem'. One with eight limbs is a
  87. * 'longfelem'
  88. *
  89. * A field element with four, 64-bit values is called a 'smallfelem'. Small
  90. * values are used as intermediate values before multiplication.
  91. */
/* Number of 128-bit limbs in an felem. */
#define NLIMBS 4
typedef uint128_t limb;             /* one 128-bit limb */
typedef limb felem[NLIMBS];         /* field element: four limbs, 64 bits apart */
typedef limb longfelem[NLIMBS * 2]; /* unreduced product of two felems */
typedef u64 smallfelem[NLIMBS];     /* packed field element: four 64-bit words */
/* This is the value of the prime as four 64-bit words, little-endian. */
static const u64 kPrime[4] = { 0xfffffffffffffffful, 0xffffffff, 0, 0xffffffff00000001ul };
/* Masks for extracting the low 32 / low 63 bits of a word. */
static const limb bottom32bits = 0xffffffff;
static const u64 bottom63bits = 0x7ffffffffffffffful;
  101. /* bin32_to_felem takes a little-endian byte array and converts it into felem
  102. * form. This assumes that the CPU is little-endian. */
  103. static void bin32_to_felem(felem out, const u8 in[32])
  104. {
  105. out[0] = *((u64*) &in[0]);
  106. out[1] = *((u64*) &in[8]);
  107. out[2] = *((u64*) &in[16]);
  108. out[3] = *((u64*) &in[24]);
  109. }
  110. /* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
  111. * 32 byte array. This assumes that the CPU is little-endian. */
  112. static void smallfelem_to_bin32(u8 out[32], const smallfelem in)
  113. {
  114. *((u64*) &out[0]) = in[0];
  115. *((u64*) &out[8]) = in[1];
  116. *((u64*) &out[16]) = in[2];
  117. *((u64*) &out[24]) = in[3];
  118. }
  119. /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
  120. static void flip_endian(u8 *out, const u8 *in, unsigned len)
  121. {
  122. unsigned i;
  123. for (i = 0; i < len; ++i)
  124. out[i] = in[len-1-i];
  125. }
  126. /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
  127. static int BN_to_felem(felem out, const BIGNUM *bn)
  128. {
  129. felem_bytearray b_in;
  130. felem_bytearray b_out;
  131. unsigned num_bytes;
  132. /* BN_bn2bin eats leading zeroes */
  133. memset(b_out, 0, sizeof b_out);
  134. num_bytes = BN_num_bytes(bn);
  135. if (num_bytes > sizeof b_out)
  136. {
  137. ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
  138. return 0;
  139. }
  140. if (BN_is_negative(bn))
  141. {
  142. ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
  143. return 0;
  144. }
  145. num_bytes = BN_bn2bin(bn, b_in);
  146. flip_endian(b_out, b_in, num_bytes);
  147. bin32_to_felem(out, b_out);
  148. return 1;
  149. }
  150. /* felem_to_BN converts an felem into an OpenSSL BIGNUM */
  151. static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in)
  152. {
  153. felem_bytearray b_in, b_out;
  154. smallfelem_to_bin32(b_in, in);
  155. flip_endian(b_out, b_in, sizeof b_out);
  156. return BN_bin2bn(b_out, sizeof b_out, out);
  157. }
  158. /* Field operations
  159. * ---------------- */
  160. static void smallfelem_one(smallfelem out)
  161. {
  162. out[0] = 1;
  163. out[1] = 0;
  164. out[2] = 0;
  165. out[3] = 0;
  166. }
  167. static void smallfelem_assign(smallfelem out, const smallfelem in)
  168. {
  169. out[0] = in[0];
  170. out[1] = in[1];
  171. out[2] = in[2];
  172. out[3] = in[3];
  173. }
  174. static void felem_assign(felem out, const felem in)
  175. {
  176. out[0] = in[0];
  177. out[1] = in[1];
  178. out[2] = in[2];
  179. out[3] = in[3];
  180. }
  181. /* felem_sum sets out = out + in. */
  182. static void felem_sum(felem out, const felem in)
  183. {
  184. out[0] += in[0];
  185. out[1] += in[1];
  186. out[2] += in[2];
  187. out[3] += in[3];
  188. }
  189. /* felem_small_sum sets out = out + in. */
  190. static void felem_small_sum(felem out, const smallfelem in)
  191. {
  192. out[0] += in[0];
  193. out[1] += in[1];
  194. out[2] += in[2];
  195. out[3] += in[3];
  196. }
  197. /* felem_scalar sets out = out * scalar */
  198. static void felem_scalar(felem out, const u64 scalar)
  199. {
  200. out[0] *= scalar;
  201. out[1] *= scalar;
  202. out[2] *= scalar;
  203. out[3] *= scalar;
  204. }
  205. /* longfelem_scalar sets out = out * scalar */
  206. static void longfelem_scalar(longfelem out, const u64 scalar)
  207. {
  208. out[0] *= scalar;
  209. out[1] *= scalar;
  210. out[2] *= scalar;
  211. out[3] *= scalar;
  212. out[4] *= scalar;
  213. out[5] *= scalar;
  214. out[6] *= scalar;
  215. out[7] *= scalar;
  216. }
  217. #define two105m41m9 (((limb)1) << 105) - (((limb)1) << 41) - (((limb)1) << 9)
  218. #define two105 (((limb)1) << 105)
  219. #define two105m41p9 (((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9)
  220. /* zero105 is 0 mod p */
  221. static const felem zero105 = { two105m41m9, two105, two105m41p9, two105m41p9 };
  222. /* smallfelem_neg sets |out| to |-small|
  223. * On exit:
  224. * out[i] < out[i] + 2^105
  225. */
  226. static void smallfelem_neg(felem out, const smallfelem small)
  227. {
  228. /* In order to prevent underflow, we subtract from 0 mod p. */
  229. out[0] = zero105[0] - small[0];
  230. out[1] = zero105[1] - small[1];
  231. out[2] = zero105[2] - small[2];
  232. out[3] = zero105[3] - small[3];
  233. }
  234. /* felem_diff subtracts |in| from |out|
  235. * On entry:
  236. * in[i] < 2^104
  237. * On exit:
  238. * out[i] < out[i] + 2^105
  239. */
  240. static void felem_diff(felem out, const felem in)
  241. {
  242. /* In order to prevent underflow, we add 0 mod p before subtracting. */
  243. out[0] += zero105[0];
  244. out[1] += zero105[1];
  245. out[2] += zero105[2];
  246. out[3] += zero105[3];
  247. out[0] -= in[0];
  248. out[1] -= in[1];
  249. out[2] -= in[2];
  250. out[3] -= in[3];
  251. }
  252. #define two107m43m11 (((limb)1) << 107) - (((limb)1) << 43) - (((limb)1) << 11)
  253. #define two107 (((limb)1) << 107)
  254. #define two107m43p11 (((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11)
  255. /* zero107 is 0 mod p */
  256. static const felem zero107 = { two107m43m11, two107, two107m43p11, two107m43p11 };
  257. /* An alternative felem_diff for larger inputs |in|
  258. * felem_diff_zero107 subtracts |in| from |out|
  259. * On entry:
  260. * in[i] < 2^106
  261. * On exit:
  262. * out[i] < out[i] + 2^107
  263. */
  264. static void felem_diff_zero107(felem out, const felem in)
  265. {
  266. /* In order to prevent underflow, we add 0 mod p before subtracting. */
  267. out[0] += zero107[0];
  268. out[1] += zero107[1];
  269. out[2] += zero107[2];
  270. out[3] += zero107[3];
  271. out[0] -= in[0];
  272. out[1] -= in[1];
  273. out[2] -= in[2];
  274. out[3] -= in[3];
  275. }
  276. /* longfelem_diff subtracts |in| from |out|
  277. * On entry:
  278. * in[i] < 7*2^67
  279. * On exit:
  280. * out[i] < out[i] + 2^70 + 2^40
  281. */
  282. static void longfelem_diff(longfelem out, const longfelem in)
  283. {
  284. static const limb two70m8p6 = (((limb)1) << 70) - (((limb)1) << 8) + (((limb)1) << 6);
  285. static const limb two70p40 = (((limb)1) << 70) + (((limb)1) << 40);
  286. static const limb two70 = (((limb)1) << 70);
  287. static const limb two70m40m38p6 = (((limb)1) << 70) - (((limb)1) << 40) - (((limb)1) << 38) + (((limb)1) << 6);
  288. static const limb two70m6 = (((limb)1) << 70) - (((limb)1) << 6);
  289. /* add 0 mod p to avoid underflow */
  290. out[0] += two70m8p6;
  291. out[1] += two70p40;
  292. out[2] += two70;
  293. out[3] += two70m40m38p6;
  294. out[4] += two70m6;
  295. out[5] += two70m6;
  296. out[6] += two70m6;
  297. out[7] += two70m6;
  298. /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */
  299. out[0] -= in[0];
  300. out[1] -= in[1];
  301. out[2] -= in[2];
  302. out[3] -= in[3];
  303. out[4] -= in[4];
  304. out[5] -= in[5];
  305. out[6] -= in[6];
  306. out[7] -= in[7];
  307. }
  308. #define two64m0 (((limb)1) << 64) - 1
  309. #define two110p32m0 (((limb)1) << 110) + (((limb)1) << 32) - 1
  310. #define two64m46 (((limb)1) << 64) - (((limb)1) << 46)
  311. #define two64m32 (((limb)1) << 64) - (((limb)1) << 32)
  312. /* zero110 is 0 mod p */
  313. static const felem zero110 = { two64m0, two110p32m0, two64m46, two64m32 };
/* felem_shrink converts an felem into a smallfelem. The result isn't quite
 * minimal as the value may be greater than p.
 *
 * On entry:
 * in[i] < 2^109
 * On exit:
 * out[i] < 2^64
 */
static void felem_shrink(smallfelem out, const felem in)
{
    felem tmp;
    u64 a, b, mask;
    s64 high, low;
    static const u64 kPrime3Test = 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */
    /* Add zero110 (0 mod p) so the later subtractions cannot underflow. */
    /* Carry 2->3 */
    tmp[3] = zero110[3] + in[3] + ((u64) (in[2] >> 64));
    /* tmp[3] < 2^110 */
    tmp[2] = zero110[2] + (u64) in[2];
    tmp[0] = zero110[0] + in[0];
    tmp[1] = zero110[1] + in[1];
    /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */
    /* We perform two partial reductions where we eliminate the
     * high-word of tmp[3]. We don't update the other words till the end.
     */
    a = tmp[3] >> 64; /* a < 2^46 */
    tmp[3] = (u64) tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb)a) << 32;
    /* tmp[3] < 2^79 */
    b = a;
    a = tmp[3] >> 64; /* a < 2^15 */
    b += a; /* b < 2^46 + 2^15 < 2^47 */
    tmp[3] = (u64) tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb)a) << 32;
    /* tmp[3] < 2^64 + 2^47 */
    /* This adjusts the other two words to complete the two partial
     * reductions. */
    tmp[0] += b;
    tmp[1] -= (((limb)b) << 32);
    /* In order to make space in tmp[3] for the carry from 2 -> 3, we
     * conditionally subtract kPrime if tmp[3] is large enough.
     * The mask construction below is branch-free so the function runs in
     * constant time. NOTE(review): `high <<= 63; high >>= 63` relies on
     * arithmetic right shift of a signed value, which is
     * implementation-defined in C; this file already requires gcc (see the
     * #error above), where it sign-extends as intended. */
    high = tmp[3] >> 64;
    /* As tmp[3] < 2^65, high is either 1 or 0 */
    high <<= 63;
    high >>= 63;
    /* high is:
     * all ones if the high word of tmp[3] is 1
     * all zeros if the high word of tmp[3] if 0 */
    low = tmp[3];
    mask = low >> 63;
    /* mask is:
     * all ones if the MSB of low is 1
     * all zeros if the MSB of low if 0 */
    low &= bottom63bits;
    low -= kPrime3Test;
    /* if low was greater than kPrime3Test then the MSB is zero */
    low = ~low;
    low >>= 63;
    /* low is:
     * all ones if low was > kPrime3Test
     * all zeros if low was <= kPrime3Test */
    mask = (mask & low) | high;
    tmp[0] -= mask & kPrime[0];
    tmp[1] -= mask & kPrime[1];
    /* kPrime[2] is zero, so omitted */
    tmp[3] -= mask & kPrime[3];
    /* tmp[3] < 2**64 - 2**32 + 1 */
    /* Propagate carries so each limb fits in 64 bits. */
    tmp[1] += ((u64) (tmp[0] >> 64)); tmp[0] = (u64) tmp[0];
    tmp[2] += ((u64) (tmp[1] >> 64)); tmp[1] = (u64) tmp[1];
    tmp[3] += ((u64) (tmp[2] >> 64)); tmp[2] = (u64) tmp[2];
    /* tmp[i] < 2^64 */
    out[0] = tmp[0];
    out[1] = tmp[1];
    out[2] = tmp[2];
    out[3] = tmp[3];
}
  391. /* smallfelem_expand converts a smallfelem to an felem */
  392. static void smallfelem_expand(felem out, const smallfelem in)
  393. {
  394. out[0] = in[0];
  395. out[1] = in[1];
  396. out[2] = in[2];
  397. out[3] = in[3];
  398. }
/* smallfelem_square sets |out| = |small|^2
 * On entry:
 * small[i] < 2^64
 * On exit:
 * out[i] < 7 * 2^64 < 2^67
 *
 * Schoolbook squaring: each 64x64->128 partial product contributes its low
 * word to out[i+j] and its high word to out[i+j+1]. Cross terms
 * small[i]*small[j] (i != j) must be counted twice; the code interleaves the
 * doublings (`+= low` twice, or `*= 2` on an accumulator that so far holds
 * only cross-term words) with the once-only diagonal terms. */
static void smallfelem_square(longfelem out, const smallfelem small)
{
    limb a;
    u64 high, low;
    /* small[0]^2 (diagonal, counted once) */
    a = ((uint128_t) small[0]) * small[0];
    low = a;
    high = a >> 64;
    out[0] = low;
    out[1] = high;
    /* small[0]*small[1]: low added twice; its high is doubled later by the
     * `out[2] *= 2` below */
    a = ((uint128_t) small[0]) * small[1];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[1] += low;
    out[2] = high;
    /* small[0]*small[2]: out[2] holds only cross-term words here, so *= 2
     * doubles both this low and the previous high */
    a = ((uint128_t) small[0]) * small[2];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[2] *= 2;
    out[3] = high;
    /* small[0]*small[3] */
    a = ((uint128_t) small[0]) * small[3];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] = high;
    /* small[1]*small[2]: the *= 2 doubles the cross-term words accumulated
     * in out[3] so far */
    a = ((uint128_t) small[1]) * small[2];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[3] *= 2;
    out[4] += high;
    /* small[1]^2 (diagonal, added after the doubling above) */
    a = ((uint128_t) small[1]) * small[1];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;
    /* small[1]*small[3] */
    a = ((uint128_t) small[1]) * small[3];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[4] *= 2;
    out[5] = high;
    /* small[2]*small[3]: its high is counted twice explicitly */
    a = ((uint128_t) small[2]) * small[3];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[5] *= 2;
    out[6] = high;
    out[6] += high;
    /* small[2]^2 (diagonal) */
    a = ((uint128_t) small[2]) * small[2];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;
    /* small[3]^2 (diagonal) */
    a = ((uint128_t) small[3]) * small[3];
    low = a;
    high = a >> 64;
    out[6] += low;
    out[7] = high;
}
  466. /* felem_square sets |out| = |in|^2
  467. * On entry:
  468. * in[i] < 2^109
  469. * On exit:
  470. * out[i] < 7 * 2^64 < 2^67
  471. */
  472. static void felem_square(longfelem out, const felem in)
  473. {
  474. u64 small[4];
  475. felem_shrink(small, in);
  476. smallfelem_square(out, small);
  477. }
  478. /* smallfelem_mul sets |out| = |small1| * |small2|
  479. * On entry:
  480. * small1[i] < 2^64
  481. * small2[i] < 2^64
  482. * On exit:
  483. * out[i] < 7 * 2^64 < 2^67
  484. */
  485. static void smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2)
  486. {
  487. limb a;
  488. u64 high, low;
  489. a = ((uint128_t) small1[0]) * small2[0];
  490. low = a;
  491. high = a >> 64;
  492. out[0] = low;
  493. out[1] = high;
  494. a = ((uint128_t) small1[0]) * small2[1];
  495. low = a;
  496. high = a >> 64;
  497. out[1] += low;
  498. out[2] = high;
  499. a = ((uint128_t) small1[1]) * small2[0];
  500. low = a;
  501. high = a >> 64;
  502. out[1] += low;
  503. out[2] += high;
  504. a = ((uint128_t) small1[0]) * small2[2];
  505. low = a;
  506. high = a >> 64;
  507. out[2] += low;
  508. out[3] = high;
  509. a = ((uint128_t) small1[1]) * small2[1];
  510. low = a;
  511. high = a >> 64;
  512. out[2] += low;
  513. out[3] += high;
  514. a = ((uint128_t) small1[2]) * small2[0];
  515. low = a;
  516. high = a >> 64;
  517. out[2] += low;
  518. out[3] += high;
  519. a = ((uint128_t) small1[0]) * small2[3];
  520. low = a;
  521. high = a >> 64;
  522. out[3] += low;
  523. out[4] = high;
  524. a = ((uint128_t) small1[1]) * small2[2];
  525. low = a;
  526. high = a >> 64;
  527. out[3] += low;
  528. out[4] += high;
  529. a = ((uint128_t) small1[2]) * small2[1];
  530. low = a;
  531. high = a >> 64;
  532. out[3] += low;
  533. out[4] += high;
  534. a = ((uint128_t) small1[3]) * small2[0];
  535. low = a;
  536. high = a >> 64;
  537. out[3] += low;
  538. out[4] += high;
  539. a = ((uint128_t) small1[1]) * small2[3];
  540. low = a;
  541. high = a >> 64;
  542. out[4] += low;
  543. out[5] = high;
  544. a = ((uint128_t) small1[2]) * small2[2];
  545. low = a;
  546. high = a >> 64;
  547. out[4] += low;
  548. out[5] += high;
  549. a = ((uint128_t) small1[3]) * small2[1];
  550. low = a;
  551. high = a >> 64;
  552. out[4] += low;
  553. out[5] += high;
  554. a = ((uint128_t) small1[2]) * small2[3];
  555. low = a;
  556. high = a >> 64;
  557. out[5] += low;
  558. out[6] = high;
  559. a = ((uint128_t) small1[3]) * small2[2];
  560. low = a;
  561. high = a >> 64;
  562. out[5] += low;
  563. out[6] += high;
  564. a = ((uint128_t) small1[3]) * small2[3];
  565. low = a;
  566. high = a >> 64;
  567. out[6] += low;
  568. out[7] = high;
  569. }
  570. /* felem_mul sets |out| = |in1| * |in2|
  571. * On entry:
  572. * in1[i] < 2^109
  573. * in2[i] < 2^109
  574. * On exit:
  575. * out[i] < 7 * 2^64 < 2^67
  576. */
  577. static void felem_mul(longfelem out, const felem in1, const felem in2)
  578. {
  579. smallfelem small1, small2;
  580. felem_shrink(small1, in1);
  581. felem_shrink(small2, in2);
  582. smallfelem_mul(out, small1, small2);
  583. }
  584. /* felem_small_mul sets |out| = |small1| * |in2|
  585. * On entry:
  586. * small1[i] < 2^64
  587. * in2[i] < 2^109
  588. * On exit:
  589. * out[i] < 7 * 2^64 < 2^67
  590. */
  591. static void felem_small_mul(longfelem out, const smallfelem small1, const felem in2)
  592. {
  593. smallfelem small2;
  594. felem_shrink(small2, in2);
  595. smallfelem_mul(out, small1, small2);
  596. }
  597. #define two100m36m4 (((limb)1) << 100) - (((limb)1) << 36) - (((limb)1) << 4)
  598. #define two100 (((limb)1) << 100)
  599. #define two100m36p4 (((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4)
  600. /* zero100 is 0 mod p */
  601. static const felem zero100 = { two100m36m4, two100, two100m36p4, two100m36p4 };
/* Internal function for the different flavours of felem_reduce.
 * felem_reduce_ reduces the higher coefficients in[4]-in[7] into in[0]-in[3],
 * using the congruences of 2^256, 2^320, 2^384 and 2^448 mod p.
 * On entry:
 * out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
 * out[1] >= in[7] + 2^32*in[4]
 * out[2] >= in[5] + 2^32*in[5]
 * out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
 * On exit:
 * out[0] <= out[0] + in[4] + 2^32*in[5]
 * out[1] <= out[1] + in[5] + 2^33*in[6]
 * out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
 * out[3] <= out[3] + 2^32*in[4] + 3*in[7]
 */
static void felem_reduce_(felem out, const longfelem in)
{
    /* Signed 128-bit scratch: the combined terms below can be negative. */
    int128_t c;
    /* combine common terms from below */
    c = in[4] + (in[5] << 32);
    out[0] += c;
    out[3] -= c;
    c = in[5] - in[7];
    out[1] += c;
    out[2] -= c;
    /* the remaining terms */
    /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */
    out[1] -= (in[4] << 32);
    out[3] += (in[4] << 32);
    /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */
    out[2] -= (in[5] << 32);
    /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */
    out[0] -= in[6];
    out[0] -= (in[6] << 32);
    out[1] += (in[6] << 33);
    out[2] += (in[6] * 2);
    out[3] -= (in[6] << 32);
    /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */
    out[0] -= in[7];
    out[0] -= (in[7] << 32);
    out[2] += (in[7] << 33);
    out[3] += (in[7] * 3);
}
  643. /* felem_reduce converts a longfelem into an felem.
  644. * To be called directly after felem_square or felem_mul.
  645. * On entry:
  646. * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
  647. * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64
  648. * On exit:
  649. * out[i] < 2^101
  650. */
  651. static void felem_reduce(felem out, const longfelem in)
  652. {
  653. out[0] = zero100[0] + in[0];
  654. out[1] = zero100[1] + in[1];
  655. out[2] = zero100[2] + in[2];
  656. out[3] = zero100[3] + in[3];
  657. felem_reduce_(out, in);
  658. /* out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
  659. * out[1] > 2^100 - 2^64 - 7*2^96 > 0
  660. * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
  661. * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
  662. *
  663. * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
  664. * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
  665. * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
  666. * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
  667. */
  668. }
  669. /* felem_reduce_zero105 converts a larger longfelem into an felem.
  670. * On entry:
  671. * in[0] < 2^71
  672. * On exit:
  673. * out[i] < 2^106
  674. */
  675. static void felem_reduce_zero105(felem out, const longfelem in)
  676. {
  677. out[0] = zero105[0] + in[0];
  678. out[1] = zero105[1] + in[1];
  679. out[2] = zero105[2] + in[2];
  680. out[3] = zero105[3] + in[3];
  681. felem_reduce_(out, in);
  682. /* out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
  683. * out[1] > 2^105 - 2^71 - 2^103 > 0
  684. * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
  685. * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
  686. *
  687. * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  688. * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  689. * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
  690. * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
  691. */
  692. }
  693. /* subtract_u64 sets *result = *result - v and *carry to one if the subtraction
  694. * underflowed. */
  695. static void subtract_u64(u64* result, u64* carry, u64 v)
  696. {
  697. uint128_t r = *result;
  698. r -= v;
  699. *carry = (r >> 64) & 1;
  700. *result = (u64) r;
  701. }
  702. /* felem_contract converts |in| to its unique, minimal representation.
  703. * On entry:
  704. * in[i] < 2^109
  705. */
static void felem_contract(smallfelem out, const felem in)
{
    unsigned i;
    u64 all_equal_so_far = 0, result = 0, carry;
    felem_shrink(out, in);
    /* small is minimal except that the value might be > p */
    /* 0 - 1 wraps: all_equal_so_far starts as an all-ones mask. */
    all_equal_so_far--;
    /* We are doing a constant time test if out >= kPrime. We need to
     * compare each u64, from most-significant to least significant. For
     * each one, if all words so far have been equal (m is all ones) then a
     * non-equal result is the answer. Otherwise we continue. */
    /* NB: i is unsigned, so decrementing past 0 wraps to a huge value and
     * the i < 4 condition ends the loop -- this iterates i = 3, 2, 1, 0. */
    for (i = 3; i < 4; i--)
    {
        u64 equal;
        uint128_t a = ((uint128_t) kPrime[i]) - out[i];
        /* if out[i] > kPrime[i] then a will underflow and the high
         * 64-bits will all be set. */
        result |= all_equal_so_far & ((u64) (a >> 64));
        /* if kPrime[i] == out[i] then |equal| will be all zeros and
         * the decrement will make it all ones. */
        equal = kPrime[i] ^ out[i];
        equal--;
        /* AND-fold all 64 bits into the top bit, then arithmetic-shift it
         * across the word: equal becomes all-ones iff kPrime[i] == out[i]. */
        equal &= equal << 32;
        equal &= equal << 16;
        equal &= equal << 8;
        equal &= equal << 4;
        equal &= equal << 2;
        equal &= equal << 1;
        equal = ((s64) equal) >> 63;
        all_equal_so_far &= equal;
    }
    /* if all_equal_so_far is still all ones then the two values are equal
     * and so out >= kPrime is true. */
    result |= all_equal_so_far;
    /* if out >= kPrime then we subtract kPrime. */
    /* result is all-ones or all-zeros, so "result & kPrime[i]" selects the
     * subtrahend in constant time; each borrow is rippled up explicitly. */
    subtract_u64(&out[0], &carry, result & kPrime[0]);
    subtract_u64(&out[1], &carry, carry);
    subtract_u64(&out[2], &carry, carry);
    subtract_u64(&out[3], &carry, carry);
    subtract_u64(&out[1], &carry, result & kPrime[1]);
    subtract_u64(&out[2], &carry, carry);
    subtract_u64(&out[3], &carry, carry);
    subtract_u64(&out[2], &carry, result & kPrime[2]);
    subtract_u64(&out[3], &carry, carry);
    subtract_u64(&out[3], &carry, result & kPrime[3]);
}
  752. static void smallfelem_square_contract(smallfelem out, const smallfelem in)
  753. {
  754. longfelem longtmp;
  755. felem tmp;
  756. smallfelem_square(longtmp, in);
  757. felem_reduce(tmp, longtmp);
  758. felem_contract(out, tmp);
  759. }
  760. static void smallfelem_mul_contract(smallfelem out, const smallfelem in1, const smallfelem in2)
  761. {
  762. longfelem longtmp;
  763. felem tmp;
  764. smallfelem_mul(longtmp, in1, in2);
  765. felem_reduce(tmp, longtmp);
  766. felem_contract(out, tmp);
  767. }
/* smallfelem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
  769. * otherwise.
  770. * On entry:
  771. * small[i] < 2^64
  772. */
static limb smallfelem_is_zero(const smallfelem small)
{
    limb result;
    u64 is_p;
    u64 is_zero = small[0] | small[1] | small[2] | small[3];
    /* Constant-time test for zero: decrement, AND-fold every bit into the
     * sign bit, then arithmetic-shift it across the word. is_zero ends up
     * all-ones iff every limb of |small| was 0. */
    is_zero--;
    is_zero &= is_zero << 32;
    is_zero &= is_zero << 16;
    is_zero &= is_zero << 8;
    is_zero &= is_zero << 4;
    is_zero &= is_zero << 2;
    is_zero &= is_zero << 1;
    is_zero = ((s64) is_zero) >> 63;
    /* Same trick for |small| == p: XOR against kPrime limb-wise; the OR is
     * zero iff every limb matches. */
    is_p = (small[0] ^ kPrime[0]) |
    (small[1] ^ kPrime[1]) |
    (small[2] ^ kPrime[2]) |
    (small[3] ^ kPrime[3]);
    is_p--;
    is_p &= is_p << 32;
    is_p &= is_p << 16;
    is_p &= is_p << 8;
    is_p &= is_p << 4;
    is_p &= is_p << 2;
    is_p &= is_p << 1;
    is_p = ((s64) is_p) >> 63;
    /* |small| is zero mod p when it is 0 or exactly p. */
    is_zero |= is_p;
    /* Widen the 64-bit mask to the full limb width (both halves set). */
    result = is_zero;
    result |= ((limb) is_zero) << 64;
    return result;
}
  803. static int smallfelem_is_zero_int(const smallfelem small)
  804. {
  805. return (int) (smallfelem_is_zero(small) & ((limb)1));
  806. }
  807. /* felem_inv calculates |out| = |in|^{-1}
  808. *
  809. * Based on Fermat's Little Theorem:
  810. * a^p = a (mod p)
  811. * a^{p-1} = 1 (mod p)
  812. * a^{p-2} = a^{-1} (mod p)
  813. */
static void felem_inv(felem out, const felem in)
{
    /* Fixed addition chain computing in^(p-2) mod p, where
     * p - 2 = 2^256 - 2^224 + 2^192 + 2^96 - 3. Each trailing comment
     * gives the exponent held so far, e.g. "2^4 - 2^0" means in^(2^4 - 1).
     * The chain is fixed, so the sequence of operations is independent of
     * the input value (constant-time). */
    felem ftmp, ftmp2;
    /* each e_I will hold |in|^{2^I - 1} */
    felem e2, e4, e8, e16, e32, e64;
    longfelem tmp;
    unsigned i;
    felem_square(tmp, in); felem_reduce(ftmp, tmp); /* 2^1 */
    felem_mul(tmp, in, ftmp); felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
    felem_assign(e2, ftmp);
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */
    felem_mul(tmp, ftmp, e2); felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */
    felem_assign(e4, ftmp);
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */
    felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */
    felem_mul(tmp, ftmp, e4); felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
    felem_assign(e8, ftmp);
    for (i = 0; i < 8; i++) {
        felem_square(tmp, ftmp); felem_reduce(ftmp, tmp);
    } /* 2^16 - 2^8 */
    felem_mul(tmp, ftmp, e8); felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
    felem_assign(e16, ftmp);
    for (i = 0; i < 16; i++) {
        felem_square(tmp, ftmp); felem_reduce(ftmp, tmp);
    } /* 2^32 - 2^16 */
    felem_mul(tmp, ftmp, e16); felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
    felem_assign(e32, ftmp);
    for (i = 0; i < 32; i++) {
        felem_square(tmp, ftmp); felem_reduce(ftmp, tmp);
    } /* 2^64 - 2^32 */
    felem_assign(e64, ftmp);
    felem_mul(tmp, ftmp, in); felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
    for (i = 0; i < 192; i++) {
        felem_square(tmp, ftmp); felem_reduce(ftmp, tmp);
    } /* 2^256 - 2^224 + 2^192 */
    /* Build the low part of the exponent in ftmp2. */
    felem_mul(tmp, e64, e32); felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
    for (i = 0; i < 16; i++) {
        felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
    } /* 2^80 - 2^16 */
    felem_mul(tmp, ftmp2, e16); felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
    for (i = 0; i < 8; i++) {
        felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
    } /* 2^88 - 2^8 */
    felem_mul(tmp, ftmp2, e8); felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
    for (i = 0; i < 4; i++) {
        felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp);
    } /* 2^92 - 2^4 */
    felem_mul(tmp, ftmp2, e4); felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */
    felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */
    felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */
    felem_mul(tmp, ftmp2, e2); felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */
    felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */
    felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */
    felem_mul(tmp, ftmp2, in); felem_reduce(ftmp2, tmp); /* 2^96 - 3 */
    /* Combine both halves: (2^256 - 2^224 + 2^192) + (2^96 - 3) = p - 2. */
    felem_mul(tmp, ftmp2, ftmp); felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
}
  873. static void smallfelem_inv_contract(smallfelem out, const smallfelem in)
  874. {
  875. felem tmp;
  876. smallfelem_expand(tmp, in);
  877. felem_inv(tmp, tmp);
  878. felem_contract(out, tmp);
  879. }
  880. /* Group operations
  881. * ----------------
  882. *
  883. * Building on top of the field operations we have the operations on the
  884. * elliptic curve group itself. Points on the curve are represented in Jacobian
  885. * coordinates */
  886. /* point_double calculates 2*(x_in, y_in, z_in)
  887. *
  888. * The method is taken from:
  889. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  890. *
  891. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
  892. * while x_out == y_in is not (maybe this works, but it's not tested). */
static void
point_double(felem x_out, felem y_out, felem z_out,
const felem x_in, const felem y_in, const felem z_in)
{
    longfelem tmp, tmp2;
    felem delta, gamma, beta, alpha, ftmp, ftmp2;
    /* small1 holds gamma and small2 holds alpha in shrunk form, so the
     * cheaper smallfelem multiply/square routines can be used. */
    smallfelem small1, small2;
    felem_assign(ftmp, x_in);
    /* ftmp[i] < 2^106 */
    felem_assign(ftmp2, x_in);
    /* ftmp2[i] < 2^106 */
    /* delta = z^2 */
    felem_square(tmp, z_in);
    felem_reduce(delta, tmp);
    /* delta[i] < 2^101 */
    /* gamma = y^2 */
    felem_square(tmp, y_in);
    felem_reduce(gamma, tmp);
    /* gamma[i] < 2^101 */
    felem_shrink(small1, gamma);
    /* beta = x*gamma */
    felem_small_mul(tmp, small1, x_in);
    felem_reduce(beta, tmp);
    /* beta[i] < 2^101 */
    /* alpha = 3*(x-delta)*(x+delta) */
    felem_diff(ftmp, delta);
    /* ftmp[i] < 2^105 + 2^106 < 2^107 */
    felem_sum(ftmp2, delta);
    /* ftmp2[i] < 2^105 + 2^106 < 2^107 */
    felem_scalar(ftmp2, 3);
    /* ftmp2[i] < 3 * 2^107 < 2^109 */
    felem_mul(tmp, ftmp, ftmp2);
    felem_reduce(alpha, tmp);
    /* alpha[i] < 2^101 */
    felem_shrink(small2, alpha);
    /* x' = alpha^2 - 8*beta */
    smallfelem_square(tmp, small2);
    felem_reduce(x_out, tmp);
    felem_assign(ftmp, beta);
    felem_scalar(ftmp, 8);
    /* ftmp[i] < 8 * 2^101 = 2^104 */
    felem_diff(x_out, ftmp);
    /* x_out[i] < 2^105 + 2^101 < 2^106 */
    /* z' = (y + z)^2 - gamma - delta */
    felem_sum(delta, gamma);
    /* delta[i] < 2^101 + 2^101 = 2^102 */
    felem_assign(ftmp, y_in);
    felem_sum(ftmp, z_in);
    /* ftmp[i] < 2^106 + 2^106 = 2^107 */
    felem_square(tmp, ftmp);
    felem_reduce(z_out, tmp);
    felem_diff(z_out, delta);
    /* z_out[i] < 2^105 + 2^101 < 2^106 */
    /* y' = alpha*(4*beta - x') - 8*gamma^2 */
    felem_scalar(beta, 4);
    /* beta[i] < 4 * 2^101 = 2^103 */
    felem_diff_zero107(beta, x_out);
    /* beta[i] < 2^107 + 2^103 < 2^108 */
    felem_small_mul(tmp, small2, beta);
    /* tmp[i] < 7 * 2^64 < 2^67 */
    smallfelem_square(tmp2, small1);
    /* tmp2[i] < 7 * 2^64 */
    longfelem_scalar(tmp2, 8);
    /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */
    longfelem_diff(tmp, tmp2);
    /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
    /* The subtraction above can leave larger limbs, hence the wider
     * zero105-biased reduction here. */
    felem_reduce_zero105(y_out, tmp);
    /* y_out[i] < 2^106 */
}
  962. /* point_double_small is the same as point_double, except that it operates on
  963. * smallfelems */
  964. static void
  965. point_double_small(smallfelem x_out, smallfelem y_out, smallfelem z_out,
  966. const smallfelem x_in, const smallfelem y_in, const smallfelem z_in)
  967. {
  968. felem felem_x_out, felem_y_out, felem_z_out;
  969. felem felem_x_in, felem_y_in, felem_z_in;
  970. smallfelem_expand(felem_x_in, x_in);
  971. smallfelem_expand(felem_y_in, y_in);
  972. smallfelem_expand(felem_z_in, z_in);
  973. point_double(felem_x_out, felem_y_out, felem_z_out,
  974. felem_x_in, felem_y_in, felem_z_in);
  975. felem_shrink(x_out, felem_x_out);
  976. felem_shrink(y_out, felem_y_out);
  977. felem_shrink(z_out, felem_z_out);
  978. }
  979. /* copy_conditional copies in to out iff mask is all ones. */
  980. static void
  981. copy_conditional(felem out, const felem in, limb mask)
  982. {
  983. unsigned i;
  984. for (i = 0; i < NLIMBS; ++i)
  985. {
  986. const limb tmp = mask & (in[i] ^ out[i]);
  987. out[i] ^= tmp;
  988. }
  989. }
  990. /* copy_small_conditional copies in to out iff mask is all ones. */
  991. static void
  992. copy_small_conditional(felem out, const smallfelem in, limb mask)
  993. {
  994. unsigned i;
  995. const u64 mask64 = mask;
  996. for (i = 0; i < NLIMBS; ++i)
  997. {
  998. out[i] = ((limb) (in[i] & mask64)) | (out[i] & ~mask);
  999. }
  1000. }
/* point_add calculates (x1, y1, z1) + (x2, y2, z2)
  1002. *
  1003. * The method is taken from:
  1004. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
  1005. * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
  1006. *
  1007. * This function includes a branch for checking whether the two input points
  1008. * are equal, (while not equal to the point at infinity). This case never
  1009. * happens during single point multiplication, so there is no timing leak for
  1010. * ECDH or ECDSA signing. */
static void point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const smallfelem x2, const smallfelem y2, const smallfelem z2)
{
    felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
    longfelem tmp, tmp2;
    smallfelem small1, small2, small3, small4, small5;
    /* all-ones/all-zeros masks from smallfelem_is_zero */
    limb x_equal, y_equal, z1_is_zero, z2_is_zero;
    felem_shrink(small3, z1);
    z1_is_zero = smallfelem_is_zero(small3);
    z2_is_zero = smallfelem_is_zero(z2);
    /* ftmp = z1z1 = z1**2 */
    smallfelem_square(tmp, small3);
    felem_reduce(ftmp, tmp);
    /* ftmp[i] < 2^101 */
    felem_shrink(small1, ftmp);
    if(!mixed)
    {
        /* General (non-mixed) case: z2 is arbitrary, so u1 and s1 must be
         * scaled by powers of z2 as well. */
        /* ftmp2 = z2z2 = z2**2 */
        smallfelem_square(tmp, z2);
        felem_reduce(ftmp2, tmp);
        /* ftmp2[i] < 2^101 */
        felem_shrink(small2, ftmp2);
        felem_shrink(small5, x1);
        /* u1 = ftmp3 = x1*z2z2 */
        smallfelem_mul(tmp, small5, small2);
        felem_reduce(ftmp3, tmp);
        /* ftmp3[i] < 2^101 */
        /* ftmp5 = z1 + z2 */
        felem_assign(ftmp5, z1);
        felem_small_sum(ftmp5, z2);
        /* ftmp5[i] < 2^107 */
        /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */
        felem_square(tmp, ftmp5);
        felem_reduce(ftmp5, tmp);
        /* ftmp2 = z2z2 + z1z1 */
        felem_sum(ftmp2, ftmp);
        /* ftmp2[i] < 2^101 + 2^101 = 2^102 */
        felem_diff(ftmp5, ftmp2);
        /* ftmp5[i] < 2^105 + 2^101 < 2^106 */
        /* ftmp2 = z2 * z2z2 */
        smallfelem_mul(tmp, small2, z2);
        felem_reduce(ftmp2, tmp);
        /* s1 = ftmp2 = y1 * z2**3 */
        felem_mul(tmp, y1, ftmp2);
        felem_reduce(ftmp6, tmp);
        /* ftmp6[i] < 2^101 */
    }
    else
    {
        /* We'll assume z2 = 1 (special case z2 = 0 is handled later) */
        /* u1 = ftmp3 = x1*z2z2 */
        felem_assign(ftmp3, x1);
        /* ftmp3[i] < 2^106 */
        /* ftmp5 = 2z1z2 */
        felem_assign(ftmp5, z1);
        felem_scalar(ftmp5, 2);
        /* ftmp5[i] < 2*2^106 = 2^107 */
        /* s1 = ftmp2 = y1 * z2**3 */
        felem_assign(ftmp6, y1);
        /* ftmp6[i] < 2^106 */
    }
    /* u2 = x2*z1z1 */
    smallfelem_mul(tmp, x2, small1);
    felem_reduce(ftmp4, tmp);
    /* h = ftmp4 = u2 - u1 */
    felem_diff_zero107(ftmp4, ftmp3);
    /* ftmp4[i] < 2^107 + 2^101 < 2^108 */
    felem_shrink(small4, ftmp4);
    x_equal = smallfelem_is_zero(small4);
    /* z_out = ftmp5 * h */
    felem_small_mul(tmp, small4, ftmp5);
    felem_reduce(z_out, tmp);
    /* z_out[i] < 2^101 */
    /* ftmp = z1 * z1z1 */
    smallfelem_mul(tmp, small1, small3);
    felem_reduce(ftmp, tmp);
    /* s2 = tmp = y2 * z1**3 */
    felem_small_mul(tmp, y2, ftmp);
    felem_reduce(ftmp5, tmp);
    /* r = ftmp5 = (s2 - s1)*2 */
    felem_diff_zero107(ftmp5, ftmp6);
    /* ftmp5[i] < 2^107 + 2^107 = 2^108*/
    felem_scalar(ftmp5, 2);
    /* ftmp5[i] < 2^109 */
    felem_shrink(small1, ftmp5);
    y_equal = smallfelem_is_zero(small1);
    /* Doubling case: both affine coordinates match and neither input is
     * the point at infinity (see the timing note above this function). */
    if (x_equal && y_equal && !z1_is_zero && !z2_is_zero)
    {
        point_double(x3, y3, z3, x1, y1, z1);
        return;
    }
    /* I = ftmp = (2h)**2 */
    felem_assign(ftmp, ftmp4);
    felem_scalar(ftmp, 2);
    /* ftmp[i] < 2*2^108 = 2^109 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);
    /* J = ftmp2 = h * I */
    felem_mul(tmp, ftmp4, ftmp);
    felem_reduce(ftmp2, tmp);
    /* V = ftmp4 = U1 * I */
    felem_mul(tmp, ftmp3, ftmp);
    felem_reduce(ftmp4, tmp);
    /* x_out = r**2 - J - 2V */
    smallfelem_square(tmp, small1);
    felem_reduce(x_out, tmp);
    felem_assign(ftmp3, ftmp4);
    felem_scalar(ftmp4, 2);
    felem_sum(ftmp4, ftmp2);
    /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */
    felem_diff(x_out, ftmp4);
    /* x_out[i] < 2^105 + 2^101 */
    /* y_out = r(V-x_out) - 2 * s1 * J */
    felem_diff_zero107(ftmp3, x_out);
    /* ftmp3[i] < 2^107 + 2^101 < 2^108 */
    felem_small_mul(tmp, small1, ftmp3);
    felem_mul(tmp2, ftmp6, ftmp2);
    longfelem_scalar(tmp2, 2);
    /* tmp2[i] < 2*2^67 = 2^68 */
    longfelem_diff(tmp, tmp2);
    /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
    felem_reduce_zero105(y_out, tmp);
    /* y_out[i] < 2^106 */
    /* Handle the point-at-infinity cases in constant time: if z1 == 0 the
     * result is P2; if z2 == 0 the result is P1. */
    copy_small_conditional(x_out, x2, z1_is_zero);
    copy_conditional(x_out, x1, z2_is_zero);
    copy_small_conditional(y_out, y2, z1_is_zero);
    copy_conditional(y_out, y1, z2_is_zero);
    copy_small_conditional(z_out, z2, z1_is_zero);
    copy_conditional(z_out, z1, z2_is_zero);
    felem_assign(x3, x_out);
    felem_assign(y3, y_out);
    felem_assign(z3, z_out);
}
  1145. /* point_add_small is the same as point_add, except that it operates on
  1146. * smallfelems */
  1147. static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
  1148. smallfelem x1, smallfelem y1, smallfelem z1,
  1149. smallfelem x2, smallfelem y2, smallfelem z2)
  1150. {
  1151. felem felem_x3, felem_y3, felem_z3;
  1152. felem felem_x1, felem_y1, felem_z1;
  1153. smallfelem_expand(felem_x1, x1);
  1154. smallfelem_expand(felem_y1, y1);
  1155. smallfelem_expand(felem_z1, z1);
  1156. point_add(felem_x3, felem_y3, felem_z3, felem_x1, felem_y1, felem_z1, 0, x2, y2, z2);
  1157. felem_shrink(x3, felem_x3);
  1158. felem_shrink(y3, felem_y3);
  1159. felem_shrink(z3, felem_z3);
  1160. }
  1161. /* Base point pre computation
  1162. * --------------------------
  1163. *
  1164. * Two different sorts of precomputed tables are used in the following code.
  1165. * Each contain various points on the curve, where each point is three field
  1166. * elements (x, y, z).
  1167. *
  1168. * For the base point table, z is usually 1 (0 for the point at infinity).
  1169. * This table has 2 * 16 elements, starting with the following:
  1170. * index | bits | point
  1171. * ------+---------+------------------------------
  1172. * 0 | 0 0 0 0 | 0G
  1173. * 1 | 0 0 0 1 | 1G
  1174. * 2 | 0 0 1 0 | 2^64G
  1175. * 3 | 0 0 1 1 | (2^64 + 1)G
  1176. * 4 | 0 1 0 0 | 2^128G
  1177. * 5 | 0 1 0 1 | (2^128 + 1)G
  1178. * 6 | 0 1 1 0 | (2^128 + 2^64)G
  1179. * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
  1180. * 8 | 1 0 0 0 | 2^192G
  1181. * 9 | 1 0 0 1 | (2^192 + 1)G
  1182. * 10 | 1 0 1 0 | (2^192 + 2^64)G
  1183. * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
  1184. * 12 | 1 1 0 0 | (2^192 + 2^128)G
  1185. * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
  1186. * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
  1187. * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
  1188. * followed by a copy of this with each element multiplied by 2^32.
  1189. *
  1190. * The reason for this is so that we can clock bits into four different
  1191. * locations when doing simple scalar multiplies against the base point,
  1192. * and then another four locations using the second 16 elements.
  1193. *
  1194. * Tables for other points have table[i] = iG for i in 0 .. 16. */
  1195. /* gmul is the table of precomputed base points */
/* Each point is (x, y, z) with z == 1 for real points and all-zero rows for
 * the point at infinity. Each coordinate is four u64 limbs; judging from
 * kPrime's use elsewhere in this file, limb [0] is presumably the least
 * significant -- TODO confirm against the smallfelem definition above. */
static const smallfelem gmul[2][16][3] =
{{{{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0}},
{{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2, 0x6b17d1f2e12c4247},
{0xcbb6406837bf51f5, 0x2bce33576b315ece, 0x8ee7eb4a7c0f9e16, 0x4fe342e2fe1a7f9b},
{1, 0, 0, 0}},
{{0x90e75cb48e14db63, 0x29493baaad651f7e, 0x8492592e326e25de, 0x0fa822bc2811aaa5},
{0xe41124545f462ee7, 0x34b1a65050fe82f5, 0x6f4ad4bcb3df188b, 0xbff44ae8f5dba80d},
{1, 0, 0, 0}},
{{0x93391ce2097992af, 0xe96c98fd0d35f1fa, 0xb257c0de95e02789, 0x300a4bbc89d6726f},
{0xaa54a291c08127a0, 0x5bb1eeada9d806a5, 0x7f1ddb25ff1e3c6f, 0x72aac7e0d09b4644},
{1, 0, 0, 0}},
{{0x57c84fc9d789bd85, 0xfc35ff7dc297eac3, 0xfb982fd588c6766e, 0x447d739beedb5e67},
{0x0c7e33c972e25b32, 0x3d349b95a7fae500, 0xe12e9d953a4aaff7, 0x2d4825ab834131ee},
{1, 0, 0, 0}},
{{0x13949c932a1d367f, 0xef7fbd2b1a0a11b7, 0xddc6068bb91dfc60, 0xef9519328a9c72ff},
{0x196035a77376d8a8, 0x23183b0895ca1740, 0xc1ee9807022c219c, 0x611e9fc37dbb2c9b},
{1, 0, 0, 0}},
{{0xcae2b1920b57f4bc, 0x2936df5ec6c9bc36, 0x7dea6482e11238bf, 0x550663797b51f5d8},
{0x44ffe216348a964c, 0x9fb3d576dbdefbe1, 0x0afa40018d9d50e5, 0x157164848aecb851},
{1, 0, 0, 0}},
{{0xe48ecafffc5cde01, 0x7ccd84e70d715f26, 0xa2e8f483f43e4391, 0xeb5d7745b21141ea},
{0xcac917e2731a3479, 0x85f22cfe2844b645, 0x0990e6a158006cee, 0xeafd72ebdbecc17b},
{1, 0, 0, 0}},
{{0x6cf20ffb313728be, 0x96439591a3c6b94a, 0x2736ff8344315fc5, 0xa6d39677a7849276},
{0xf2bab833c357f5f4, 0x824a920c2284059b, 0x66b8babd2d27ecdf, 0x674f84749b0b8816},
{1, 0, 0, 0}},
{{0x2df48c04677c8a3e, 0x74e02f080203a56b, 0x31855f7db8c7fedb, 0x4e769e7672c9ddad},
{0xa4c36165b824bbb0, 0xfb9ae16f3b9122a5, 0x1ec0057206947281, 0x42b99082de830663},
{1, 0, 0, 0}},
{{0x6ef95150dda868b9, 0xd1f89e799c0ce131, 0x7fdc1ca008a1c478, 0x78878ef61c6ce04d},
{0x9c62b9121fe0d976, 0x6ace570ebde08d4f, 0xde53142c12309def, 0xb6cb3f5d7b72c321},
{1, 0, 0, 0}},
{{0x7f991ed2c31a3573, 0x5b82dd5bd54fb496, 0x595c5220812ffcae, 0x0c88bc4d716b1287},
{0x3a57bf635f48aca8, 0x7c8181f4df2564f3, 0x18d1b5b39c04e6aa, 0xdd5ddea3f3901dc6},
{1, 0, 0, 0}},
{{0xe96a79fb3e72ad0c, 0x43a0a28c42ba792f, 0xefe0a423083e49f3, 0x68f344af6b317466},
{0xcdfe17db3fb24d4a, 0x668bfc2271f5c626, 0x604ed93c24d67ff3, 0x31b9c405f8540a20},
{1, 0, 0, 0}},
{{0xd36b4789a2582e7f, 0x0d1a10144ec39c28, 0x663c62c3edbad7a0, 0x4052bf4b6f461db9},
{0x235a27c3188d25eb, 0xe724f33999bfcc5b, 0x862be6bd71d70cc8, 0xfecf4d5190b0fc61},
{1, 0, 0, 0}},
{{0x74346c10a1d4cfac, 0xafdf5cc08526a7a4, 0x123202a8f62bff7a, 0x1eddbae2c802e41a},
{0x8fa0af2dd603f844, 0x36e06b7e4c701917, 0x0c45f45273db33a0, 0x43104d86560ebcfc},
{1, 0, 0, 0}},
{{0x9615b5110d1d78e5, 0x66b0de3225c4744b, 0x0a4a46fb6aaf363a, 0xb48e26b484f7a21c},
{0x06ebb0f621a01b2d, 0xc004e4048b7b0f98, 0x64131bcdfed6f668, 0xfac015404d4d3dab},
{1, 0, 0, 0}}},
{{{0, 0, 0, 0},
{0, 0, 0, 0},
{0, 0, 0, 0}},
{{0x3a5a9e22185a5943, 0x1ab919365c65dfb6, 0x21656b32262c71da, 0x7fe36b40af22af89},
{0xd50d152c699ca101, 0x74b3d5867b8af212, 0x9f09f40407dca6f1, 0xe697d45825b63624},
{1, 0, 0, 0}},
{{0xa84aa9397512218e, 0xe9a521b074ca0141, 0x57880b3a18a2e902, 0x4a5b506612a677a6},
{0x0beada7a4c4f3840, 0x626db15419e26d9d, 0xc42604fbe1627d40, 0xeb13461ceac089f1},
{1, 0, 0, 0}},
{{0xf9faed0927a43281, 0x5e52c4144103ecbc, 0xc342967aa815c857, 0x0781b8291c6a220a},
{0x5a8343ceeac55f80, 0x88f80eeee54a05e3, 0x97b2a14f12916434, 0x690cde8df0151593},
{1, 0, 0, 0}},
{{0xaee9c75df7f82f2a, 0x9e4c35874afdf43a, 0xf5622df437371326, 0x8a535f566ec73617},
{0xc5f9a0ac223094b7, 0xcde533864c8c7669, 0x37e02819085a92bf, 0x0455c08468b08bd7},
{1, 0, 0, 0}},
{{0x0c0a6e2c9477b5d9, 0xf9a4bf62876dc444, 0x5050a949b6cdc279, 0x06bada7ab77f8276},
{0xc8b4aed1ea48dac9, 0xdebd8a4b7ea1070f, 0x427d49101366eb70, 0x5b476dfd0e6cb18a},
{1, 0, 0, 0}},
{{0x7c5c3e44278c340a, 0x4d54606812d66f3b, 0x29a751b1ae23c5d8, 0x3e29864e8a2ec908},
{0x142d2a6626dbb850, 0xad1744c4765bd780, 0x1f150e68e322d1ed, 0x239b90ea3dc31e7e},
{1, 0, 0, 0}},
{{0x78c416527a53322a, 0x305dde6709776f8e, 0xdbcab759f8862ed4, 0x820f4dd949f72ff7},
{0x6cc544a62b5debd4, 0x75be5d937b4e8cc4, 0x1b481b1b215c14d3, 0x140406ec783a05ec},
{1, 0, 0, 0}},
{{0x6a703f10e895df07, 0xfd75f3fa01876bd8, 0xeb5b06e70ce08ffe, 0x68f6b8542783dfee},
{0x90c76f8a78712655, 0xcf5293d2f310bf7f, 0xfbc8044dfda45028, 0xcbe1feba92e40ce6},
{1, 0, 0, 0}},
{{0xe998ceea4396e4c1, 0xfc82ef0b6acea274, 0x230f729f2250e927, 0xd0b2f94d2f420109},
{0x4305adddb38d4966, 0x10b838f8624c3b45, 0x7db2636658954e7a, 0x971459828b0719e5},
{1, 0, 0, 0}},
{{0x4bd6b72623369fc9, 0x57f2929e53d0b876, 0xc2d5cba4f2340687, 0x961610004a866aba},
{0x49997bcd2e407a5e, 0x69ab197d92ddcb24, 0x2cf1f2438fe5131c, 0x7acb9fadcee75e44},
{1, 0, 0, 0}},
{{0x254e839423d2d4c0, 0xf57f0c917aea685b, 0xa60d880f6f75aaea, 0x24eb9acca333bf5b},
{0xe3de4ccb1cda5dea, 0xfeef9341c51a6b4f, 0x743125f88bac4c4d, 0x69f891c5acd079cc},
{1, 0, 0, 0}},
{{0xeee44b35702476b5, 0x7ed031a0e45c2258, 0xb422d1e7bd6f8514, 0xe51f547c5972a107},
{0xa25bcd6fc9cf343d, 0x8ca922ee097c184e, 0xa62f98b3a9fe9a06, 0x1c309a2b25bb1387},
{1, 0, 0, 0}},
{{0x9295dbeb1967c459, 0xb00148833472c98e, 0xc504977708011828, 0x20b87b8aa2c4e503},
{0x3063175de057c277, 0x1bd539338fe582dd, 0x0d11adef5f69a044, 0xf5c6fa49919776be},
{1, 0, 0, 0}},
{{0x8c944e760fd59e11, 0x3876cba1102fad5f, 0xa454c3fad83faa56, 0x1ed7d1b9332010b9},
{0xa1011a270024b889, 0x05e4d0dcac0cd344, 0x52b520f0eb6a2a24, 0x3a2b03f03217257a},
{1, 0, 0, 0}},
{{0xf20fc2afdf1d043d, 0xf330240db58d5a62, 0xfc7d229ca0058c3b, 0x15fee545c78dd9f6},
{0x501e82885bc98cda, 0x41ef80e5d046ac04, 0x557d9f49461210fb, 0x4ab5b6b2b8753f81},
{1, 0, 0, 0}}}};
  1293. /* select_point selects the |idx|th point from a precomputation table and
  1294. * copies it to out. */
  1295. static void select_point(const u64 idx, unsigned int size, const smallfelem pre_comp[16][3], smallfelem out[3])
  1296. {
  1297. unsigned i, j;
  1298. u64 *outlimbs = &out[0][0];
  1299. memset(outlimbs, 0, 3 * sizeof(smallfelem));
  1300. for (i = 0; i < size; i++)
  1301. {
  1302. const u64 *inlimbs = (u64*) &pre_comp[i][0][0];
  1303. u64 mask = i ^ idx;
  1304. mask |= mask >> 4;
  1305. mask |= mask >> 2;
  1306. mask |= mask >> 1;
  1307. mask &= 1;
  1308. mask--;
  1309. for (j = 0; j < NLIMBS * 3; j++)
  1310. outlimbs[j] |= inlimbs[j] & mask;
  1311. }
  1312. }
  1313. /* get_bit returns the |i|th bit in |in| */
  1314. static char get_bit(const felem_bytearray in, int i)
  1315. {
  1316. if ((i < 0) || (i >= 256))
  1317. return 0;
  1318. return (in[i >> 3] >> (i & 7)) & 1;
  1319. }
  1320. /* Interleaved point multiplication using precomputed point multiples:
  1321. * The small point multiples 0*P, 1*P, ..., 17*P are in pre_comp[],
  1322. * the scalars in scalars[]. If g_scalar is non-NULL, we also add this multiple
  1323. * of the generator, using certain (large) precomputed multiples in g_pre_comp.
  1324. * Output point (X, Y, Z) is stored in x_out, y_out, z_out */
static void batch_mul(felem x_out, felem y_out, felem z_out,
const felem_bytearray scalars[], const unsigned num_points, const u8 *g_scalar,
const int mixed, const smallfelem pre_comp[][17][3], const smallfelem g_pre_comp[2][16][3])
{
    int i, skip;
    unsigned num, gen_mul = (g_scalar != NULL);
    felem nq[3], ftmp;
    smallfelem tmp[3];
    u64 bits;
    u8 sign, digit;
    /* set nq to the point at infinity */
    memset(nq, 0, 3 * sizeof(felem));
    /* Loop over all scalars msb-to-lsb, interleaving additions
     * of multiples of the generator (two in each of the last 32 rounds)
     * and additions of other points multiples (every 5th round).
     */
    skip = 1; /* save two point operations in the first round */
    /* Only 32 iterations are needed for a generator-only multiply, since
     * the generator table lets us clock four scalar bits per lookup. */
    for (i = (num_points ? 255 : 31); i >= 0; --i)
    {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
        /* add multiples of the generator */
        if (gen_mul && (i <= 31))
        {
            /* first, look 32 bits upwards */
            /* Gather one bit from each 64-bit quarter of the scalar,
             * offset by 32, to index the second generator table. */
            bits = get_bit(g_scalar, i + 224) << 3;
            bits |= get_bit(g_scalar, i + 160) << 2;
            bits |= get_bit(g_scalar, i + 96) << 1;
            bits |= get_bit(g_scalar, i + 32);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[1], tmp);
            if (!skip)
            {
                point_add(nq[0], nq[1], nq[2],
                    nq[0], nq[1], nq[2],
                    1 /* mixed */, tmp[0], tmp[1], tmp[2]);
            }
            else
            {
                /* First addition: just load the selected point. */
                smallfelem_expand(nq[0], tmp[0]);
                smallfelem_expand(nq[1], tmp[1]);
                smallfelem_expand(nq[2], tmp[2]);
                skip = 0;
            }
            /* second, look at the current position */
            bits = get_bit(g_scalar, i + 192) << 3;
            bits |= get_bit(g_scalar, i + 128) << 2;
            bits |= get_bit(g_scalar, i + 64) << 1;
            bits |= get_bit(g_scalar, i);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[0], tmp);
            point_add(nq[0], nq[1], nq[2],
                nq[0], nq[1], nq[2],
                1 /* mixed */, tmp[0], tmp[1], tmp[2]);
        }
        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0))
        {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num)
            {
                /* Take a 6-bit window (5 new bits plus one bit of
                 * overlap) for signed-digit recoding. */
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
                /* select the point to add or subtract, in constant time */
                select_point(digit, 17, pre_comp[num], tmp);
                smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative point */
                /* In constant time, keep +Y when sign == 0, -Y when
                 * sign == 1 (the mask is sign - 1). */
                copy_small_conditional(ftmp, tmp[1], (((limb) sign) - 1));
                felem_contract(tmp[1], ftmp);
                if (!skip)
                {
                    point_add(nq[0], nq[1], nq[2],
                        nq[0], nq[1], nq[2],
                        mixed, tmp[0], tmp[1], tmp[2]);
                }
                else
                {
                    smallfelem_expand(nq[0], tmp[0]);
                    smallfelem_expand(nq[1], tmp[1]);
                    smallfelem_expand(nq[2], tmp[2]);
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
  1419. /* Precomputation for the group generator. */
typedef struct {
    /* Two 16-entry tables of (x, y, z) generator multiples, same layout as
     * the static gmul table above; zeroed at creation (see
     * nistp256_pre_comp_new) and presumably filled by the precompute code. */
    smallfelem g_pre_comp[2][16][3];
    /* reference count, adjusted via CRYPTO_add under
     * CRYPTO_LOCK_EC_PRE_COMP (see the dup/free helpers below) */
    int references;
} NISTP256_PRE_COMP;
/* EC_GFp_nistp256_method returns the EC_METHOD dispatch table for this
 * optimised P-256 implementation. The initialiser is positional (struct
 * ec_method_st order); slots left as 0 are labelled with the operation
 * they correspond to. */
const EC_METHOD *EC_GFp_nistp256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_nistp256_group_init,
        ec_GFp_simple_group_finish,
        ec_GFp_simple_group_clear_finish,
        ec_GFp_nist_group_copy,
        ec_GFp_nistp256_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ec_GFp_nistp256_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */,
        0 /* point2oct */,
        0 /* oct2point */,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ec_GFp_nistp256_points_mul,
        ec_GFp_nistp256_precompute_mult,
        ec_GFp_nistp256_have_precompute_mult,
        ec_GFp_nist_field_mul,
        ec_GFp_nist_field_sqr,
        0 /* field_div */,
        0 /* field_encode */,
        0 /* field_decode */,
        0 /* field_set_to_one */ };
    return &ret;
}
  1468. /******************************************************************************/
  1469. /* FUNCTIONS TO MANAGE PRECOMPUTATION
  1470. */
  1471. static NISTP256_PRE_COMP *nistp256_pre_comp_new()
  1472. {
  1473. NISTP256_PRE_COMP *ret = NULL;
  1474. ret = (NISTP256_PRE_COMP *) OPENSSL_malloc(sizeof *ret);
  1475. if (!ret)
  1476. {
  1477. ECerr(EC_F_NISTP256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  1478. return ret;
  1479. }
  1480. memset(ret->g_pre_comp, 0, sizeof(ret->g_pre_comp));
  1481. ret->references = 1;
  1482. return ret;
  1483. }
  1484. static void *nistp256_pre_comp_dup(void *src_)
  1485. {
  1486. NISTP256_PRE_COMP *src = src_;
  1487. /* no need to actually copy, these objects never change! */
  1488. CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
  1489. return src_;
  1490. }
  1491. static void nistp256_pre_comp_free(void *pre_)
  1492. {
  1493. int i;
  1494. NISTP256_PRE_COMP *pre = pre_;
  1495. if (!pre)
  1496. return;
  1497. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1498. if (i > 0)
  1499. return;
  1500. OPENSSL_free(pre);
  1501. }
  1502. static void nistp256_pre_comp_clear_free(void *pre_)
  1503. {
  1504. int i;
  1505. NISTP256_PRE_COMP *pre = pre_;
  1506. if (!pre)
  1507. return;
  1508. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1509. if (i > 0)
  1510. return;
  1511. OPENSSL_cleanse(pre, sizeof *pre);
  1512. OPENSSL_free(pre);
  1513. }
  1514. /******************************************************************************/
  1515. /* OPENSSL EC_METHOD FUNCTIONS
  1516. */
  1517. int ec_GFp_nistp256_group_init(EC_GROUP *group)
  1518. {
  1519. int ret;
  1520. ret = ec_GFp_simple_group_init(group);
  1521. group->a_is_minus3 = 1;
  1522. return ret;
  1523. }
/* Set the curve parameters of |group|.  Only the canonical NIST P-256
 * parameters are accepted: |p|, |a| and |b| are compared against the
 * built-in constants and EC_R_WRONG_CURVE_PARAMETERS is raised on any
 * mismatch.  On success the fast NIST modular reduction is installed.
 * Returns 1 on success, 0 on error. */
int ec_GFp_nistp256_group_set_curve(EC_GROUP *group, const BIGNUM *p,
	const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
	{
	int ret = 0;
	BN_CTX *new_ctx = NULL;	/* only set if we allocated the ctx ourselves */
	BIGNUM *curve_p, *curve_a, *curve_b;

	if (ctx == NULL)
		if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
	BN_CTX_start(ctx);
	if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
		((curve_a = BN_CTX_get(ctx)) == NULL) ||
		((curve_b = BN_CTX_get(ctx)) == NULL)) goto err;
	/* load the canonical P-256 parameters for comparison */
	BN_bin2bn(nistp256_curve_params[0], sizeof(felem_bytearray), curve_p);
	BN_bin2bn(nistp256_curve_params[1], sizeof(felem_bytearray), curve_a);
	BN_bin2bn(nistp256_curve_params[2], sizeof(felem_bytearray), curve_b);
	if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) ||
		(BN_cmp(curve_b, b)))
		{
		ECerr(EC_F_EC_GFP_NISTP256_GROUP_SET_CURVE,
			EC_R_WRONG_CURVE_PARAMETERS);
		goto err;
		}
	/* use the specialized NIST reduction for this field */
	group->field_mod_func = BN_nist_mod_256;
	ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
err:
	BN_CTX_end(ctx);
	if (new_ctx != NULL)
		BN_CTX_free(new_ctx);
	return ret;
	}
/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns
 * (X', Y') = (X/Z^2, Y/Z^3).  Either of |x| and |y| may be NULL if the
 * caller does not need that coordinate.  Returns 1 on success, 0 on
 * error (point at infinity, or a BN/felem conversion failure). */
int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group,
	const EC_POINT *point, BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
	{
	felem z1, z2, x_in, y_in;
	smallfelem x_out, y_out;
	longfelem tmp;

	/* the point at infinity has no affine representation */
	if (EC_POINT_is_at_infinity(group, point))
		{
		ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
			EC_R_POINT_AT_INFINITY);
		return 0;
		}
	if ((!BN_to_felem(x_in, &point->X)) || (!BN_to_felem(y_in, &point->Y)) ||
		(!BN_to_felem(z1, &point->Z))) return 0;
	felem_inv(z2, z1);	/* z2 = Z^-1 */
	felem_square(tmp, z2); felem_reduce(z1, tmp);	/* z1 = Z^-2 */
	felem_mul(tmp, x_in, z1); felem_reduce(x_in, tmp);	/* x_in = X/Z^2 */
	felem_contract(x_out, x_in);
	if (x != NULL)
		{
		if (!smallfelem_to_BN(x, x_out)) {
			ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
				ERR_R_BN_LIB);
			return 0;
		}
		}
	felem_mul(tmp, z1, z2); felem_reduce(z1, tmp);	/* z1 = Z^-3 */
	felem_mul(tmp, y_in, z1); felem_reduce(y_in, tmp);	/* y_in = Y/Z^3 */
	felem_contract(y_out, y_in);
	if (y != NULL)
		{
		if (!smallfelem_to_BN(y, y_out))
			{
			ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
				ERR_R_BN_LIB);
			return 0;
			}
		}
	return 1;
	}
/* Convert |num| Jacobian points to affine form in place, using the shared
 * batched-inversion helper.  |tmp_smallfelems| must provide scratch space
 * for num+1 field elements.
 * Runs in constant time, unless an input is the point at infinity
 * (which normally shouldn't happen). */
static void make_points_affine(size_t num, smallfelem points[/* num */][3], smallfelem tmp_smallfelems[/* num+1 */])
	{
	/* parameterize the generic helper with the P-256 small-field
	 * operations via function-pointer casts */
	ec_GFp_nistp_points_make_affine_internal(
		num,
		points,
		sizeof(smallfelem),
		tmp_smallfelems,
		(void (*)(void *)) smallfelem_one,
		(int (*)(const void *)) smallfelem_is_zero_int,
		(void (*)(void *, const void *)) smallfelem_assign,
		(void (*)(void *, const void *)) smallfelem_square_contract,
		(void (*)(void *, const void *, const void *)) smallfelem_mul_contract,
		(void (*)(void *, const void *)) smallfelem_inv_contract,
		(void (*)(void *, const void *)) smallfelem_assign /* nothing to contract */);
	}
/* Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL values
 * Result is stored in r (r can equal one of the inputs).
 * Builds a 17-entry multiple table per input point, then delegates the
 * combined multiplication to batch_mul.  Generator precomputation (from
 * ec_GFp_nistp256_precompute_mult, or the built-in gmul table) is used
 * when it matches group->generator; otherwise the generator is treated
 * as an ordinary point.  Returns 1 on success, 0 on error. */
int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
	const BIGNUM *scalar, size_t num, const EC_POINT *points[],
	const BIGNUM *scalars[], BN_CTX *ctx)
	{
	int ret = 0;
	int j;
	int mixed = 0;	/* nonzero => tables are converted to affine first */
	BN_CTX *new_ctx = NULL;	/* only set if we allocated the ctx ourselves */
	BIGNUM *x, *y, *z, *tmp_scalar;
	felem_bytearray g_secret;	/* little-endian generator scalar */
	felem_bytearray *secrets = NULL;	/* little-endian per-point scalars */
	smallfelem (*pre_comp)[17][3] = NULL;	/* 0..16 multiples of each point */
	smallfelem *tmp_smallfelems = NULL;	/* scratch for make_points_affine */
	felem_bytearray tmp;
	unsigned i, num_bytes;
	int have_pre_comp = 0;
	size_t num_points = num;
	smallfelem x_in, y_in, z_in;
	felem x_out, y_out, z_out;
	NISTP256_PRE_COMP *pre = NULL;
	const smallfelem (*g_pre_comp)[16][3] = NULL;
	EC_POINT *generator = NULL;
	const EC_POINT *p = NULL;
	const BIGNUM *p_scalar = NULL;

	if (ctx == NULL)
		if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
	BN_CTX_start(ctx);
	if (((x = BN_CTX_get(ctx)) == NULL) ||
		((y = BN_CTX_get(ctx)) == NULL) ||
		((z = BN_CTX_get(ctx)) == NULL) ||
		((tmp_scalar = BN_CTX_get(ctx)) == NULL))
		goto err;
	if (scalar != NULL)
		{
		pre = EC_EX_DATA_get_data(group->extra_data,
			nistp256_pre_comp_dup, nistp256_pre_comp_free,
			nistp256_pre_comp_clear_free);
		if (pre)
			/* we have precomputation, try to use it */
			g_pre_comp = (const smallfelem (*)[16][3]) pre->g_pre_comp;
		else
			/* try to use the standard precomputation */
			g_pre_comp = &gmul[0];
		generator = EC_POINT_new(group);
		if (generator == NULL)
			goto err;
		/* get the generator from precomputation */
		if (!smallfelem_to_BN(x, g_pre_comp[0][1][0]) ||
			!smallfelem_to_BN(y, g_pre_comp[0][1][1]) ||
			!smallfelem_to_BN(z, g_pre_comp[0][1][2]))
			{
			ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
			goto err;
			}
		if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
			generator, x, y, z, ctx))
			goto err;
		if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
			/* precomputation matches generator */
			have_pre_comp = 1;
		else
			/* we don't have valid precomputation:
			 * treat the generator as a random point */
			num_points++;
		}
	if (num_points > 0)
		{
		if (num_points >= 3)
			{
			/* unless we precompute multiples for just one or two points,
			 * converting those into affine form is time well spent */
			mixed = 1;
			}
		secrets = OPENSSL_malloc(num_points * sizeof(felem_bytearray));
		pre_comp = OPENSSL_malloc(num_points * 17 * 3 * sizeof(smallfelem));
		if (mixed)
			tmp_smallfelems = OPENSSL_malloc((num_points * 17 + 1) * sizeof(smallfelem));
		if ((secrets == NULL) || (pre_comp == NULL) || (mixed && (tmp_smallfelems == NULL)))
			{
			ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
			goto err;
			}
		/* we treat NULL scalars as 0, and NULL points as points at infinity,
		 * i.e., they contribute nothing to the linear combination */
		memset(secrets, 0, num_points * sizeof(felem_bytearray));
		memset(pre_comp, 0, num_points * 17 * 3 * sizeof(smallfelem));
		for (i = 0; i < num_points; ++i)
			{
			if (i == num)
				/* we didn't have a valid precomputation, so we pick
				 * the generator */
				{
				p = EC_GROUP_get0_generator(group);
				p_scalar = scalar;
				}
			else
				/* the i^th point */
				{
				p = points[i];
				p_scalar = scalars[i];
				}
			if ((p_scalar != NULL) && (p != NULL))
				{
				/* reduce scalar to 0 <= scalar < 2^256 */
				if ((BN_num_bits(p_scalar) > 256) || (BN_is_negative(p_scalar)))
					{
					/* this is an unusual input, and we don't guarantee
					 * constant-timeness */
					if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx))
						{
						ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
						goto err;
						}
					num_bytes = BN_bn2bin(tmp_scalar, tmp);
					}
				else
					num_bytes = BN_bn2bin(p_scalar, tmp);
				/* BN_bn2bin is big-endian; the table walk wants
				 * little-endian bytes */
				flip_endian(secrets[i], tmp, num_bytes);
				/* precompute multiples */
				if ((!BN_to_felem(x_out, &p->X)) ||
					(!BN_to_felem(y_out, &p->Y)) ||
					(!BN_to_felem(z_out, &p->Z))) goto err;
				felem_shrink(pre_comp[i][1][0], x_out);
				felem_shrink(pre_comp[i][1][1], y_out);
				felem_shrink(pre_comp[i][1][2], z_out);
				/* fill slots 2..16: even j by doubling j/2,
				 * odd j by adding 1*P to j-1 */
				for (j = 2; j <= 16; ++j)
					{
					if (j & 1)
						{
						point_add_small(
							pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
							pre_comp[i][1][0], pre_comp[i][1][1], pre_comp[i][1][2],
							pre_comp[i][j-1][0], pre_comp[i][j-1][1], pre_comp[i][j-1][2]);
						}
					else
						{
						point_double_small(
							pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
							pre_comp[i][j/2][0], pre_comp[i][j/2][1], pre_comp[i][j/2][2]);
						}
					}
				}
			}
		if (mixed)
			make_points_affine(num_points * 17, pre_comp[0], tmp_smallfelems);
		}
	/* the scalar for the generator */
	if ((scalar != NULL) && (have_pre_comp))
		{
		memset(g_secret, 0, sizeof(g_secret));
		/* reduce scalar to 0 <= scalar < 2^256 */
		if ((BN_num_bits(scalar) > 256) || (BN_is_negative(scalar)))
			{
			/* this is an unusual input, and we don't guarantee
			 * constant-timeness */
			if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx))
				{
				ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
				goto err;
				}
			num_bytes = BN_bn2bin(tmp_scalar, tmp);
			}
		else
			num_bytes = BN_bn2bin(scalar, tmp);
		flip_endian(g_secret, tmp, num_bytes);
		/* do the multiplication with generator precomputation*/
		batch_mul(x_out, y_out, z_out,
			(const felem_bytearray (*)) secrets, num_points,
			g_secret,
			mixed, (const smallfelem (*)[17][3]) pre_comp,
			g_pre_comp);
		}
	else
		/* do the multiplication without generator precomputation */
		batch_mul(x_out, y_out, z_out,
			(const felem_bytearray (*)) secrets, num_points,
			NULL, mixed, (const smallfelem (*)[17][3]) pre_comp, NULL);
	/* reduce the output to its unique minimal representation */
	felem_contract(x_in, x_out);
	felem_contract(y_in, y_out);
	felem_contract(z_in, z_out);
	if ((!smallfelem_to_BN(x, x_in)) || (!smallfelem_to_BN(y, y_in)) ||
		(!smallfelem_to_BN(z, z_in)))
		{
		ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
		goto err;
		}
	ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
err:
	BN_CTX_end(ctx);
	if (generator != NULL)
		EC_POINT_free(generator);
	if (new_ctx != NULL)
		BN_CTX_free(new_ctx);
	if (secrets != NULL)
		OPENSSL_free(secrets);
	if (pre_comp != NULL)
		OPENSSL_free(pre_comp);
	if (tmp_smallfelems != NULL)
		OPENSSL_free(tmp_smallfelems);
	return ret;
	}
/* Precompute multiples of the group generator and attach them to |group|
 * as extra data, replacing any previous precomputation.  If the group's
 * generator is the standard P-256 generator, the built-in gmul table is
 * copied instead of recomputing.  Returns 1 on success, 0 on error. */
int ec_GFp_nistp256_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
	{
	int ret = 0;
	NISTP256_PRE_COMP *pre = NULL;
	int i, j;
	BN_CTX *new_ctx = NULL;	/* only set if we allocated the ctx ourselves */
	BIGNUM *x, *y;
	EC_POINT *generator = NULL;
	smallfelem tmp_smallfelems[32];	/* scratch for make_points_affine(31, ...) */
	felem x_tmp, y_tmp, z_tmp;

	/* throw away old precomputation */
	EC_EX_DATA_free_data(&group->extra_data, nistp256_pre_comp_dup,
		nistp256_pre_comp_free, nistp256_pre_comp_clear_free);
	if (ctx == NULL)
		if ((ctx = new_ctx = BN_CTX_new()) == NULL) return 0;
	BN_CTX_start(ctx);
	if (((x = BN_CTX_get(ctx)) == NULL) ||
		((y = BN_CTX_get(ctx)) == NULL))
		goto err;
	/* get the generator */
	if (group->generator == NULL) goto err;
	generator = EC_POINT_new(group);
	if (generator == NULL)
		goto err;
	/* build the standard P-256 generator for comparison */
	BN_bin2bn(nistp256_curve_params[3], sizeof (felem_bytearray), x);
	BN_bin2bn(nistp256_curve_params[4], sizeof (felem_bytearray), y);
	if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
		goto err;
	if ((pre = nistp256_pre_comp_new()) == NULL)
		goto err;
	/* if the generator is the standard one, use built-in precomputation */
	if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
		{
		memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
		ret = 1;
		goto err;	/* not an error: jump to the shared cleanup path */
		}
	/* table slot [0][1] holds 1*G */
	if ((!BN_to_felem(x_tmp, &group->generator->X)) ||
		(!BN_to_felem(y_tmp, &group->generator->Y)) ||
		(!BN_to_felem(z_tmp, &group->generator->Z)))
		goto err;
	felem_shrink(pre->g_pre_comp[0][1][0], x_tmp);
	felem_shrink(pre->g_pre_comp[0][1][1], y_tmp);
	felem_shrink(pre->g_pre_comp[0][1][2], z_tmp);
	/* compute 2^64*G, 2^128*G, 2^192*G for the first table,
	 * 2^32*G, 2^96*G, 2^160*G, 2^224*G for the second one
	 */
	for (i = 1; i <= 8; i <<= 1)
		{
		/* 32 doublings (1 here + 31 below) take table-0 entry i
		 * to table-1 entry i */
		point_double_small(
			pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2],
			pre->g_pre_comp[0][i][0], pre->g_pre_comp[0][i][1], pre->g_pre_comp[0][i][2]);
		for (j = 0; j < 31; ++j)
			{
			point_double_small(
				pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2],
				pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
			}
		if (i == 8)
			break;
		/* another 32 doublings take table-1 entry i to table-0 entry 2i */
		point_double_small(
			pre->g_pre_comp[0][2*i][0], pre->g_pre_comp[0][2*i][1], pre->g_pre_comp[0][2*i][2],
			pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
		for (j = 0; j < 31; ++j)
			{
			point_double_small(
				pre->g_pre_comp[0][2*i][0], pre->g_pre_comp[0][2*i][1], pre->g_pre_comp[0][2*i][2],
				pre->g_pre_comp[0][2*i][0], pre->g_pre_comp[0][2*i][1], pre->g_pre_comp[0][2*i][2]);
			}
		}
	for (i = 0; i < 2; i++)
		{
		/* g_pre_comp[i][0] is the point at infinity */
		memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
		/* the remaining multiples */
		/* 2^64*G + 2^128*G resp. 2^96*G + 2^160*G */
		point_add_small(
			pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1], pre->g_pre_comp[i][6][2],
			pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
			pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1], pre->g_pre_comp[i][2][2]);
		/* 2^64*G + 2^192*G resp. 2^96*G + 2^224*G */
		point_add_small(
			pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1], pre->g_pre_comp[i][10][2],
			pre->g_pre_comp[i][8][0], pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
			pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1], pre->g_pre_comp[i][2][2]);
		/* 2^128*G + 2^192*G resp. 2^160*G + 2^224*G */
		point_add_small(
			pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
			pre->g_pre_comp[i][8][0], pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
			pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2]);
		/* 2^64*G + 2^128*G + 2^192*G resp. 2^96*G + 2^160*G + 2^224*G */
		point_add_small(
			pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1], pre->g_pre_comp[i][14][2],
			pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
			pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1], pre->g_pre_comp[i][2][2]);
		for (j = 1; j < 8; ++j)
			{
			/* odd multiples: add G resp. 2^32*G */
			point_add_small(
				pre->g_pre_comp[i][2*j+1][0], pre->g_pre_comp[i][2*j+1][1], pre->g_pre_comp[i][2*j+1][2],
				pre->g_pre_comp[i][2*j][0], pre->g_pre_comp[i][2*j][1], pre->g_pre_comp[i][2*j][2],
				pre->g_pre_comp[i][1][0], pre->g_pre_comp[i][1][1], pre->g_pre_comp[i][1][2]);
			}
		}
	/* convert all 31 non-infinity entries to affine form */
	make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_smallfelems);
	if (!EC_EX_DATA_set_data(&group->extra_data, pre, nistp256_pre_comp_dup,
		nistp256_pre_comp_free, nistp256_pre_comp_clear_free))
		goto err;
	ret = 1;
	pre = NULL;	/* ownership transferred to group->extra_data */
err:
	BN_CTX_end(ctx);
	if (generator != NULL)
		EC_POINT_free(generator);
	if (new_ctx != NULL)
		BN_CTX_free(new_ctx);
	if (pre)
		nistp256_pre_comp_free(pre);
	return ret;
	}
  1937. int ec_GFp_nistp256_have_precompute_mult(const EC_GROUP *group)
  1938. {
  1939. if (EC_EX_DATA_get_data(group->extra_data, nistp256_pre_comp_dup,
  1940. nistp256_pre_comp_free, nistp256_pre_comp_clear_free)
  1941. != NULL)
  1942. return 1;
  1943. else
  1944. return 0;
  1945. }
  1946. #else
  1947. static void *dummy=&dummy;
  1948. #endif