/* crypto/ec/ecp_nistp256.c */
/*
 * Written by Adam Langley (Google) for the OpenSSL project
 */
/* Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 *
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * A 64-bit implementation of the NIST P-256 elliptic curve point multiplication
 *
 * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
 * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
 * work which got its smarts from Daniel J. Bernstein's work on the same.
 */
  27. #include <openssl/opensslconf.h>
  28. #ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
  29. # ifndef OPENSSL_SYS_VMS
  30. # include <stdint.h>
  31. # else
  32. # include <inttypes.h>
  33. # endif
  34. # include <string.h>
  35. # include <openssl/err.h>
  36. # include "ec_lcl.h"
# if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
/* even with gcc, the typedef won't work for 32-bit platforms */
typedef __uint128_t uint128_t;  /* nonstandard; implemented by gcc on 64-bit
                                 * platforms */
typedef __int128_t int128_t;
# else
# error "Need GCC 3.1 or later to define type uint128_t"
# endif

/* Shorthand names for the fixed-width integer types used throughout. */
typedef uint8_t u8;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int64_t s64;

/*
 * The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
 * can serialise an element of this field into 32 bytes. We call this an
 * felem_bytearray.
 */
typedef u8 felem_bytearray[32];
/*
 * These are the parameters of P256, taken from FIPS 186-3, page 86. These
 * values are big-endian. In order: the prime p, the curve coefficients
 * a and b, and the base point coordinates x and y.
 */
static const felem_bytearray nistp256_curve_params[5] = {
    {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* p */
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
    {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, /* a = -3 */
     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc},
    {0x5a, 0xc6, 0x35, 0xd8, 0xaa, 0x3a, 0x93, 0xe7, /* b */
     0xb3, 0xeb, 0xbd, 0x55, 0x76, 0x98, 0x86, 0xbc,
     0x65, 0x1d, 0x06, 0xb0, 0xcc, 0x53, 0xb0, 0xf6,
     0x3b, 0xce, 0x3c, 0x3e, 0x27, 0xd2, 0x60, 0x4b},
    {0x6b, 0x17, 0xd1, 0xf2, 0xe1, 0x2c, 0x42, 0x47, /* x */
     0xf8, 0xbc, 0xe6, 0xe5, 0x63, 0xa4, 0x40, 0xf2,
     0x77, 0x03, 0x7d, 0x81, 0x2d, 0xeb, 0x33, 0xa0,
     0xf4, 0xa1, 0x39, 0x45, 0xd8, 0x98, 0xc2, 0x96},
    {0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, /* y */
     0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0x0f, 0x9e, 0x16,
     0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce,
     0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5}
};
/*-
 * The representation of field elements.
 * ------------------------------------
 *
 * We represent field elements with either four 128-bit values, eight 128-bit
 * values, or four 64-bit values. The field element represented is:
 *   v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
 * or:
 *   v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[7]*2^448 (mod p)
 *
 * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
 * apart, but are 128-bits wide, the most significant bits of each limb overlap
 * with the least significant bits of the next.
 *
 * A field element with four limbs is an 'felem'. One with eight limbs is a
 * 'longfelem'
 *
 * A field element with four, 64-bit values is called a 'smallfelem'. Small
 * values are used as intermediate values before multiplication.
 */
# define NLIMBS 4

typedef uint128_t limb;
typedef limb felem[NLIMBS];
typedef limb longfelem[NLIMBS * 2];
typedef u64 smallfelem[NLIMBS];

/* This is the value of the prime as four 64-bit words, little-endian. */
static const u64 kPrime[4] =
    { 0xfffffffffffffffful, 0xffffffff, 0, 0xffffffff00000001ul };
/* bottom63bits masks off the top bit of a 64-bit word. */
static const u64 bottom63bits = 0x7ffffffffffffffful;
  110. /*
  111. * bin32_to_felem takes a little-endian byte array and converts it into felem
  112. * form. This assumes that the CPU is little-endian.
  113. */
  114. static void bin32_to_felem(felem out, const u8 in[32])
  115. {
  116. out[0] = *((u64 *)&in[0]);
  117. out[1] = *((u64 *)&in[8]);
  118. out[2] = *((u64 *)&in[16]);
  119. out[3] = *((u64 *)&in[24]);
  120. }
  121. /*
  122. * smallfelem_to_bin32 takes a smallfelem and serialises into a little
  123. * endian, 32 byte array. This assumes that the CPU is little-endian.
  124. */
  125. static void smallfelem_to_bin32(u8 out[32], const smallfelem in)
  126. {
  127. *((u64 *)&out[0]) = in[0];
  128. *((u64 *)&out[8]) = in[1];
  129. *((u64 *)&out[16]) = in[2];
  130. *((u64 *)&out[24]) = in[3];
  131. }
  132. /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
  133. static void flip_endian(u8 *out, const u8 *in, unsigned len)
  134. {
  135. unsigned i;
  136. for (i = 0; i < len; ++i)
  137. out[i] = in[len - 1 - i];
  138. }
/*-
 * BN_to_felem converts an OpenSSL BIGNUM into an felem.
 *
 * Returns 1 on success, or 0 (raising EC_R_BIGNUM_OUT_OF_RANGE) when |bn| is
 * negative or needs more than 32 bytes.
 */
static int BN_to_felem(felem out, const BIGNUM *bn)
{
    felem_bytearray b_in;
    felem_bytearray b_out;
    unsigned num_bytes;

    /* BN_bn2bin eats leading zeroes */
    memset(b_out, 0, sizeof(b_out));
    num_bytes = BN_num_bytes(bn);
    if (num_bytes > sizeof(b_out)) {
        ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
        return 0;
    }
    if (BN_is_negative(bn)) {
        ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
        return 0;
    }
    /*
     * BN_bn2bin writes big-endian bytes; flip into the little-endian layout
     * that bin32_to_felem expects.
     */
    num_bytes = BN_bn2bin(bn, b_in);
    flip_endian(b_out, b_in, num_bytes);
    bin32_to_felem(out, b_out);
    return 1;
}
/*
 * smallfelem_to_BN converts a smallfelem into an OpenSSL BIGNUM. Returns
 * BN_bin2bn's result: |out| on success (a fresh BIGNUM if |out| was NULL),
 * NULL on failure.
 */
static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in)
{
    felem_bytearray b_in, b_out;
    /* Serialise little-endian, then flip to the big-endian order BN_bin2bn
     * consumes. */
    smallfelem_to_bin32(b_in, in);
    flip_endian(b_out, b_in, sizeof(b_out));
    return BN_bin2bn(b_out, sizeof(b_out), out);
}
  169. /*-
  170. * Field operations
  171. * ----------------
  172. */
  173. static void smallfelem_one(smallfelem out)
  174. {
  175. out[0] = 1;
  176. out[1] = 0;
  177. out[2] = 0;
  178. out[3] = 0;
  179. }
  180. static void smallfelem_assign(smallfelem out, const smallfelem in)
  181. {
  182. out[0] = in[0];
  183. out[1] = in[1];
  184. out[2] = in[2];
  185. out[3] = in[3];
  186. }
  187. static void felem_assign(felem out, const felem in)
  188. {
  189. out[0] = in[0];
  190. out[1] = in[1];
  191. out[2] = in[2];
  192. out[3] = in[3];
  193. }
  194. /* felem_sum sets out = out + in. */
  195. static void felem_sum(felem out, const felem in)
  196. {
  197. out[0] += in[0];
  198. out[1] += in[1];
  199. out[2] += in[2];
  200. out[3] += in[3];
  201. }
  202. /* felem_small_sum sets out = out + in. */
  203. static void felem_small_sum(felem out, const smallfelem in)
  204. {
  205. out[0] += in[0];
  206. out[1] += in[1];
  207. out[2] += in[2];
  208. out[3] += in[3];
  209. }
  210. /* felem_scalar sets out = out * scalar */
  211. static void felem_scalar(felem out, const u64 scalar)
  212. {
  213. out[0] *= scalar;
  214. out[1] *= scalar;
  215. out[2] *= scalar;
  216. out[3] *= scalar;
  217. }
  218. /* longfelem_scalar sets out = out * scalar */
  219. static void longfelem_scalar(longfelem out, const u64 scalar)
  220. {
  221. out[0] *= scalar;
  222. out[1] *= scalar;
  223. out[2] *= scalar;
  224. out[3] *= scalar;
  225. out[4] *= scalar;
  226. out[5] *= scalar;
  227. out[6] *= scalar;
  228. out[7] *= scalar;
  229. }
# define two105m41m9 (((limb)1) << 105) - (((limb)1) << 41) - (((limb)1) << 9)
# define two105 (((limb)1) << 105)
# define two105m41p9 (((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9)
/* zero105 is 0 mod p */
static const felem zero105 =
    { two105m41m9, two105, two105m41p9, two105m41p9 };

/*-
 * smallfelem_neg sets |out| to |-small|
 * On exit:
 *   out[i] <= 2^105  (each limb is zero105[i] - small[i] with
 *   zero105[i] <= 2^105, and small[i] < 2^64 < zero105[i] so there is no
 *   underflow)
 */
static void smallfelem_neg(felem out, const smallfelem small)
{
    /* In order to prevent underflow, we subtract from 0 mod p. */
    out[0] = zero105[0] - small[0];
    out[1] = zero105[1] - small[1];
    out[2] = zero105[2] - small[2];
    out[3] = zero105[3] - small[3];
}
/*-
 * felem_diff subtracts |in| from |out|
 * On entry:
 *   in[i] < 2^104
 * On exit:
 *   out[i] < out[i] + 2^105
 */
static void felem_diff(felem out, const felem in)
{
    /*
     * In order to prevent underflow, we add 0 mod p before subtracting.
     * Every limb of zero105 exceeds 2^104, covering the entry bound on |in|.
     */
    out[0] += zero105[0];
    out[1] += zero105[1];
    out[2] += zero105[2];
    out[3] += zero105[3];

    out[0] -= in[0];
    out[1] -= in[1];
    out[2] -= in[2];
    out[3] -= in[3];
}
# define two107m43m11 (((limb)1) << 107) - (((limb)1) << 43) - (((limb)1) << 11)
# define two107 (((limb)1) << 107)
# define two107m43p11 (((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11)
/* zero107 is 0 mod p */
static const felem zero107 =
    { two107m43m11, two107, two107m43p11, two107m43p11 };

/*-
 * An alternative felem_diff for larger inputs |in|
 * felem_diff_zero107 subtracts |in| from |out|
 * On entry:
 *   in[i] < 2^106
 * On exit:
 *   out[i] < out[i] + 2^107
 */
static void felem_diff_zero107(felem out, const felem in)
{
    /*
     * In order to prevent underflow, we add 0 mod p before subtracting.
     * Every limb of zero107 exceeds 2^106, covering the entry bound on |in|.
     */
    out[0] += zero107[0];
    out[1] += zero107[1];
    out[2] += zero107[2];
    out[3] += zero107[3];

    out[0] -= in[0];
    out[1] -= in[1];
    out[2] -= in[2];
    out[3] -= in[3];
}
/*-
 * longfelem_diff subtracts |in| from |out|
 * On entry:
 *   in[i] < 7*2^67
 * On exit:
 *   out[i] < out[i] + 2^70 + 2^40
 */
static void longfelem_diff(longfelem out, const longfelem in)
{
    /*
     * These five constants are chosen so that, taken together, the eight
     * values added to |out| below form 0 mod p, and each is larger than the
     * entry bound on |in| so no limb can underflow.
     */
    static const limb two70m8p6 =
        (((limb) 1) << 70) - (((limb) 1) << 8) + (((limb) 1) << 6);
    static const limb two70p40 = (((limb) 1) << 70) + (((limb) 1) << 40);
    static const limb two70 = (((limb) 1) << 70);
    static const limb two70m40m38p6 =
        (((limb) 1) << 70) - (((limb) 1) << 40) - (((limb) 1) << 38) +
        (((limb) 1) << 6);
    static const limb two70m6 = (((limb) 1) << 70) - (((limb) 1) << 6);

    /* add 0 mod p to avoid underflow */
    out[0] += two70m8p6;
    out[1] += two70p40;
    out[2] += two70;
    out[3] += two70m40m38p6;
    out[4] += two70m6;
    out[5] += two70m6;
    out[6] += two70m6;
    out[7] += two70m6;

    /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */
    out[0] -= in[0];
    out[1] -= in[1];
    out[2] -= in[2];
    out[3] -= in[3];
    out[4] -= in[4];
    out[5] -= in[5];
    out[6] -= in[6];
    out[7] -= in[7];
}
# define two64m0 (((limb)1) << 64) - 1
# define two110p32m0 (((limb)1) << 110) + (((limb)1) << 32) - 1
# define two64m46 (((limb)1) << 64) - (((limb)1) << 46)
# define two64m32 (((limb)1) << 64) - (((limb)1) << 32)
/* zero110 is 0 mod p */
static const felem zero110 = { two64m0, two110p32m0, two64m46, two64m32 };

/*-
 * felem_shrink converts an felem into a smallfelem. The result isn't quite
 * minimal as the value may be greater than p.
 *
 * On entry:
 *   in[i] < 2^109
 * On exit:
 *   out[i] < 2^64
 *
 * NOTE(review): the mask arithmetic below right-shifts signed (s64) values,
 * relying on an arithmetic shift; that is implementation-defined in ISO C,
 * but this file already requires GCC (see the # error above), which defines
 * it as arithmetic.
 */
static void felem_shrink(smallfelem out, const felem in)
{
    felem tmp;
    u64 a, b, mask;
    s64 high, low;
    static const u64 kPrime3Test = 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */

    /* Carry 2->3 */
    tmp[3] = zero110[3] + in[3] + ((u64)(in[2] >> 64));
    /* tmp[3] < 2^110 */
    tmp[2] = zero110[2] + (u64)in[2];
    tmp[0] = zero110[0] + in[0];
    tmp[1] = zero110[1] + in[1];
    /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */

    /*
     * We perform two partial reductions where we eliminate the high-word of
     * tmp[3]. We don't update the other words till the end.
     */
    a = tmp[3] >> 64;           /* a < 2^46 */
    tmp[3] = (u64)tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb) a) << 32;
    /* tmp[3] < 2^79 */

    b = a;
    a = tmp[3] >> 64;           /* a < 2^15 */
    b += a;                     /* b < 2^46 + 2^15 < 2^47 */
    tmp[3] = (u64)tmp[3];
    tmp[3] -= a;
    tmp[3] += ((limb) a) << 32;
    /* tmp[3] < 2^64 + 2^47 */

    /*
     * This adjusts the other two words to complete the two partial
     * reductions.
     */
    tmp[0] += b;
    tmp[1] -= (((limb) b) << 32);

    /*
     * In order to make space in tmp[3] for the carry from 2 -> 3, we
     * conditionally subtract kPrime if tmp[3] is large enough.
     */
    high = tmp[3] >> 64;
    /* As tmp[3] < 2^65, high is either 1 or 0 */
    high <<= 63;
    high >>= 63;
    /*-
     * high is:
     *   all ones  if the high word of tmp[3] is 1
     *   all zeros if the high word of tmp[3] is 0
     */
    low = tmp[3];
    mask = low >> 63;
    /*-
     * mask is:
     *   all ones  if the MSB of low is 1
     *   all zeros if the MSB of low is 0
     */
    low &= bottom63bits;
    low -= kPrime3Test;
    /* if low was greater than kPrime3Test then the MSB is zero */
    low = ~low;
    low >>= 63;
    /*-
     * low is:
     *   all ones  if low was > kPrime3Test
     *   all zeros if low was <= kPrime3Test
     */
    mask = (mask & low) | high;
    tmp[0] -= mask & kPrime[0];
    tmp[1] -= mask & kPrime[1];
    /* kPrime[2] is zero, so omitted */
    tmp[3] -= mask & kPrime[3];
    /* tmp[3] < 2**64 - 2**32 + 1 */

    /* Finally, propagate carries so that every word fits in 64 bits. */
    tmp[1] += ((u64)(tmp[0] >> 64));
    tmp[0] = (u64)tmp[0];
    tmp[2] += ((u64)(tmp[1] >> 64));
    tmp[1] = (u64)tmp[1];
    tmp[3] += ((u64)(tmp[2] >> 64));
    tmp[2] = (u64)tmp[2];
    /* tmp[i] < 2^64 */

    out[0] = tmp[0];
    out[1] = tmp[1];
    out[2] = tmp[2];
    out[3] = tmp[3];
}
  429. /* smallfelem_expand converts a smallfelem to an felem */
  430. static void smallfelem_expand(felem out, const smallfelem in)
  431. {
  432. out[0] = in[0];
  433. out[1] = in[1];
  434. out[2] = in[2];
  435. out[3] = in[3];
  436. }
/*-
 * smallfelem_square sets |out| = |small|^2
 * On entry:
 *   small[i] < 2^64
 * On exit:
 *   out[i] < 7 * 2^64 < 2^67
 *
 * Schoolbook squaring: out[k] accumulates the low/high halves of every
 * product small[i]*small[j] with i+j == k. Cross terms (i != j) appear
 * twice in a square; they are doubled either by adding the low half twice
 * or by an `*= 2` placed so that only not-yet-doubled contributions are in
 * the accumulator. The statement order is load-bearing for that trick --
 * do not reorder.
 */
static void smallfelem_square(longfelem out, const smallfelem small)
{
    limb a;
    u64 high, low;

    /* small[0]^2 */
    a = ((uint128_t) small[0]) * small[0];
    low = a;
    high = a >> 64;
    out[0] = low;
    out[1] = high;

    /* 2 * small[0]*small[1]: low half added twice */
    a = ((uint128_t) small[0]) * small[1];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[1] += low;
    out[2] = high;

    /* 2 * small[0]*small[2]: the *= 2 doubles both halves so far */
    a = ((uint128_t) small[0]) * small[2];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[2] *= 2;
    out[3] = high;

    a = ((uint128_t) small[0]) * small[3];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] = high;

    /* 2 * (small[0]*small[3] + small[1]*small[2]) via the *= 2 below */
    a = ((uint128_t) small[1]) * small[2];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[3] *= 2;
    out[4] += high;

    /* small[1]^2 (not doubled; added after out[2]/out[3] were doubled) */
    a = ((uint128_t) small[1]) * small[1];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    /* 2 * small[1]*small[3] */
    a = ((uint128_t) small[1]) * small[3];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[4] *= 2;
    out[5] = high;

    /* 2 * small[2]*small[3]: high half added twice into out[6] */
    a = ((uint128_t) small[2]) * small[3];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[5] *= 2;
    out[6] = high;
    out[6] += high;

    /* small[2]^2 */
    a = ((uint128_t) small[2]) * small[2];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    /* small[3]^2 */
    a = ((uint128_t) small[3]) * small[3];
    low = a;
    high = a >> 64;
    out[6] += low;
    out[7] = high;
}
  505. /*-
  506. * felem_square sets |out| = |in|^2
  507. * On entry:
  508. * in[i] < 2^109
  509. * On exit:
  510. * out[i] < 7 * 2^64 < 2^67
  511. */
  512. static void felem_square(longfelem out, const felem in)
  513. {
  514. u64 small[4];
  515. felem_shrink(small, in);
  516. smallfelem_square(out, small);
  517. }
/*-
 * smallfelem_mul sets |out| = |small1| * |small2|
 * On entry:
 *   small1[i] < 2^64
 *   small2[i] < 2^64
 * On exit:
 *   out[i] < 7 * 2^64 < 2^67
 *
 * Schoolbook multiplication: for every pair (i, j), the 128-bit product
 * small1[i]*small2[j] is split into low/high 64-bit halves which are
 * accumulated into out[i+j] and out[i+j+1] respectively. The worst-case
 * limb collects 4 low halves and 3 high halves, hence the 7*2^64 bound.
 */
static void smallfelem_mul(longfelem out, const smallfelem small1,
                           const smallfelem small2)
{
    limb a;
    u64 high, low;

    /* products with i + j == 0 */
    a = ((uint128_t) small1[0]) * small2[0];
    low = a;
    high = a >> 64;
    out[0] = low;
    out[1] = high;

    /* products with i + j == 1 */
    a = ((uint128_t) small1[0]) * small2[1];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[2] = high;

    a = ((uint128_t) small1[1]) * small2[0];
    low = a;
    high = a >> 64;
    out[1] += low;
    out[2] += high;

    /* products with i + j == 2 */
    a = ((uint128_t) small1[0]) * small2[2];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] = high;

    a = ((uint128_t) small1[1]) * small2[1];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    a = ((uint128_t) small1[2]) * small2[0];
    low = a;
    high = a >> 64;
    out[2] += low;
    out[3] += high;

    /* products with i + j == 3 */
    a = ((uint128_t) small1[0]) * small2[3];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] = high;

    a = ((uint128_t) small1[1]) * small2[2];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    a = ((uint128_t) small1[2]) * small2[1];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    a = ((uint128_t) small1[3]) * small2[0];
    low = a;
    high = a >> 64;
    out[3] += low;
    out[4] += high;

    /* products with i + j == 4 */
    a = ((uint128_t) small1[1]) * small2[3];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] = high;

    a = ((uint128_t) small1[2]) * small2[2];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    a = ((uint128_t) small1[3]) * small2[1];
    low = a;
    high = a >> 64;
    out[4] += low;
    out[5] += high;

    /* products with i + j == 5 */
    a = ((uint128_t) small1[2]) * small2[3];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[6] = high;

    a = ((uint128_t) small1[3]) * small2[2];
    low = a;
    high = a >> 64;
    out[5] += low;
    out[6] += high;

    /* products with i + j == 6 */
    a = ((uint128_t) small1[3]) * small2[3];
    low = a;
    high = a >> 64;
    out[6] += low;
    out[7] = high;
}
  612. /*-
  613. * felem_mul sets |out| = |in1| * |in2|
  614. * On entry:
  615. * in1[i] < 2^109
  616. * in2[i] < 2^109
  617. * On exit:
  618. * out[i] < 7 * 2^64 < 2^67
  619. */
  620. static void felem_mul(longfelem out, const felem in1, const felem in2)
  621. {
  622. smallfelem small1, small2;
  623. felem_shrink(small1, in1);
  624. felem_shrink(small2, in2);
  625. smallfelem_mul(out, small1, small2);
  626. }
  627. /*-
  628. * felem_small_mul sets |out| = |small1| * |in2|
  629. * On entry:
  630. * small1[i] < 2^64
  631. * in2[i] < 2^109
  632. * On exit:
  633. * out[i] < 7 * 2^64 < 2^67
  634. */
  635. static void felem_small_mul(longfelem out, const smallfelem small1,
  636. const felem in2)
  637. {
  638. smallfelem small2;
  639. felem_shrink(small2, in2);
  640. smallfelem_mul(out, small1, small2);
  641. }
# define two100m36m4 (((limb)1) << 100) - (((limb)1) << 36) - (((limb)1) << 4)
# define two100 (((limb)1) << 100)
# define two100m36p4 (((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4)
/* zero100 is 0 mod p */
static const felem zero100 =
    { two100m36m4, two100, two100m36p4, two100m36p4 };

/*-
 * Internal function for the different flavours of felem_reduce.
 * felem_reduce_ reduces the higher coefficients in[4]-in[7].
 * On entry:
 *   out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
 *   out[1] >= in[7] + 2^32*in[4]
 *   out[2] >= in[5] + 2^32*in[5]
 *   out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
 * On exit:
 *   out[0] <= out[0] + in[4] + 2^32*in[5]
 *   out[1] <= out[1] + in[5] + 2^33*in[6]
 *   out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
 *   out[3] <= out[3] + 2^32*in[4] + 3*in[7]
 *
 * The bracketed comments below give, for the power of two carried by each
 * high limb, its expansion mod p as [(bit position, coefficient), ...];
 * e.g. 2^256 = 2^224 - 2^192 - 2^96 + 2^0 (mod p), matching
 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1.
 */
static void felem_reduce_(felem out, const longfelem in)
{
    int128_t c;

    /* combine common terms from below */
    c = in[4] + (in[5] << 32);
    out[0] += c;
    out[3] -= c;

    c = in[5] - in[7];
    out[1] += c;
    out[2] -= c;

    /* the remaining terms */
    /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */
    out[1] -= (in[4] << 32);
    out[3] += (in[4] << 32);

    /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */
    out[2] -= (in[5] << 32);

    /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */
    out[0] -= in[6];
    out[0] -= (in[6] << 32);
    out[1] += (in[6] << 33);
    out[2] += (in[6] * 2);
    out[3] -= (in[6] << 32);

    /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */
    out[0] -= in[7];
    out[0] -= (in[7] << 32);
    out[2] += (in[7] << 33);
    out[3] += (in[7] * 3);
}
/*-
 * felem_reduce converts a longfelem into an felem.
 * To be called directly after felem_square or felem_mul.
 * On entry:
 *   in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
 *   in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2^64
 * On exit:
 *   out[i] < 2^101
 */
static void felem_reduce(felem out, const longfelem in)
{
    /*
     * Add zero100 (a multiple of p) up front so the signed adjustments in
     * felem_reduce_ cannot take any limb below zero; see the bounds below.
     */
    out[0] = zero100[0] + in[0];
    out[1] = zero100[1] + in[1];
    out[2] = zero100[2] + in[2];
    out[3] = zero100[3] + in[3];

    felem_reduce_(out, in);

    /*-
     * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
     * out[1] > 2^100 - 2^64 - 7*2^96 > 0
     * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
     * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
     *
     * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
     * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
     * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
     * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
     */
}
  718. /*-
  719. * felem_reduce_zero105 converts a larger longfelem into an felem.
  720. * On entry:
  721. * in[0] < 2^71
  722. * On exit:
  723. * out[i] < 2^106
  724. */
  725. static void felem_reduce_zero105(felem out, const longfelem in)
  726. {
  727. out[0] = zero105[0] + in[0];
  728. out[1] = zero105[1] + in[1];
  729. out[2] = zero105[2] + in[2];
  730. out[3] = zero105[3] + in[3];
  731. felem_reduce_(out, in);
  732. /*-
  733. * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
  734. * out[1] > 2^105 - 2^71 - 2^103 > 0
  735. * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
  736. * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
  737. *
  738. * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  739. * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
  740. * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
  741. * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
  742. */
  743. }
  744. /*
  745. * subtract_u64 sets *result = *result - v and *carry to one if the
  746. * subtraction underflowed.
  747. */
  748. static void subtract_u64(u64 *result, u64 *carry, u64 v)
  749. {
  750. uint128_t r = *result;
  751. r -= v;
  752. *carry = (r >> 64) & 1;
  753. *result = (u64)r;
  754. }
  755. /*
  756. * felem_contract converts |in| to its unique, minimal representation. On
  757. * entry: in[i] < 2^109
  758. */
/*
 * Produces the unique representative in [0, p) by conditionally subtracting
 * kPrime exactly once, all in constant time.
 */
static void felem_contract(smallfelem out, const felem in)
{
    unsigned i;
    u64 all_equal_so_far = 0, result = 0, carry;

    felem_shrink(out, in);
    /* small is minimal except that the value might be > p */
    all_equal_so_far--;         /* wraps to all-ones: "every word equal so far" */
    /*
     * We are doing a constant time test if out >= kPrime. We need to compare
     * each u64, from most-significant to least significant. For each one, if
     * all words so far have been equal (m is all ones) then a non-equal
     * result is the answer. Otherwise we continue.
     */
    for (i = 3; i < 4; i--) {   /* i runs 3,2,1,0; unsigned wrap ends the loop */
        u64 equal;
        uint128_t a = ((uint128_t) kPrime[i]) - out[i];
        /*
         * if out[i] > kPrime[i] then a will underflow and the high 64-bits
         * will all be set.
         */
        result |= all_equal_so_far & ((u64)(a >> 64));

        /*
         * if kPrime[i] == out[i] then |equal| will be all zeros and the
         * decrement will make it all ones.
         */
        equal = kPrime[i] ^ out[i];
        equal--;
        /* AND-fold every bit into bit 63: set there iff the words were equal */
        equal &= equal << 32;
        equal &= equal << 16;
        equal &= equal << 8;
        equal &= equal << 4;
        equal &= equal << 2;
        equal &= equal << 1;
        equal = ((s64) equal) >> 63;    /* broadcast bit 63 to all bits */

        all_equal_so_far &= equal;
    }

    /*
     * if all_equal_so_far is still all ones then the two values are equal
     * and so out >= kPrime is true.
     */
    result |= all_equal_so_far;

    /* if out >= kPrime then we subtract kPrime. */
    /* |result| is an all-ones/all-zeros mask; borrows ripple via |carry| */
    subtract_u64(&out[0], &carry, result & kPrime[0]);
    subtract_u64(&out[1], &carry, carry);
    subtract_u64(&out[2], &carry, carry);
    subtract_u64(&out[3], &carry, carry);

    subtract_u64(&out[1], &carry, result & kPrime[1]);
    subtract_u64(&out[2], &carry, carry);
    subtract_u64(&out[3], &carry, carry);

    subtract_u64(&out[2], &carry, result & kPrime[2]);
    subtract_u64(&out[3], &carry, carry);

    subtract_u64(&out[3], &carry, result & kPrime[3]);
}
  812. static void smallfelem_square_contract(smallfelem out, const smallfelem in)
  813. {
  814. longfelem longtmp;
  815. felem tmp;
  816. smallfelem_square(longtmp, in);
  817. felem_reduce(tmp, longtmp);
  818. felem_contract(out, tmp);
  819. }
  820. static void smallfelem_mul_contract(smallfelem out, const smallfelem in1,
  821. const smallfelem in2)
  822. {
  823. longfelem longtmp;
  824. felem tmp;
  825. smallfelem_mul(longtmp, in1, in2);
  826. felem_reduce(tmp, longtmp);
  827. felem_contract(out, tmp);
  828. }
  829. /*-
 * smallfelem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
  831. * otherwise.
  832. * On entry:
  833. * small[i] < 2^64
  834. */
static limb smallfelem_is_zero(const smallfelem small)
{
    limb result;
    u64 is_p;

    u64 is_zero = small[0] | small[1] | small[2] | small[3];
    is_zero--;
    /*
     * Decrementing 0 wraps to all-ones; for any non-zero OR the AND-fold
     * below clears bit 63.  After the chain, bit 63 is set iff the original
     * value was zero.
     */
    is_zero &= is_zero << 32;
    is_zero &= is_zero << 16;
    is_zero &= is_zero << 8;
    is_zero &= is_zero << 4;
    is_zero &= is_zero << 2;
    is_zero &= is_zero << 1;
    is_zero = ((s64) is_zero) >> 63;    /* broadcast bit 63 to every bit */

    /* same trick for |small| == p, the other representation of 0 mod p */
    is_p = (small[0] ^ kPrime[0]) |
        (small[1] ^ kPrime[1]) |
        (small[2] ^ kPrime[2]) | (small[3] ^ kPrime[3]);
    is_p--;
    is_p &= is_p << 32;
    is_p &= is_p << 16;
    is_p &= is_p << 8;
    is_p &= is_p << 4;
    is_p &= is_p << 2;
    is_p &= is_p << 1;
    is_p = ((s64) is_p) >> 63;

    is_zero |= is_p;

    /* widen the 64-bit mask into a full-width limb mask */
    result = is_zero;
    result |= ((limb) is_zero) << 64;
    return result;
}
  864. static int smallfelem_is_zero_int(const void *small)
  865. {
  866. return (int)(smallfelem_is_zero(small) & ((limb) 1));
  867. }
  868. /*-
  869. * felem_inv calculates |out| = |in|^{-1}
  870. *
  871. * Based on Fermat's Little Theorem:
  872. * a^p = a (mod p)
  873. * a^{p-1} = 1 (mod p)
  874. * a^{p-2} = a^{-1} (mod p)
  875. */
/*
 * Fixed addition chain computing in^(p-2) for
 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1; the trailing comments track the
 * exponent accumulated in ftmp/ftmp2 after each step.
 */
static void felem_inv(felem out, const felem in)
{
    felem ftmp, ftmp2;
    /* each e_I will hold |in|^{2^I - 1} */
    felem e2, e4, e8, e16, e32, e64;
    longfelem tmp;
    unsigned i;

    felem_square(tmp, in);
    felem_reduce(ftmp, tmp);    /* 2^1 */
    felem_mul(tmp, in, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^2 - 2^0 */
    felem_assign(e2, ftmp);
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^3 - 2^1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^4 - 2^2 */
    felem_mul(tmp, ftmp, e2);
    felem_reduce(ftmp, tmp);    /* 2^4 - 2^0 */
    felem_assign(e4, ftmp);
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^5 - 2^1 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^6 - 2^2 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^7 - 2^3 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^8 - 2^4 */
    felem_mul(tmp, ftmp, e4);
    felem_reduce(ftmp, tmp);    /* 2^8 - 2^0 */
    felem_assign(e8, ftmp);
    for (i = 0; i < 8; i++) {
        felem_square(tmp, ftmp);
        felem_reduce(ftmp, tmp);
    }                           /* 2^16 - 2^8 */
    felem_mul(tmp, ftmp, e8);
    felem_reduce(ftmp, tmp);    /* 2^16 - 2^0 */
    felem_assign(e16, ftmp);
    for (i = 0; i < 16; i++) {
        felem_square(tmp, ftmp);
        felem_reduce(ftmp, tmp);
    }                           /* 2^32 - 2^16 */
    felem_mul(tmp, ftmp, e16);
    felem_reduce(ftmp, tmp);    /* 2^32 - 2^0 */
    felem_assign(e32, ftmp);
    for (i = 0; i < 32; i++) {
        felem_square(tmp, ftmp);
        felem_reduce(ftmp, tmp);
    }                           /* 2^64 - 2^32 */
    felem_assign(e64, ftmp);
    felem_mul(tmp, ftmp, in);
    felem_reduce(ftmp, tmp);    /* 2^64 - 2^32 + 2^0 */
    /* shift the high part of the exponent into place */
    for (i = 0; i < 192; i++) {
        felem_square(tmp, ftmp);
        felem_reduce(ftmp, tmp);
    }                           /* 2^256 - 2^224 + 2^192 */
    /* now build the low 96-bit part in ftmp2 */
    felem_mul(tmp, e64, e32);
    felem_reduce(ftmp2, tmp);   /* 2^64 - 2^0 */
    for (i = 0; i < 16; i++) {
        felem_square(tmp, ftmp2);
        felem_reduce(ftmp2, tmp);
    }                           /* 2^80 - 2^16 */
    felem_mul(tmp, ftmp2, e16);
    felem_reduce(ftmp2, tmp);   /* 2^80 - 2^0 */
    for (i = 0; i < 8; i++) {
        felem_square(tmp, ftmp2);
        felem_reduce(ftmp2, tmp);
    }                           /* 2^88 - 2^8 */
    felem_mul(tmp, ftmp2, e8);
    felem_reduce(ftmp2, tmp);   /* 2^88 - 2^0 */
    for (i = 0; i < 4; i++) {
        felem_square(tmp, ftmp2);
        felem_reduce(ftmp2, tmp);
    }                           /* 2^92 - 2^4 */
    felem_mul(tmp, ftmp2, e4);
    felem_reduce(ftmp2, tmp);   /* 2^92 - 2^0 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^93 - 2^1 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^94 - 2^2 */
    felem_mul(tmp, ftmp2, e2);
    felem_reduce(ftmp2, tmp);   /* 2^94 - 2^0 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^95 - 2^1 */
    felem_square(tmp, ftmp2);
    felem_reduce(ftmp2, tmp);   /* 2^96 - 2^2 */
    felem_mul(tmp, ftmp2, in);
    felem_reduce(ftmp2, tmp);   /* 2^96 - 3 */
    /* combine both halves: exponent is p - 2 */
    felem_mul(tmp, ftmp2, ftmp);
    felem_reduce(out, tmp);     /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
}
  966. static void smallfelem_inv_contract(smallfelem out, const smallfelem in)
  967. {
  968. felem tmp;
  969. smallfelem_expand(tmp, in);
  970. felem_inv(tmp, tmp);
  971. felem_contract(out, tmp);
  972. }
  973. /*-
  974. * Group operations
  975. * ----------------
  976. *
  977. * Building on top of the field operations we have the operations on the
  978. * elliptic curve group itself. Points on the curve are represented in Jacobian
  979. * coordinates
  980. */
  981. /*-
  982. * point_double calculates 2*(x_in, y_in, z_in)
  983. *
  984. * The method is taken from:
  985. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  986. *
  987. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
  988. * while x_out == y_in is not (maybe this works, but it's not tested).
  989. */
static void
point_double(felem x_out, felem y_out, felem z_out,
             const felem x_in, const felem y_in, const felem z_in)
{
    longfelem tmp, tmp2;
    felem delta, gamma, beta, alpha, ftmp, ftmp2;
    smallfelem small1, small2;

    felem_assign(ftmp, x_in);
    /* ftmp[i] < 2^106 */
    felem_assign(ftmp2, x_in);
    /* ftmp2[i] < 2^106 */

    /* delta = z^2 */
    felem_square(tmp, z_in);
    felem_reduce(delta, tmp);
    /* delta[i] < 2^101 */

    /* gamma = y^2 */
    felem_square(tmp, y_in);
    felem_reduce(gamma, tmp);
    /* gamma[i] < 2^101 */
    felem_shrink(small1, gamma);

    /* beta = x*gamma */
    felem_small_mul(tmp, small1, x_in);
    felem_reduce(beta, tmp);
    /* beta[i] < 2^101 */

    /* alpha = 3*(x-delta)*(x+delta) */
    felem_diff(ftmp, delta);
    /* ftmp[i] < 2^105 + 2^106 < 2^107 */
    felem_sum(ftmp2, delta);
    /* ftmp2[i] < 2^105 + 2^106 < 2^107 */
    felem_scalar(ftmp2, 3);
    /* ftmp2[i] < 3 * 2^107 < 2^109 */
    felem_mul(tmp, ftmp, ftmp2);
    felem_reduce(alpha, tmp);
    /* alpha[i] < 2^101 */
    felem_shrink(small2, alpha);

    /* x' = alpha^2 - 8*beta */
    smallfelem_square(tmp, small2);
    felem_reduce(x_out, tmp);
    felem_assign(ftmp, beta);
    felem_scalar(ftmp, 8);
    /* ftmp[i] < 8 * 2^101 = 2^104 */
    felem_diff(x_out, ftmp);
    /* x_out[i] < 2^105 + 2^101 < 2^106 */

    /* z' = (y + z)^2 - gamma - delta */
    felem_sum(delta, gamma);
    /* delta[i] < 2^101 + 2^101 = 2^102 */
    felem_assign(ftmp, y_in);
    felem_sum(ftmp, z_in);
    /* ftmp[i] < 2^106 + 2^106 = 2^107 */
    felem_square(tmp, ftmp);
    felem_reduce(z_out, tmp);
    felem_diff(z_out, delta);
    /* z_out[i] < 2^105 + 2^101 < 2^106 */

    /* y' = alpha*(4*beta - x') - 8*gamma^2 */
    felem_scalar(beta, 4);
    /* beta[i] < 4 * 2^101 = 2^103 */
    felem_diff_zero107(beta, x_out);
    /* beta[i] < 2^107 + 2^103 < 2^108 */
    felem_small_mul(tmp, small2, beta);
    /* tmp[i] < 7 * 2^64 < 2^67 */
    smallfelem_square(tmp2, small1);
    /* tmp2[i] < 7 * 2^64 */
    longfelem_scalar(tmp2, 8);
    /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */
    longfelem_diff(tmp, tmp2);
    /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71, within felem_reduce_zero105's
     * documented input bound */
    felem_reduce_zero105(y_out, tmp);
    /* y_out[i] < 2^106 */
}
  1059. /*
  1060. * point_double_small is the same as point_double, except that it operates on
  1061. * smallfelems
  1062. */
  1063. static void
  1064. point_double_small(smallfelem x_out, smallfelem y_out, smallfelem z_out,
  1065. const smallfelem x_in, const smallfelem y_in,
  1066. const smallfelem z_in)
  1067. {
  1068. felem felem_x_out, felem_y_out, felem_z_out;
  1069. felem felem_x_in, felem_y_in, felem_z_in;
  1070. smallfelem_expand(felem_x_in, x_in);
  1071. smallfelem_expand(felem_y_in, y_in);
  1072. smallfelem_expand(felem_z_in, z_in);
  1073. point_double(felem_x_out, felem_y_out, felem_z_out,
  1074. felem_x_in, felem_y_in, felem_z_in);
  1075. felem_shrink(x_out, felem_x_out);
  1076. felem_shrink(y_out, felem_y_out);
  1077. felem_shrink(z_out, felem_z_out);
  1078. }
  1079. /* copy_conditional copies in to out iff mask is all ones. */
  1080. static void copy_conditional(felem out, const felem in, limb mask)
  1081. {
  1082. unsigned i;
  1083. for (i = 0; i < NLIMBS; ++i) {
  1084. const limb tmp = mask & (in[i] ^ out[i]);
  1085. out[i] ^= tmp;
  1086. }
  1087. }
  1088. /* copy_small_conditional copies in to out iff mask is all ones. */
static void copy_small_conditional(felem out, const smallfelem in, limb mask)
{
    unsigned i;
    /* truncated 64-bit view of the mask, matching the u64 input limbs */
    const u64 mask64 = mask;
    for (i = 0; i < NLIMBS; ++i) {
        /*
         * select in[i] when mask is all-ones, keep out[i] when it is zero;
         * ~mask is applied at full limb width so out's high bits are also
         * cleared on selection
         */
        out[i] = ((limb) (in[i] & mask64)) | (out[i] & ~mask);
    }
}
  1097. /*-
 * point_add calculates (x1, y1, z1) + (x2, y2, z2)
  1099. *
  1100. * The method is taken from:
  1101. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
  1102. * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
  1103. *
  1104. * This function includes a branch for checking whether the two input points
  1105. * are equal, (while not equal to the point at infinity). This case never
  1106. * happens during single point multiplication, so there is no timing leak for
  1107. * ECDH or ECDSA signing.
  1108. */
static void point_add(felem x3, felem y3, felem z3,
                      const felem x1, const felem y1, const felem z1,
                      const int mixed, const smallfelem x2,
                      const smallfelem y2, const smallfelem z2)
{
    felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
    longfelem tmp, tmp2;
    smallfelem small1, small2, small3, small4, small5;
    limb x_equal, y_equal, z1_is_zero, z2_is_zero;

    felem_shrink(small3, z1);

    z1_is_zero = smallfelem_is_zero(small3);
    z2_is_zero = smallfelem_is_zero(z2);

    /* ftmp = z1z1 = z1**2 */
    smallfelem_square(tmp, small3);
    felem_reduce(ftmp, tmp);
    /* ftmp[i] < 2^101 */
    felem_shrink(small1, ftmp);

    if (!mixed) {
        /* ftmp2 = z2z2 = z2**2 */
        smallfelem_square(tmp, z2);
        felem_reduce(ftmp2, tmp);
        /* ftmp2[i] < 2^101 */
        felem_shrink(small2, ftmp2);

        felem_shrink(small5, x1);

        /* u1 = ftmp3 = x1*z2z2 */
        smallfelem_mul(tmp, small5, small2);
        felem_reduce(ftmp3, tmp);
        /* ftmp3[i] < 2^101 */

        /* ftmp5 = z1 + z2 */
        felem_assign(ftmp5, z1);
        felem_small_sum(ftmp5, z2);
        /* ftmp5[i] < 2^107 */

        /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */
        felem_square(tmp, ftmp5);
        felem_reduce(ftmp5, tmp);
        /* ftmp2 = z2z2 + z1z1 */
        felem_sum(ftmp2, ftmp);
        /* ftmp2[i] < 2^101 + 2^101 = 2^102 */
        felem_diff(ftmp5, ftmp2);
        /* ftmp5[i] < 2^105 + 2^101 < 2^106 */

        /* ftmp2 = z2 * z2z2 */
        smallfelem_mul(tmp, small2, z2);
        felem_reduce(ftmp2, tmp);

        /* s1 = ftmp2 = y1 * z2**3 */
        felem_mul(tmp, y1, ftmp2);
        felem_reduce(ftmp6, tmp);
        /* ftmp6[i] < 2^101 */
    } else {
        /*
         * We'll assume z2 = 1 (special case z2 = 0 is handled later)
         */

        /* u1 = ftmp3 = x1*z2z2 */
        felem_assign(ftmp3, x1);
        /* ftmp3[i] < 2^106 */

        /* ftmp5 = 2z1z2 */
        felem_assign(ftmp5, z1);
        felem_scalar(ftmp5, 2);
        /* ftmp5[i] < 2*2^106 = 2^107 */

        /* s1 = ftmp2 = y1 * z2**3 */
        felem_assign(ftmp6, y1);
        /* ftmp6[i] < 2^106 */
    }

    /* u2 = x2*z1z1 */
    smallfelem_mul(tmp, x2, small1);
    felem_reduce(ftmp4, tmp);

    /* h = ftmp4 = u2 - u1 */
    felem_diff_zero107(ftmp4, ftmp3);
    /* ftmp4[i] < 2^107 + 2^101 < 2^108 */
    felem_shrink(small4, ftmp4);

    /* shrink + smallfelem_is_zero treats both 0 and p as "zero" */
    x_equal = smallfelem_is_zero(small4);

    /* z_out = ftmp5 * h */
    felem_small_mul(tmp, small4, ftmp5);
    felem_reduce(z_out, tmp);
    /* z_out[i] < 2^101 */

    /* ftmp = z1 * z1z1 */
    smallfelem_mul(tmp, small1, small3);
    felem_reduce(ftmp, tmp);

    /* s2 = tmp = y2 * z1**3 */
    felem_small_mul(tmp, y2, ftmp);
    felem_reduce(ftmp5, tmp);

    /* r = ftmp5 = (s2 - s1)*2 */
    felem_diff_zero107(ftmp5, ftmp6);
    /* ftmp5[i] < 2^107 + 2^107 = 2^108 */
    felem_scalar(ftmp5, 2);
    /* ftmp5[i] < 2^109 */
    felem_shrink(small1, ftmp5);
    y_equal = smallfelem_is_zero(small1);

    /*
     * The addition formulae break down when both inputs are the same finite
     * point, so fall back to doubling.  NOTE(review): this is a branch on
     * secret-derived data; per the header comment it is claimed unreachable
     * during single-point multiplication — confirm callers cannot reach it
     * with attacker-observable timing.
     */
    if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
        point_double(x3, y3, z3, x1, y1, z1);
        return;
    }

    /* I = ftmp = (2h)**2 */
    felem_assign(ftmp, ftmp4);
    felem_scalar(ftmp, 2);
    /* ftmp[i] < 2*2^108 = 2^109 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);

    /* J = ftmp2 = h * I */
    felem_mul(tmp, ftmp4, ftmp);
    felem_reduce(ftmp2, tmp);

    /* V = ftmp4 = U1 * I */
    felem_mul(tmp, ftmp3, ftmp);
    felem_reduce(ftmp4, tmp);

    /* x_out = r**2 - J - 2V */
    smallfelem_square(tmp, small1);
    felem_reduce(x_out, tmp);
    felem_assign(ftmp3, ftmp4);
    felem_scalar(ftmp4, 2);
    felem_sum(ftmp4, ftmp2);
    /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */
    felem_diff(x_out, ftmp4);
    /* x_out[i] < 2^105 + 2^101 */

    /* y_out = r(V-x_out) - 2 * s1 * J */
    felem_diff_zero107(ftmp3, x_out);
    /* ftmp3[i] < 2^107 + 2^101 < 2^108 */
    felem_small_mul(tmp, small1, ftmp3);
    felem_mul(tmp2, ftmp6, ftmp2);
    longfelem_scalar(tmp2, 2);
    /* tmp2[i] < 2*2^67 = 2^68 */
    longfelem_diff(tmp, tmp2);
    /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
    felem_reduce_zero105(y_out, tmp);
    /* y_out[i] < 2^106 */

    /*
     * Constant-time handling of the point-at-infinity inputs: if z1 == 0 the
     * result is (x2, y2, z2); if z2 == 0 it is (x1, y1, z1).
     */
    copy_small_conditional(x_out, x2, z1_is_zero);
    copy_conditional(x_out, x1, z2_is_zero);
    copy_small_conditional(y_out, y2, z1_is_zero);
    copy_conditional(y_out, y1, z2_is_zero);
    copy_small_conditional(z_out, z2, z1_is_zero);
    copy_conditional(z_out, z1, z2_is_zero);
    felem_assign(x3, x_out);
    felem_assign(y3, y_out);
    felem_assign(z3, z_out);
}
  1242. /*
  1243. * point_add_small is the same as point_add, except that it operates on
  1244. * smallfelems
  1245. */
  1246. static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
  1247. smallfelem x1, smallfelem y1, smallfelem z1,
  1248. smallfelem x2, smallfelem y2, smallfelem z2)
  1249. {
  1250. felem felem_x3, felem_y3, felem_z3;
  1251. felem felem_x1, felem_y1, felem_z1;
  1252. smallfelem_expand(felem_x1, x1);
  1253. smallfelem_expand(felem_y1, y1);
  1254. smallfelem_expand(felem_z1, z1);
  1255. point_add(felem_x3, felem_y3, felem_z3, felem_x1, felem_y1, felem_z1, 0,
  1256. x2, y2, z2);
  1257. felem_shrink(x3, felem_x3);
  1258. felem_shrink(y3, felem_y3);
  1259. felem_shrink(z3, felem_z3);
  1260. }
  1261. /*-
  1262. * Base point pre computation
  1263. * --------------------------
  1264. *
  1265. * Two different sorts of precomputed tables are used in the following code.
  1266. * Each contain various points on the curve, where each point is three field
  1267. * elements (x, y, z).
  1268. *
  1269. * For the base point table, z is usually 1 (0 for the point at infinity).
  1270. * This table has 2 * 16 elements, starting with the following:
  1271. * index | bits | point
  1272. * ------+---------+------------------------------
  1273. * 0 | 0 0 0 0 | 0G
  1274. * 1 | 0 0 0 1 | 1G
  1275. * 2 | 0 0 1 0 | 2^64G
  1276. * 3 | 0 0 1 1 | (2^64 + 1)G
  1277. * 4 | 0 1 0 0 | 2^128G
  1278. * 5 | 0 1 0 1 | (2^128 + 1)G
  1279. * 6 | 0 1 1 0 | (2^128 + 2^64)G
  1280. * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
  1281. * 8 | 1 0 0 0 | 2^192G
  1282. * 9 | 1 0 0 1 | (2^192 + 1)G
  1283. * 10 | 1 0 1 0 | (2^192 + 2^64)G
  1284. * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
  1285. * 12 | 1 1 0 0 | (2^192 + 2^128)G
  1286. * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
  1287. * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
  1288. * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
  1289. * followed by a copy of this with each element multiplied by 2^32.
  1290. *
  1291. * The reason for this is so that we can clock bits into four different
  1292. * locations when doing simple scalar multiplies against the base point,
  1293. * and then another four locations using the second 16 elements.
  1294. *
  1295. * Tables for other points have table[i] = iG for i in 0 .. 16. */
  1296. /* gmul is the table of precomputed base points */
static const smallfelem gmul[2][16][3] = {
    /* gmul[0]: 0/1 combinations of G, 2^64*G, 2^128*G, 2^192*G (see table above) */
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2,
       0x6b17d1f2e12c4247},
      {0xcbb6406837bf51f5, 0x2bce33576b315ece, 0x8ee7eb4a7c0f9e16,
       0x4fe342e2fe1a7f9b},
      {1, 0, 0, 0}},
     {{0x90e75cb48e14db63, 0x29493baaad651f7e, 0x8492592e326e25de,
       0x0fa822bc2811aaa5},
      {0xe41124545f462ee7, 0x34b1a65050fe82f5, 0x6f4ad4bcb3df188b,
       0xbff44ae8f5dba80d},
      {1, 0, 0, 0}},
     {{0x93391ce2097992af, 0xe96c98fd0d35f1fa, 0xb257c0de95e02789,
       0x300a4bbc89d6726f},
      {0xaa54a291c08127a0, 0x5bb1eeada9d806a5, 0x7f1ddb25ff1e3c6f,
       0x72aac7e0d09b4644},
      {1, 0, 0, 0}},
     {{0x57c84fc9d789bd85, 0xfc35ff7dc297eac3, 0xfb982fd588c6766e,
       0x447d739beedb5e67},
      {0x0c7e33c972e25b32, 0x3d349b95a7fae500, 0xe12e9d953a4aaff7,
       0x2d4825ab834131ee},
      {1, 0, 0, 0}},
     {{0x13949c932a1d367f, 0xef7fbd2b1a0a11b7, 0xddc6068bb91dfc60,
       0xef9519328a9c72ff},
      {0x196035a77376d8a8, 0x23183b0895ca1740, 0xc1ee9807022c219c,
       0x611e9fc37dbb2c9b},
      {1, 0, 0, 0}},
     {{0xcae2b1920b57f4bc, 0x2936df5ec6c9bc36, 0x7dea6482e11238bf,
       0x550663797b51f5d8},
      {0x44ffe216348a964c, 0x9fb3d576dbdefbe1, 0x0afa40018d9d50e5,
       0x157164848aecb851},
      {1, 0, 0, 0}},
     {{0xe48ecafffc5cde01, 0x7ccd84e70d715f26, 0xa2e8f483f43e4391,
       0xeb5d7745b21141ea},
      {0xcac917e2731a3479, 0x85f22cfe2844b645, 0x0990e6a158006cee,
       0xeafd72ebdbecc17b},
      {1, 0, 0, 0}},
     {{0x6cf20ffb313728be, 0x96439591a3c6b94a, 0x2736ff8344315fc5,
       0xa6d39677a7849276},
      {0xf2bab833c357f5f4, 0x824a920c2284059b, 0x66b8babd2d27ecdf,
       0x674f84749b0b8816},
      {1, 0, 0, 0}},
     {{0x2df48c04677c8a3e, 0x74e02f080203a56b, 0x31855f7db8c7fedb,
       0x4e769e7672c9ddad},
      {0xa4c36165b824bbb0, 0xfb9ae16f3b9122a5, 0x1ec0057206947281,
       0x42b99082de830663},
      {1, 0, 0, 0}},
     {{0x6ef95150dda868b9, 0xd1f89e799c0ce131, 0x7fdc1ca008a1c478,
       0x78878ef61c6ce04d},
      {0x9c62b9121fe0d976, 0x6ace570ebde08d4f, 0xde53142c12309def,
       0xb6cb3f5d7b72c321},
      {1, 0, 0, 0}},
     {{0x7f991ed2c31a3573, 0x5b82dd5bd54fb496, 0x595c5220812ffcae,
       0x0c88bc4d716b1287},
      {0x3a57bf635f48aca8, 0x7c8181f4df2564f3, 0x18d1b5b39c04e6aa,
       0xdd5ddea3f3901dc6},
      {1, 0, 0, 0}},
     {{0xe96a79fb3e72ad0c, 0x43a0a28c42ba792f, 0xefe0a423083e49f3,
       0x68f344af6b317466},
      {0xcdfe17db3fb24d4a, 0x668bfc2271f5c626, 0x604ed93c24d67ff3,
       0x31b9c405f8540a20},
      {1, 0, 0, 0}},
     {{0xd36b4789a2582e7f, 0x0d1a10144ec39c28, 0x663c62c3edbad7a0,
       0x4052bf4b6f461db9},
      {0x235a27c3188d25eb, 0xe724f33999bfcc5b, 0x862be6bd71d70cc8,
       0xfecf4d5190b0fc61},
      {1, 0, 0, 0}},
     {{0x74346c10a1d4cfac, 0xafdf5cc08526a7a4, 0x123202a8f62bff7a,
       0x1eddbae2c802e41a},
      {0x8fa0af2dd603f844, 0x36e06b7e4c701917, 0x0c45f45273db33a0,
       0x43104d86560ebcfc},
      {1, 0, 0, 0}},
     {{0x9615b5110d1d78e5, 0x66b0de3225c4744b, 0x0a4a46fb6aaf363a,
       0xb48e26b484f7a21c},
      {0x06ebb0f621a01b2d, 0xc004e4048b7b0f98, 0x64131bcdfed6f668,
       0xfac015404d4d3dab},
      {1, 0, 0, 0}}},
    /* gmul[1]: the same multiples, each scaled by 2^32 */
    {{{0, 0, 0, 0},
      {0, 0, 0, 0},
      {0, 0, 0, 0}},
     {{0x3a5a9e22185a5943, 0x1ab919365c65dfb6, 0x21656b32262c71da,
       0x7fe36b40af22af89},
      {0xd50d152c699ca101, 0x74b3d5867b8af212, 0x9f09f40407dca6f1,
       0xe697d45825b63624},
      {1, 0, 0, 0}},
     {{0xa84aa9397512218e, 0xe9a521b074ca0141, 0x57880b3a18a2e902,
       0x4a5b506612a677a6},
      {0x0beada7a4c4f3840, 0x626db15419e26d9d, 0xc42604fbe1627d40,
       0xeb13461ceac089f1},
      {1, 0, 0, 0}},
     {{0xf9faed0927a43281, 0x5e52c4144103ecbc, 0xc342967aa815c857,
       0x0781b8291c6a220a},
      {0x5a8343ceeac55f80, 0x88f80eeee54a05e3, 0x97b2a14f12916434,
       0x690cde8df0151593},
      {1, 0, 0, 0}},
     {{0xaee9c75df7f82f2a, 0x9e4c35874afdf43a, 0xf5622df437371326,
       0x8a535f566ec73617},
      {0xc5f9a0ac223094b7, 0xcde533864c8c7669, 0x37e02819085a92bf,
       0x0455c08468b08bd7},
      {1, 0, 0, 0}},
     {{0x0c0a6e2c9477b5d9, 0xf9a4bf62876dc444, 0x5050a949b6cdc279,
       0x06bada7ab77f8276},
      {0xc8b4aed1ea48dac9, 0xdebd8a4b7ea1070f, 0x427d49101366eb70,
       0x5b476dfd0e6cb18a},
      {1, 0, 0, 0}},
     {{0x7c5c3e44278c340a, 0x4d54606812d66f3b, 0x29a751b1ae23c5d8,
       0x3e29864e8a2ec908},
      {0x142d2a6626dbb850, 0xad1744c4765bd780, 0x1f150e68e322d1ed,
       0x239b90ea3dc31e7e},
      {1, 0, 0, 0}},
     {{0x78c416527a53322a, 0x305dde6709776f8e, 0xdbcab759f8862ed4,
       0x820f4dd949f72ff7},
      {0x6cc544a62b5debd4, 0x75be5d937b4e8cc4, 0x1b481b1b215c14d3,
       0x140406ec783a05ec},
      {1, 0, 0, 0}},
     {{0x6a703f10e895df07, 0xfd75f3fa01876bd8, 0xeb5b06e70ce08ffe,
       0x68f6b8542783dfee},
      {0x90c76f8a78712655, 0xcf5293d2f310bf7f, 0xfbc8044dfda45028,
       0xcbe1feba92e40ce6},
      {1, 0, 0, 0}},
     {{0xe998ceea4396e4c1, 0xfc82ef0b6acea274, 0x230f729f2250e927,
       0xd0b2f94d2f420109},
      {0x4305adddb38d4966, 0x10b838f8624c3b45, 0x7db2636658954e7a,
       0x971459828b0719e5},
      {1, 0, 0, 0}},
     {{0x4bd6b72623369fc9, 0x57f2929e53d0b876, 0xc2d5cba4f2340687,
       0x961610004a866aba},
      {0x49997bcd2e407a5e, 0x69ab197d92ddcb24, 0x2cf1f2438fe5131c,
       0x7acb9fadcee75e44},
      {1, 0, 0, 0}},
     {{0x254e839423d2d4c0, 0xf57f0c917aea685b, 0xa60d880f6f75aaea,
       0x24eb9acca333bf5b},
      {0xe3de4ccb1cda5dea, 0xfeef9341c51a6b4f, 0x743125f88bac4c4d,
       0x69f891c5acd079cc},
      {1, 0, 0, 0}},
     {{0xeee44b35702476b5, 0x7ed031a0e45c2258, 0xb422d1e7bd6f8514,
       0xe51f547c5972a107},
      {0xa25bcd6fc9cf343d, 0x8ca922ee097c184e, 0xa62f98b3a9fe9a06,
       0x1c309a2b25bb1387},
      {1, 0, 0, 0}},
     {{0x9295dbeb1967c459, 0xb00148833472c98e, 0xc504977708011828,
       0x20b87b8aa2c4e503},
      {0x3063175de057c277, 0x1bd539338fe582dd, 0x0d11adef5f69a044,
       0xf5c6fa49919776be},
      {1, 0, 0, 0}},
     {{0x8c944e760fd59e11, 0x3876cba1102fad5f, 0xa454c3fad83faa56,
       0x1ed7d1b9332010b9},
      {0xa1011a270024b889, 0x05e4d0dcac0cd344, 0x52b520f0eb6a2a24,
       0x3a2b03f03217257a},
      {1, 0, 0, 0}},
     {{0xf20fc2afdf1d043d, 0xf330240db58d5a62, 0xfc7d229ca0058c3b,
       0x15fee545c78dd9f6},
      {0x501e82885bc98cda, 0x41ef80e5d046ac04, 0x557d9f49461210fb,
       0x4ab5b6b2b8753f81},
      {1, 0, 0, 0}}}
};
  1455. /*
  1456. * select_point selects the |idx|th point from a precomputation table and
  1457. * copies it to out.
  1458. */
  1459. static void select_point(const u64 idx, unsigned int size,
  1460. const smallfelem pre_comp[16][3], smallfelem out[3])
  1461. {
  1462. unsigned i, j;
  1463. u64 *outlimbs = &out[0][0];
  1464. memset(outlimbs, 0, 3 * sizeof(smallfelem));
  1465. for (i = 0; i < size; i++) {
  1466. const u64 *inlimbs = (u64 *)&pre_comp[i][0][0];
  1467. u64 mask = i ^ idx;
  1468. mask |= mask >> 4;
  1469. mask |= mask >> 2;
  1470. mask |= mask >> 1;
  1471. mask &= 1;
  1472. mask--;
  1473. for (j = 0; j < NLIMBS * 3; j++)
  1474. outlimbs[j] |= inlimbs[j] & mask;
  1475. }
  1476. }
  1477. /* get_bit returns the |i|th bit in |in| */
  1478. static char get_bit(const felem_bytearray in, int i)
  1479. {
  1480. if ((i < 0) || (i >= 256))
  1481. return 0;
  1482. return (in[i >> 3] >> (i & 7)) & 1;
  1483. }
  1484. /*
  1485. * Interleaved point multiplication using precomputed point multiples: The
 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
  1487. * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
  1488. * generator, using certain (large) precomputed multiples in g_pre_comp.
  1489. * Output point (X, Y, Z) is stored in x_out, y_out, z_out
  1490. */
static void batch_mul(felem x_out, felem y_out, felem z_out,
                      const felem_bytearray scalars[],
                      const unsigned num_points, const u8 *g_scalar,
                      const int mixed, const smallfelem pre_comp[][17][3],
                      const smallfelem g_pre_comp[2][16][3])
{
    int i, skip;
    unsigned num, gen_mul = (g_scalar != NULL);
    felem nq[3], ftmp;
    smallfelem tmp[3];
    u64 bits;
    u8 sign, digit;

    /* set nq to the point at infinity */
    memset(nq, 0, 3 * sizeof(felem));

    /*
     * Loop over all scalars msb-to-lsb, interleaving additions of multiples
     * of the generator (two in each of the last 32 rounds) and additions of
     * other points multiples (every 5th round).
     */
    skip = 1;                   /* save two point operations in the first
                                 * round */
    for (i = (num_points ? 255 : 31); i >= 0; --i) {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);

        /* add multiples of the generator */
        if (gen_mul && (i <= 31)) {
            /* first, look 32 bits upwards: one scalar bit from each 64-bit
             * quarter, offset by 32 — matches the layout of g_pre_comp[1] */
            bits = get_bit(g_scalar, i + 224) << 3;
            bits |= get_bit(g_scalar, i + 160) << 2;
            bits |= get_bit(g_scalar, i + 96) << 1;
            bits |= get_bit(g_scalar, i + 32);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[1], tmp);

            if (!skip) {
                /* Arg 1 below is for "mixed" */
                point_add(nq[0], nq[1], nq[2],
                          nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
            } else {
                /* no accumulated point yet: just load this one */
                smallfelem_expand(nq[0], tmp[0]);
                smallfelem_expand(nq[1], tmp[1]);
                smallfelem_expand(nq[2], tmp[2]);
                skip = 0;
            }

            /* second, look at the current position */
            bits = get_bit(g_scalar, i + 192) << 3;
            bits |= get_bit(g_scalar, i + 128) << 2;
            bits |= get_bit(g_scalar, i + 64) << 1;
            bits |= get_bit(g_scalar, i);
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp[0], tmp);
            /* Arg 1 below is for "mixed" */
            point_add(nq[0], nq[1], nq[2],
                      nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
        }

        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0)) {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num) {
                /* 6 bits feed the signed-window recoding below */
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

                /*
                 * select the point to add or subtract, in constant time
                 */
                select_point(digit, 17, pre_comp[num], tmp);
                smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative
                                               * point */
                copy_small_conditional(ftmp, tmp[1], (((limb) sign) - 1));
                felem_contract(tmp[1], ftmp);

                if (!skip) {
                    point_add(nq[0], nq[1], nq[2],
                              nq[0], nq[1], nq[2],
                              mixed, tmp[0], tmp[1], tmp[2]);
                } else {
                    smallfelem_expand(nq[0], tmp[0]);
                    smallfelem_expand(nq[1], tmp[1]);
                    smallfelem_expand(nq[2], tmp[2]);
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
/* Precomputation for the group generator. */
typedef struct {
    /*
     * Precomputed generator multiples used by batch_mul: two tables of 16
     * Jacobian (X, Y, Z) triples each, stored as smallfelems.  The second
     * table holds the first table's points multiplied by 2^32 (see
     * ec_GFp_nistp256_precompute_mult, which fills these in).
     */
    smallfelem g_pre_comp[2][16][3];
    int references;             /* reference count, managed via CRYPTO_add */
} NISTP256_PRE_COMP;
/*
 * Return the EC_METHOD implementing NIST P-256 with this file's 64-bit
 * constant-time field arithmetic.  NOTE: the initializer is positional,
 * so entries must stay in exactly this order to match EC_METHOD's field
 * layout; zero entries fall back to generic handling.
 */
const EC_METHOD *EC_GFp_nistp256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_nistp256_group_init,
        ec_GFp_simple_group_finish,
        ec_GFp_simple_group_clear_finish,
        ec_GFp_nist_group_copy,
        ec_GFp_nistp256_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ec_GFp_nistp256_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */ ,
        0 /* point2oct */ ,
        0 /* oct2point */ ,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ec_GFp_nistp256_points_mul,
        ec_GFp_nistp256_precompute_mult,
        ec_GFp_nistp256_have_precompute_mult,
        ec_GFp_nist_field_mul,
        ec_GFp_nist_field_sqr,
        0 /* field_div */ ,
        0 /* field_encode */ ,
        0 /* field_decode */ ,
        0 /* field_set_to_one */
    };
    return &ret;
}
  1632. /******************************************************************************/
  1633. /*
  1634. * FUNCTIONS TO MANAGE PRECOMPUTATION
  1635. */
  1636. static NISTP256_PRE_COMP *nistp256_pre_comp_new()
  1637. {
  1638. NISTP256_PRE_COMP *ret = NULL;
  1639. ret = (NISTP256_PRE_COMP *) OPENSSL_malloc(sizeof(*ret));
  1640. if (!ret) {
  1641. ECerr(EC_F_NISTP256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
  1642. return ret;
  1643. }
  1644. memset(ret->g_pre_comp, 0, sizeof(ret->g_pre_comp));
  1645. ret->references = 1;
  1646. return ret;
  1647. }
  1648. static void *nistp256_pre_comp_dup(void *src_)
  1649. {
  1650. NISTP256_PRE_COMP *src = src_;
  1651. /* no need to actually copy, these objects never change! */
  1652. CRYPTO_add(&src->references, 1, CRYPTO_LOCK_EC_PRE_COMP);
  1653. return src_;
  1654. }
  1655. static void nistp256_pre_comp_free(void *pre_)
  1656. {
  1657. int i;
  1658. NISTP256_PRE_COMP *pre = pre_;
  1659. if (!pre)
  1660. return;
  1661. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1662. if (i > 0)
  1663. return;
  1664. OPENSSL_free(pre);
  1665. }
  1666. static void nistp256_pre_comp_clear_free(void *pre_)
  1667. {
  1668. int i;
  1669. NISTP256_PRE_COMP *pre = pre_;
  1670. if (!pre)
  1671. return;
  1672. i = CRYPTO_add(&pre->references, -1, CRYPTO_LOCK_EC_PRE_COMP);
  1673. if (i > 0)
  1674. return;
  1675. OPENSSL_cleanse(pre, sizeof(*pre));
  1676. OPENSSL_free(pre);
  1677. }
  1678. /******************************************************************************/
  1679. /*
  1680. * OPENSSL EC_METHOD FUNCTIONS
  1681. */
  1682. int ec_GFp_nistp256_group_init(EC_GROUP *group)
  1683. {
  1684. int ret;
  1685. ret = ec_GFp_simple_group_init(group);
  1686. group->a_is_minus3 = 1;
  1687. return ret;
  1688. }
  1689. int ec_GFp_nistp256_group_set_curve(EC_GROUP *group, const BIGNUM *p,
  1690. const BIGNUM *a, const BIGNUM *b,
  1691. BN_CTX *ctx)
  1692. {
  1693. int ret = 0;
  1694. BN_CTX *new_ctx = NULL;
  1695. BIGNUM *curve_p, *curve_a, *curve_b;
  1696. if (ctx == NULL)
  1697. if ((ctx = new_ctx = BN_CTX_new()) == NULL)
  1698. return 0;
  1699. BN_CTX_start(ctx);
  1700. if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
  1701. ((curve_a = BN_CTX_get(ctx)) == NULL) ||
  1702. ((curve_b = BN_CTX_get(ctx)) == NULL))
  1703. goto err;
  1704. BN_bin2bn(nistp256_curve_params[0], sizeof(felem_bytearray), curve_p);
  1705. BN_bin2bn(nistp256_curve_params[1], sizeof(felem_bytearray), curve_a);
  1706. BN_bin2bn(nistp256_curve_params[2], sizeof(felem_bytearray), curve_b);
  1707. if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
  1708. ECerr(EC_F_EC_GFP_NISTP256_GROUP_SET_CURVE,
  1709. EC_R_WRONG_CURVE_PARAMETERS);
  1710. goto err;
  1711. }
  1712. group->field_mod_func = BN_nist_mod_256;
  1713. ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
  1714. err:
  1715. BN_CTX_end(ctx);
  1716. if (new_ctx != NULL)
  1717. BN_CTX_free(new_ctx);
  1718. return ret;
  1719. }
/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3).  Either of |x| and |y| may be NULL if only one
 * coordinate is wanted.  Returns 1 on success, 0 on error (point at
 * infinity, conversion failure, or BN error).
 */
int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group,
                                                 const EC_POINT *point,
                                                 BIGNUM *x, BIGNUM *y,
                                                 BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in;
    smallfelem x_out, y_out;
    longfelem tmp;

    /* the point at infinity has no affine representation */
    if (EC_POINT_is_at_infinity(group, point)) {
        ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
              EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, &point->X)) || (!BN_to_felem(y_in, &point->Y)) ||
        (!BN_to_felem(z1, &point->Z)))
        return 0;
    felem_inv(z2, z1);          /* z2 = Z^-1 */
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);      /* z1 = Z^-2 */
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);    /* x_in = X * Z^-2 */
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!smallfelem_to_BN(x, x_out)) {
            ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);      /* z1 = Z^-3 */
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);    /* y_in = Y * Z^-3 */
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!smallfelem_to_BN(y, y_out)) {
            ECerr(EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
/* points below is of size |num|, and tmp_smallfelems is of size |num+1| */
static void make_points_affine(size_t num, smallfelem points[][3],
                               smallfelem tmp_smallfelems[])
{
    /*
     * Runs in constant time, unless an input is the point at infinity (which
     * normally shouldn't happen).
     *
     * This is a thin adapter: it binds this file's smallfelem primitives to
     * the generic batched-inversion routine shared by the nistp curves.
     * The function-pointer casts erase the smallfelem element type down to
     * void*, which is what the generic helper expects.
     */
    ec_GFp_nistp_points_make_affine_internal(num,
                                             points,
                                             sizeof(smallfelem),
                                             tmp_smallfelems,
                                             (void (*)(void *))smallfelem_one,
                                             smallfelem_is_zero_int,
                                             (void (*)(void *, const void *))
                                             smallfelem_assign,
                                             (void (*)(void *, const void *))
                                             smallfelem_square_contract,
                                             (void (*)
                                              (void *, const void *,
                                               const void *))
                                             smallfelem_mul_contract,
                                             (void (*)(void *, const void *))
                                             smallfelem_inv_contract,
                                             /* nothing to contract */
                                             (void (*)(void *, const void *))
                                             smallfelem_assign);
}
/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values Result is stored in r (r can equal one of the inputs).
 *
 * |num| is the number of entries in |points|/|scalars|; |scalar| (the
 * generator multiplier) may be NULL.  Returns 1 on success, 0 on error.
 * The computation is constant-time for scalars already in [0, 2^256);
 * larger or negative scalars are first reduced mod the group order
 * without a constant-time guarantee.
 */
int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
                               const BIGNUM *scalar, size_t num,
                               const EC_POINT *points[],
                               const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    int mixed = 0;              /* 1 => pre_comp tables converted to affine */
    BN_CTX *new_ctx = NULL;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;    /* little-endian copies of scalars */
    smallfelem(*pre_comp)[17][3] = NULL; /* per-point multiples 0..16 */
    smallfelem *tmp_smallfelems = NULL;
    felem_bytearray tmp;
    unsigned i, num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    smallfelem x_in, y_in, z_in;
    felem x_out, y_out, z_out;
    NISTP256_PRE_COMP *pre = NULL;
    const smallfelem(*g_pre_comp)[16][3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    if (ctx == NULL)
        if ((ctx = new_ctx = BN_CTX_new()) == NULL)
            return 0;
    BN_CTX_start(ctx);
    if (((x = BN_CTX_get(ctx)) == NULL) ||
        ((y = BN_CTX_get(ctx)) == NULL) ||
        ((z = BN_CTX_get(ctx)) == NULL) ||
        ((tmp_scalar = BN_CTX_get(ctx)) == NULL))
        goto err;
    if (scalar != NULL) {
        pre = EC_EX_DATA_get_data(group->extra_data,
                                  nistp256_pre_comp_dup,
                                  nistp256_pre_comp_free,
                                  nistp256_pre_comp_clear_free);
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = (const smallfelem(*)[16][3])pre->g_pre_comp;
        else
            /* try to use the standard precomputation */
            g_pre_comp = &gmul[0];
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!smallfelem_to_BN(x, g_pre_comp[0][1][0]) ||
            !smallfelem_to_BN(y, g_pre_comp[0][1][1]) ||
            !smallfelem_to_BN(z, g_pre_comp[0][1][2])) {
            ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
            goto err;
        }
        if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
                                                      generator, x, y, z,
                                                      ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points++;
    }
    if (num_points > 0) {
        if (num_points >= 3) {
            /*
             * unless we precompute multiples for just one or two points,
             * converting those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_malloc(num_points * sizeof(felem_bytearray));
        pre_comp = OPENSSL_malloc(num_points * 17 * 3 * sizeof(smallfelem));
        if (mixed)
            tmp_smallfelems =
                OPENSSL_malloc((num_points * 17 + 1) * sizeof(smallfelem));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_smallfelems == NULL))) {
            ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        memset(secrets, 0, num_points * sizeof(felem_bytearray));
        memset(pre_comp, 0, num_points * 17 * 3 * sizeof(smallfelem));
        for (i = 0; i < num_points; ++i) {
            if (i == num)
                /*
                 * we didn't have a valid precomputation, so we pick the
                 * generator
                 */
            {
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else
                /* the i^th point */
            {
                p = points[i];
                p_scalar = scalars[i];
            }
            if ((p_scalar != NULL) && (p != NULL)) {
                /* reduce scalar to 0 <= scalar < 2^256 */
                if ((BN_num_bits(p_scalar) > 256)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) {
                        ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2bin(tmp_scalar, tmp);
                } else
                    num_bytes = BN_bn2bin(p_scalar, tmp);
                /* secrets[] is little-endian, BN_bn2bin is big-endian */
                flip_endian(secrets[i], tmp, num_bytes);
                /* precompute multiples */
                if ((!BN_to_felem(x_out, &p->X)) ||
                    (!BN_to_felem(y_out, &p->Y)) ||
                    (!BN_to_felem(z_out, &p->Z)))
                    goto err;
                felem_shrink(pre_comp[i][1][0], x_out);
                felem_shrink(pre_comp[i][1][1], y_out);
                felem_shrink(pre_comp[i][1][2], z_out);
                /* fill 2P..16P: odd j = (j-1)P + P, even j = 2 * (j/2)P */
                for (j = 2; j <= 16; ++j) {
                    if (j & 1) {
                        point_add_small(pre_comp[i][j][0], pre_comp[i][j][1],
                                        pre_comp[i][j][2], pre_comp[i][1][0],
                                        pre_comp[i][1][1], pre_comp[i][1][2],
                                        pre_comp[i][j - 1][0],
                                        pre_comp[i][j - 1][1],
                                        pre_comp[i][j - 1][2]);
                    } else {
                        point_double_small(pre_comp[i][j][0],
                                           pre_comp[i][j][1],
                                           pre_comp[i][j][2],
                                           pre_comp[i][j / 2][0],
                                           pre_comp[i][j / 2][1],
                                           pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_smallfelems);
    }
    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp)) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^256 */
        if ((BN_num_bits(scalar) > 256) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, &group->order, ctx)) {
                ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2bin(tmp_scalar, tmp);
        } else
            num_bytes = BN_bn2bin(scalar, tmp);
        flip_endian(g_secret, tmp, num_bytes);
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const smallfelem(*)[17][3])pre_comp, g_pre_comp);
    } else
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const smallfelem(*)[17][3])pre_comp, NULL);
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!smallfelem_to_BN(x, x_in)) || (!smallfelem_to_BN(y, y_in)) ||
        (!smallfelem_to_BN(z, z_in))) {
        ECerr(EC_F_EC_GFP_NISTP256_POINTS_MUL, ERR_R_BN_LIB);
        goto err;
    }
    ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
 err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (secrets != NULL)
        OPENSSL_free(secrets);
    if (pre_comp != NULL)
        OPENSSL_free(pre_comp);
    if (tmp_smallfelems != NULL)
        OPENSSL_free(tmp_smallfelems);
    return ret;
}
/*
 * Build and attach generator precomputation for |group| (two 16-entry
 * tables of combinations of 2^(32k)*G), replacing any existing
 * precomputation.  If the group uses the standard P-256 generator the
 * built-in |gmul| table is copied instead of recomputing.  Returns 1 on
 * success, 0 on error.
 */
int ec_GFp_nistp256_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP256_PRE_COMP *pre = NULL;
    int i, j;
    BN_CTX *new_ctx = NULL;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    smallfelem tmp_smallfelems[32];
    felem x_tmp, y_tmp, z_tmp;

    /* throw away old precomputation */
    EC_EX_DATA_free_data(&group->extra_data, nistp256_pre_comp_dup,
                         nistp256_pre_comp_free,
                         nistp256_pre_comp_clear_free);
    if (ctx == NULL)
        if ((ctx = new_ctx = BN_CTX_new()) == NULL)
            return 0;
    BN_CTX_start(ctx);
    if (((x = BN_CTX_get(ctx)) == NULL) || ((y = BN_CTX_get(ctx)) == NULL))
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    /* build the standard generator from the built-in constants */
    BN_bin2bn(nistp256_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp256_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp256_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    /* non-standard generator: seed table 0, entry 1 with G itself */
    if ((!BN_to_felem(x_tmp, &group->generator->X)) ||
        (!BN_to_felem(y_tmp, &group->generator->Y)) ||
        (!BN_to_felem(z_tmp, &group->generator->Z)))
        goto err;
    felem_shrink(pre->g_pre_comp[0][1][0], x_tmp);
    felem_shrink(pre->g_pre_comp[0][1][1], y_tmp);
    felem_shrink(pre->g_pre_comp[0][1][2], z_tmp);
    /*
     * compute 2^64*G, 2^128*G, 2^192*G for the first table, 2^32*G, 2^96*G,
     * 2^160*G, 2^224*G for the second one
     */
    for (i = 1; i <= 8; i <<= 1) {
        /* 32 consecutive doublings: table1[i] = 2^32 * table0[i] */
        point_double_small(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                           pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
                           pre->g_pre_comp[0][i][1],
                           pre->g_pre_comp[0][i][2]);
        for (j = 0; j < 31; ++j) {
            point_double_small(pre->g_pre_comp[1][i][0],
                               pre->g_pre_comp[1][i][1],
                               pre->g_pre_comp[1][i][2],
                               pre->g_pre_comp[1][i][0],
                               pre->g_pre_comp[1][i][1],
                               pre->g_pre_comp[1][i][2]);
        }
        if (i == 8)
            break;
        /* another 32 doublings: table0[2i] = 2^32 * table1[i] */
        point_double_small(pre->g_pre_comp[0][2 * i][0],
                           pre->g_pre_comp[0][2 * i][1],
                           pre->g_pre_comp[0][2 * i][2],
                           pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
                           pre->g_pre_comp[1][i][2]);
        for (j = 0; j < 31; ++j) {
            point_double_small(pre->g_pre_comp[0][2 * i][0],
                               pre->g_pre_comp[0][2 * i][1],
                               pre->g_pre_comp[0][2 * i][2],
                               pre->g_pre_comp[0][2 * i][0],
                               pre->g_pre_comp[0][2 * i][1],
                               pre->g_pre_comp[0][2 * i][2]);
        }
    }
    for (i = 0; i < 2; i++) {
        /* g_pre_comp[i][0] is the point at infinity */
        memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
        /* the remaining multiples */
        /* 2^64*G + 2^128*G resp. 2^96*G + 2^160*G */
        point_add_small(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
                        pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
                        pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        /* 2^64*G + 2^192*G resp. 2^96*G + 2^224*G */
        point_add_small(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
                        pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
                        pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        /* 2^128*G + 2^192*G resp. 2^160*G + 2^224*G */
        point_add_small(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
                        pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
                        pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
                        pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
                        pre->g_pre_comp[i][4][2]);
        /*
         * 2^64*G + 2^128*G + 2^192*G resp. 2^96*G + 2^160*G + 2^224*G
         */
        point_add_small(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
                        pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
                        pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
                        pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
                        pre->g_pre_comp[i][2][2]);
        for (j = 1; j < 8; ++j) {
            /* odd multiples: add G resp. 2^32*G */
            point_add_small(pre->g_pre_comp[i][2 * j + 1][0],
                            pre->g_pre_comp[i][2 * j + 1][1],
                            pre->g_pre_comp[i][2 * j + 1][2],
                            pre->g_pre_comp[i][2 * j][0],
                            pre->g_pre_comp[i][2 * j][1],
                            pre->g_pre_comp[i][2 * j][2],
                            pre->g_pre_comp[i][1][0],
                            pre->g_pre_comp[i][1][1],
                            pre->g_pre_comp[i][1][2]);
        }
    }
    /* convert all 31 non-infinity entries to affine form */
    make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_smallfelems);
 done:
    if (!EC_EX_DATA_set_data(&group->extra_data, pre, nistp256_pre_comp_dup,
                             nistp256_pre_comp_free,
                             nistp256_pre_comp_clear_free))
        goto err;
    ret = 1;
    pre = NULL;                 /* ownership transferred to extra_data */
 err:
    BN_CTX_end(ctx);
    if (generator != NULL)
        EC_POINT_free(generator);
    if (new_ctx != NULL)
        BN_CTX_free(new_ctx);
    if (pre)
        nistp256_pre_comp_free(pre);
    return ret;
}
  2144. int ec_GFp_nistp256_have_precompute_mult(const EC_GROUP *group)
  2145. {
  2146. if (EC_EX_DATA_get_data(group->extra_data, nistp256_pre_comp_dup,
  2147. nistp256_pre_comp_free,
  2148. nistp256_pre_comp_clear_free)
  2149. != NULL)
  2150. return 1;
  2151. else
  2152. return 0;
  2153. }
  2154. #else
  2155. static void *dummy = &dummy;
  2156. #endif