ecp_nistp521.c
  1. /*
  2. * Copyright 2011-2021 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. */
  9. /* Copyright 2011 Google Inc.
  10. *
  11. * Licensed under the Apache License, Version 2.0 (the "License");
  12. *
  13. * you may not use this file except in compliance with the License.
  14. * You may obtain a copy of the License at
  15. *
  16. * http://www.apache.org/licenses/LICENSE-2.0
  17. *
  18. * Unless required by applicable law or agreed to in writing, software
  19. * distributed under the License is distributed on an "AS IS" BASIS,
  20. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21. * See the License for the specific language governing permissions and
  22. * limitations under the License.
  23. */
  24. /*
  25. * ECDSA low level APIs are deprecated for public use, but still ok for
  26. * internal use.
  27. */
  28. #include "internal/deprecated.h"
  29. /*
  30. * A 64-bit implementation of the NIST P-521 elliptic curve point multiplication
  31. *
  32. * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
  33. * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
  34. * work which got its smarts from Daniel J. Bernstein's work on the same.
  35. */
  36. #include <openssl/e_os2.h>
  37. #include <string.h>
  38. #include <openssl/err.h>
  39. #include "ec_local.h"
  40. #include "internal/numbers.h"
  41. #ifndef INT128_MAX
  42. # error "Your compiler doesn't appear to support 128-bit integer types"
  43. #endif
  44. typedef uint8_t u8;
  45. typedef uint64_t u64;
  46. /*
  47. * The underlying field. P521 operates over GF(2^521-1). We can serialize an
  48. * element of this field into 66 bytes where the most significant byte
  49. * contains only a single bit. We call this an felem_bytearray.
  50. */
  51. typedef u8 felem_bytearray[66];
  52. /*
  53. * These are the parameters of P521, taken from FIPS 186-3, section D.1.2.5.
  54. * These values are big-endian.
  55. */
  56. static const felem_bytearray nistp521_curve_params[5] = {
  57. {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* p */
  58. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  59. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  60. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  61. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  62. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  63. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  64. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  65. 0xff, 0xff},
  66. {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* a = -3 */
  67. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  68. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  69. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  70. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  71. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  72. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  73. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
  74. 0xff, 0xfc},
  75. {0x00, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, /* b */
  76. 0x9a, 0x1f, 0x92, 0x9a, 0x21, 0xa0, 0xb6, 0x85,
  77. 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3,
  78. 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1,
  79. 0x09, 0xe1, 0x56, 0x19, 0x39, 0x51, 0xec, 0x7e,
  80. 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1,
  81. 0xbf, 0x07, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c,
  82. 0x34, 0xf1, 0xef, 0x45, 0x1f, 0xd4, 0x6b, 0x50,
  83. 0x3f, 0x00},
  84. {0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, 0x04, /* x */
  85. 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95,
  86. 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f,
  87. 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d,
  88. 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7,
  89. 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff,
  90. 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a,
  91. 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5,
  92. 0xbd, 0x66},
  93. {0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, /* y */
  94. 0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d,
  95. 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b,
  96. 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e,
  97. 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4,
  98. 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad,
  99. 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72,
  100. 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1,
  101. 0x66, 0x50}
  102. };
  103. /*-
  104. * The representation of field elements.
  105. * ------------------------------------
  106. *
  107. * We represent field elements with nine values. These values are either 64 or
  108. * 128 bits and the field element represented is:
  109. * v[0]*2^0 + v[1]*2^58 + v[2]*2^116 + ... + v[8]*2^464 (mod p)
  110. * Each of the nine values is called a 'limb'. Since the limbs are spaced only
  111. * 58 bits apart, but are greater than 58 bits in length, the most significant
  112. * bits of each limb overlap with the least significant bits of the next.
  113. *
  114. * A field element with 64-bit limbs is an 'felem'. One with 128-bit limbs is a
  115. * 'largefelem' */
  116. #define NLIMBS 9
  117. typedef uint64_t limb;
  118. typedef limb limb_aX __attribute((__aligned__(1)));
  119. typedef limb felem[NLIMBS];
  120. typedef uint128_t largefelem[NLIMBS];
  121. static const limb bottom57bits = 0x1ffffffffffffff;
  122. static const limb bottom58bits = 0x3ffffffffffffff;
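/*
 * As a worked example of this representation: the prime p = 2^521 - 1 packs
 * into nine limbs as eight limbs of 0x3ffffffffffffff (58 bits set) followed
 * by a top limb of 0x1ffffffffffffff (57 bits set), since 8*58 + 57 = 521;
 * this is exactly the kPrime constant defined further down. A reduced value
 * v thus satisfies v = v[0] + v[1]*2^58 + ... + v[8]*2^464 with each v[i]
 * below 2^58 (below 2^57 for v[8]), and anything at or above 2^521 can be
 * folded back in using 2^521 = 1 (mod p).
 */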
  123. /*
  124. * bin66_to_felem takes a little-endian byte array and converts it into felem
  125. * form. This assumes that the CPU is little-endian.
  126. */
  127. static void bin66_to_felem(felem out, const u8 in[66])
  128. {
  129. out[0] = (*((limb *) & in[0])) & bottom58bits;
  130. out[1] = (*((limb_aX *) & in[7]) >> 2) & bottom58bits;
  131. out[2] = (*((limb_aX *) & in[14]) >> 4) & bottom58bits;
  132. out[3] = (*((limb_aX *) & in[21]) >> 6) & bottom58bits;
  133. out[4] = (*((limb_aX *) & in[29])) & bottom58bits;
  134. out[5] = (*((limb_aX *) & in[36]) >> 2) & bottom58bits;
  135. out[6] = (*((limb_aX *) & in[43]) >> 4) & bottom58bits;
  136. out[7] = (*((limb_aX *) & in[50]) >> 6) & bottom58bits;
  137. out[8] = (*((limb_aX *) & in[58])) & bottom57bits;
  138. }
  139. /*
  140. * felem_to_bin66 takes an felem and serializes into a little endian, 66 byte
  141. * array. This assumes that the CPU is little-endian.
  142. */
  143. static void felem_to_bin66(u8 out[66], const felem in)
  144. {
  145. memset(out, 0, 66);
  146. (*((limb *) & out[0])) = in[0];
  147. (*((limb_aX *) & out[7])) |= in[1] << 2;
  148. (*((limb_aX *) & out[14])) |= in[2] << 4;
  149. (*((limb_aX *) & out[21])) |= in[3] << 6;
  150. (*((limb_aX *) & out[29])) = in[4];
  151. (*((limb_aX *) & out[36])) |= in[5] << 2;
  152. (*((limb_aX *) & out[43])) |= in[6] << 4;
  153. (*((limb_aX *) & out[50])) |= in[7] << 6;
  154. (*((limb_aX *) & out[58])) = in[8];
  155. }
  156. /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
  157. static int BN_to_felem(felem out, const BIGNUM *bn)
  158. {
  159. felem_bytearray b_out;
  160. int num_bytes;
  161. if (BN_is_negative(bn)) {
  162. ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
  163. return 0;
  164. }
  165. num_bytes = BN_bn2lebinpad(bn, b_out, sizeof(b_out));
  166. if (num_bytes < 0) {
  167. ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
  168. return 0;
  169. }
  170. bin66_to_felem(out, b_out);
  171. return 1;
  172. }
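/*
 * Note that BN_to_felem only checks that the input is non-negative and fits
 * in 66 bytes; bits at or above 2^521 are masked off by bin66_to_felem rather
 * than reduced, so callers are expected to pass values already below p.
 */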
  173. /* felem_to_BN converts an felem into an OpenSSL BIGNUM */
  174. static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
  175. {
  176. felem_bytearray b_out;
  177. felem_to_bin66(b_out, in);
  178. return BN_lebin2bn(b_out, sizeof(b_out), out);
  179. }
  180. /*-
  181. * Field operations
  182. * ----------------
  183. */
  184. static void felem_one(felem out)
  185. {
  186. out[0] = 1;
  187. out[1] = 0;
  188. out[2] = 0;
  189. out[3] = 0;
  190. out[4] = 0;
  191. out[5] = 0;
  192. out[6] = 0;
  193. out[7] = 0;
  194. out[8] = 0;
  195. }
  196. static void felem_assign(felem out, const felem in)
  197. {
  198. out[0] = in[0];
  199. out[1] = in[1];
  200. out[2] = in[2];
  201. out[3] = in[3];
  202. out[4] = in[4];
  203. out[5] = in[5];
  204. out[6] = in[6];
  205. out[7] = in[7];
  206. out[8] = in[8];
  207. }
  208. /* felem_sum64 sets out = out + in. */
  209. static void felem_sum64(felem out, const felem in)
  210. {
  211. out[0] += in[0];
  212. out[1] += in[1];
  213. out[2] += in[2];
  214. out[3] += in[3];
  215. out[4] += in[4];
  216. out[5] += in[5];
  217. out[6] += in[6];
  218. out[7] += in[7];
  219. out[8] += in[8];
  220. }
  221. /* felem_scalar sets out = in * scalar */
  222. static void felem_scalar(felem out, const felem in, limb scalar)
  223. {
  224. out[0] = in[0] * scalar;
  225. out[1] = in[1] * scalar;
  226. out[2] = in[2] * scalar;
  227. out[3] = in[3] * scalar;
  228. out[4] = in[4] * scalar;
  229. out[5] = in[5] * scalar;
  230. out[6] = in[6] * scalar;
  231. out[7] = in[7] * scalar;
  232. out[8] = in[8] * scalar;
  233. }
  234. /* felem_scalar64 sets out = out * scalar */
  235. static void felem_scalar64(felem out, limb scalar)
  236. {
  237. out[0] *= scalar;
  238. out[1] *= scalar;
  239. out[2] *= scalar;
  240. out[3] *= scalar;
  241. out[4] *= scalar;
  242. out[5] *= scalar;
  243. out[6] *= scalar;
  244. out[7] *= scalar;
  245. out[8] *= scalar;
  246. }
  247. /* felem_scalar128 sets out = out * scalar */
  248. static void felem_scalar128(largefelem out, limb scalar)
  249. {
  250. out[0] *= scalar;
  251. out[1] *= scalar;
  252. out[2] *= scalar;
  253. out[3] *= scalar;
  254. out[4] *= scalar;
  255. out[5] *= scalar;
  256. out[6] *= scalar;
  257. out[7] *= scalar;
  258. out[8] *= scalar;
  259. }
  260. /*-
  261. * felem_neg sets |out| to |-in|
  262. * On entry:
  263. * in[i] < 2^59 + 2^14
  264. * On exit:
  265. * out[i] < 2^62
  266. */
  267. static void felem_neg(felem out, const felem in)
  268. {
  269. /* In order to prevent underflow, we subtract from 0 mod p. */
  270. static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
  271. static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);
  272. out[0] = two62m3 - in[0];
  273. out[1] = two62m2 - in[1];
  274. out[2] = two62m2 - in[2];
  275. out[3] = two62m2 - in[3];
  276. out[4] = two62m2 - in[4];
  277. out[5] = two62m2 - in[5];
  278. out[6] = two62m2 - in[6];
  279. out[7] = two62m2 - in[7];
  280. out[8] = two62m2 - in[8];
  281. }
  282. /*-
  283. * felem_diff64 subtracts |in| from |out|
  284. * On entry:
  285. * in[i] < 2^59 + 2^14
  286. * On exit:
  287. * out[i] < out[i] + 2^62
  288. */
  289. static void felem_diff64(felem out, const felem in)
  290. {
  291. /*
  292. * In order to prevent underflow, we add 0 mod p before subtracting.
  293. */
  294. static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
  295. static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);
  296. out[0] += two62m3 - in[0];
  297. out[1] += two62m2 - in[1];
  298. out[2] += two62m2 - in[2];
  299. out[3] += two62m2 - in[3];
  300. out[4] += two62m2 - in[4];
  301. out[5] += two62m2 - in[5];
  302. out[6] += two62m2 - in[6];
  303. out[7] += two62m2 - in[7];
  304. out[8] += two62m2 - in[8];
  305. }
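/*
 * The constant vector used by felem_neg and felem_diff64 above, i.e.
 * (2^62 - 2^5, 2^62 - 2^4, ..., 2^62 - 2^4), evaluates in the limb basis to
 * (2^62 - 2^4)*(1 + 2^58 + ... + 2^464) - 2^4 = 2^526 - 2^5 = 32*(2^521 - 1),
 * i.e. 32p, which is 0 mod p. Similarly the two63m6/two63m5 constants in
 * felem_diff_128_64 below evaluate to 64p, as its comment states. Adding such
 * a multiple of p before subtracting keeps every limb non-negative without
 * changing the value mod p.
 */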
  306. /*-
  307. * felem_diff_128_64 subtracts |in| from |out|
  308. * On entry:
  309. * in[i] < 2^62 + 2^17
  310. * On exit:
  311. * out[i] < out[i] + 2^63
  312. */
  313. static void felem_diff_128_64(largefelem out, const felem in)
  314. {
  315. /*
  316. * In order to prevent underflow, we add 64p mod p (which is equivalent
  317. * to 0 mod p) before subtracting. p is 2^521 - 1, i.e. in binary a 521
  318. * digit number with all bits set to 1. See "The representation of field
  319. * elements" comment above for a description of how limbs are used to
  320. * represent a number. 64p is represented with 8 limbs containing a number
  321. * with 58 bits set and one limb with a number with 57 bits set.
  322. */
  323. static const limb two63m6 = (((limb) 1) << 63) - (((limb) 1) << 6);
  324. static const limb two63m5 = (((limb) 1) << 63) - (((limb) 1) << 5);
  325. out[0] += two63m6 - in[0];
  326. out[1] += two63m5 - in[1];
  327. out[2] += two63m5 - in[2];
  328. out[3] += two63m5 - in[3];
  329. out[4] += two63m5 - in[4];
  330. out[5] += two63m5 - in[5];
  331. out[6] += two63m5 - in[6];
  332. out[7] += two63m5 - in[7];
  333. out[8] += two63m5 - in[8];
  334. }
  335. /*-
  336. * felem_diff_128_64 subtracts |in| from |out|
  337. * On entry:
  338. * in[i] < 2^126
  339. * On exit:
  340. * out[i] < out[i] + 2^127 - 2^69
  341. */
  342. static void felem_diff128(largefelem out, const largefelem in)
  343. {
  344. /*
  345. * In order to prevent underflow, we add 0 mod p before subtracting.
  346. */
  347. static const uint128_t two127m70 =
  348. (((uint128_t) 1) << 127) - (((uint128_t) 1) << 70);
  349. static const uint128_t two127m69 =
  350. (((uint128_t) 1) << 127) - (((uint128_t) 1) << 69);
  351. out[0] += (two127m70 - in[0]);
  352. out[1] += (two127m69 - in[1]);
  353. out[2] += (two127m69 - in[2]);
  354. out[3] += (two127m69 - in[3]);
  355. out[4] += (two127m69 - in[4]);
  356. out[5] += (two127m69 - in[5]);
  357. out[6] += (two127m69 - in[6]);
  358. out[7] += (two127m69 - in[7]);
  359. out[8] += (two127m69 - in[8]);
  360. }
  361. /*-
  362. * felem_square sets |out| = |in|^2
  363. * On entry:
  364. * in[i] < 2^62
  365. * On exit:
  366. * out[i] < 17 * max(in[i]) * max(in[i])
  367. */
  368. static void felem_square_ref(largefelem out, const felem in)
  369. {
  370. felem inx2, inx4;
  371. felem_scalar(inx2, in, 2);
  372. felem_scalar(inx4, in, 4);
  373. /*-
374. * We have many cases where we want to do
  375. * in[x] * in[y] +
  376. * in[y] * in[x]
  377. * This is obviously just
  378. * 2 * in[x] * in[y]
  379. * However, rather than do the doubling on the 128 bit result, we
  380. * double one of the inputs to the multiplication by reading from
  381. * |inx2|
  382. */
  383. out[0] = ((uint128_t) in[0]) * in[0];
  384. out[1] = ((uint128_t) in[0]) * inx2[1];
  385. out[2] = ((uint128_t) in[0]) * inx2[2] + ((uint128_t) in[1]) * in[1];
  386. out[3] = ((uint128_t) in[0]) * inx2[3] + ((uint128_t) in[1]) * inx2[2];
  387. out[4] = ((uint128_t) in[0]) * inx2[4] +
  388. ((uint128_t) in[1]) * inx2[3] + ((uint128_t) in[2]) * in[2];
  389. out[5] = ((uint128_t) in[0]) * inx2[5] +
  390. ((uint128_t) in[1]) * inx2[4] + ((uint128_t) in[2]) * inx2[3];
  391. out[6] = ((uint128_t) in[0]) * inx2[6] +
  392. ((uint128_t) in[1]) * inx2[5] +
  393. ((uint128_t) in[2]) * inx2[4] + ((uint128_t) in[3]) * in[3];
  394. out[7] = ((uint128_t) in[0]) * inx2[7] +
  395. ((uint128_t) in[1]) * inx2[6] +
  396. ((uint128_t) in[2]) * inx2[5] + ((uint128_t) in[3]) * inx2[4];
  397. out[8] = ((uint128_t) in[0]) * inx2[8] +
  398. ((uint128_t) in[1]) * inx2[7] +
  399. ((uint128_t) in[2]) * inx2[6] +
  400. ((uint128_t) in[3]) * inx2[5] + ((uint128_t) in[4]) * in[4];
  401. /*
  402. * The remaining limbs fall above 2^521, with the first falling at 2^522.
  403. * They correspond to locations one bit up from the limbs produced above
  404. * so we would have to multiply by two to align them. Again, rather than
  405. * operate on the 128-bit result, we double one of the inputs to the
  406. * multiplication. If we want to double for both this reason, and the
  407. * reason above, then we end up multiplying by four.
  408. */
  409. /* 9 */
  410. out[0] += ((uint128_t) in[1]) * inx4[8] +
  411. ((uint128_t) in[2]) * inx4[7] +
  412. ((uint128_t) in[3]) * inx4[6] + ((uint128_t) in[4]) * inx4[5];
  413. /* 10 */
  414. out[1] += ((uint128_t) in[2]) * inx4[8] +
  415. ((uint128_t) in[3]) * inx4[7] +
  416. ((uint128_t) in[4]) * inx4[6] + ((uint128_t) in[5]) * inx2[5];
  417. /* 11 */
  418. out[2] += ((uint128_t) in[3]) * inx4[8] +
  419. ((uint128_t) in[4]) * inx4[7] + ((uint128_t) in[5]) * inx4[6];
  420. /* 12 */
  421. out[3] += ((uint128_t) in[4]) * inx4[8] +
  422. ((uint128_t) in[5]) * inx4[7] + ((uint128_t) in[6]) * inx2[6];
  423. /* 13 */
  424. out[4] += ((uint128_t) in[5]) * inx4[8] + ((uint128_t) in[6]) * inx4[7];
  425. /* 14 */
  426. out[5] += ((uint128_t) in[6]) * inx4[8] + ((uint128_t) in[7]) * inx2[7];
  427. /* 15 */
  428. out[6] += ((uint128_t) in[7]) * inx4[8];
  429. /* 16 */
  430. out[7] += ((uint128_t) in[8]) * inx2[8];
  431. }
  432. /*-
  433. * felem_mul sets |out| = |in1| * |in2|
  434. * On entry:
  435. * in1[i] < 2^64
  436. * in2[i] < 2^63
  437. * On exit:
  438. * out[i] < 17 * max(in1[i]) * max(in2[i])
  439. */
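/*
 * The factor of 17 in the bound comes from the widest column: out[0] collects
 * one aligned product, in1[0]*in2[0], plus eight products whose natural
 * position is 2^(58*9) = 2^522; since 2^522 = 2 (mod p), each of those is
 * doubled (via in2x2) when folded back onto 2^0, giving 1 + 2*8 = 17 products
 * in total. Every other limb collects fewer terms. An analogous count gives
 * the 17 in felem_square's bound above.
 */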
  440. static void felem_mul_ref(largefelem out, const felem in1, const felem in2)
  441. {
  442. felem in2x2;
  443. felem_scalar(in2x2, in2, 2);
  444. out[0] = ((uint128_t) in1[0]) * in2[0];
  445. out[1] = ((uint128_t) in1[0]) * in2[1] +
  446. ((uint128_t) in1[1]) * in2[0];
  447. out[2] = ((uint128_t) in1[0]) * in2[2] +
  448. ((uint128_t) in1[1]) * in2[1] +
  449. ((uint128_t) in1[2]) * in2[0];
  450. out[3] = ((uint128_t) in1[0]) * in2[3] +
  451. ((uint128_t) in1[1]) * in2[2] +
  452. ((uint128_t) in1[2]) * in2[1] +
  453. ((uint128_t) in1[3]) * in2[0];
  454. out[4] = ((uint128_t) in1[0]) * in2[4] +
  455. ((uint128_t) in1[1]) * in2[3] +
  456. ((uint128_t) in1[2]) * in2[2] +
  457. ((uint128_t) in1[3]) * in2[1] +
  458. ((uint128_t) in1[4]) * in2[0];
  459. out[5] = ((uint128_t) in1[0]) * in2[5] +
  460. ((uint128_t) in1[1]) * in2[4] +
  461. ((uint128_t) in1[2]) * in2[3] +
  462. ((uint128_t) in1[3]) * in2[2] +
  463. ((uint128_t) in1[4]) * in2[1] +
  464. ((uint128_t) in1[5]) * in2[0];
  465. out[6] = ((uint128_t) in1[0]) * in2[6] +
  466. ((uint128_t) in1[1]) * in2[5] +
  467. ((uint128_t) in1[2]) * in2[4] +
  468. ((uint128_t) in1[3]) * in2[3] +
  469. ((uint128_t) in1[4]) * in2[2] +
  470. ((uint128_t) in1[5]) * in2[1] +
  471. ((uint128_t) in1[6]) * in2[0];
  472. out[7] = ((uint128_t) in1[0]) * in2[7] +
  473. ((uint128_t) in1[1]) * in2[6] +
  474. ((uint128_t) in1[2]) * in2[5] +
  475. ((uint128_t) in1[3]) * in2[4] +
  476. ((uint128_t) in1[4]) * in2[3] +
  477. ((uint128_t) in1[5]) * in2[2] +
  478. ((uint128_t) in1[6]) * in2[1] +
  479. ((uint128_t) in1[7]) * in2[0];
  480. out[8] = ((uint128_t) in1[0]) * in2[8] +
  481. ((uint128_t) in1[1]) * in2[7] +
  482. ((uint128_t) in1[2]) * in2[6] +
  483. ((uint128_t) in1[3]) * in2[5] +
  484. ((uint128_t) in1[4]) * in2[4] +
  485. ((uint128_t) in1[5]) * in2[3] +
  486. ((uint128_t) in1[6]) * in2[2] +
  487. ((uint128_t) in1[7]) * in2[1] +
  488. ((uint128_t) in1[8]) * in2[0];
  489. /* See comment in felem_square about the use of in2x2 here */
  490. out[0] += ((uint128_t) in1[1]) * in2x2[8] +
  491. ((uint128_t) in1[2]) * in2x2[7] +
  492. ((uint128_t) in1[3]) * in2x2[6] +
  493. ((uint128_t) in1[4]) * in2x2[5] +
  494. ((uint128_t) in1[5]) * in2x2[4] +
  495. ((uint128_t) in1[6]) * in2x2[3] +
  496. ((uint128_t) in1[7]) * in2x2[2] +
  497. ((uint128_t) in1[8]) * in2x2[1];
  498. out[1] += ((uint128_t) in1[2]) * in2x2[8] +
  499. ((uint128_t) in1[3]) * in2x2[7] +
  500. ((uint128_t) in1[4]) * in2x2[6] +
  501. ((uint128_t) in1[5]) * in2x2[5] +
  502. ((uint128_t) in1[6]) * in2x2[4] +
  503. ((uint128_t) in1[7]) * in2x2[3] +
  504. ((uint128_t) in1[8]) * in2x2[2];
  505. out[2] += ((uint128_t) in1[3]) * in2x2[8] +
  506. ((uint128_t) in1[4]) * in2x2[7] +
  507. ((uint128_t) in1[5]) * in2x2[6] +
  508. ((uint128_t) in1[6]) * in2x2[5] +
  509. ((uint128_t) in1[7]) * in2x2[4] +
  510. ((uint128_t) in1[8]) * in2x2[3];
  511. out[3] += ((uint128_t) in1[4]) * in2x2[8] +
  512. ((uint128_t) in1[5]) * in2x2[7] +
  513. ((uint128_t) in1[6]) * in2x2[6] +
  514. ((uint128_t) in1[7]) * in2x2[5] +
  515. ((uint128_t) in1[8]) * in2x2[4];
  516. out[4] += ((uint128_t) in1[5]) * in2x2[8] +
  517. ((uint128_t) in1[6]) * in2x2[7] +
  518. ((uint128_t) in1[7]) * in2x2[6] +
  519. ((uint128_t) in1[8]) * in2x2[5];
  520. out[5] += ((uint128_t) in1[6]) * in2x2[8] +
  521. ((uint128_t) in1[7]) * in2x2[7] +
  522. ((uint128_t) in1[8]) * in2x2[6];
  523. out[6] += ((uint128_t) in1[7]) * in2x2[8] +
  524. ((uint128_t) in1[8]) * in2x2[7];
  525. out[7] += ((uint128_t) in1[8]) * in2x2[8];
  526. }
  527. static const limb bottom52bits = 0xfffffffffffff;
  528. /*-
  529. * felem_reduce converts a largefelem to an felem.
  530. * On entry:
  531. * in[i] < 2^128
  532. * On exit:
  533. * out[i] < 2^59 + 2^14
  534. */
  535. static void felem_reduce(felem out, const largefelem in)
  536. {
  537. u64 overflow1, overflow2;
  538. out[0] = ((limb) in[0]) & bottom58bits;
  539. out[1] = ((limb) in[1]) & bottom58bits;
  540. out[2] = ((limb) in[2]) & bottom58bits;
  541. out[3] = ((limb) in[3]) & bottom58bits;
  542. out[4] = ((limb) in[4]) & bottom58bits;
  543. out[5] = ((limb) in[5]) & bottom58bits;
  544. out[6] = ((limb) in[6]) & bottom58bits;
  545. out[7] = ((limb) in[7]) & bottom58bits;
  546. out[8] = ((limb) in[8]) & bottom58bits;
  547. /* out[i] < 2^58 */
  548. out[1] += ((limb) in[0]) >> 58;
  549. out[1] += (((limb) (in[0] >> 64)) & bottom52bits) << 6;
  550. /*-
  551. * out[1] < 2^58 + 2^6 + 2^58
  552. * = 2^59 + 2^6
  553. */
  554. out[2] += ((limb) (in[0] >> 64)) >> 52;
  555. out[2] += ((limb) in[1]) >> 58;
  556. out[2] += (((limb) (in[1] >> 64)) & bottom52bits) << 6;
  557. out[3] += ((limb) (in[1] >> 64)) >> 52;
  558. out[3] += ((limb) in[2]) >> 58;
  559. out[3] += (((limb) (in[2] >> 64)) & bottom52bits) << 6;
  560. out[4] += ((limb) (in[2] >> 64)) >> 52;
  561. out[4] += ((limb) in[3]) >> 58;
  562. out[4] += (((limb) (in[3] >> 64)) & bottom52bits) << 6;
  563. out[5] += ((limb) (in[3] >> 64)) >> 52;
  564. out[5] += ((limb) in[4]) >> 58;
  565. out[5] += (((limb) (in[4] >> 64)) & bottom52bits) << 6;
  566. out[6] += ((limb) (in[4] >> 64)) >> 52;
  567. out[6] += ((limb) in[5]) >> 58;
  568. out[6] += (((limb) (in[5] >> 64)) & bottom52bits) << 6;
  569. out[7] += ((limb) (in[5] >> 64)) >> 52;
  570. out[7] += ((limb) in[6]) >> 58;
  571. out[7] += (((limb) (in[6] >> 64)) & bottom52bits) << 6;
  572. out[8] += ((limb) (in[6] >> 64)) >> 52;
  573. out[8] += ((limb) in[7]) >> 58;
  574. out[8] += (((limb) (in[7] >> 64)) & bottom52bits) << 6;
  575. /*-
  576. * out[x > 1] < 2^58 + 2^6 + 2^58 + 2^12
  577. * < 2^59 + 2^13
  578. */
  579. overflow1 = ((limb) (in[7] >> 64)) >> 52;
  580. overflow1 += ((limb) in[8]) >> 58;
  581. overflow1 += (((limb) (in[8] >> 64)) & bottom52bits) << 6;
  582. overflow2 = ((limb) (in[8] >> 64)) >> 52;
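/*
 * overflow1 and overflow2 sit at positions 2^522 and 2^580 of the full
 * product; since 2^521 = 1 (mod p) they re-enter at 2^1 and 2^59, i.e.
 * doubled relative to out[0] and out[1], hence the shifts by one below.
 */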
  583. overflow1 <<= 1; /* overflow1 < 2^13 + 2^7 + 2^59 */
  584. overflow2 <<= 1; /* overflow2 < 2^13 */
  585. out[0] += overflow1; /* out[0] < 2^60 */
  586. out[1] += overflow2; /* out[1] < 2^59 + 2^6 + 2^13 */
  587. out[1] += out[0] >> 58;
  588. out[0] &= bottom58bits;
  589. /*-
  590. * out[0] < 2^58
  591. * out[1] < 2^59 + 2^6 + 2^13 + 2^2
  592. * < 2^59 + 2^14
  593. */
  594. }
  595. #if defined(ECP_NISTP521_ASM)
  596. void felem_square_wrapper(largefelem out, const felem in);
  597. void felem_mul_wrapper(largefelem out, const felem in1, const felem in2);
  598. static void (*felem_square_p)(largefelem out, const felem in) =
  599. felem_square_wrapper;
  600. static void (*felem_mul_p)(largefelem out, const felem in1, const felem in2) =
  601. felem_mul_wrapper;
  602. void p521_felem_square(largefelem out, const felem in);
  603. void p521_felem_mul(largefelem out, const felem in1, const felem in2);
  604. # if defined(_ARCH_PPC64)
  605. # include "ppc_arch.h"
  606. # endif
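/*
 * felem_square_p and felem_mul_p start out pointing at the wrapper functions;
 * the first call runs felem_select() to pick either the assembler
 * implementations (p521_felem_square/p521_felem_mul, when the CPU supports
 * them) or the C reference code, after which the pointers go straight to the
 * chosen implementation.
 */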
  607. void felem_select(void)
  608. {
  609. # if defined(_ARCH_PPC64)
  610. if ((OPENSSL_ppccap_P & PPC_MADD300) && (OPENSSL_ppccap_P & PPC_ALTIVEC)) {
  611. felem_square_p = p521_felem_square;
  612. felem_mul_p = p521_felem_mul;
  613. return;
  614. }
  615. # endif
  616. /* Default */
  617. felem_square_p = felem_square_ref;
  618. felem_mul_p = felem_mul_ref;
  619. }
  620. void felem_square_wrapper(largefelem out, const felem in)
  621. {
  622. felem_select();
  623. felem_square_p(out, in);
  624. }
  625. void felem_mul_wrapper(largefelem out, const felem in1, const felem in2)
  626. {
  627. felem_select();
  628. felem_mul_p(out, in1, in2);
  629. }
  630. # define felem_square felem_square_p
  631. # define felem_mul felem_mul_p
  632. #else
  633. # define felem_square felem_square_ref
  634. # define felem_mul felem_mul_ref
  635. #endif
  636. static void felem_square_reduce(felem out, const felem in)
  637. {
  638. largefelem tmp;
  639. felem_square(tmp, in);
  640. felem_reduce(out, tmp);
  641. }
  642. static void felem_mul_reduce(felem out, const felem in1, const felem in2)
  643. {
  644. largefelem tmp;
  645. felem_mul(tmp, in1, in2);
  646. felem_reduce(out, tmp);
  647. }
  648. /*-
  649. * felem_inv calculates |out| = |in|^{-1}
  650. *
  651. * Based on Fermat's Little Theorem:
  652. * a^p = a (mod p)
  653. * a^{p-1} = 1 (mod p)
  654. * a^{p-2} = a^{-1} (mod p)
  655. */
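/*
 * Since p = 2^521 - 1, the exponent used is p - 2 = 2^521 - 3. The chain
 * below builds it out of runs of ones: the comment after each step records
 * the current exponent in the form 2^a - 2^b, i.e. a run of (a - b) one bits
 * shifted left by b.
 */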
  656. static void felem_inv(felem out, const felem in)
  657. {
  658. felem ftmp, ftmp2, ftmp3, ftmp4;
  659. largefelem tmp;
  660. unsigned i;
  661. felem_square(tmp, in);
  662. felem_reduce(ftmp, tmp); /* 2^1 */
  663. felem_mul(tmp, in, ftmp);
  664. felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
  665. felem_assign(ftmp2, ftmp);
  666. felem_square(tmp, ftmp);
  667. felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
  668. felem_mul(tmp, in, ftmp);
  669. felem_reduce(ftmp, tmp); /* 2^3 - 2^0 */
  670. felem_square(tmp, ftmp);
  671. felem_reduce(ftmp, tmp); /* 2^4 - 2^1 */
  672. felem_square(tmp, ftmp2);
  673. felem_reduce(ftmp3, tmp); /* 2^3 - 2^1 */
  674. felem_square(tmp, ftmp3);
  675. felem_reduce(ftmp3, tmp); /* 2^4 - 2^2 */
  676. felem_mul(tmp, ftmp3, ftmp2);
  677. felem_reduce(ftmp3, tmp); /* 2^4 - 2^0 */
  678. felem_assign(ftmp2, ftmp3);
  679. felem_square(tmp, ftmp3);
  680. felem_reduce(ftmp3, tmp); /* 2^5 - 2^1 */
  681. felem_square(tmp, ftmp3);
  682. felem_reduce(ftmp3, tmp); /* 2^6 - 2^2 */
  683. felem_square(tmp, ftmp3);
  684. felem_reduce(ftmp3, tmp); /* 2^7 - 2^3 */
  685. felem_square(tmp, ftmp3);
  686. felem_reduce(ftmp3, tmp); /* 2^8 - 2^4 */
  687. felem_assign(ftmp4, ftmp3);
  688. felem_mul(tmp, ftmp3, ftmp);
  689. felem_reduce(ftmp4, tmp); /* 2^8 - 2^1 */
  690. felem_square(tmp, ftmp4);
  691. felem_reduce(ftmp4, tmp); /* 2^9 - 2^2 */
  692. felem_mul(tmp, ftmp3, ftmp2);
  693. felem_reduce(ftmp3, tmp); /* 2^8 - 2^0 */
  694. felem_assign(ftmp2, ftmp3);
  695. for (i = 0; i < 8; i++) {
  696. felem_square(tmp, ftmp3);
  697. felem_reduce(ftmp3, tmp); /* 2^16 - 2^8 */
  698. }
  699. felem_mul(tmp, ftmp3, ftmp2);
  700. felem_reduce(ftmp3, tmp); /* 2^16 - 2^0 */
  701. felem_assign(ftmp2, ftmp3);
  702. for (i = 0; i < 16; i++) {
  703. felem_square(tmp, ftmp3);
  704. felem_reduce(ftmp3, tmp); /* 2^32 - 2^16 */
  705. }
  706. felem_mul(tmp, ftmp3, ftmp2);
  707. felem_reduce(ftmp3, tmp); /* 2^32 - 2^0 */
  708. felem_assign(ftmp2, ftmp3);
  709. for (i = 0; i < 32; i++) {
  710. felem_square(tmp, ftmp3);
  711. felem_reduce(ftmp3, tmp); /* 2^64 - 2^32 */
  712. }
  713. felem_mul(tmp, ftmp3, ftmp2);
  714. felem_reduce(ftmp3, tmp); /* 2^64 - 2^0 */
  715. felem_assign(ftmp2, ftmp3);
  716. for (i = 0; i < 64; i++) {
  717. felem_square(tmp, ftmp3);
  718. felem_reduce(ftmp3, tmp); /* 2^128 - 2^64 */
  719. }
  720. felem_mul(tmp, ftmp3, ftmp2);
  721. felem_reduce(ftmp3, tmp); /* 2^128 - 2^0 */
  722. felem_assign(ftmp2, ftmp3);
  723. for (i = 0; i < 128; i++) {
  724. felem_square(tmp, ftmp3);
  725. felem_reduce(ftmp3, tmp); /* 2^256 - 2^128 */
  726. }
  727. felem_mul(tmp, ftmp3, ftmp2);
  728. felem_reduce(ftmp3, tmp); /* 2^256 - 2^0 */
  729. felem_assign(ftmp2, ftmp3);
  730. for (i = 0; i < 256; i++) {
  731. felem_square(tmp, ftmp3);
  732. felem_reduce(ftmp3, tmp); /* 2^512 - 2^256 */
  733. }
  734. felem_mul(tmp, ftmp3, ftmp2);
  735. felem_reduce(ftmp3, tmp); /* 2^512 - 2^0 */
  736. for (i = 0; i < 9; i++) {
  737. felem_square(tmp, ftmp3);
  738. felem_reduce(ftmp3, tmp); /* 2^521 - 2^9 */
  739. }
  740. felem_mul(tmp, ftmp3, ftmp4);
741. felem_reduce(ftmp3, tmp); /* 2^521 - 2^2 */
742. felem_mul(tmp, ftmp3, in);
743. felem_reduce(out, tmp); /* 2^521 - 3 */
  744. }
  745. /* This is 2^521-1, expressed as an felem */
  746. static const felem kPrime = {
  747. 0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
  748. 0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
  749. 0x03ffffffffffffff, 0x03ffffffffffffff, 0x01ffffffffffffff
  750. };
  751. /*-
  752. * felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
  753. * otherwise.
  754. * On entry:
  755. * in[i] < 2^59 + 2^14
  756. */
  757. static limb felem_is_zero(const felem in)
  758. {
  759. felem ftmp;
  760. limb is_zero, is_p;
  761. felem_assign(ftmp, in);
  762. ftmp[0] += ftmp[8] >> 57;
  763. ftmp[8] &= bottom57bits;
  764. /* ftmp[8] < 2^57 */
  765. ftmp[1] += ftmp[0] >> 58;
  766. ftmp[0] &= bottom58bits;
  767. ftmp[2] += ftmp[1] >> 58;
  768. ftmp[1] &= bottom58bits;
  769. ftmp[3] += ftmp[2] >> 58;
  770. ftmp[2] &= bottom58bits;
  771. ftmp[4] += ftmp[3] >> 58;
  772. ftmp[3] &= bottom58bits;
  773. ftmp[5] += ftmp[4] >> 58;
  774. ftmp[4] &= bottom58bits;
  775. ftmp[6] += ftmp[5] >> 58;
  776. ftmp[5] &= bottom58bits;
  777. ftmp[7] += ftmp[6] >> 58;
  778. ftmp[6] &= bottom58bits;
  779. ftmp[8] += ftmp[7] >> 58;
  780. ftmp[7] &= bottom58bits;
  781. /* ftmp[8] < 2^57 + 4 */
  782. /*
  783. * The ninth limb of 2*(2^521-1) is 0x03ffffffffffffff, which is greater
784. * than our bound for ftmp[8]. Therefore we only have to check whether
785. * the value is zero or 2^521-1.
  786. */
  787. is_zero = 0;
  788. is_zero |= ftmp[0];
  789. is_zero |= ftmp[1];
  790. is_zero |= ftmp[2];
  791. is_zero |= ftmp[3];
  792. is_zero |= ftmp[4];
  793. is_zero |= ftmp[5];
  794. is_zero |= ftmp[6];
  795. is_zero |= ftmp[7];
  796. is_zero |= ftmp[8];
  797. is_zero--;
  798. /*
  799. * We know that ftmp[i] < 2^63, therefore the only way that the top bit
  800. * can be set is if is_zero was 0 before the decrement.
  801. */
  802. is_zero = 0 - (is_zero >> 63);
  803. is_p = ftmp[0] ^ kPrime[0];
  804. is_p |= ftmp[1] ^ kPrime[1];
  805. is_p |= ftmp[2] ^ kPrime[2];
  806. is_p |= ftmp[3] ^ kPrime[3];
  807. is_p |= ftmp[4] ^ kPrime[4];
  808. is_p |= ftmp[5] ^ kPrime[5];
  809. is_p |= ftmp[6] ^ kPrime[6];
  810. is_p |= ftmp[7] ^ kPrime[7];
  811. is_p |= ftmp[8] ^ kPrime[8];
  812. is_p--;
  813. is_p = 0 - (is_p >> 63);
  814. is_zero |= is_p;
  815. return is_zero;
  816. }
  817. static int felem_is_zero_int(const void *in)
  818. {
  819. return (int)(felem_is_zero(in) & ((limb) 1));
  820. }
  821. /*-
  822. * felem_contract converts |in| to its unique, minimal representation.
  823. * On entry:
  824. * in[i] < 2^59 + 2^14
  825. */
  826. static void felem_contract(felem out, const felem in)
  827. {
  828. limb is_p, is_greater, sign;
  829. static const limb two58 = ((limb) 1) << 58;
  830. felem_assign(out, in);
  831. out[0] += out[8] >> 57;
  832. out[8] &= bottom57bits;
  833. /* out[8] < 2^57 */
  834. out[1] += out[0] >> 58;
  835. out[0] &= bottom58bits;
  836. out[2] += out[1] >> 58;
  837. out[1] &= bottom58bits;
  838. out[3] += out[2] >> 58;
  839. out[2] &= bottom58bits;
  840. out[4] += out[3] >> 58;
  841. out[3] &= bottom58bits;
  842. out[5] += out[4] >> 58;
  843. out[4] &= bottom58bits;
  844. out[6] += out[5] >> 58;
  845. out[5] &= bottom58bits;
  846. out[7] += out[6] >> 58;
  847. out[6] &= bottom58bits;
  848. out[8] += out[7] >> 58;
  849. out[7] &= bottom58bits;
  850. /* out[8] < 2^57 + 4 */
  851. /*
852. * If the value is greater than 2^521-1 then we have to subtract 2^521-1
853. * from it. See the comments in felem_is_zero regarding why we don't test for
  854. * other multiples of the prime.
  855. */
  856. /*
  857. * First, if |out| is equal to 2^521-1, we subtract it out to get zero.
  858. */
  859. is_p = out[0] ^ kPrime[0];
  860. is_p |= out[1] ^ kPrime[1];
  861. is_p |= out[2] ^ kPrime[2];
  862. is_p |= out[3] ^ kPrime[3];
  863. is_p |= out[4] ^ kPrime[4];
  864. is_p |= out[5] ^ kPrime[5];
  865. is_p |= out[6] ^ kPrime[6];
  866. is_p |= out[7] ^ kPrime[7];
  867. is_p |= out[8] ^ kPrime[8];
  868. is_p--;
  869. is_p &= is_p << 32;
  870. is_p &= is_p << 16;
  871. is_p &= is_p << 8;
  872. is_p &= is_p << 4;
  873. is_p &= is_p << 2;
  874. is_p &= is_p << 1;
  875. is_p = 0 - (is_p >> 63);
  876. is_p = ~is_p;
  877. /* is_p is 0 iff |out| == 2^521-1 and all ones otherwise */
  878. out[0] &= is_p;
  879. out[1] &= is_p;
  880. out[2] &= is_p;
  881. out[3] &= is_p;
  882. out[4] &= is_p;
  883. out[5] &= is_p;
  884. out[6] &= is_p;
  885. out[7] &= is_p;
  886. out[8] &= is_p;
  887. /*
  888. * In order to test that |out| >= 2^521-1 we need only test if out[8] >>
  889. * 57 is greater than zero as (2^521-1) + x >= 2^522
  890. */
  891. is_greater = out[8] >> 57;
  892. is_greater |= is_greater << 32;
  893. is_greater |= is_greater << 16;
  894. is_greater |= is_greater << 8;
  895. is_greater |= is_greater << 4;
  896. is_greater |= is_greater << 2;
  897. is_greater |= is_greater << 1;
  898. is_greater = 0 - (is_greater >> 63);
  899. out[0] -= kPrime[0] & is_greater;
  900. out[1] -= kPrime[1] & is_greater;
  901. out[2] -= kPrime[2] & is_greater;
  902. out[3] -= kPrime[3] & is_greater;
  903. out[4] -= kPrime[4] & is_greater;
  904. out[5] -= kPrime[5] & is_greater;
  905. out[6] -= kPrime[6] & is_greater;
  906. out[7] -= kPrime[7] & is_greater;
  907. out[8] -= kPrime[8] & is_greater;
  908. /* Eliminate negative coefficients */
  909. sign = -(out[0] >> 63);
  910. out[0] += (two58 & sign);
  911. out[1] -= (1 & sign);
  912. sign = -(out[1] >> 63);
  913. out[1] += (two58 & sign);
  914. out[2] -= (1 & sign);
  915. sign = -(out[2] >> 63);
  916. out[2] += (two58 & sign);
  917. out[3] -= (1 & sign);
  918. sign = -(out[3] >> 63);
  919. out[3] += (two58 & sign);
  920. out[4] -= (1 & sign);
  921. sign = -(out[4] >> 63);
  922. out[4] += (two58 & sign);
  923. out[5] -= (1 & sign);
  924. sign = -(out[0] >> 63);
  925. out[5] += (two58 & sign);
  926. out[6] -= (1 & sign);
  927. sign = -(out[6] >> 63);
  928. out[6] += (two58 & sign);
  929. out[7] -= (1 & sign);
  930. sign = -(out[7] >> 63);
  931. out[7] += (two58 & sign);
  932. out[8] -= (1 & sign);
  933. sign = -(out[5] >> 63);
  934. out[5] += (two58 & sign);
  935. out[6] -= (1 & sign);
  936. sign = -(out[6] >> 63);
  937. out[6] += (two58 & sign);
  938. out[7] -= (1 & sign);
  939. sign = -(out[7] >> 63);
  940. out[7] += (two58 & sign);
  941. out[8] -= (1 & sign);
  942. }
  943. /*-
  944. * Group operations
  945. * ----------------
  946. *
  947. * Building on top of the field operations we have the operations on the
  948. * elliptic curve group itself. Points on the curve are represented in Jacobian
  949. * coordinates */
  950. /*-
  951. * point_double calculates 2*(x_in, y_in, z_in)
  952. *
  953. * The method is taken from:
  954. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  955. *
956. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
957. * while x_out == y_in is not (maybe this works, but it's not tested). */
  958. static void
  959. point_double(felem x_out, felem y_out, felem z_out,
  960. const felem x_in, const felem y_in, const felem z_in)
  961. {
  962. largefelem tmp, tmp2;
  963. felem delta, gamma, beta, alpha, ftmp, ftmp2;
  964. felem_assign(ftmp, x_in);
  965. felem_assign(ftmp2, x_in);
  966. /* delta = z^2 */
  967. felem_square(tmp, z_in);
  968. felem_reduce(delta, tmp); /* delta[i] < 2^59 + 2^14 */
  969. /* gamma = y^2 */
  970. felem_square(tmp, y_in);
  971. felem_reduce(gamma, tmp); /* gamma[i] < 2^59 + 2^14 */
  972. /* beta = x*gamma */
  973. felem_mul(tmp, x_in, gamma);
  974. felem_reduce(beta, tmp); /* beta[i] < 2^59 + 2^14 */
  975. /* alpha = 3*(x-delta)*(x+delta) */
  976. felem_diff64(ftmp, delta);
  977. /* ftmp[i] < 2^61 */
  978. felem_sum64(ftmp2, delta);
  979. /* ftmp2[i] < 2^60 + 2^15 */
  980. felem_scalar64(ftmp2, 3);
  981. /* ftmp2[i] < 3*2^60 + 3*2^15 */
  982. felem_mul(tmp, ftmp, ftmp2);
  983. /*-
  984. * tmp[i] < 17(3*2^121 + 3*2^76)
985. * = 51*2^121 + 51*2^76
  986. * < 64*2^121 + 64*2^76
  987. * = 2^127 + 2^82
  988. * < 2^128
  989. */
  990. felem_reduce(alpha, tmp);
  991. /* x' = alpha^2 - 8*beta */
  992. felem_square(tmp, alpha);
  993. /*
  994. * tmp[i] < 17*2^120 < 2^125
  995. */
  996. felem_assign(ftmp, beta);
  997. felem_scalar64(ftmp, 8);
  998. /* ftmp[i] < 2^62 + 2^17 */
  999. felem_diff_128_64(tmp, ftmp);
  1000. /* tmp[i] < 2^125 + 2^63 + 2^62 + 2^17 */
  1001. felem_reduce(x_out, tmp);
  1002. /* z' = (y + z)^2 - gamma - delta */
  1003. felem_sum64(delta, gamma);
  1004. /* delta[i] < 2^60 + 2^15 */
  1005. felem_assign(ftmp, y_in);
  1006. felem_sum64(ftmp, z_in);
  1007. /* ftmp[i] < 2^60 + 2^15 */
  1008. felem_square(tmp, ftmp);
  1009. /*
  1010. * tmp[i] < 17(2^122) < 2^127
  1011. */
  1012. felem_diff_128_64(tmp, delta);
  1013. /* tmp[i] < 2^127 + 2^63 */
  1014. felem_reduce(z_out, tmp);
  1015. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  1016. felem_scalar64(beta, 4);
  1017. /* beta[i] < 2^61 + 2^16 */
  1018. felem_diff64(beta, x_out);
  1019. /* beta[i] < 2^61 + 2^60 + 2^16 */
  1020. felem_mul(tmp, alpha, beta);
  1021. /*-
  1022. * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
  1023. * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
  1024. * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
  1025. * < 2^128
  1026. */
  1027. felem_square(tmp2, gamma);
  1028. /*-
  1029. * tmp2[i] < 17*(2^59 + 2^14)^2
  1030. * = 17*(2^118 + 2^74 + 2^28)
  1031. */
  1032. felem_scalar128(tmp2, 8);
  1033. /*-
  1034. * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
  1035. * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
  1036. * < 2^126
  1037. */
  1038. felem_diff128(tmp, tmp2);
  1039. /*-
  1040. * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
  1041. * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
  1042. * 2^74 + 2^69 + 2^34 + 2^30
  1043. * < 2^128
  1044. */
  1045. felem_reduce(y_out, tmp);
  1046. }
  1047. /* copy_conditional copies in to out iff mask is all ones. */
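/*
 * mask must be either 0 or all ones (as produced by felem_is_zero); the
 * XOR-and-mask construction then either copies every limb or leaves out
 * untouched, without any data-dependent branch.
 */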
  1048. static void copy_conditional(felem out, const felem in, limb mask)
  1049. {
  1050. unsigned i;
  1051. for (i = 0; i < NLIMBS; ++i) {
  1052. const limb tmp = mask & (in[i] ^ out[i]);
  1053. out[i] ^= tmp;
  1054. }
  1055. }
  1056. /*-
  1057. * point_add calculates (x1, y1, z1) + (x2, y2, z2)
  1058. *
  1059. * The method is taken from
  1060. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
  1061. * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
  1062. *
  1063. * This function includes a branch for checking whether the two input points
  1064. * are equal (while not equal to the point at infinity). See comment below
  1065. * on constant-time.
  1066. */
  1067. static void point_add(felem x3, felem y3, felem z3,
  1068. const felem x1, const felem y1, const felem z1,
  1069. const int mixed, const felem x2, const felem y2,
  1070. const felem z2)
  1071. {
  1072. felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
  1073. largefelem tmp, tmp2;
  1074. limb x_equal, y_equal, z1_is_zero, z2_is_zero;
  1075. limb points_equal;
  1076. z1_is_zero = felem_is_zero(z1);
  1077. z2_is_zero = felem_is_zero(z2);
  1078. /* ftmp = z1z1 = z1**2 */
  1079. felem_square(tmp, z1);
  1080. felem_reduce(ftmp, tmp);
  1081. if (!mixed) {
  1082. /* ftmp2 = z2z2 = z2**2 */
  1083. felem_square(tmp, z2);
  1084. felem_reduce(ftmp2, tmp);
  1085. /* u1 = ftmp3 = x1*z2z2 */
  1086. felem_mul(tmp, x1, ftmp2);
  1087. felem_reduce(ftmp3, tmp);
  1088. /* ftmp5 = z1 + z2 */
  1089. felem_assign(ftmp5, z1);
  1090. felem_sum64(ftmp5, z2);
  1091. /* ftmp5[i] < 2^61 */
  1092. /* ftmp5 = (z1 + z2)**2 - z1z1 - z2z2 = 2*z1z2 */
  1093. felem_square(tmp, ftmp5);
  1094. /* tmp[i] < 17*2^122 */
  1095. felem_diff_128_64(tmp, ftmp);
  1096. /* tmp[i] < 17*2^122 + 2^63 */
  1097. felem_diff_128_64(tmp, ftmp2);
  1098. /* tmp[i] < 17*2^122 + 2^64 */
  1099. felem_reduce(ftmp5, tmp);
  1100. /* ftmp2 = z2 * z2z2 */
  1101. felem_mul(tmp, ftmp2, z2);
  1102. felem_reduce(ftmp2, tmp);
  1103. /* s1 = ftmp6 = y1 * z2**3 */
  1104. felem_mul(tmp, y1, ftmp2);
  1105. felem_reduce(ftmp6, tmp);
  1106. } else {
  1107. /*
  1108. * We'll assume z2 = 1 (special case z2 = 0 is handled later)
  1109. */
  1110. /* u1 = ftmp3 = x1*z2z2 */
  1111. felem_assign(ftmp3, x1);
  1112. /* ftmp5 = 2*z1z2 */
  1113. felem_scalar(ftmp5, z1, 2);
  1114. /* s1 = ftmp6 = y1 * z2**3 */
  1115. felem_assign(ftmp6, y1);
  1116. }
  1117. /* u2 = x2*z1z1 */
  1118. felem_mul(tmp, x2, ftmp);
  1119. /* tmp[i] < 17*2^120 */
  1120. /* h = ftmp4 = u2 - u1 */
  1121. felem_diff_128_64(tmp, ftmp3);
  1122. /* tmp[i] < 17*2^120 + 2^63 */
  1123. felem_reduce(ftmp4, tmp);
  1124. x_equal = felem_is_zero(ftmp4);
  1125. /* z_out = ftmp5 * h */
  1126. felem_mul(tmp, ftmp5, ftmp4);
  1127. felem_reduce(z_out, tmp);
  1128. /* ftmp = z1 * z1z1 */
  1129. felem_mul(tmp, ftmp, z1);
  1130. felem_reduce(ftmp, tmp);
  1131. /* s2 = tmp = y2 * z1**3 */
  1132. felem_mul(tmp, y2, ftmp);
  1133. /* tmp[i] < 17*2^120 */
  1134. /* r = ftmp5 = (s2 - s1)*2 */
  1135. felem_diff_128_64(tmp, ftmp6);
  1136. /* tmp[i] < 17*2^120 + 2^63 */
  1137. felem_reduce(ftmp5, tmp);
  1138. y_equal = felem_is_zero(ftmp5);
  1139. felem_scalar64(ftmp5, 2);
  1140. /* ftmp5[i] < 2^61 */
  1141. /*
  1142. * The formulae are incorrect if the points are equal, in affine coordinates
  1143. * (X_1, Y_1) == (X_2, Y_2), so we check for this and do doubling if this
  1144. * happens.
  1145. *
  1146. * We use bitwise operations to avoid potential side-channels introduced by
  1147. * the short-circuiting behaviour of boolean operators.
  1148. *
  1149. * The special case of either point being the point at infinity (z1 and/or
  1150. * z2 are zero), is handled separately later on in this function, so we
  1151. * avoid jumping to point_double here in those special cases.
  1152. *
  1153. * Notice the comment below on the implications of this branching for timing
  1154. * leaks and why it is considered practically irrelevant.
  1155. */
  1156. points_equal = (x_equal & y_equal & (~z1_is_zero) & (~z2_is_zero));
  1157. if (points_equal) {
  1158. /*
  1159. * This is obviously not constant-time but it will almost-never happen
  1160. * for ECDH / ECDSA. The case where it can happen is during scalar-mult
  1161. * where the intermediate value gets very close to the group order.
  1162. * Since |ossl_ec_GFp_nistp_recode_scalar_bits| produces signed digits
  1163. * for the scalar, it's possible for the intermediate value to be a small
  1164. * negative multiple of the base point, and for the final signed digit
  1165. * to be the same value. We believe that this only occurs for the scalar
  1166. * 1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
  1167. * ffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb
  1168. * 71e913863f7, in that case the penultimate intermediate is -9G and
  1169. * the final digit is also -9G. Since this only happens for a single
  1170. * scalar, the timing leak is irrelevant. (Any attacker who wanted to
  1171. * check whether a secret scalar was that exact value, can already do
  1172. * so.)
  1173. */
  1174. point_double(x3, y3, z3, x1, y1, z1);
  1175. return;
  1176. }
  1177. /* I = ftmp = (2h)**2 */
  1178. felem_assign(ftmp, ftmp4);
  1179. felem_scalar64(ftmp, 2);
  1180. /* ftmp[i] < 2^61 */
  1181. felem_square(tmp, ftmp);
  1182. /* tmp[i] < 17*2^122 */
  1183. felem_reduce(ftmp, tmp);
  1184. /* J = ftmp2 = h * I */
  1185. felem_mul(tmp, ftmp4, ftmp);
  1186. felem_reduce(ftmp2, tmp);
  1187. /* V = ftmp4 = U1 * I */
  1188. felem_mul(tmp, ftmp3, ftmp);
  1189. felem_reduce(ftmp4, tmp);
  1190. /* x_out = r**2 - J - 2V */
  1191. felem_square(tmp, ftmp5);
  1192. /* tmp[i] < 17*2^122 */
  1193. felem_diff_128_64(tmp, ftmp2);
  1194. /* tmp[i] < 17*2^122 + 2^63 */
  1195. felem_assign(ftmp3, ftmp4);
  1196. felem_scalar64(ftmp4, 2);
  1197. /* ftmp4[i] < 2^61 */
  1198. felem_diff_128_64(tmp, ftmp4);
  1199. /* tmp[i] < 17*2^122 + 2^64 */
  1200. felem_reduce(x_out, tmp);
  1201. /* y_out = r(V-x_out) - 2 * s1 * J */
  1202. felem_diff64(ftmp3, x_out);
  1203. /*
  1204. * ftmp3[i] < 2^60 + 2^60 = 2^61
  1205. */
  1206. felem_mul(tmp, ftmp5, ftmp3);
  1207. /* tmp[i] < 17*2^122 */
  1208. felem_mul(tmp2, ftmp6, ftmp2);
  1209. /* tmp2[i] < 17*2^120 */
  1210. felem_scalar128(tmp2, 2);
  1211. /* tmp2[i] < 17*2^121 */
  1212. felem_diff128(tmp, tmp2);
  1213. /*-
  1214. * tmp[i] < 2^127 - 2^69 + 17*2^122
  1215. * = 2^126 - 2^122 - 2^6 - 2^2 - 1
  1216. * < 2^127
  1217. */
  1218. felem_reduce(y_out, tmp);
  1219. copy_conditional(x_out, x2, z1_is_zero);
  1220. copy_conditional(x_out, x1, z2_is_zero);
  1221. copy_conditional(y_out, y2, z1_is_zero);
  1222. copy_conditional(y_out, y1, z2_is_zero);
  1223. copy_conditional(z_out, z2, z1_is_zero);
  1224. copy_conditional(z_out, z1, z2_is_zero);
  1225. felem_assign(x3, x_out);
  1226. felem_assign(y3, y_out);
  1227. felem_assign(z3, z_out);
  1228. }
  1229. /*-
  1230. * Base point pre computation
  1231. * --------------------------
  1232. *
  1233. * Two different sorts of precomputed tables are used in the following code.
  1234. * Each contain various points on the curve, where each point is three field
  1235. * elements (x, y, z).
  1236. *
  1237. * For the base point table, z is usually 1 (0 for the point at infinity).
  1238. * This table has 16 elements:
  1239. * index | bits | point
  1240. * ------+---------+------------------------------
  1241. * 0 | 0 0 0 0 | 0G
  1242. * 1 | 0 0 0 1 | 1G
  1243. * 2 | 0 0 1 0 | 2^130G
  1244. * 3 | 0 0 1 1 | (2^130 + 1)G
  1245. * 4 | 0 1 0 0 | 2^260G
  1246. * 5 | 0 1 0 1 | (2^260 + 1)G
  1247. * 6 | 0 1 1 0 | (2^260 + 2^130)G
  1248. * 7 | 0 1 1 1 | (2^260 + 2^130 + 1)G
  1249. * 8 | 1 0 0 0 | 2^390G
  1250. * 9 | 1 0 0 1 | (2^390 + 1)G
  1251. * 10 | 1 0 1 0 | (2^390 + 2^130)G
  1252. * 11 | 1 0 1 1 | (2^390 + 2^130 + 1)G
  1253. * 12 | 1 1 0 0 | (2^390 + 2^260)G
  1254. * 13 | 1 1 0 1 | (2^390 + 2^260 + 1)G
  1255. * 14 | 1 1 1 0 | (2^390 + 2^260 + 2^130)G
  1256. * 15 | 1 1 1 1 | (2^390 + 2^260 + 2^130 + 1)G
  1257. *
  1258. * The reason for this is so that we can clock bits into four different
  1259. * locations when doing simple scalar multiplies against the base point.
  1260. *
  1261. * Tables for other points have table[i] = iG for i in 0 .. 16. */
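/*
 * For example, row 5 of the table (binary 0 1 0 1) holds (2^260 + 1)G: bit 0
 * of the index selects the 1G component and bit 2 selects the 2^260G
 * component. During a base-point multiply the 521-bit scalar is processed as
 * four roughly 130-bit chunks in parallel, one bit from each chunk forming
 * the 4-bit table index.
 */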

/* gmul is the table of precomputed base points */
static const felem gmul[16][3] = {
    {{0, 0, 0, 0, 0, 0, 0, 0, 0},
     {0, 0, 0, 0, 0, 0, 0, 0, 0},
     {0, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x017e7e31c2e5bd66, 0x022cf0615a90a6fe, 0x00127a2ffa8de334,
      0x01dfbf9d64a3f877, 0x006b4d3dbaa14b5e, 0x014fed487e0a2bd8,
      0x015b4429c6481390, 0x03a73678fb2d988e, 0x00c6858e06b70404},
     {0x00be94769fd16650, 0x031c21a89cb09022, 0x039013fad0761353,
      0x02657bd099031542, 0x03273e662c97ee72, 0x01e6d11a05ebef45,
      0x03d1bd998f544495, 0x03001172297ed0b1, 0x011839296a789a3b},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x0373faacbc875bae, 0x00f325023721c671, 0x00f666fd3dbde5ad,
      0x01a6932363f88ea7, 0x01fc6d9e13f9c47b, 0x03bcbffc2bbf734e,
      0x013ee3c3647f3a92, 0x029409fefe75d07d, 0x00ef9199963d85e5},
     {0x011173743ad5b178, 0x02499c7c21bf7d46, 0x035beaeabb8b1a58,
      0x00f989c4752ea0a3, 0x0101e1de48a9c1a3, 0x01a20076be28ba6c,
      0x02f8052e5eb2de95, 0x01bfe8f82dea117c, 0x0160074d3c36ddb7},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x012f3fc373393b3b, 0x03d3d6172f1419fa, 0x02adc943c0b86873,
      0x00d475584177952b, 0x012a4d1673750ee2, 0x00512517a0f13b0c,
      0x02b184671a7b1734, 0x0315b84236f1a50a, 0x00a4afc472edbdb9},
     {0x00152a7077f385c4, 0x03044007d8d1c2ee, 0x0065829d61d52b52,
      0x00494ff6b6631d0d, 0x00a11d94d5f06bcf, 0x02d2f89474d9282e,
      0x0241c5727c06eeb9, 0x0386928710fbdb9d, 0x01f883f727b0dfbe},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x019b0c3c9185544d, 0x006243a37c9d97db, 0x02ee3cbe030a2ad2,
      0x00cfdd946bb51e0d, 0x0271c00932606b91, 0x03f817d1ec68c561,
      0x03f37009806a369c, 0x03c1f30baf184fd5, 0x01091022d6d2f065},
     {0x0292c583514c45ed, 0x0316fca51f9a286c, 0x00300af507c1489a,
      0x0295f69008298cf1, 0x02c0ed8274943d7b, 0x016509b9b47a431e,
      0x02bc9de9634868ce, 0x005b34929bffcb09, 0x000c1a0121681524},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x0286abc0292fb9f2, 0x02665eee9805b3f7, 0x01ed7455f17f26d6,
      0x0346355b83175d13, 0x006284944cd0a097, 0x0191895bcdec5e51,
      0x02e288370afda7d9, 0x03b22312bfefa67a, 0x01d104d3fc0613fe},
     {0x0092421a12f7e47f, 0x0077a83fa373c501, 0x03bd25c5f696bd0d,
      0x035c41e4d5459761, 0x01ca0d1742b24f53, 0x00aaab27863a509c,
      0x018b6de47df73917, 0x025c0b771705cd01, 0x01fd51d566d760a7},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x01dd92ff6b0d1dbd, 0x039c5e2e8f8afa69, 0x0261ed13242c3b27,
      0x0382c6e67026e6a0, 0x01d60b10be2089f9, 0x03c15f3dce86723f,
      0x03c764a32d2a062d, 0x017307eac0fad056, 0x018207c0b96c5256},
     {0x0196a16d60e13154, 0x03e6ce74c0267030, 0x00ddbf2b4e52a5aa,
      0x012738241bbf31c8, 0x00ebe8dc04685a28, 0x024c2ad6d380d4a2,
      0x035ee062a6e62d0e, 0x0029ed74af7d3a0f, 0x00eef32aec142ebd},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00c31ec398993b39, 0x03a9f45bcda68253, 0x00ac733c24c70890,
      0x00872b111401ff01, 0x01d178c23195eafb, 0x03bca2c816b87f74,
      0x0261a9af46fbad7a, 0x0324b2a8dd3d28f9, 0x00918121d8f24e23},
     {0x032bc8c1ca983cd7, 0x00d869dfb08fc8c6, 0x01693cb61fce1516,
      0x012a5ea68f4e88a8, 0x010869cab88d7ae3, 0x009081ad277ceee1,
      0x033a77166d064cdc, 0x03955235a1fb3a95, 0x01251a4a9b25b65e},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00148a3a1b27f40b, 0x0123186df1b31fdc, 0x00026e7beaad34ce,
      0x01db446ac1d3dbba, 0x0299c1a33437eaec, 0x024540610183cbb7,
      0x0173bb0e9ce92e46, 0x02b937e43921214b, 0x01ab0436a9bf01b5},
     {0x0383381640d46948, 0x008dacbf0e7f330f, 0x03602122bcc3f318,
      0x01ee596b200620d6, 0x03bd0585fda430b3, 0x014aed77fd123a83,
      0x005ace749e52f742, 0x0390fe041da2b842, 0x0189a8ceb3299242},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x012a19d6b3282473, 0x00c0915918b423ce, 0x023a954eb94405ae,
      0x00529f692be26158, 0x0289fa1b6fa4b2aa, 0x0198ae4ceea346ef,
      0x0047d8cdfbdedd49, 0x00cc8c8953f0f6b8, 0x001424abbff49203},
     {0x0256732a1115a03a, 0x0351bc38665c6733, 0x03f7b950fb4a6447,
      0x000afffa94c22155, 0x025763d0a4dab540, 0x000511e92d4fc283,
      0x030a7e9eda0ee96c, 0x004c3cd93a28bf0a, 0x017edb3a8719217f},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x011de5675a88e673, 0x031d7d0f5e567fbe, 0x0016b2062c970ae5,
      0x03f4a2be49d90aa7, 0x03cef0bd13822866, 0x03f0923dcf774a6c,
      0x0284bebc4f322f72, 0x016ab2645302bb2c, 0x01793f95dace0e2a},
     {0x010646e13527a28f, 0x01ca1babd59dc5e7, 0x01afedfd9a5595df,
      0x01f15785212ea6b1, 0x0324e5d64f6ae3f4, 0x02d680f526d00645,
      0x0127920fadf627a7, 0x03b383f75df4f684, 0x0089e0057e783b0a},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00f334b9eb3c26c6, 0x0298fdaa98568dce, 0x01c2d24843a82292,
      0x020bcb24fa1b0711, 0x02cbdb3d2b1875e6, 0x0014907598f89422,
      0x03abe3aa43b26664, 0x02cbf47f720bc168, 0x0133b5e73014b79b},
     {0x034aab5dab05779d, 0x00cdc5d71fee9abb, 0x0399f16bd4bd9d30,
      0x03582fa592d82647, 0x02be1cdfb775b0e9, 0x0034f7cea32e94cb,
      0x0335a7f08f56f286, 0x03b707e9565d1c8b, 0x0015c946ea5b614f},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x024676f6cff72255, 0x00d14625cac96378, 0x00532b6008bc3767,
      0x01fc16721b985322, 0x023355ea1b091668, 0x029de7afdc0317c3,
      0x02fc8a7ca2da037c, 0x02de1217d74a6f30, 0x013f7173175b73bf},
     {0x0344913f441490b5, 0x0200f9e272b61eca, 0x0258a246b1dd55d2,
      0x03753db9ea496f36, 0x025e02937a09c5ef, 0x030cbd3d14012692,
      0x01793a67e70dc72a, 0x03ec1d37048a662e, 0x006550f700c32a8d},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00d3f48a347eba27, 0x008e636649b61bd8, 0x00d3b93716778fb3,
      0x004d1915757bd209, 0x019d5311a3da44e0, 0x016d1afcbbe6aade,
      0x0241bf5f73265616, 0x0384672e5d50d39b, 0x005009fee522b684},
     {0x029b4fab064435fe, 0x018868ee095bbb07, 0x01ea3d6936cc92b8,
      0x000608b00f78a2f3, 0x02db911073d1c20f, 0x018205938470100a,
      0x01f1e4964cbe6ff2, 0x021a19a29eed4663, 0x01414485f42afa81},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x01612b3a17f63e34, 0x03813992885428e6, 0x022b3c215b5a9608,
      0x029b4057e19f2fcb, 0x0384059a587af7e6, 0x02d6400ace6fe610,
      0x029354d896e8e331, 0x00c047ee6dfba65e, 0x0037720542e9d49d},
     {0x02ce9eed7c5e9278, 0x0374ed703e79643b, 0x01316c54c4072006,
      0x005aaa09054b2ee8, 0x002824000c840d57, 0x03d4eba24771ed86,
      0x0189c50aabc3bdae, 0x0338c01541e15510, 0x00466d56e38eed42},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x007efd8330ad8bd6, 0x02465ed48047710b, 0x0034c6606b215e0c,
      0x016ae30c53cbf839, 0x01fa17bd37161216, 0x018ead4e61ce8ab9,
      0x005482ed5f5dee46, 0x037543755bba1d7f, 0x005e5ac7e70a9d0f},
     {0x0117e1bb2fdcb2a2, 0x03deea36249f40c4, 0x028d09b4a6246cb7,
      0x03524b8855bcf756, 0x023d7d109d5ceb58, 0x0178e43e3223ef9c,
      0x0154536a0c6e966a, 0x037964d1286ee9fe, 0x0199bcd90e125055},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}}
};
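
/*
 * Illustrative sketch (not part of the original file): a gmul index is a
 * 4-bit value whose bits select which of G, 2^130*G, 2^260*G and 2^390*G are
 * summed, exactly as the table in the comment above lists.  The hypothetical
 * helper below only spells out that mapping; nothing in this file uses it.
 */
#if 0
static unsigned gmul_entry_exponents(unsigned idx /* 0 .. 15 */,
                                     unsigned exps[4])
{
    static const unsigned shift[4] = { 0, 130, 260, 390 };
    unsigned bit, n = 0;

    for (bit = 0; bit < 4; bit++)
        if (idx & (1u << bit))
            exps[n++] = shift[bit]; /* entry idx = sum of 2^exps[k] * G */
    return n;
}
#endif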

/*
 * select_point selects the |idx|th point from a precomputation table and
 * copies it to out.
 */
/* pre_comp below is of the size provided in |size| */
static void select_point(const limb idx, unsigned int size,
                         const felem pre_comp[][3], felem out[3])
{
    unsigned i, j;
    limb *outlimbs = &out[0][0];

    memset(out, 0, sizeof(*out) * 3);

    for (i = 0; i < size; i++) {
        const limb *inlimbs = &pre_comp[i][0][0];
        limb mask = i ^ idx;
        mask |= mask >> 4;
        mask |= mask >> 2;
        mask |= mask >> 1;
        mask &= 1;
        mask--;
        for (j = 0; j < NLIMBS * 3; j++)
            outlimbs[j] |= inlimbs[j] & mask;
    }
}
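
/*
 * Illustrative sketch (not part of the original file): select_point builds,
 * for each table row i, a mask that is all-ones exactly when i == idx and
 * all-zero otherwise, so every row is read but only one contributes to the
 * output.  The mask construction on its own, for values that fit in 5 bits,
 * is equivalent to the hypothetical helper below.
 */
#if 0
static limb eq_mask_sketch(limb i, limb idx)
{
    limb mask = i ^ idx;        /* 0 iff i == idx */

    mask |= mask >> 4;          /* fold all five bits into bit 0 */
    mask |= mask >> 2;
    mask |= mask >> 1;
    mask &= 1;                  /* 0 if equal, 1 if different */
    return mask - 1;            /* all-ones if equal, 0 if different */
}
#endif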

/* get_bit returns the |i|th bit in |in| */
static char get_bit(const felem_bytearray in, int i)
{
    if (i < 0)
        return 0;
    return (in[i >> 3] >> (i & 7)) & 1;
}

/*
 * Interleaved point multiplication using precomputed point multiples: The
 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
 * generator, using certain (large) precomputed multiples in g_pre_comp.
 * The output point (X, Y, Z) is stored in x_out, y_out, z_out.
 */
static void batch_mul(felem x_out, felem y_out, felem z_out,
                      const felem_bytearray scalars[],
                      const unsigned num_points, const u8 *g_scalar,
                      const int mixed, const felem pre_comp[][17][3],
                      const felem g_pre_comp[16][3])
{
    int i, skip;
    unsigned num, gen_mul = (g_scalar != NULL);
    felem nq[3], tmp[4];
    limb bits;
    u8 sign, digit;

    /* set nq to the point at infinity */
    memset(nq, 0, sizeof(nq));

    /*
     * Loop over all scalars msb-to-lsb, interleaving additions of multiples
     * of the generator (last quarter of rounds) and additions of the other
     * points' multiples (every fifth round).
     */
    skip = 1;                   /* save two point operations in the first round */
    for (i = (num_points ? 520 : 130); i >= 0; --i) {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);

        /* add multiples of the generator */
        if (gen_mul && (i <= 130)) {
            bits = get_bit(g_scalar, i + 390) << 3;
            if (i < 130) {
                bits |= get_bit(g_scalar, i + 260) << 2;
                bits |= get_bit(g_scalar, i + 130) << 1;
                bits |= get_bit(g_scalar, i);
            }
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp, tmp);
            if (!skip) {
                /* The 1 argument below is for "mixed" */
                point_add(nq[0], nq[1], nq[2],
                          nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
            } else {
                memcpy(nq, tmp, 3 * sizeof(felem));
                skip = 0;
            }
        }

        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0)) {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num) {
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ossl_ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

                /*
                 * select the point to add or subtract, in constant time
                 */
                select_point(digit, 17, pre_comp[num], tmp);
                /* (X, -Y, Z) is the negative point */
                felem_neg(tmp[3], tmp[1]);
                copy_conditional(tmp[1], tmp[3], (-(limb) sign));
                if (!skip) {
                    point_add(nq[0], nq[1], nq[2],
                              nq[0], nq[1], nq[2],
                              mixed, tmp[0], tmp[1], tmp[2]);
                } else {
                    memcpy(nq, tmp, 3 * sizeof(felem));
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
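
/*
 * Explanatory note (not part of the original file): with num_points == 0 the
 * loop above only needs to cover i = 130 .. 0, because the four-way split of
 * the (at most 521-bit) generator scalar reads bits i, i+130, i+260 and
 * i+390 in each round; bit 520 is handled by the i == 130 round, where only
 * the i+390 term can be non-zero.  For example, at i = 7 the generator
 * window gathers scalar bits 397, 267, 137 and 7 into one gmul index.  When
 * other points are present the loop instead starts at i = 520 so that their
 * full 521-bit scalars, consumed in six-bit windows every fifth round, are
 * covered as well.
 */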

/* Precomputation for the group generator. */
struct nistp521_pre_comp_st {
    felem g_pre_comp[16][3];
    CRYPTO_REF_COUNT references;
    CRYPTO_RWLOCK *lock;
};

const EC_METHOD *EC_GFp_nistp521_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ossl_ec_GFp_nistp521_group_init,
        ossl_ec_GFp_simple_group_finish,
        ossl_ec_GFp_simple_group_clear_finish,
        ossl_ec_GFp_nist_group_copy,
        ossl_ec_GFp_nistp521_group_set_curve,
        ossl_ec_GFp_simple_group_get_curve,
        ossl_ec_GFp_simple_group_get_degree,
        ossl_ec_group_simple_order_bits,
        ossl_ec_GFp_simple_group_check_discriminant,
        ossl_ec_GFp_simple_point_init,
        ossl_ec_GFp_simple_point_finish,
        ossl_ec_GFp_simple_point_clear_finish,
        ossl_ec_GFp_simple_point_copy,
        ossl_ec_GFp_simple_point_set_to_infinity,
        ossl_ec_GFp_simple_point_set_affine_coordinates,
        ossl_ec_GFp_nistp521_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */ ,
        0 /* point2oct */ ,
        0 /* oct2point */ ,
        ossl_ec_GFp_simple_add,
        ossl_ec_GFp_simple_dbl,
        ossl_ec_GFp_simple_invert,
        ossl_ec_GFp_simple_is_at_infinity,
        ossl_ec_GFp_simple_is_on_curve,
        ossl_ec_GFp_simple_cmp,
        ossl_ec_GFp_simple_make_affine,
        ossl_ec_GFp_simple_points_make_affine,
        ossl_ec_GFp_nistp521_points_mul,
        ossl_ec_GFp_nistp521_precompute_mult,
        ossl_ec_GFp_nistp521_have_precompute_mult,
        ossl_ec_GFp_nist_field_mul,
        ossl_ec_GFp_nist_field_sqr,
        0 /* field_div */ ,
        ossl_ec_GFp_simple_field_inv,
        0 /* field_encode */ ,
        0 /* field_decode */ ,
        0,                      /* field_set_to_one */
        ossl_ec_key_simple_priv2oct,
        ossl_ec_key_simple_oct2priv,
        0,                      /* set private */
        ossl_ec_key_simple_generate_key,
        ossl_ec_key_simple_check_key,
        ossl_ec_key_simple_generate_public_key,
        0,                      /* keycopy */
        0,                      /* keyfinish */
        ossl_ecdh_simple_compute_key,
        ossl_ecdsa_simple_sign_setup,
        ossl_ecdsa_simple_sign_sig,
        ossl_ecdsa_simple_verify_sig,
        0,                      /* field_inverse_mod_ord */
        0,                      /* blind_coordinates */
        0,                      /* ladder_pre */
        0,                      /* ladder_step */
        0                       /* ladder_post */
    };

    return &ret;
}

/******************************************************************************/
/*
 * FUNCTIONS TO MANAGE PRECOMPUTATION
 */

static NISTP521_PRE_COMP *nistp521_pre_comp_new(void)
{
    NISTP521_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));

    if (ret == NULL) {
        ERR_raise(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
        return ret;
    }

    ret->references = 1;

    ret->lock = CRYPTO_THREAD_lock_new();
    if (ret->lock == NULL) {
        ERR_raise(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}

NISTP521_PRE_COMP *EC_nistp521_pre_comp_dup(NISTP521_PRE_COMP *p)
{
    int i;

    if (p != NULL)
        CRYPTO_UP_REF(&p->references, &i, p->lock);
    return p;
}

void EC_nistp521_pre_comp_free(NISTP521_PRE_COMP *p)
{
    int i;

    if (p == NULL)
        return;

    CRYPTO_DOWN_REF(&p->references, &i, p->lock);
    REF_PRINT_COUNT("EC_nistp521", p);
    if (i > 0)
        return;
    REF_ASSERT_ISNT(i < 0);

    CRYPTO_THREAD_lock_free(p->lock);
    OPENSSL_free(p);
}

/******************************************************************************/
/*
 * OPENSSL EC_METHOD FUNCTIONS
 */

int ossl_ec_GFp_nistp521_group_init(EC_GROUP *group)
{
    int ret;

    ret = ossl_ec_GFp_simple_group_init(group);
    group->a_is_minus3 = 1;
    return ret;
}

int ossl_ec_GFp_nistp521_group_set_curve(EC_GROUP *group, const BIGNUM *p,
                                         const BIGNUM *a, const BIGNUM *b,
                                         BN_CTX *ctx)
{
    int ret = 0;
    BIGNUM *curve_p, *curve_a, *curve_b;
#ifndef FIPS_MODULE
    BN_CTX *new_ctx = NULL;

    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    curve_p = BN_CTX_get(ctx);
    curve_a = BN_CTX_get(ctx);
    curve_b = BN_CTX_get(ctx);
    if (curve_b == NULL)
        goto err;
    BN_bin2bn(nistp521_curve_params[0], sizeof(felem_bytearray), curve_p);
    BN_bin2bn(nistp521_curve_params[1], sizeof(felem_bytearray), curve_a);
    BN_bin2bn(nistp521_curve_params[2], sizeof(felem_bytearray), curve_b);
    if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
        ERR_raise(ERR_LIB_EC, EC_R_WRONG_CURVE_PARAMETERS);
        goto err;
    }
    group->field_mod_func = BN_nist_mod_521;
    ret = ossl_ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
 err:
    BN_CTX_end(ctx);
#ifndef FIPS_MODULE
    BN_CTX_free(new_ctx);
#endif
    return ret;
}

/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3)
 */
int ossl_ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP *group,
                                                       const EC_POINT *point,
                                                       BIGNUM *x, BIGNUM *y,
                                                       BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in, x_out, y_out;
    largefelem tmp;

    if (EC_POINT_is_at_infinity(group, point)) {
        ERR_raise(ERR_LIB_EC, EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
        (!BN_to_felem(z1, point->Z)))
        return 0;
    felem_inv(z2, z1);
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!felem_to_BN(x, x_out)) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            return 0;
        }
    }
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!felem_to_BN(y, y_out)) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
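
/*
 * Explanatory note (not part of the original file): the conversion above
 * costs a single field inversion.  With z2 = Z^-1 from felem_inv, the
 * squaring gives z1 = Z^-2, so x = X * Z^-2; the subsequent
 * felem_mul(tmp, z1, z2) gives z1 = Z^-3, so y = Y * Z^-3, matching the
 * (X/Z^2, Y/Z^3) formulas in the function comment.
 */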

/* points below is of size |num|, and tmp_felems is of size |num+1| */
static void make_points_affine(size_t num, felem points[][3],
                               felem tmp_felems[])
{
    /*
     * Runs in constant time, unless an input is the point at infinity (which
     * normally shouldn't happen).
     */
    ossl_ec_GFp_nistp_points_make_affine_internal(num,
                                                  points,
                                                  sizeof(felem),
                                                  tmp_felems,
                                                  (void (*)(void *))felem_one,
                                                  felem_is_zero_int,
                                                  (void (*)(void *, const void *))
                                                  felem_assign,
                                                  (void (*)(void *, const void *))
                                                  felem_square_reduce,
                                                  (void (*)(void *, const void *, const void *))
                                                  felem_mul_reduce,
                                                  (void (*)(void *, const void *))
                                                  felem_inv,
                                                  (void (*)(void *, const void *))
                                                  felem_contract);
}
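
/*
 * Illustrative sketch (not part of the original file): converting many
 * Jacobian points to affine form is worthwhile because all the Z-inversions
 * can share a single field inversion ("Montgomery's trick").  The
 * hypothetical outline below shows that idea for n >= 1 Z-coordinates; the
 * actual algorithm used by ossl_ec_GFp_nistp_points_make_affine_internal
 * may be organised differently.
 */
#if 0
static void batch_invert_sketch(felem z[], size_t n, felem scratch[])
{
    felem acc, inv;
    size_t i;

    felem_assign(acc, z[0]);
    for (i = 1; i < n; i++) {                   /* scratch[i] = z[0]*...*z[i-1] */
        felem_assign(scratch[i], acc);
        felem_mul_reduce(acc, acc, z[i]);
    }
    felem_inv(inv, acc);                        /* one inversion for all inputs */
    for (i = n; i-- > 1;) {
        felem_mul_reduce(acc, inv, scratch[i]); /* acc = z[i]^-1 */
        felem_mul_reduce(inv, inv, z[i]);       /* drop z[i] from the running inverse */
        felem_assign(z[i], acc);
    }
    felem_assign(z[0], inv);                    /* inv is now z[0]^-1 */
}
#endif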

/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values. The result is stored in r (r can equal one of the inputs).
 */
int ossl_ec_GFp_nistp521_points_mul(const EC_GROUP *group, EC_POINT *r,
                                    const BIGNUM *scalar, size_t num,
                                    const EC_POINT *points[],
                                    const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    int mixed = 0;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;
    felem (*pre_comp)[17][3] = NULL;
    felem *tmp_felems = NULL;
    unsigned i;
    int num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    felem x_in, y_in, z_in, x_out, y_out, z_out;
    NISTP521_PRE_COMP *pre = NULL;
    felem (*g_pre_comp)[3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    z = BN_CTX_get(ctx);
    tmp_scalar = BN_CTX_get(ctx);
    if (tmp_scalar == NULL)
        goto err;

    if (scalar != NULL) {
        pre = group->pre_comp.nistp521;
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = &pre->g_pre_comp[0];
        else
            /* try to use the standard precomputation */
            g_pre_comp = (felem(*)[3]) gmul;
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!felem_to_BN(x, g_pre_comp[1][0]) ||
            !felem_to_BN(y, g_pre_comp[1][1]) ||
            !felem_to_BN(z, g_pre_comp[1][2])) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            goto err;
        }
        if (!ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group,
                                                                generator,
                                                                x, y, z, ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points++;
    }

    if (num_points > 0) {
        if (num_points >= 2) {
            /*
             * unless we precompute multiples for just one point, converting
             * those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
        pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
        if (mixed)
            tmp_felems =
                OPENSSL_malloc(sizeof(*tmp_felems) * (num_points * 17 + 1));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_felems == NULL))) {
            ERR_raise(ERR_LIB_EC, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        for (i = 0; i < num_points; ++i) {
            if (i == num) {
                /*
                 * we didn't have a valid precomputation, so we pick the
                 * generator
                 */
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else {
                /* the i^th point */
                p = points[i];
                p_scalar = scalars[i];
            }
            if ((p_scalar != NULL) && (p != NULL)) {
                /* reduce scalar to 0 <= scalar < 2^521 */
                if ((BN_num_bits(p_scalar) > 521)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
                        ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2lebinpad(tmp_scalar,
                                               secrets[i], sizeof(secrets[i]));
                } else {
                    num_bytes = BN_bn2lebinpad(p_scalar,
                                               secrets[i], sizeof(secrets[i]));
                }
                if (num_bytes < 0) {
                    ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                    goto err;
                }
                /* precompute multiples */
                if ((!BN_to_felem(x_out, p->X)) ||
                    (!BN_to_felem(y_out, p->Y)) ||
                    (!BN_to_felem(z_out, p->Z)))
                    goto err;
                memcpy(pre_comp[i][1][0], x_out, sizeof(felem));
                memcpy(pre_comp[i][1][1], y_out, sizeof(felem));
                memcpy(pre_comp[i][1][2], z_out, sizeof(felem));
                for (j = 2; j <= 16; ++j) {
                    if (j & 1) {
                        point_add(pre_comp[i][j][0], pre_comp[i][j][1],
                                  pre_comp[i][j][2], pre_comp[i][1][0],
                                  pre_comp[i][1][1], pre_comp[i][1][2], 0,
                                  pre_comp[i][j - 1][0],
                                  pre_comp[i][j - 1][1],
                                  pre_comp[i][j - 1][2]);
                    } else {
                        point_double(pre_comp[i][j][0], pre_comp[i][j][1],
                                     pre_comp[i][j][2], pre_comp[i][j / 2][0],
                                     pre_comp[i][j / 2][1],
                                     pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
    }

    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp)) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^521 */
        if ((BN_num_bits(scalar) > 521) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2lebinpad(tmp_scalar, g_secret, sizeof(g_secret));
        } else {
            num_bytes = BN_bn2lebinpad(scalar, g_secret, sizeof(g_secret));
        }
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const felem(*)[17][3])pre_comp,
                  (const felem(*)[3])g_pre_comp);
    } else {
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
    }

    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
        (!felem_to_BN(z, z_in))) {
        ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
        goto err;
    }
    ret = ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group, r, x, y, z,
                                                             ctx);

 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    OPENSSL_free(secrets);
    OPENSSL_free(pre_comp);
    OPENSSL_free(tmp_felems);
    return ret;
}
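
/*
 * Usage sketch (not part of the original file): when OpenSSL is configured
 * with enable-ec_nistp_64_gcc_128 and the group uses the nistp521 method,
 * an ordinary EC_POINT_mul() call on P-521 ends up in
 * ossl_ec_GFp_nistp521_points_mul() above.  A minimal, hypothetical caller
 * might look like this:
 */
#if 0
# include <openssl/ec.h>
# include <openssl/obj_mac.h>

static int p521_mul_example(void)
{
    int ok = 0;
    EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_secp521r1);
    EC_POINT *r = NULL;
    BIGNUM *k = BN_new();
    BN_CTX *ctx = BN_CTX_new();

    if (group == NULL || k == NULL || ctx == NULL)
        goto end;
    r = EC_POINT_new(group);
    if (r == NULL || !BN_set_word(k, 123456789))
        goto end;
    /* r = k*G; with no extra point/scalar pair only the generator term is used */
    ok = EC_POINT_mul(group, r, k, NULL, NULL, ctx);
 end:
    EC_POINT_free(r);
    EC_GROUP_free(group);
    BN_free(k);
    BN_CTX_free(ctx);
    return ok;
}
#endif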

int ossl_ec_GFp_nistp521_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP521_PRE_COMP *pre = NULL;
    int i, j;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    felem tmp_felems[16];
#ifndef FIPS_MODULE
    BN_CTX *new_ctx = NULL;
#endif

    /* throw away old precomputation */
    EC_pre_comp_free(group);

#ifndef FIPS_MODULE
    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    if (y == NULL)
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp521_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp521_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp521_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    if ((!BN_to_felem(pre->g_pre_comp[1][0], group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[1][1], group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[1][2], group->generator->Z)))
        goto err;
    /* compute 2^130*G, 2^260*G, 2^390*G */
    for (i = 1; i <= 4; i <<= 1) {
        point_double(pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1],
                     pre->g_pre_comp[2 * i][2], pre->g_pre_comp[i][0],
                     pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 129; ++j) {
            point_double(pre->g_pre_comp[2 * i][0],
                         pre->g_pre_comp[2 * i][1],
                         pre->g_pre_comp[2 * i][2],
                         pre->g_pre_comp[2 * i][0],
                         pre->g_pre_comp[2 * i][1],
                         pre->g_pre_comp[2 * i][2]);
        }
    }
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^130*G + 2^260*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
              pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
              pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    /* 2^130*G + 2^390*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
              pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
              pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    /* 2^260*G + 2^390*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
              pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
              pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
              0, pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
              pre->g_pre_comp[4][2]);
    /* 2^130*G + 2^260*G + 2^390*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
              pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
              pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i) {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2 * i + 1][0],
                  pre->g_pre_comp[2 * i + 1][1],
                  pre->g_pre_comp[2 * i + 1][2], pre->g_pre_comp[2 * i][0],
                  pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2], 0,
                  pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
                  pre->g_pre_comp[1][2]);
    }
    make_points_affine(15, &(pre->g_pre_comp[1]), tmp_felems);

 done:
    SETPRECOMP(group, nistp521, pre);
    ret = 1;
    pre = NULL;
 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
#ifndef FIPS_MODULE
    BN_CTX_free(new_ctx);
#endif
    EC_nistp521_pre_comp_free(pre);
    return ret;
}
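
/*
 * Explanatory note (not part of the original file): for a non-standard
 * generator, the code above fills g_pre_comp so that entry 2*i is 2^130
 * times entry i (one doubling plus 129 more), entries 6, 10, 12 and 14 are
 * the pairwise and triple sums of 2^130*G, 2^260*G and 2^390*G, and every
 * odd entry adds one more G, reproducing the index-to-multiple table
 * documented above gmul.  Applications typically reach this function
 * through the public EC_GROUP_precompute_mult() API.
 */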

int ossl_ec_GFp_nistp521_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, nistp521);
}