ecp_nistp384.c
  1. /*
  2. * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. */
  9. /* Copyright 2023 IBM Corp.
  10. *
  11. * Licensed under the Apache License, Version 2.0 (the "License");
  12. *
  13. * you may not use this file except in compliance with the License.
  14. * You may obtain a copy of the License at
  15. *
  16. * http://www.apache.org/licenses/LICENSE-2.0
  17. *
  18. * Unless required by applicable law or agreed to in writing, software
  19. * distributed under the License is distributed on an "AS IS" BASIS,
  20. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21. * See the License for the specific language governing permissions and
  22. * limitations under the License.
  23. */
  24. /*
  25. * Designed for 56-bit limbs by Rohan McLure <rohan.mclure@linux.ibm.com>.
  26. * The layout is based on that of ecp_nistp{224,521}.c, even allowing for asm
  27. * acceleration of felem_{square,mul} as supported in those files.
  28. */
  29. #include <openssl/e_os2.h>
  30. #include <string.h>
  31. #include <openssl/err.h>
  32. #include "ec_local.h"
  33. #include "internal/numbers.h"
  34. #ifndef INT128_MAX
  35. # error "Your compiler doesn't appear to support 128-bit integer types"
  36. #endif
  37. typedef uint8_t u8;
  38. typedef uint64_t u64;
  39. /*
  40. * The underlying field. P384 operates over GF(2^384-2^128-2^96+2^32-1). We
  41. * can serialize an element of this field into 48 bytes. We call this an
  42. * felem_bytearray.
  43. */
  44. typedef u8 felem_bytearray[48];
  45. /*
  46. * These are the parameters of P384, taken from FIPS 186-3, section D.1.2.4.
  47. * These values are big-endian.
  48. */
  49. static const felem_bytearray nistp384_curve_params[5] = {
  50. {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* p */
  51. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  52. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
  53. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF},
  54. {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* a = -3 */
  55. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  56. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
  57. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC},
  58. {0xB3, 0x31, 0x2F, 0xA7, 0xE2, 0x3E, 0xE7, 0xE4, 0x98, 0x8E, 0x05, 0x6B, /* b */
  59. 0xE3, 0xF8, 0x2D, 0x19, 0x18, 0x1D, 0x9C, 0x6E, 0xFE, 0x81, 0x41, 0x12,
  60. 0x03, 0x14, 0x08, 0x8F, 0x50, 0x13, 0x87, 0x5A, 0xC6, 0x56, 0x39, 0x8D,
  61. 0x8A, 0x2E, 0xD1, 0x9D, 0x2A, 0x85, 0xC8, 0xED, 0xD3, 0xEC, 0x2A, 0xEF},
  62. {0xAA, 0x87, 0xCA, 0x22, 0xBE, 0x8B, 0x05, 0x37, 0x8E, 0xB1, 0xC7, 0x1E, /* x */
  63. 0xF3, 0x20, 0xAD, 0x74, 0x6E, 0x1D, 0x3B, 0x62, 0x8B, 0xA7, 0x9B, 0x98,
  64. 0x59, 0xF7, 0x41, 0xE0, 0x82, 0x54, 0x2A, 0x38, 0x55, 0x02, 0xF2, 0x5D,
  65. 0xBF, 0x55, 0x29, 0x6C, 0x3A, 0x54, 0x5E, 0x38, 0x72, 0x76, 0x0A, 0xB7},
  66. {0x36, 0x17, 0xDE, 0x4A, 0x96, 0x26, 0x2C, 0x6F, 0x5D, 0x9E, 0x98, 0xBF, /* y */
  67. 0x92, 0x92, 0xDC, 0x29, 0xF8, 0xF4, 0x1D, 0xBD, 0x28, 0x9A, 0x14, 0x7C,
  68. 0xE9, 0xDA, 0x31, 0x13, 0xB5, 0xF0, 0xB8, 0xC0, 0x0A, 0x60, 0xB1, 0xCE,
  69. 0x1D, 0x7E, 0x81, 0x9D, 0x7A, 0x43, 0x1D, 0x7C, 0x90, 0xEA, 0x0E, 0x5F},
  70. };
  71. /*-
  72. * The representation of field elements.
  73. * ------------------------------------
  74. *
  75. * We represent field elements with seven values. These values are either 64 or
  76. * 128 bits and the field element represented is:
  77. * v[0]*2^0 + v[1]*2^56 + v[2]*2^112 + ... + v[6]*2^336 (mod p)
  78. * Each of the seven values is called a 'limb'. Since the limbs are spaced only
  79. * 56 bits apart, but are greater than 56 bits in length, the most significant
  80. * bits of each limb overlap with the least significant bits of the next
  81. *
  82. * This representation is considered to be 'redundant' in the sense that
  83. * intermediate values can each contain more than a 56-bit value in each limb.
  84. * Reduction causes all but the final limb to be reduced to contain a value less
  85. * than 2^56, with the final value represented allowed to be larger than 2^384,
  86. * inasmuch as we can be sure that arithmetic overflow remains impossible. The
  87. * reduced value must of course be congruent to the unreduced value.
  88. *
  89. * A field element with 64-bit limbs is an 'felem'. One with 128-bit limbs is a
  90. * 'widefelem', featuring enough bits to store the result of a multiplication
  91. * and even some further arithmetic without need for immediate reduction.
  92. */
  93. #define NLIMBS 7
  94. typedef uint64_t limb;
  95. typedef uint128_t widelimb;
  96. typedef limb limb_aX __attribute((__aligned__(1)));
  97. typedef limb felem[NLIMBS];
  98. typedef widelimb widefelem[2*NLIMBS-1];
  99. static const limb bottom56bits = 0xffffffffffffff;
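/*-
 * Illustrative sketch (hypothetical helper, not used elsewhere in this file):
 * with the 56-bit limb spacing described above, bit j of a fully reduced
 * element lives in limb j / 56 at offset j % 56, so a reduced element can be
 * probed bit-by-bit like this.  This only holds once every limb is below
 * 2^56; redundant intermediate values do not have a unique bit layout.
 */
static int felem_bit_sketch(const felem in, int j)
{
    if (j < 0 || j >= 384)
        return 0;
    return (int)((in[j / 56] >> (j % 56)) & 1);
}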
  100. /* Helper functions (de)serialising reduced field elements in little endian */
  101. static void bin48_to_felem(felem out, const u8 in[48])
  102. {
  103. memset(out, 0, 56);
  104. out[0] = (*((limb *) & in[0])) & bottom56bits;
  105. out[1] = (*((limb_aX *) & in[7])) & bottom56bits;
  106. out[2] = (*((limb_aX *) & in[14])) & bottom56bits;
  107. out[3] = (*((limb_aX *) & in[21])) & bottom56bits;
  108. out[4] = (*((limb_aX *) & in[28])) & bottom56bits;
  109. out[5] = (*((limb_aX *) & in[35])) & bottom56bits;
  110. memmove(&out[6], &in[42], 6);
  111. }
  112. static void felem_to_bin48(u8 out[48], const felem in)
  113. {
  114. memset(out, 0, 48);
  115. (*((limb *) & out[0])) |= (in[0] & bottom56bits);
  116. (*((limb_aX *) & out[7])) |= (in[1] & bottom56bits);
  117. (*((limb_aX *) & out[14])) |= (in[2] & bottom56bits);
  118. (*((limb_aX *) & out[21])) |= (in[3] & bottom56bits);
  119. (*((limb_aX *) & out[28])) |= (in[4] & bottom56bits);
  120. (*((limb_aX *) & out[35])) |= (in[5] & bottom56bits);
  121. memmove(&out[42], &in[6], 6);
  122. }
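/*-
 * Round-trip sketch (hypothetical, illustration only): because the two
 * helpers above move exactly the same byte ranges (7 bytes per limb, 6 bytes
 * for the top limb), converting 48 canonical little-endian bytes to an felem
 * and back reproduces the input exactly.
 */
static int bin48_roundtrip_sketch(const u8 in[48])
{
    felem f;
    u8 out[48];

    bin48_to_felem(f, in);
    felem_to_bin48(out, f);
    return memcmp(in, out, sizeof(out)) == 0;
}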
  123. /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
  124. static int BN_to_felem(felem out, const BIGNUM *bn)
  125. {
  126. felem_bytearray b_out;
  127. int num_bytes;
  128. if (BN_is_negative(bn)) {
  129. ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
  130. return 0;
  131. }
  132. num_bytes = BN_bn2lebinpad(bn, b_out, sizeof(b_out));
  133. if (num_bytes < 0) {
  134. ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
  135. return 0;
  136. }
  137. bin48_to_felem(out, b_out);
  138. return 1;
  139. }
  140. /* felem_to_BN converts an felem into an OpenSSL BIGNUM */
  141. static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
  142. {
  143. felem_bytearray b_out;
  144. felem_to_bin48(b_out, in);
  145. return BN_lebin2bn(b_out, sizeof(b_out), out);
  146. }
  147. /*-
  148. * Field operations
  149. * ----------------
  150. */
  151. static void felem_one(felem out)
  152. {
  153. out[0] = 1;
  154. memset(&out[1], 0, sizeof(limb) * (NLIMBS-1));
  155. }
  156. static void felem_assign(felem out, const felem in)
  157. {
  158. memcpy(out, in, sizeof(felem));
  159. }
  160. /* felem_sum64 sets out = out + in. */
  161. static void felem_sum64(felem out, const felem in)
  162. {
  163. unsigned int i;
  164. for (i = 0; i < NLIMBS; i++)
  165. out[i] += in[i];
  166. }
  167. /* felem_scalar sets out = in * scalar */
  168. static void felem_scalar(felem out, const felem in, limb scalar)
  169. {
  170. unsigned int i;
  171. for (i = 0; i < NLIMBS; i++)
  172. out[i] = in[i] * scalar;
  173. }
  174. /* felem_scalar64 sets out = out * scalar */
  175. static void felem_scalar64(felem out, limb scalar)
  176. {
  177. unsigned int i;
  178. for (i = 0; i < NLIMBS; i++)
  179. out[i] *= scalar;
  180. }
  181. /* felem_scalar128 sets out = out * scalar */
  182. static void felem_scalar128(widefelem out, limb scalar)
  183. {
  184. unsigned int i;
  185. for (i = 0; i < 2*NLIMBS-1; i++)
  186. out[i] *= scalar;
  187. }
  188. /*-
  189. * felem_neg sets |out| to |-in|
  190. * On entry:
  191. * in[i] < 2^60 - 2^29
  192. * On exit:
  193. * out[i] < 2^60
  194. */
  195. static void felem_neg(felem out, const felem in)
  196. {
  197. /*
  198. * In order to prevent underflow, we add a multiple of p before subtracting.
  199. * Use telescopic sums to represent 2^12 * p redundantly with each limb
  200. * of the form 2^60 + ...
  201. */
  202. static const limb two60m52m4 = (((limb) 1) << 60)
  203. - (((limb) 1) << 52)
  204. - (((limb) 1) << 4);
  205. static const limb two60p44m12 = (((limb) 1) << 60)
  206. + (((limb) 1) << 44)
  207. - (((limb) 1) << 12);
  208. static const limb two60m28m4 = (((limb) 1) << 60)
  209. - (((limb) 1) << 28)
  210. - (((limb) 1) << 4);
  211. static const limb two60m4 = (((limb) 1) << 60)
  212. - (((limb) 1) << 4);
  213. out[0] = two60p44m12 - in[0];
  214. out[1] = two60m52m4 - in[1];
  215. out[2] = two60m28m4 - in[2];
  216. out[3] = two60m4 - in[3];
  217. out[4] = two60m4 - in[4];
  218. out[5] = two60m4 - in[5];
  219. out[6] = two60m4 - in[6];
  220. }
  221. /*-
  222. * felem_diff64 subtracts |in| from |out|
  223. * On entry:
  224. * in[i] < 2^60 - 2^52 - 2^4
  225. * On exit:
  226. * out[i] < out_orig[i] + 2^60 + 2^44
  227. */
  228. static void felem_diff64(felem out, const felem in)
  229. {
  230. /*
  231. * In order to prevent underflow, we add a multiple of p before subtracting.
  232. * Use telescopic sums to represent 2^12 * p redundantly with each limb
  233. * of the form 2^60 + ...
  234. */
  235. static const limb two60m52m4 = (((limb) 1) << 60)
  236. - (((limb) 1) << 52)
  237. - (((limb) 1) << 4);
  238. static const limb two60p44m12 = (((limb) 1) << 60)
  239. + (((limb) 1) << 44)
  240. - (((limb) 1) << 12);
  241. static const limb two60m28m4 = (((limb) 1) << 60)
  242. - (((limb) 1) << 28)
  243. - (((limb) 1) << 4);
  244. static const limb two60m4 = (((limb) 1) << 60)
  245. - (((limb) 1) << 4);
  246. out[0] += two60p44m12 - in[0];
  247. out[1] += two60m52m4 - in[1];
  248. out[2] += two60m28m4 - in[2];
  249. out[3] += two60m4 - in[3];
  250. out[4] += two60m4 - in[4];
  251. out[5] += two60m4 - in[5];
  252. out[6] += two60m4 - in[6];
  253. }
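/*-
 * Verification sketch (hypothetical, illustration only): the four telescoped
 * constants used by felem_neg() and felem_diff64() really do sum to 2^12 * p
 * when weighted by 2^(56*i).  The check below reconstructs that sum with
 * BIGNUM arithmetic and compares it against p << 12, assuming a 64-bit
 * BN_ULONG, which holds on the 128-bit-integer platforms this file targets.
 */
static int felem_neg_constants_sketch(void)
{
    /* same per-limb constants as in felem_neg()/felem_diff64() above */
    static const limb c[NLIMBS] = {
        (((limb) 1) << 60) + (((limb) 1) << 44) - (((limb) 1) << 12),
        (((limb) 1) << 60) - (((limb) 1) << 52) - (((limb) 1) << 4),
        (((limb) 1) << 60) - (((limb) 1) << 28) - (((limb) 1) << 4),
        (((limb) 1) << 60) - (((limb) 1) << 4),
        (((limb) 1) << 60) - (((limb) 1) << 4),
        (((limb) 1) << 60) - (((limb) 1) << 4),
        (((limb) 1) << 60) - (((limb) 1) << 4)
    };
    BIGNUM *sum = BN_new(), *term = BN_new(), *p;
    unsigned int i;
    int ok = 0;

    p = BN_bin2bn(nistp384_curve_params[0], sizeof(felem_bytearray), NULL);
    if (sum == NULL || term == NULL || p == NULL)
        goto done;
    BN_zero(sum);
    for (i = 0; i < NLIMBS; i++) {
        /* sum += c[i] * 2^(56*i) */
        if (!BN_set_word(term, c[i])
            || !BN_lshift(term, term, 56 * i)
            || !BN_add(sum, sum, term))
            goto done;
    }
    /* the telescoped constants should encode exactly 2^12 * p */
    if (!BN_lshift(p, p, 12))
        goto done;
    ok = BN_cmp(sum, p) == 0;
 done:
    BN_free(sum);
    BN_free(term);
    BN_free(p);
    return ok;
}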
  254. /*
  255. * in[i] < 2^63
  256. * out[i] < out_orig[i] + 2^64 + 2^48
  257. */
  258. static void felem_diff_128_64(widefelem out, const felem in)
  259. {
  260. /*
  261. * In order to prevent underflow, we add a multiple of p before subtracting.
  262. * Use telescopic sums to represent 2^16 * p redundantly with each limb
  263. * of the form 2^64 + ...
  264. */
  265. static const widelimb two64m56m8 = (((widelimb) 1) << 64)
  266. - (((widelimb) 1) << 56)
  267. - (((widelimb) 1) << 8);
  268. static const widelimb two64m32m8 = (((widelimb) 1) << 64)
  269. - (((widelimb) 1) << 32)
  270. - (((widelimb) 1) << 8);
  271. static const widelimb two64m8 = (((widelimb) 1) << 64)
  272. - (((widelimb) 1) << 8);
  273. static const widelimb two64p48m16 = (((widelimb) 1) << 64)
  274. + (((widelimb) 1) << 48)
  275. - (((widelimb) 1) << 16);
  276. unsigned int i;
  277. out[0] += two64p48m16;
  278. out[1] += two64m56m8;
  279. out[2] += two64m32m8;
  280. out[3] += two64m8;
  281. out[4] += two64m8;
  282. out[5] += two64m8;
  283. out[6] += two64m8;
  284. for (i = 0; i < NLIMBS; i++)
  285. out[i] -= in[i];
  286. }
  287. /*
  288. * in[i] < 2^127 - 2^119 - 2^71
  289. * out[i] < out_orig[i] + 2^127 + 2^111
  290. */
  291. static void felem_diff128(widefelem out, const widefelem in)
  292. {
  293. /*
  294. * In order to prevent underflow, we add a multiple of p before subtracting.
  295. * Use telescopic sums to represent 2^415 * p redundantly with each limb
  296. * of the form 2^127 + ...
  297. */
  298. static const widelimb two127 = ((widelimb) 1) << 127;
  299. static const widelimb two127m71 = (((widelimb) 1) << 127)
  300. - (((widelimb) 1) << 71);
  301. static const widelimb two127p111m79m71 = (((widelimb) 1) << 127)
  302. + (((widelimb) 1) << 111)
  303. - (((widelimb) 1) << 79)
  304. - (((widelimb) 1) << 71);
  305. static const widelimb two127m119m71 = (((widelimb) 1) << 127)
  306. - (((widelimb) 1) << 119)
  307. - (((widelimb) 1) << 71);
  308. static const widelimb two127m95m71 = (((widelimb) 1) << 127)
  309. - (((widelimb) 1) << 95)
  310. - (((widelimb) 1) << 71);
  311. unsigned int i;
  312. out[0] += two127;
  313. out[1] += two127m71;
  314. out[2] += two127m71;
  315. out[3] += two127m71;
  316. out[4] += two127m71;
  317. out[5] += two127m71;
  318. out[6] += two127p111m79m71;
  319. out[7] += two127m119m71;
  320. out[8] += two127m95m71;
  321. out[9] += two127m71;
  322. out[10] += two127m71;
  323. out[11] += two127m71;
  324. out[12] += two127m71;
  325. for (i = 0; i < 2*NLIMBS-1; i++)
  326. out[i] -= in[i];
  327. }
  328. static void felem_square_ref(widefelem out, const felem in)
  329. {
  330. felem inx2;
  331. felem_scalar(inx2, in, 2);
  332. out[0] = ((uint128_t) in[0]) * in[0];
  333. out[1] = ((uint128_t) in[0]) * inx2[1];
  334. out[2] = ((uint128_t) in[0]) * inx2[2]
  335. + ((uint128_t) in[1]) * in[1];
  336. out[3] = ((uint128_t) in[0]) * inx2[3]
  337. + ((uint128_t) in[1]) * inx2[2];
  338. out[4] = ((uint128_t) in[0]) * inx2[4]
  339. + ((uint128_t) in[1]) * inx2[3]
  340. + ((uint128_t) in[2]) * in[2];
  341. out[5] = ((uint128_t) in[0]) * inx2[5]
  342. + ((uint128_t) in[1]) * inx2[4]
  343. + ((uint128_t) in[2]) * inx2[3];
  344. out[6] = ((uint128_t) in[0]) * inx2[6]
  345. + ((uint128_t) in[1]) * inx2[5]
  346. + ((uint128_t) in[2]) * inx2[4]
  347. + ((uint128_t) in[3]) * in[3];
  348. out[7] = ((uint128_t) in[1]) * inx2[6]
  349. + ((uint128_t) in[2]) * inx2[5]
  350. + ((uint128_t) in[3]) * inx2[4];
  351. out[8] = ((uint128_t) in[2]) * inx2[6]
  352. + ((uint128_t) in[3]) * inx2[5]
  353. + ((uint128_t) in[4]) * in[4];
  354. out[9] = ((uint128_t) in[3]) * inx2[6]
  355. + ((uint128_t) in[4]) * inx2[5];
  356. out[10] = ((uint128_t) in[4]) * inx2[6]
  357. + ((uint128_t) in[5]) * in[5];
  358. out[11] = ((uint128_t) in[5]) * inx2[6];
  359. out[12] = ((uint128_t) in[6]) * in[6];
  360. }
  361. static void felem_mul_ref(widefelem out, const felem in1, const felem in2)
  362. {
  363. out[0] = ((uint128_t) in1[0]) * in2[0];
  364. out[1] = ((uint128_t) in1[0]) * in2[1]
  365. + ((uint128_t) in1[1]) * in2[0];
  366. out[2] = ((uint128_t) in1[0]) * in2[2]
  367. + ((uint128_t) in1[1]) * in2[1]
  368. + ((uint128_t) in1[2]) * in2[0];
  369. out[3] = ((uint128_t) in1[0]) * in2[3]
  370. + ((uint128_t) in1[1]) * in2[2]
  371. + ((uint128_t) in1[2]) * in2[1]
  372. + ((uint128_t) in1[3]) * in2[0];
  373. out[4] = ((uint128_t) in1[0]) * in2[4]
  374. + ((uint128_t) in1[1]) * in2[3]
  375. + ((uint128_t) in1[2]) * in2[2]
  376. + ((uint128_t) in1[3]) * in2[1]
  377. + ((uint128_t) in1[4]) * in2[0];
  378. out[5] = ((uint128_t) in1[0]) * in2[5]
  379. + ((uint128_t) in1[1]) * in2[4]
  380. + ((uint128_t) in1[2]) * in2[3]
  381. + ((uint128_t) in1[3]) * in2[2]
  382. + ((uint128_t) in1[4]) * in2[1]
  383. + ((uint128_t) in1[5]) * in2[0];
  384. out[6] = ((uint128_t) in1[0]) * in2[6]
  385. + ((uint128_t) in1[1]) * in2[5]
  386. + ((uint128_t) in1[2]) * in2[4]
  387. + ((uint128_t) in1[3]) * in2[3]
  388. + ((uint128_t) in1[4]) * in2[2]
  389. + ((uint128_t) in1[5]) * in2[1]
  390. + ((uint128_t) in1[6]) * in2[0];
  391. out[7] = ((uint128_t) in1[1]) * in2[6]
  392. + ((uint128_t) in1[2]) * in2[5]
  393. + ((uint128_t) in1[3]) * in2[4]
  394. + ((uint128_t) in1[4]) * in2[3]
  395. + ((uint128_t) in1[5]) * in2[2]
  396. + ((uint128_t) in1[6]) * in2[1];
  397. out[8] = ((uint128_t) in1[2]) * in2[6]
  398. + ((uint128_t) in1[3]) * in2[5]
  399. + ((uint128_t) in1[4]) * in2[4]
  400. + ((uint128_t) in1[5]) * in2[3]
  401. + ((uint128_t) in1[6]) * in2[2];
  402. out[9] = ((uint128_t) in1[3]) * in2[6]
  403. + ((uint128_t) in1[4]) * in2[5]
  404. + ((uint128_t) in1[5]) * in2[4]
  405. + ((uint128_t) in1[6]) * in2[3];
  406. out[10] = ((uint128_t) in1[4]) * in2[6]
  407. + ((uint128_t) in1[5]) * in2[5]
  408. + ((uint128_t) in1[6]) * in2[4];
  409. out[11] = ((uint128_t) in1[5]) * in2[6]
  410. + ((uint128_t) in1[6]) * in2[5];
  411. out[12] = ((uint128_t) in1[6]) * in2[6];
  412. }
  413. /*-
  414. * Reduce thirteen 128-bit coefficients to seven 64-bit coefficients.
  415. * in[i] < 2^128 - 2^125
  416. * out[i] < 2^56 for i < 6,
  417. * out[6] <= 2^48
  418. *
  419. * The technique in use here stems from the format of the prime modulus:
  420. * P384 = 2^384 - delta
  421. *
  422. * Thus we can reduce numbers of the form (X + 2^384 * Y) by substituting
  423. * them with (X + delta Y), with delta = 2^128 + 2^96 + (-2^32 + 1). These
  424. * coefficients are still quite large, and so we repeatedly apply this
  425. * technique on high-order bits in order to guarantee the desired bounds on
  426. * the size of our output.
  427. *
  428. * The three phases of elimination are as follows:
  429. * [1]: Y = 2^120 (in[12] | in[11] | in[10] | in[9])
  430. * [2]: Y = 2^8 (acc[8] | acc[7])
  431. * [3]: Y = 2^48 (acc[6] >> 48)
  432. * (Where a | b | c | d = (2^56)^3 a + (2^56)^2 b + (2^56) c + d)
  433. */
  434. static void felem_reduce(felem out, const widefelem in)
  435. {
  436. /*
  437. * In order to prevent underflow, we add a multiple of p before subtracting.
  438. * Use telescopic sums to represent 2^76 * p redundantly with each limb
  439. * of the form 2^124 + ...
  440. */
  441. static const widelimb two124m68 = (((widelimb) 1) << 124)
  442. - (((widelimb) 1) << 68);
  443. static const widelimb two124m116m68 = (((widelimb) 1) << 124)
  444. - (((widelimb) 1) << 116)
  445. - (((widelimb) 1) << 68);
  446. static const widelimb two124p108m76 = (((widelimb) 1) << 124)
  447. + (((widelimb) 1) << 108)
  448. - (((widelimb) 1) << 76);
  449. static const widelimb two124m92m68 = (((widelimb) 1) << 124)
  450. - (((widelimb) 1) << 92)
  451. - (((widelimb) 1) << 68);
  452. widelimb temp, acc[9];
  453. unsigned int i;
  454. memcpy(acc, in, sizeof(widelimb) * 9);
  455. acc[0] += two124p108m76;
  456. acc[1] += two124m116m68;
  457. acc[2] += two124m92m68;
  458. acc[3] += two124m68;
  459. acc[4] += two124m68;
  460. acc[5] += two124m68;
  461. acc[6] += two124m68;
  462. /* [1]: Eliminate in[9], ..., in[12] */
  463. acc[8] += in[12] >> 32;
  464. acc[7] += (in[12] & 0xffffffff) << 24;
  465. acc[7] += in[12] >> 8;
  466. acc[6] += (in[12] & 0xff) << 48;
  467. acc[6] -= in[12] >> 16;
  468. acc[5] -= (in[12] & 0xffff) << 40;
  469. acc[6] += in[12] >> 48;
  470. acc[5] += (in[12] & 0xffffffffffff) << 8;
  471. acc[7] += in[11] >> 32;
  472. acc[6] += (in[11] & 0xffffffff) << 24;
  473. acc[6] += in[11] >> 8;
  474. acc[5] += (in[11] & 0xff) << 48;
  475. acc[5] -= in[11] >> 16;
  476. acc[4] -= (in[11] & 0xffff) << 40;
  477. acc[5] += in[11] >> 48;
  478. acc[4] += (in[11] & 0xffffffffffff) << 8;
  479. acc[6] += in[10] >> 32;
  480. acc[5] += (in[10] & 0xffffffff) << 24;
  481. acc[5] += in[10] >> 8;
  482. acc[4] += (in[10] & 0xff) << 48;
  483. acc[4] -= in[10] >> 16;
  484. acc[3] -= (in[10] & 0xffff) << 40;
  485. acc[4] += in[10] >> 48;
  486. acc[3] += (in[10] & 0xffffffffffff) << 8;
  487. acc[5] += in[9] >> 32;
  488. acc[4] += (in[9] & 0xffffffff) << 24;
  489. acc[4] += in[9] >> 8;
  490. acc[3] += (in[9] & 0xff) << 48;
  491. acc[3] -= in[9] >> 16;
  492. acc[2] -= (in[9] & 0xffff) << 40;
  493. acc[3] += in[9] >> 48;
  494. acc[2] += (in[9] & 0xffffffffffff) << 8;
  495. /*
  496. * [2]: Eliminate acc[7], acc[8], that is, limbs 7 and 8, as
  497. * well as the contributions made from eliminating higher limbs.
  498. * acc[7] < in[7] + 2^120 + 2^56 < in[7] + 2^121
  499. * acc[8] < in[8] + 2^96
  500. */
  501. acc[4] += acc[8] >> 32;
  502. acc[3] += (acc[8] & 0xffffffff) << 24;
  503. acc[3] += acc[8] >> 8;
  504. acc[2] += (acc[8] & 0xff) << 48;
  505. acc[2] -= acc[8] >> 16;
  506. acc[1] -= (acc[8] & 0xffff) << 40;
  507. acc[2] += acc[8] >> 48;
  508. acc[1] += (acc[8] & 0xffffffffffff) << 8;
  509. acc[3] += acc[7] >> 32;
  510. acc[2] += (acc[7] & 0xffffffff) << 24;
  511. acc[2] += acc[7] >> 8;
  512. acc[1] += (acc[7] & 0xff) << 48;
  513. acc[1] -= acc[7] >> 16;
  514. acc[0] -= (acc[7] & 0xffff) << 40;
  515. acc[1] += acc[7] >> 48;
  516. acc[0] += (acc[7] & 0xffffffffffff) << 8;
  517. /*-
  518. * acc[k] < in[k] + 2^124 + 2^121
  519. * < in[k] + 2^125
  520. * < 2^128, for k <= 6
  521. */
  522. /*
  523. * Carry 4 -> 5 -> 6
  524. * This has the effect of ensuring that these more significant limbs
  525. * will be small in value after eliminating high bits from acc[6].
  526. */
  527. acc[5] += acc[4] >> 56;
  528. acc[4] &= 0x00ffffffffffffff;
  529. acc[6] += acc[5] >> 56;
  530. acc[5] &= 0x00ffffffffffffff;
  531. /*-
  532. * acc[6] < in[6] + 2^124 + 2^121 + 2^72 + 2^16
  533. * < in[6] + 2^125
  534. * < 2^128
  535. */
  536. /* [3]: Eliminate high bits of acc[6] */
  537. temp = acc[6] >> 48;
  538. acc[6] &= 0x0000ffffffffffff;
  539. /* temp < 2^80 */
  540. acc[3] += temp >> 40;
  541. acc[2] += (temp & 0xffffffffff) << 16;
  542. acc[2] += temp >> 16;
  543. acc[1] += (temp & 0xffff) << 40;
  544. acc[1] -= temp >> 24;
  545. acc[0] -= (temp & 0xffffff) << 32;
  546. acc[0] += temp;
  547. /*-
  548. * acc[k] < acc_old[k] + 2^64 + 2^56
  549. * < in[k] + 2^124 + 2^121 + 2^72 + 2^64 + 2^56 + 2^16 , k < 4
  550. */
  551. /* Carry 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 */
  552. acc[1] += acc[0] >> 56; /* acc[1] < acc_old[1] + 2^72 */
  553. acc[0] &= 0x00ffffffffffffff;
  554. acc[2] += acc[1] >> 56; /* acc[2] < acc_old[2] + 2^72 + 2^16 */
  555. acc[1] &= 0x00ffffffffffffff;
  556. acc[3] += acc[2] >> 56; /* acc[3] < acc_old[3] + 2^72 + 2^16 */
  557. acc[2] &= 0x00ffffffffffffff;
  558. /*-
  559. * acc[k] < acc_old[k] + 2^72 + 2^16
  560. * < in[k] + 2^124 + 2^121 + 2^73 + 2^64 + 2^56 + 2^17
  561. * < in[k] + 2^125
  562. * < 2^128 , k < 4
  563. */
  564. acc[4] += acc[3] >> 56; /*-
  565. * acc[4] < acc_old[4] + 2^72 + 2^16
  566. * < 2^72 + 2^56 + 2^16
  567. */
  568. acc[3] &= 0x00ffffffffffffff;
  569. acc[5] += acc[4] >> 56; /*-
  570. * acc[5] < acc_old[5] + 2^16 + 1
  571. * < 2^56 + 2^16 + 1
  572. */
  573. acc[4] &= 0x00ffffffffffffff;
  574. acc[6] += acc[5] >> 56; /* acc[6] < 2^48 + 1, i.e. acc[6] <= 2^48 */
  575. acc[5] &= 0x00ffffffffffffff;
  576. for (i = 0; i < NLIMBS; i++)
  577. out[i] = acc[i];
  578. }
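/*-
 * Congruence sketch (hypothetical helper, illustration only): felem_reduce()
 * only ever adds multiples of p and moves bits between limbs, so its output
 * is congruent to its input modulo p.  The converter below makes that
 * property checkable with BIGNUM arithmetic: for inputs meeting the stated
 * bounds, widefelem_to_BN_sketch(in) and felem_to_BN of the reduced result
 * agree modulo p.
 */
static BIGNUM *widefelem_to_BN_sketch(BIGNUM *out, const widefelem in)
{
    BIGNUM *r = (out != NULL) ? out : BN_new();
    BIGNUM *t = BN_new();
    unsigned int i;

    if (r == NULL || t == NULL)
        goto err;
    BN_zero(r);
    for (i = 0; i < 2 * NLIMBS - 1; i++) {
        /* r += in[i] * 2^(56*i); split the 128-bit limb into two 64-bit words */
        if (!BN_set_word(t, (u64)(in[i] >> 64))
            || !BN_lshift(t, t, 64)
            || !BN_add_word(t, (u64)in[i])
            || !BN_lshift(t, t, 56 * i)
            || !BN_add(r, r, t))
            goto err;
    }
    BN_free(t);
    return r;
 err:
    BN_free(t);
    if (out == NULL)
        BN_free(r);
    return NULL;
}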
  579. #if defined(ECP_NISTP384_ASM)
  580. static void felem_square_wrapper(widefelem out, const felem in);
  581. static void felem_mul_wrapper(widefelem out, const felem in1, const felem in2);
  582. static void (*felem_square_p)(widefelem out, const felem in) =
  583. felem_square_wrapper;
  584. static void (*felem_mul_p)(widefelem out, const felem in1, const felem in2) =
  585. felem_mul_wrapper;
  586. void p384_felem_square(widefelem out, const felem in);
  587. void p384_felem_mul(widefelem out, const felem in1, const felem in2);
  588. # if defined(_ARCH_PPC64)
  589. # include "crypto/ppc_arch.h"
  590. # endif
  591. static void felem_select(void)
  592. {
  593. # if defined(_ARCH_PPC64)
  594. if ((OPENSSL_ppccap_P & PPC_MADD300) && (OPENSSL_ppccap_P & PPC_ALTIVEC)) {
  595. felem_square_p = p384_felem_square;
  596. felem_mul_p = p384_felem_mul;
  597. return;
  598. }
  599. # endif
  600. /* Default */
  601. felem_square_p = felem_square_ref;
  602. felem_mul_p = felem_mul_ref;
  603. }
  604. static void felem_square_wrapper(widefelem out, const felem in)
  605. {
  606. felem_select();
  607. felem_square_p(out, in);
  608. }
  609. static void felem_mul_wrapper(widefelem out, const felem in1, const felem in2)
  610. {
  611. felem_select();
  612. felem_mul_p(out, in1, in2);
  613. }
  614. # define felem_square felem_square_p
  615. # define felem_mul felem_mul_p
  616. #else
  617. # define felem_square felem_square_ref
  618. # define felem_mul felem_mul_ref
  619. #endif
  620. static ossl_inline void felem_square_reduce(felem out, const felem in)
  621. {
  622. widefelem tmp;
  623. felem_square(tmp, in);
  624. felem_reduce(out, tmp);
  625. }
  626. static ossl_inline void felem_mul_reduce(felem out, const felem in1, const felem in2)
  627. {
  628. widefelem tmp;
  629. felem_mul(tmp, in1, in2);
  630. felem_reduce(out, tmp);
  631. }
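/*-
 * Cross-check sketch (hypothetical, illustration only): for inputs whose
 * limbs are below 2^56 and whose top limb is below 2^48 (e.g. outputs of
 * felem_reduce()), felem_mul_reduce() agrees with OpenSSL's generic
 * BN_mod_mul() once both results are brought into the range [0, p).
 */
static int felem_mul_reduce_check_sketch(const felem a, const felem b)
{
    felem r;
    BIGNUM *ba = NULL, *bb = NULL, *bp = NULL, *ref = NULL, *got = NULL;
    BN_CTX *ctx = BN_CTX_new();
    int ok = 0;

    if (ctx == NULL)
        goto done;
    ba = felem_to_BN(NULL, a);
    bb = felem_to_BN(NULL, b);
    bp = BN_bin2bn(nistp384_curve_params[0], sizeof(felem_bytearray), NULL);
    ref = BN_new();
    got = BN_new();
    if (ba == NULL || bb == NULL || bp == NULL || ref == NULL || got == NULL)
        goto done;
    if (!BN_mod_mul(ref, ba, bb, bp, ctx))
        goto done;
    felem_mul_reduce(r, a, b);
    /* the result is redundant (only congruent mod p), so reduce it fully */
    if (felem_to_BN(got, r) == NULL || !BN_nnmod(got, got, bp, ctx))
        goto done;
    ok = BN_cmp(ref, got) == 0;
 done:
    BN_free(ba);
    BN_free(bb);
    BN_free(bp);
    BN_free(ref);
    BN_free(got);
    BN_CTX_free(ctx);
    return ok;
}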
  632. /*-
  633. * felem_inv calculates |out| = |in|^{-1}
  634. *
  635. * Based on Fermat's Little Theorem:
  636. * a^p = a (mod p)
  637. * a^{p-1} = 1 (mod p)
  638. * a^{p-2} = a^{-1} (mod p)
  639. */
  640. static void felem_inv(felem out, const felem in)
  641. {
  642. felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6;
  643. unsigned int i = 0;
  644. felem_square_reduce(ftmp, in); /* 2^1 */
  645. felem_mul_reduce(ftmp, ftmp, in); /* 2^1 + 2^0 */
  646. felem_assign(ftmp2, ftmp);
  647. felem_square_reduce(ftmp, ftmp); /* 2^2 + 2^1 */
  648. felem_mul_reduce(ftmp, ftmp, in); /* 2^2 + 2^1 + 2^0 */
  649. felem_assign(ftmp3, ftmp);
  650. for (i = 0; i < 3; i++)
  651. felem_square_reduce(ftmp, ftmp); /* 2^5 + 2^4 + 2^3 */
  652. felem_mul_reduce(ftmp, ftmp3, ftmp); /* 2^5 + 2^4 + 2^3 + 2^2 + 2^1 + 2^0 */
  653. felem_assign(ftmp4, ftmp);
  654. for (i = 0; i < 6; i++)
  655. felem_square_reduce(ftmp, ftmp); /* 2^11 + ... + 2^6 */
  656. felem_mul_reduce(ftmp, ftmp4, ftmp); /* 2^11 + ... + 2^0 */
  657. for (i = 0; i < 3; i++)
  658. felem_square_reduce(ftmp, ftmp); /* 2^14 + ... + 2^3 */
  659. felem_mul_reduce(ftmp, ftmp3, ftmp); /* 2^14 + ... + 2^0 */
  660. felem_assign(ftmp5, ftmp);
  661. for (i = 0; i < 15; i++)
  662. felem_square_reduce(ftmp, ftmp); /* 2^29 + ... + 2^15 */
  663. felem_mul_reduce(ftmp, ftmp5, ftmp); /* 2^29 + ... + 2^0 */
  664. felem_assign(ftmp6, ftmp);
  665. for (i = 0; i < 30; i++)
  666. felem_square_reduce(ftmp, ftmp); /* 2^59 + ... + 2^30 */
  667. felem_mul_reduce(ftmp, ftmp6, ftmp); /* 2^59 + ... + 2^0 */
  668. felem_assign(ftmp4, ftmp);
  669. for (i = 0; i < 60; i++)
  670. felem_square_reduce(ftmp, ftmp); /* 2^119 + ... + 2^60 */
  671. felem_mul_reduce(ftmp, ftmp4, ftmp); /* 2^119 + ... + 2^0 */
  672. felem_assign(ftmp4, ftmp);
  673. for (i = 0; i < 120; i++)
  674. felem_square_reduce(ftmp, ftmp); /* 2^239 + ... + 2^120 */
  675. felem_mul_reduce(ftmp, ftmp4, ftmp); /* 2^239 + ... + 2^0 */
  676. for (i = 0; i < 15; i++)
  677. felem_square_reduce(ftmp, ftmp); /* 2^254 + ... + 2^15 */
  678. felem_mul_reduce(ftmp, ftmp5, ftmp); /* 2^254 + ... + 2^0 */
  679. for (i = 0; i < 31; i++)
  680. felem_square_reduce(ftmp, ftmp); /* 2^285 + ... + 2^31 */
  681. felem_mul_reduce(ftmp, ftmp6, ftmp); /* 2^285 + ... + 2^31 + 2^29 + ... + 2^0 */
  682. for (i = 0; i < 2; i++)
  683. felem_square_reduce(ftmp, ftmp); /* 2^287 + ... + 2^33 + 2^31 + ... + 2^2 */
  684. felem_mul_reduce(ftmp, ftmp2, ftmp); /* 2^287 + ... + 2^33 + 2^31 + ... + 2^0 */
  685. for (i = 0; i < 94; i++)
  686. felem_square_reduce(ftmp, ftmp); /* 2^381 + ... + 2^127 + 2^125 + ... + 2^94 */
  687. felem_mul_reduce(ftmp, ftmp6, ftmp); /* 2^381 + ... + 2^127 + 2^125 + ... + 2^94 + 2^29 + ... + 2^0 */
  688. for (i = 0; i < 2; i++)
  689. felem_square_reduce(ftmp, ftmp); /* 2^383 + ... + 2^129 + 2^127 + ... + 2^96 + 2^31 + ... + 2^2 */
  690. felem_mul_reduce(ftmp, in, ftmp); /* 2^383 + ... + 2^129 + 2^127 + ... + 2^96 + 2^31 + ... + 2^2 + 2^0 */
  691. memcpy(out, ftmp, sizeof(felem));
  692. }
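/*-
 * Reference sketch (hypothetical, illustration only): the unrolled chain
 * above computes in^(p-2) with far fewer multiplications than the naive
 * method.  A plain square-and-multiply over the bits of p - 2, taken from
 * the big-endian encoding of p in nistp384_curve_params[0], computes the
 * same inverse and may be easier to follow.
 */
static void felem_inv_simple_sketch(felem out, const felem in)
{
    felem_bytearray e;
    felem acc;
    int i;

    /* e = p - 2; p ends in 0xff, so only the least significant byte changes */
    memcpy(e, nistp384_curve_params[0], sizeof(e));
    e[47] -= 2;
    felem_one(acc);
    for (i = 383; i >= 0; i--) {
        felem_square_reduce(acc, acc);
        /* bit i of the big-endian exponent */
        if ((e[47 - (i >> 3)] >> (i & 7)) & 1)
            felem_mul_reduce(acc, acc, in);
    }
    felem_assign(out, acc);
}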
  693. /*
  694. * Zero-check: returns a limb with all bits set if |in| == 0 (mod p)
  695. * and 0 otherwise. We know that field elements are reduced to
  696. * 0 <= in < 2p, so we only need to check two cases:
  697. * 0 and 2^384 - 2^128 - 2^96 + 2^32 - 1
  698. * in[k] < 2^56, k < 6
  699. * in[6] <= 2^48
  700. */
  701. static limb felem_is_zero(const felem in)
  702. {
  703. limb zero, p384;
  704. zero = in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6];
  705. zero = ((int64_t) (zero) - 1) >> 63;
  706. p384 = (in[0] ^ 0x000000ffffffff) | (in[1] ^ 0xffff0000000000)
  707. | (in[2] ^ 0xfffffffffeffff) | (in[3] ^ 0xffffffffffffff)
  708. | (in[4] ^ 0xffffffffffffff) | (in[5] ^ 0xffffffffffffff)
  709. | (in[6] ^ 0xffffffffffff);
  710. p384 = ((int64_t) (p384) - 1) >> 63;
  711. return (zero | p384);
  712. }
  713. static int felem_is_zero_int(const void *in)
  714. {
  715. return (int)(felem_is_zero(in) & ((limb) 1));
  716. }
  717. /*-
  718. * felem_contract converts |in| to its unique, minimal representation.
  719. * Assume we've removed all redundant bits.
  720. * On entry:
  721. * in[k] < 2^56, k < 6
  722. * in[6] <= 2^48
  723. */
  724. static void felem_contract(felem out, const felem in)
  725. {
  726. static const int64_t two56 = ((limb) 1) << 56;
  727. /*
  728. * We know for a fact that 0 <= |in| < 2*p, for p = 2^384 - 2^128 - 2^96 + 2^32 - 1
  729. * Perform two successive, idempotent subtractions to reduce if |in| >= p.
  730. */
  731. int64_t tmp[NLIMBS], cond[5], a;
  732. unsigned int i;
  733. memcpy(tmp, in, sizeof(felem));
  734. /* Case 1: a = 1 iff |in| >= 2^384 */
  735. a = (in[6] >> 48);
  736. tmp[0] += a;
  737. tmp[0] -= a << 32;
  738. tmp[1] += a << 40;
  739. tmp[2] += a << 16;
  740. tmp[6] &= 0x0000ffffffffffff;
  741. /*
  742. * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
  743. * non-zero, so we only need one step
  744. */
  745. a = tmp[0] >> 63;
  746. tmp[0] += a & two56;
  747. tmp[1] -= a & 1;
  748. /* Carry 1 -> 2 -> 3 -> 4 -> 5 -> 6 */
  749. tmp[2] += tmp[1] >> 56;
  750. tmp[1] &= 0x00ffffffffffffff;
  751. tmp[3] += tmp[2] >> 56;
  752. tmp[2] &= 0x00ffffffffffffff;
  753. tmp[4] += tmp[3] >> 56;
  754. tmp[3] &= 0x00ffffffffffffff;
  755. tmp[5] += tmp[4] >> 56;
  756. tmp[4] &= 0x00ffffffffffffff;
  757. tmp[6] += tmp[5] >> 56; /* tmp[6] < 2^48 */
  758. tmp[5] &= 0x00ffffffffffffff;
  759. /*
  760. * Case 2: a = all ones if p <= |in| < 2^384, 0 otherwise
  761. */
  762. /* 0 iff (2^129..2^383) are all one */
  763. cond[0] = ((tmp[6] | 0xff000000000000) & tmp[5] & tmp[4] & tmp[3] & (tmp[2] | 0x0000000001ffff)) + 1;
  764. /* 0 iff 2^128 bit is one */
  765. cond[1] = (tmp[2] | ~0x00000000010000) + 1;
  766. /* 0 iff (2^96..2^127) bits are all one */
  767. cond[2] = ((tmp[2] | 0xffffffffff0000) & (tmp[1] | 0x0000ffffffffff)) + 1;
  768. /* 0 iff (2^32..2^95) bits are all zero */
  769. cond[3] = (tmp[1] & ~0xffff0000000000) | (tmp[0] & ~((int64_t) 0x000000ffffffff));
  770. /* 0 iff (2^0..2^31) bits are all one */
  771. cond[4] = (tmp[0] | 0xffffff00000000) + 1;
  772. /*
  773. * In effect, invert our conditions, so that 0 values become all 1's,
  774. * any non-zero value in the low-order 56 bits becomes all 0's
  775. */
  776. for (i = 0; i < 5; i++)
  777. cond[i] = ((cond[i] & 0x00ffffffffffffff) - 1) >> 63;
  778. /*
  779. * The condition for determining whether in is greater than our
  780. * prime is given by the following condition.
  781. */
  782. /* First subtract 2^384 - 2^129 cheaply */
  783. a = cond[0] & (cond[1] | (cond[2] & (~cond[3] | cond[4])));
  784. tmp[6] &= ~a;
  785. tmp[5] &= ~a;
  786. tmp[4] &= ~a;
  787. tmp[3] &= ~a;
  788. tmp[2] &= ~a | 0x0000000001ffff;
  789. /*
  790. * Subtract 2^128 - 2^96 by
  791. * means of disjoint cases.
  792. */
  793. /* subtract 2^128 if that bit is present, and add 2^96 */
  794. a = cond[0] & cond[1];
  795. tmp[2] &= ~a | 0xfffffffffeffff;
  796. tmp[1] += a & ((int64_t) 1 << 40);
  797. /* otherwise, clear bits 2^127 .. 2^96 */
  798. a = cond[0] & ~cond[1] & (cond[2] & (~cond[3] | cond[4]));
  799. tmp[2] &= ~a | 0xffffffffff0000;
  800. tmp[1] &= ~a | 0x0000ffffffffff;
  801. /* finally, subtract the last 2^32 - 1 */
  802. a = cond[0] & (cond[1] | (cond[2] & (~cond[3] | cond[4])));
  803. tmp[0] += a & (-((int64_t) 1 << 32) + 1);
  804. /*
  805. * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
  806. * non-zero, so we only need one step
  807. */
  808. a = tmp[0] >> 63;
  809. tmp[0] += a & two56;
  810. tmp[1] -= a & 1;
  811. /* Carry 1 -> 2 -> 3 -> 4 -> 5 -> 6 */
  812. tmp[2] += tmp[1] >> 56;
  813. tmp[1] &= 0x00ffffffffffffff;
  814. tmp[3] += tmp[2] >> 56;
  815. tmp[2] &= 0x00ffffffffffffff;
  816. tmp[4] += tmp[3] >> 56;
  817. tmp[3] &= 0x00ffffffffffffff;
  818. tmp[5] += tmp[4] >> 56;
  819. tmp[4] &= 0x00ffffffffffffff;
  820. tmp[6] += tmp[5] >> 56;
  821. tmp[5] &= 0x00ffffffffffffff;
  822. memcpy(out, tmp, sizeof(felem));
  823. }
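/*-
 * Contract-check sketch (hypothetical, illustration only): for an input
 * meeting the bounds above (and with in[6] strictly below 2^48 so that no
 * information is lost in serialisation), felem_contract() must return the
 * canonical residue: the unique value congruent to the input mod p and
 * strictly smaller than p.  That is easy to state with BIGNUM arithmetic:
 */
static int felem_contract_check_sketch(const felem in)
{
    felem c;
    BIGNUM *bin = NULL, *bc = NULL, *bp = NULL;
    BN_CTX *ctx = BN_CTX_new();
    int ok = 0;

    if (ctx == NULL)
        goto done;
    bp = BN_bin2bn(nistp384_curve_params[0], sizeof(felem_bytearray), NULL);
    bin = felem_to_BN(NULL, in);
    felem_contract(c, in);
    bc = felem_to_BN(NULL, c);
    if (bp == NULL || bin == NULL || bc == NULL
        || !BN_nnmod(bin, bin, bp, ctx))
        goto done;
    /* same residue, and already below p */
    ok = BN_cmp(bin, bc) == 0 && BN_cmp(bc, bp) < 0;
 done:
    BN_free(bp);
    BN_free(bin);
    BN_free(bc);
    BN_CTX_free(ctx);
    return ok;
}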
  824. /*-
  825. * Group operations
  826. * ----------------
  827. *
  828. * Building on top of the field operations we have the operations on the
  829. * elliptic curve group itself. Points on the curve are represented in Jacobian
  830. * coordinates
  831. */
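/*-
 * Affine-conversion sketch (hypothetical helper, illustration only): a
 * Jacobian triple (X, Y, Z) with Z != 0 corresponds to the affine point
 * (X/Z^2, Y/Z^3).  Using the field helpers above, that conversion looks
 * like this, with felem_contract() producing canonical coordinates.
 */
static void point_to_affine_sketch(felem x_aff, felem y_aff,
                                   const felem x_in, const felem y_in,
                                   const felem z_in)
{
    felem z_inv, z_inv_sq, z_inv_cu, tmp;

    /* 1/Z, 1/Z^2 and 1/Z^3 */
    felem_inv(z_inv, z_in);
    felem_square_reduce(z_inv_sq, z_inv);
    felem_mul_reduce(z_inv_cu, z_inv_sq, z_inv);
    /* x = X/Z^2, y = Y/Z^3 */
    felem_mul_reduce(tmp, x_in, z_inv_sq);
    felem_contract(x_aff, tmp);
    felem_mul_reduce(tmp, y_in, z_inv_cu);
    felem_contract(y_aff, tmp);
}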
  832. /*-
  833. * point_double calculates 2*(x_in, y_in, z_in)
  834. *
  835. * The method is taken from:
  836. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
  837. *
  838. * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
  839. * while x_out == y_in is not (maybe this works, but it's not tested).
  840. */
  841. static void
  842. point_double(felem x_out, felem y_out, felem z_out,
  843. const felem x_in, const felem y_in, const felem z_in)
  844. {
  845. widefelem tmp, tmp2;
  846. felem delta, gamma, beta, alpha, ftmp, ftmp2;
  847. felem_assign(ftmp, x_in);
  848. felem_assign(ftmp2, x_in);
  849. /* delta = z^2 */
  850. felem_square_reduce(delta, z_in); /* delta[i] < 2^56 */
  851. /* gamma = y^2 */
  852. felem_square_reduce(gamma, y_in); /* gamma[i] < 2^56 */
  853. /* beta = x*gamma */
  854. felem_mul_reduce(beta, x_in, gamma); /* beta[i] < 2^56 */
  855. /* alpha = 3*(x-delta)*(x+delta) */
  856. felem_diff64(ftmp, delta); /* ftmp[i] < 2^60 + 2^58 + 2^44 */
  857. felem_sum64(ftmp2, delta); /* ftmp2[i] < 2^59 */
  858. felem_scalar64(ftmp2, 3); /* ftmp2[i] < 2^61 */
  859. felem_mul_reduce(alpha, ftmp, ftmp2); /* alpha[i] < 2^56 */
  860. /* x' = alpha^2 - 8*beta */
  861. felem_square(tmp, alpha); /* tmp[i] < 2^115 */
  862. felem_assign(ftmp, beta); /* ftmp[i] < 2^56 */
  863. felem_scalar64(ftmp, 8); /* ftmp[i] < 2^59 */
  864. felem_diff_128_64(tmp, ftmp); /* tmp[i] < 2^115 + 2^64 + 2^48 */
  865. felem_reduce(x_out, tmp); /* x_out[i] < 2^56 */
  866. /* z' = (y + z)^2 - gamma - delta */
  867. felem_sum64(delta, gamma); /* delta[i] < 2^57 */
  868. felem_assign(ftmp, y_in); /* ftmp[i] < 2^56 */
  869. felem_sum64(ftmp, z_in); /* ftmp[i] < 2^56 */
  870. felem_square(tmp, ftmp); /* tmp[i] < 2^115 */
  871. felem_diff_128_64(tmp, delta); /* tmp[i] < 2^115 + 2^64 + 2^48 */
  872. felem_reduce(z_out, tmp); /* z_out[i] < 2^56 */
  873. /* y' = alpha*(4*beta - x') - 8*gamma^2 */
  874. felem_scalar64(beta, 4); /* beta[i] < 2^58 */
  875. felem_diff64(beta, x_out); /* beta[i] < 2^60 + 2^58 + 2^44 */
  876. felem_mul(tmp, alpha, beta); /* tmp[i] < 2^119 */
  877. felem_square(tmp2, gamma); /* tmp2[i] < 2^115 */
  878. felem_scalar128(tmp2, 8); /* tmp2[i] < 2^118 */
  879. felem_diff128(tmp, tmp2); /* tmp[i] < 2^127 + 2^119 + 2^111 */
  880. felem_reduce(y_out, tmp); /* tmp[i] < 2^56 */
  881. }
  882. /* copy_conditional copies in to out iff mask is all ones. */
  883. static void copy_conditional(felem out, const felem in, limb mask)
  884. {
  885. unsigned int i;
  886. for (i = 0; i < NLIMBS; i++)
  887. out[i] ^= mask & (in[i] ^ out[i]);
  888. }
  889. /*-
  890. * point_add calculates (x1, y1, z1) + (x2, y2, z2)
  891. *
  892. * The method is taken from
  893. * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
  894. * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
  895. *
  896. * This function includes a branch for checking whether the two input points
  897. * are equal (while not equal to the point at infinity). See comment below
  898. * on constant-time.
  899. */
  900. static void point_add(felem x3, felem y3, felem z3,
  901. const felem x1, const felem y1, const felem z1,
  902. const int mixed, const felem x2, const felem y2,
  903. const felem z2)
  904. {
  905. felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
  906. widefelem tmp, tmp2;
  907. limb x_equal, y_equal, z1_is_zero, z2_is_zero;
  908. limb points_equal;
  909. z1_is_zero = felem_is_zero(z1);
  910. z2_is_zero = felem_is_zero(z2);
  911. /* ftmp = z1z1 = z1**2 */
  912. felem_square_reduce(ftmp, z1); /* ftmp[i] < 2^56 */
  913. if (!mixed) {
  914. /* ftmp2 = z2z2 = z2**2 */
  915. felem_square_reduce(ftmp2, z2); /* ftmp2[i] < 2^56 */
  916. /* u1 = ftmp3 = x1*z2z2 */
  917. felem_mul_reduce(ftmp3, x1, ftmp2); /* ftmp3[i] < 2^56 */
  918. /* ftmp5 = z1 + z2 */
  919. felem_assign(ftmp5, z1); /* ftmp5[i] < 2^56 */
  920. felem_sum64(ftmp5, z2); /* ftmp5[i] < 2^57 */
  921. /* ftmp5 = (z1 + z2)**2 - z1z1 - z2z2 = 2*z1z2 */
  922. felem_square(tmp, ftmp5); /* tmp[i] < 2^117 */
  923. felem_diff_128_64(tmp, ftmp); /* tmp[i] < 2^117 + 2^64 + 2^48 */
  924. felem_diff_128_64(tmp, ftmp2); /* tmp[i] < 2^117 + 2^65 + 2^49 */
  925. felem_reduce(ftmp5, tmp); /* ftmp5[i] < 2^56 */
  926. /* ftmp2 = z2 * z2z2 */
  927. felem_mul_reduce(ftmp2, ftmp2, z2); /* ftmp2[i] < 2^56 */
  928. /* s1 = ftmp6 = y1 * z2**3 */
  929. felem_mul_reduce(ftmp6, y1, ftmp2); /* ftmp6[i] < 2^56 */
  930. } else {
  931. /*
  932. * We'll assume z2 = 1 (special case z2 = 0 is handled later)
  933. */
  934. /* u1 = ftmp3 = x1*z2z2 */
  935. felem_assign(ftmp3, x1); /* ftmp3[i] < 2^56 */
  936. /* ftmp5 = 2*z1z2 */
  937. felem_scalar(ftmp5, z1, 2); /* ftmp5[i] < 2^57 */
  938. /* s1 = ftmp6 = y1 * z2**3 */
  939. felem_assign(ftmp6, y1); /* ftmp6[i] < 2^56 */
  940. }
  941. /* ftmp3[i] < 2^56, ftmp5[i] < 2^57, ftmp6[i] < 2^56 */
  942. /* u2 = x2*z1z1 */
  943. felem_mul(tmp, x2, ftmp); /* tmp[i] < 2^115 */
  944. /* h = ftmp4 = u2 - u1 */
  945. felem_diff_128_64(tmp, ftmp3); /* tmp[i] < 2^115 + 2^64 + 2^48 */
  946. felem_reduce(ftmp4, tmp); /* ftmp4[i] < 2^56 */
  947. x_equal = felem_is_zero(ftmp4);
  948. /* z_out = ftmp5 * h */
  949. felem_mul_reduce(z_out, ftmp5, ftmp4); /* z_out[i] < 2^56 */
  950. /* ftmp = z1 * z1z1 */
  951. felem_mul_reduce(ftmp, ftmp, z1); /* ftmp[i] < 2^56 */
  952. /* s2 = tmp = y2 * z1**3 */
  953. felem_mul(tmp, y2, ftmp); /* tmp[i] < 2^115 */
  954. /* r = ftmp5 = (s2 - s1)*2 */
  955. felem_diff_128_64(tmp, ftmp6); /* tmp[i] < 2^115 + 2^64 + 2^48 */
  956. felem_reduce(ftmp5, tmp); /* ftmp5[i] < 2^56 */
  957. y_equal = felem_is_zero(ftmp5);
  958. felem_scalar64(ftmp5, 2); /* ftmp5[i] < 2^57 */
  959. /*
  960. * The formulae are incorrect if the points are equal, in affine coordinates
  961. * (X_1, Y_1) == (X_2, Y_2), so we check for this and do doubling if this
  962. * happens.
  963. *
  964. * We use bitwise operations to avoid potential side-channels introduced by
  965. * the short-circuiting behaviour of boolean operators.
  966. *
  967. * The special case of either point being the point at infinity (z1 and/or
  968. * z2 are zero), is handled separately later on in this function, so we
  969. * avoid jumping to point_double here in those special cases.
  970. *
  971. * Notice the comment below on the implications of this branching for timing
  972. * leaks and why it is considered practically irrelevant.
  973. */
  974. points_equal = (x_equal & y_equal & (~z1_is_zero) & (~z2_is_zero));
  975. if (points_equal) {
  976. /*
  977. * This is obviously not constant-time but it will almost-never happen
  978. * for ECDH / ECDSA.
  979. */
  980. point_double(x3, y3, z3, x1, y1, z1);
  981. return;
  982. }
  983. /* I = ftmp = (2h)**2 */
  984. felem_assign(ftmp, ftmp4); /* ftmp[i] < 2^56 */
  985. felem_scalar64(ftmp, 2); /* ftmp[i] < 2^57 */
  986. felem_square_reduce(ftmp, ftmp); /* ftmp[i] < 2^56 */
  987. /* J = ftmp2 = h * I */
  988. felem_mul_reduce(ftmp2, ftmp4, ftmp); /* ftmp2[i] < 2^56 */
  989. /* V = ftmp4 = U1 * I */
  990. felem_mul_reduce(ftmp4, ftmp3, ftmp); /* ftmp4[i] < 2^56 */
  991. /* x_out = r**2 - J - 2V */
  992. felem_square(tmp, ftmp5); /* tmp[i] < 2^117 */
  993. felem_diff_128_64(tmp, ftmp2); /* tmp[i] < 2^117 + 2^64 + 2^48 */
  994. felem_assign(ftmp3, ftmp4); /* ftmp3[i] < 2^56 */
  995. felem_scalar64(ftmp4, 2); /* ftmp4[i] < 2^57 */
  996. felem_diff_128_64(tmp, ftmp4); /* tmp[i] < 2^117 + 2^65 + 2^49 */
  997. felem_reduce(x_out, tmp); /* x_out[i] < 2^56 */
  998. /* y_out = r(V-x_out) - 2 * s1 * J */
  999. felem_diff64(ftmp3, x_out); /* ftmp3[i] < 2^60 + 2^56 + 2^44 */
  1000. felem_mul(tmp, ftmp5, ftmp3); /* tmp[i] < 2^116 */
  1001. felem_mul(tmp2, ftmp6, ftmp2); /* tmp2[i] < 2^115 */
  1002. felem_scalar128(tmp2, 2); /* tmp2[i] < 2^116 */
  1003. felem_diff128(tmp, tmp2); /* tmp[i] < 2^127 + 2^116 + 2^111 */
  1004. felem_reduce(y_out, tmp); /* y_out[i] < 2^56 */
  1005. copy_conditional(x_out, x2, z1_is_zero);
  1006. copy_conditional(x_out, x1, z2_is_zero);
  1007. copy_conditional(y_out, y2, z1_is_zero);
  1008. copy_conditional(y_out, y1, z2_is_zero);
  1009. copy_conditional(z_out, z2, z1_is_zero);
  1010. copy_conditional(z_out, z1, z2_is_zero);
  1011. felem_assign(x3, x_out);
  1012. felem_assign(y3, y_out);
  1013. felem_assign(z3, z_out);
  1014. }
  1015. /*-
  1016. * Base point pre computation
  1017. * --------------------------
  1018. *
  1019. * Two different sorts of precomputed tables are used in the following code.
  1020. * Each contain various points on the curve, where each point is three field
  1021. * elements (x, y, z).
  1022. *
  1023. * For the base point table, z is usually 1 (0 for the point at infinity).
  1024. * This table has 16 elements:
  1025. * index | bits | point
  1026. * ------+---------+------------------------------
  1027. * 0 | 0 0 0 0 | 0G
  1028. * 1 | 0 0 0 1 | 1G
  1029. * 2 | 0 0 1 0 | 2^95G
  1030. * 3 | 0 0 1 1 | (2^95 + 1)G
  1031. * 4 | 0 1 0 0 | 2^190G
  1032. * 5 | 0 1 0 1 | (2^190 + 1)G
  1033. * 6 | 0 1 1 0 | (2^190 + 2^95)G
  1034. * 7 | 0 1 1 1 | (2^190 + 2^95 + 1)G
  1035. * 8 | 1 0 0 0 | 2^285G
  1036. * 9 | 1 0 0 1 | (2^285 + 1)G
  1037. * 10 | 1 0 1 0 | (2^285 + 2^95)G
  1038. * 11 | 1 0 1 1 | (2^285 + 2^95 + 1)G
  1039. * 12 | 1 1 0 0 | (2^285 + 2^190)G
  1040. * 13 | 1 1 0 1 | (2^285 + 2^190 + 1)G
  1041. * 14 | 1 1 1 0 | (2^285 + 2^190 + 2^95)G
  1042. * 15 | 1 1 1 1 | (2^285 + 2^190 + 2^95 + 1)G
  1043. *
  1044. * The reason for this is so that we can clock bits into four different
  1045. * locations when doing simple scalar multiplies against the base point.
  1046. *
  1047. * Tables for other points have table[i] = iG for i in 0 .. 16.
  1048. */
  1049. /* gmul is the table of precomputed base points */
  1050. static const felem gmul[16][3] = {
  1051. {{0, 0, 0, 0, 0, 0, 0},
  1052. {0, 0, 0, 0, 0, 0, 0},
  1053. {0, 0, 0, 0, 0, 0, 0}},
  1054. {{0x00545e3872760ab7, 0x00f25dbf55296c3a, 0x00e082542a385502, 0x008ba79b9859f741,
  1055. 0x0020ad746e1d3b62, 0x0005378eb1c71ef3, 0x0000aa87ca22be8b},
  1056. {0x00431d7c90ea0e5f, 0x00b1ce1d7e819d7a, 0x0013b5f0b8c00a60, 0x00289a147ce9da31,
  1057. 0x0092dc29f8f41dbd, 0x002c6f5d9e98bf92, 0x00003617de4a9626},
  1058. {1, 0, 0, 0, 0, 0, 0}},
  1059. {{0x00024711cc902a90, 0x00acb2e579ab4fe1, 0x00af818a4b4d57b1, 0x00a17c7bec49c3de,
  1060. 0x004280482d726a8b, 0x00128dd0f0a90f3b, 0x00004387c1c3fa3c},
  1061. {0x002ce76543cf5c3a, 0x00de6cee5ef58f0a, 0x00403e42fa561ca6, 0x00bc54d6f9cb9731,
  1062. 0x007155f925fb4ff1, 0x004a9ce731b7b9bc, 0x00002609076bd7b2},
  1063. {1, 0, 0, 0, 0, 0, 0}},
  1064. {{0x00e74c9182f0251d, 0x0039bf54bb111974, 0x00b9d2f2eec511d2, 0x0036b1594eb3a6a4,
  1065. 0x00ac3bb82d9d564b, 0x00f9313f4615a100, 0x00006716a9a91b10},
  1066. {0x0046698116e2f15c, 0x00f34347067d3d33, 0x008de4ccfdebd002, 0x00e838c6b8e8c97b,
  1067. 0x006faf0798def346, 0x007349794a57563c, 0x00002629e7e6ad84},
  1068. {1, 0, 0, 0, 0, 0, 0}},
  1069. {{0x0075300e34fd163b, 0x0092e9db4e8d0ad3, 0x00254be9f625f760, 0x00512c518c72ae68,
  1070. 0x009bfcf162bede5a, 0x00bf9341566ce311, 0x0000cd6175bd41cf},
  1071. {0x007dfe52af4ac70f, 0x0002159d2d5c4880, 0x00b504d16f0af8d0, 0x0014585e11f5e64c,
  1072. 0x0089c6388e030967, 0x00ffb270cbfa5f71, 0x00009a15d92c3947},
  1073. {1, 0, 0, 0, 0, 0, 0}},
  1074. {{0x0033fc1278dc4fe5, 0x00d53088c2caa043, 0x0085558827e2db66, 0x00c192bef387b736,
  1075. 0x00df6405a2225f2c, 0x0075205aa90fd91a, 0x0000137e3f12349d},
  1076. {0x00ce5b115efcb07e, 0x00abc3308410deeb, 0x005dc6fc1de39904, 0x00907c1c496f36b4,
  1077. 0x0008e6ad3926cbe1, 0x00110747b787928c, 0x0000021b9162eb7e},
  1078. {1, 0, 0, 0, 0, 0, 0}},
  1079. {{0x008180042cfa26e1, 0x007b826a96254967, 0x0082473694d6b194, 0x007bd6880a45b589,
  1080. 0x00c0a5097072d1a3, 0x0019186555e18b4e, 0x000020278190e5ca},
  1081. {0x00b4bef17de61ac0, 0x009535e3c38ed348, 0x002d4aa8e468ceab, 0x00ef40b431036ad3,
  1082. 0x00defd52f4542857, 0x0086edbf98234266, 0x00002025b3a7814d},
  1083. {1, 0, 0, 0, 0, 0, 0}},
  1084. {{0x00b238aa97b886be, 0x00ef3192d6dd3a32, 0x0079f9e01fd62df8, 0x00742e890daba6c5,
  1085. 0x008e5289144408ce, 0x0073bbcc8e0171a5, 0x0000c4fd329d3b52},
  1086. {0x00c6f64a15ee23e7, 0x00dcfb7b171cad8b, 0x00039f6cbd805867, 0x00de024e428d4562,
  1087. 0x00be6a594d7c64c5, 0x0078467b70dbcd64, 0x0000251f2ed7079b},
  1088. {1, 0, 0, 0, 0, 0, 0}},
  1089. {{0x000e5cc25fc4b872, 0x005ebf10d31ef4e1, 0x0061e0ebd11e8256, 0x0076e026096f5a27,
  1090. 0x0013e6fc44662e9a, 0x0042b00289d3597e, 0x000024f089170d88},
  1091. {0x001604d7e0effbe6, 0x0048d77cba64ec2c, 0x008166b16da19e36, 0x006b0d1a0f28c088,
  1092. 0x000259fcd47754fd, 0x00cc643e4d725f9a, 0x00007b10f3c79c14},
  1093. {1, 0, 0, 0, 0, 0, 0}},
  1094. {{0x00430155e3b908af, 0x00b801e4fec25226, 0x00b0d4bcfe806d26, 0x009fc4014eb13d37,
  1095. 0x0066c94e44ec07e8, 0x00d16adc03874ba2, 0x000030c917a0d2a7},
  1096. {0x00edac9e21eb891c, 0x00ef0fb768102eff, 0x00c088cef272a5f3, 0x00cbf782134e2964,
  1097. 0x0001044a7ba9a0e3, 0x00e363f5b194cf3c, 0x00009ce85249e372},
  1098. {1, 0, 0, 0, 0, 0, 0}},
  1099. {{0x001dd492dda5a7eb, 0x008fd577be539fd1, 0x002ff4b25a5fc3f1, 0x0074a8a1b64df72f,
  1100. 0x002ba3d8c204a76c, 0x009d5cff95c8235a, 0x0000e014b9406e0f},
  1101. {0x008c2e4dbfc98aba, 0x00f30bb89f1a1436, 0x00b46f7aea3e259c, 0x009224454ac02f54,
  1102. 0x00906401f5645fa2, 0x003a1d1940eabc77, 0x00007c9351d680e6},
  1103. {1, 0, 0, 0, 0, 0, 0}},
  1104. {{0x005a35d872ef967c, 0x0049f1b7884e1987, 0x0059d46d7e31f552, 0x00ceb4869d2d0fb6,
  1105. 0x00e8e89eee56802a, 0x0049d806a774aaf2, 0x0000147e2af0ae24},
  1106. {0x005fd1bd852c6e5e, 0x00b674b7b3de6885, 0x003b9ea5eb9b6c08, 0x005c9f03babf3ef7,
  1107. 0x00605337fecab3c7, 0x009a3f85b11bbcc8, 0x0000455470f330ec},
  1108. {1, 0, 0, 0, 0, 0, 0}},
  1109. {{0x002197ff4d55498d, 0x00383e8916c2d8af, 0x00eb203f34d1c6d2, 0x0080367cbd11b542,
  1110. 0x00769b3be864e4f5, 0x0081a8458521c7bb, 0x0000c531b34d3539},
  1111. {0x00e2a3d775fa2e13, 0x00534fc379573844, 0x00ff237d2a8db54a, 0x00d301b2335a8882,
  1112. 0x000f75ea96103a80, 0x0018fecb3cdd96fa, 0x0000304bf61e94eb},
  1113. {1, 0, 0, 0, 0, 0, 0}},
  1114. {{0x00b2afc332a73dbd, 0x0029a0d5bb007bc5, 0x002d628eb210f577, 0x009f59a36dd05f50,
  1115. 0x006d339de4eca613, 0x00c75a71addc86bc, 0x000060384c5ea93c},
  1116. {0x00aa9641c32a30b4, 0x00cc73ae8cce565d, 0x00ec911a4df07f61, 0x00aa4b762ea4b264,
  1117. 0x0096d395bb393629, 0x004efacfb7632fe0, 0x00006f252f46fa3f},
  1118. {1, 0, 0, 0, 0, 0, 0}},
  1119. {{0x00567eec597c7af6, 0x0059ba6795204413, 0x00816d4e6f01196f, 0x004ae6b3eb57951d,
  1120. 0x00420f5abdda2108, 0x003401d1f57ca9d9, 0x0000cf5837b0b67a},
  1121. {0x00eaa64b8aeeabf9, 0x00246ddf16bcb4de, 0x000e7e3c3aecd751, 0x0008449f04fed72e,
  1122. 0x00307b67ccf09183, 0x0017108c3556b7b1, 0x0000229b2483b3bf},
  1123. {1, 0, 0, 0, 0, 0, 0}},
  1124. {{0x00e7c491a7bb78a1, 0x00eafddd1d3049ab, 0x00352c05e2bc7c98, 0x003d6880c165fa5c,
  1125. 0x00b6ac61cc11c97d, 0x00beeb54fcf90ce5, 0x0000dc1f0b455edc},
  1126. {0x002db2e7aee34d60, 0x0073b5f415a2d8c0, 0x00dd84e4193e9a0c, 0x00d02d873467c572,
  1127. 0x0018baaeda60aee5, 0x0013fb11d697c61e, 0x000083aafcc3a973},
  1128. {1, 0, 0, 0, 0, 0, 0}}
  1129. };
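/*-
 * Consistency sketch (hypothetical, illustration only): entry 1 of the table
 * above is 1*G with Z = 1, so its X and Y limbs must match the generator
 * coordinates stored big-endian in nistp384_curve_params[3] and [4].
 */
static int gmul_generator_check_sketch(void)
{
    BIGNUM *bx, *by;
    felem gx, gy;
    int ok = 0;

    bx = BN_bin2bn(nistp384_curve_params[3], sizeof(felem_bytearray), NULL);
    by = BN_bin2bn(nistp384_curve_params[4], sizeof(felem_bytearray), NULL);
    if (bx == NULL || by == NULL
        || !BN_to_felem(gx, bx) || !BN_to_felem(gy, by))
        goto done;
    ok = memcmp(gx, gmul[1][0], sizeof(felem)) == 0
        && memcmp(gy, gmul[1][1], sizeof(felem)) == 0;
 done:
    BN_free(bx);
    BN_free(by);
    return ok;
}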
  1130. /*
  1131. * select_point selects the |idx|th point from a precomputation table and
  1132. * copies it to out.
  1133. *
  1134. * pre_comp below is of the size provided in |size|.
  1135. */
  1136. static void select_point(const limb idx, unsigned int size,
  1137. const felem pre_comp[][3], felem out[3])
  1138. {
  1139. unsigned int i, j;
  1140. limb *outlimbs = &out[0][0];
  1141. memset(out, 0, sizeof(*out) * 3);
  1142. for (i = 0; i < size; i++) {
  1143. const limb *inlimbs = &pre_comp[i][0][0];
  1144. limb mask = i ^ idx;
  1145. mask |= mask >> 4;
  1146. mask |= mask >> 2;
  1147. mask |= mask >> 1;
  1148. mask &= 1;
  1149. mask--;
  1150. for (j = 0; j < NLIMBS * 3; j++)
  1151. outlimbs[j] |= inlimbs[j] & mask;
  1152. }
  1153. }
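/*-
 * Equivalence sketch (hypothetical, illustration only): for an in-range
 * |idx|, the constant-time selection above returns exactly pre_comp[idx];
 * the mask construction merely avoids a secret-dependent memory access
 * pattern while doing so.
 */
static int select_point_check_sketch(const limb idx, unsigned int size,
                                     const felem pre_comp[][3])
{
    felem out[3];

    select_point(idx, size, pre_comp, out);
    return idx < size && memcmp(out, pre_comp[idx], sizeof(out)) == 0;
}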
  1154. /* get_bit returns the |i|th bit in |in| */
  1155. static char get_bit(const felem_bytearray in, int i)
  1156. {
  1157. if (i < 0 || i >= 384)
  1158. return 0;
  1159. return (in[i >> 3] >> (i & 7)) & 1;
  1160. }
  1161. /*
  1162. * Interleaved point multiplication using precomputed point multiples: The
  1163. * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
  1164. * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
  1165. * generator, using certain (large) precomputed multiples in g_pre_comp.
  1166. * Output point (X, Y, Z) is stored in x_out, y_out, z_out
  1167. */
static void batch_mul(felem x_out, felem y_out, felem z_out,
                      const felem_bytearray scalars[],
                      const unsigned int num_points, const u8 *g_scalar,
                      const int mixed, const felem pre_comp[][17][3],
                      const felem g_pre_comp[16][3])
{
    int i, skip;
    unsigned int num, gen_mul = (g_scalar != NULL);
    felem nq[3], tmp[4];
    limb bits;
    u8 sign, digit;

    /* set nq to the point at infinity */
    memset(nq, 0, sizeof(nq));

    /*
     * Loop over all scalars msb-to-lsb, interleaving additions of multiples
     * of the generator (last quarter of rounds) and additions of other
     * points multiples (every 5th round).
     */
    skip = 1;                   /* save two point operations in the first
                                 * round */
    for (i = (num_points ? 380 : 98); i >= 0; --i) {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);

        /* add multiples of the generator */
        if (gen_mul && (i <= 98)) {
            bits = get_bit(g_scalar, i + 285) << 3;
            if (i < 95) {
                bits |= get_bit(g_scalar, i + 190) << 2;
                bits |= get_bit(g_scalar, i + 95) << 1;
                bits |= get_bit(g_scalar, i);
            }
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp, tmp);
            if (!skip) {
                /* The 1 argument below is for "mixed" */
                point_add(nq[0], nq[1], nq[2],
                          nq[0], nq[1], nq[2], 1,
                          tmp[0], tmp[1], tmp[2]);
            } else {
                memcpy(nq, tmp, 3 * sizeof(felem));
                skip = 0;
            }
        }

        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0)) {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num) {
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ossl_ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

                /*
                 * select the point to add or subtract, in constant time
                 */
                select_point(digit, 17, pre_comp[num], tmp);
                felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
                                            * point */
                copy_conditional(tmp[1], tmp[3], (-(limb) sign));

                if (!skip) {
                    point_add(nq[0], nq[1], nq[2],
                              nq[0], nq[1], nq[2], mixed,
                              tmp[0], tmp[1], tmp[2]);
                } else {
                    memcpy(nq, tmp, 3 * sizeof(felem));
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
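
/*
 * A concrete round of the loop above, as a sketch (i = 40 is an arbitrary
 * example, not special in the code): for the generator part, bits 325, 230,
 * 135 and 40 of g_scalar are gathered into a 4-bit index, so g_pre_comp
 * must hold every subset-sum of {G, 2^95*G, 2^190*G, 2^285*G}; index 6
 * (binary 0110), for instance, is 2^95*G + 2^190*G. For the other points,
 * rounds where i is a multiple of 5 gather the six bits i+4 .. i-1 of the
 * scalar, and ossl_ec_GFp_nistp_recode_scalar_bits turns that window into a
 * sign plus a digit in [0, 16], which is why pre_comp[] stores 0*P .. 16*P
 * and the negation is applied afterwards with copy_conditional.
 */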

/* Precomputation for the group generator. */
struct nistp384_pre_comp_st {
    felem g_pre_comp[16][3];
    CRYPTO_REF_COUNT references;
};

const EC_METHOD *ossl_ec_GFp_nistp384_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ossl_ec_GFp_nistp384_group_init,
        ossl_ec_GFp_simple_group_finish,
        ossl_ec_GFp_simple_group_clear_finish,
        ossl_ec_GFp_nist_group_copy,
        ossl_ec_GFp_nistp384_group_set_curve,
        ossl_ec_GFp_simple_group_get_curve,
        ossl_ec_GFp_simple_group_get_degree,
        ossl_ec_group_simple_order_bits,
        ossl_ec_GFp_simple_group_check_discriminant,
        ossl_ec_GFp_simple_point_init,
        ossl_ec_GFp_simple_point_finish,
        ossl_ec_GFp_simple_point_clear_finish,
        ossl_ec_GFp_simple_point_copy,
        ossl_ec_GFp_simple_point_set_to_infinity,
        ossl_ec_GFp_simple_point_set_affine_coordinates,
        ossl_ec_GFp_nistp384_point_get_affine_coordinates,
        0, /* point_set_compressed_coordinates */
        0, /* point2oct */
        0, /* oct2point */
        ossl_ec_GFp_simple_add,
        ossl_ec_GFp_simple_dbl,
        ossl_ec_GFp_simple_invert,
        ossl_ec_GFp_simple_is_at_infinity,
        ossl_ec_GFp_simple_is_on_curve,
        ossl_ec_GFp_simple_cmp,
        ossl_ec_GFp_simple_make_affine,
        ossl_ec_GFp_simple_points_make_affine,
        ossl_ec_GFp_nistp384_points_mul,
        ossl_ec_GFp_nistp384_precompute_mult,
        ossl_ec_GFp_nistp384_have_precompute_mult,
        ossl_ec_GFp_nist_field_mul,
        ossl_ec_GFp_nist_field_sqr,
        0, /* field_div */
        ossl_ec_GFp_simple_field_inv,
        0, /* field_encode */
        0, /* field_decode */
        0, /* field_set_to_one */
        ossl_ec_key_simple_priv2oct,
        ossl_ec_key_simple_oct2priv,
        0, /* set private */
        ossl_ec_key_simple_generate_key,
        ossl_ec_key_simple_check_key,
        ossl_ec_key_simple_generate_public_key,
        0, /* keycopy */
        0, /* keyfinish */
        ossl_ecdh_simple_compute_key,
        ossl_ecdsa_simple_sign_setup,
        ossl_ecdsa_simple_sign_sig,
        ossl_ecdsa_simple_verify_sig,
        0, /* field_inverse_mod_ord */
        0, /* blind_coordinates */
        0, /* ladder_pre */
        0, /* ladder_step */
        0  /* ladder_post */
    };

    return &ret;
}

/******************************************************************************/
/*
 * FUNCTIONS TO MANAGE PRECOMPUTATION
 */

static NISTP384_PRE_COMP *nistp384_pre_comp_new(void)
{
    NISTP384_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));

    if (ret == NULL)
        return ret;

    if (!CRYPTO_NEW_REF(&ret->references, 1)) {
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}

NISTP384_PRE_COMP *ossl_ec_nistp384_pre_comp_dup(NISTP384_PRE_COMP *p)
{
    int i;

    if (p != NULL)
        CRYPTO_UP_REF(&p->references, &i);
    return p;
}

void ossl_ec_nistp384_pre_comp_free(NISTP384_PRE_COMP *p)
{
    int i;

    if (p == NULL)
        return;

    CRYPTO_DOWN_REF(&p->references, &i);
    REF_PRINT_COUNT("ossl_ec_nistp384", p);
    if (i > 0)
        return;
    REF_ASSERT_ISNT(i < 0);

    CRYPTO_FREE_REF(&p->references);
    OPENSSL_free(p);
}
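
/*
 * The reference count follows the usual OpenSSL pattern: _dup only bumps
 * the count and returns the same object, and _free releases the storage
 * once the last reference is dropped. A minimal sketch of the expected
 * pairing (illustrative only; |p| starts with a single reference):
 *
 *     NISTP384_PRE_COMP *q = ossl_ec_nistp384_pre_comp_dup(p);
 *     ...
 *     ossl_ec_nistp384_pre_comp_free(q);   // balances the dup
 *     ossl_ec_nistp384_pre_comp_free(p);   // last reference, frees memory
 */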

/******************************************************************************/
/*
 * OPENSSL EC_METHOD FUNCTIONS
 */

int ossl_ec_GFp_nistp384_group_init(EC_GROUP *group)
{
    int ret;

    ret = ossl_ec_GFp_simple_group_init(group);
    group->a_is_minus3 = 1;
    return ret;
}

int ossl_ec_GFp_nistp384_group_set_curve(EC_GROUP *group, const BIGNUM *p,
                                         const BIGNUM *a, const BIGNUM *b,
                                         BN_CTX *ctx)
{
    int ret = 0;
    BIGNUM *curve_p, *curve_a, *curve_b;
#ifndef FIPS_MODULE
    BN_CTX *new_ctx = NULL;

    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    curve_p = BN_CTX_get(ctx);
    curve_a = BN_CTX_get(ctx);
    curve_b = BN_CTX_get(ctx);
    if (curve_b == NULL)
        goto err;
    BN_bin2bn(nistp384_curve_params[0], sizeof(felem_bytearray), curve_p);
    BN_bin2bn(nistp384_curve_params[1], sizeof(felem_bytearray), curve_a);
    BN_bin2bn(nistp384_curve_params[2], sizeof(felem_bytearray), curve_b);
    if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
        ERR_raise(ERR_LIB_EC, EC_R_WRONG_CURVE_PARAMETERS);
        goto err;
    }
    group->field_mod_func = BN_nist_mod_384;
    ret = ossl_ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
 err:
    BN_CTX_end(ctx);
#ifndef FIPS_MODULE
    BN_CTX_free(new_ctx);
#endif
    return ret;
}

/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3)
 */
int ossl_ec_GFp_nistp384_point_get_affine_coordinates(const EC_GROUP *group,
                                                      const EC_POINT *point,
                                                      BIGNUM *x, BIGNUM *y,
                                                      BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in, x_out, y_out;
    widefelem tmp;

    if (EC_POINT_is_at_infinity(group, point)) {
        ERR_raise(ERR_LIB_EC, EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
        (!BN_to_felem(z1, point->Z)))
        return 0;
    felem_inv(z2, z1);
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!felem_to_BN(x, x_out)) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            return 0;
        }
    }
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!felem_to_BN(y, y_out)) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
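
/*
 * Callers normally reach the function above through the public accessor,
 * which dispatches via the EC_METHOD table. A minimal usage sketch,
 * assuming a P-384 group and a valid point (error handling omitted):
 *
 *     BIGNUM *ax = BN_new(), *ay = BN_new();
 *     BN_CTX *bnctx = BN_CTX_new();
 *     if (EC_POINT_get_affine_coordinates(group, point, ax, ay, bnctx))
 *         ... use ax, ay ...
 *     BN_free(ax);
 *     BN_free(ay);
 *     BN_CTX_free(bnctx);
 */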

/* points below is of size |num|, and tmp_felems is of size |num+1| */
static void make_points_affine(size_t num, felem points[][3],
                               felem tmp_felems[])
{
    /*
     * Runs in constant time, unless an input is the point at infinity (which
     * normally shouldn't happen).
     */
    ossl_ec_GFp_nistp_points_make_affine_internal(num,
                                                  points,
                                                  sizeof(felem),
                                                  tmp_felems,
                                                  (void (*)(void *))felem_one,
                                                  felem_is_zero_int,
                                                  (void (*)(void *, const void *))
                                                  felem_assign,
                                                  (void (*)(void *, const void *))
                                                  felem_square_reduce,
                                                  (void (*)(void *, const void *, const void *))
                                                  felem_mul_reduce,
                                                  (void (*)(void *, const void *))
                                                  felem_inv,
                                                  (void (*)(void *, const void *))
                                                  felem_contract);
}
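
/*
 * Converting the precomputed multiples to affine form (Z = 1) is what makes
 * the "mixed" path in batch_mul worthwhile: in the usual Jacobian addition
 * formulas the terms
 *
 *     U1 = X1 * Z2^2 and S1 = Y1 * Z2^3
 *
 * reduce to X1 and Y1 when Z2 = 1, so point_add can skip a squaring and
 * several multiplications for every addition of a precomputed point.
 */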

/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values. Result is stored in r (r can equal one of the inputs).
 */
int ossl_ec_GFp_nistp384_points_mul(const EC_GROUP *group, EC_POINT *r,
                                    const BIGNUM *scalar, size_t num,
                                    const EC_POINT *points[],
                                    const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    int mixed = 0;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;
    felem (*pre_comp)[17][3] = NULL;
    felem *tmp_felems = NULL;
    unsigned int i;
    int num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    felem x_in, y_in, z_in, x_out, y_out, z_out;
    NISTP384_PRE_COMP *pre = NULL;
    felem (*g_pre_comp)[3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    z = BN_CTX_get(ctx);
    tmp_scalar = BN_CTX_get(ctx);
    if (tmp_scalar == NULL)
        goto err;

    if (scalar != NULL) {
        pre = group->pre_comp.nistp384;
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = &pre->g_pre_comp[0];
        else
            /* try to use the standard precomputation */
            g_pre_comp = (felem (*)[3]) gmul;
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!felem_to_BN(x, g_pre_comp[1][0]) ||
            !felem_to_BN(y, g_pre_comp[1][1]) ||
            !felem_to_BN(z, g_pre_comp[1][2])) {
            ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
            goto err;
        }
        if (!ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group,
                                                                generator,
                                                                x, y, z, ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points++;
    }

    if (num_points > 0) {
        if (num_points >= 2) {
            /*
             * unless we precompute multiples for just one point, converting
             * those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
        pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
        if (mixed)
            tmp_felems =
                OPENSSL_malloc(sizeof(*tmp_felems) * (num_points * 17 + 1));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_felems == NULL)))
            goto err;

        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        for (i = 0; i < num_points; ++i) {
            if (i == num) {
                /*
                 * we didn't have a valid precomputation, so we pick the
                 * generator
                 */
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else {
                /* the i^th point */
                p = points[i];
                p_scalar = scalars[i];
            }
            if (p_scalar != NULL && p != NULL) {
                /* reduce scalar to 0 <= scalar < 2^384 */
                if ((BN_num_bits(p_scalar) > 384)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
                        ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2lebinpad(tmp_scalar,
                                               secrets[i], sizeof(secrets[i]));
                } else {
                    num_bytes = BN_bn2lebinpad(p_scalar,
                                               secrets[i], sizeof(secrets[i]));
                }
                if (num_bytes < 0) {
                    ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                    goto err;
                }
                /* precompute multiples */
                if ((!BN_to_felem(x_out, p->X)) ||
                    (!BN_to_felem(y_out, p->Y)) ||
                    (!BN_to_felem(z_out, p->Z)))
                    goto err;
                memcpy(pre_comp[i][1][0], x_out, sizeof(felem));
                memcpy(pre_comp[i][1][1], y_out, sizeof(felem));
                memcpy(pre_comp[i][1][2], z_out, sizeof(felem));
                for (j = 2; j <= 16; ++j) {
                    if (j & 1) {
                        point_add(pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
                                  pre_comp[i][1][0], pre_comp[i][1][1], pre_comp[i][1][2], 0,
                                  pre_comp[i][j - 1][0], pre_comp[i][j - 1][1], pre_comp[i][j - 1][2]);
                    } else {
                        point_double(pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
                                     pre_comp[i][j / 2][0], pre_comp[i][j / 2][1], pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
    }

    /* the scalar for the generator */
    if (scalar != NULL && have_pre_comp) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^384 */
        if ((BN_num_bits(scalar) > 384) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2lebinpad(tmp_scalar, g_secret, sizeof(g_secret));
        } else {
            num_bytes = BN_bn2lebinpad(scalar, g_secret, sizeof(g_secret));
        }
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const felem(*)[17][3])pre_comp,
                  (const felem(*)[3])g_pre_comp);
    } else {
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
    }
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
        (!felem_to_BN(z, z_in))) {
        ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
        goto err;
    }
    ret = ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group, r, x, y, z,
                                                             ctx);
 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    OPENSSL_free(secrets);
    OPENSSL_free(pre_comp);
    OPENSSL_free(tmp_felems);
    return ret;
}
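
/*
 * A minimal caller-side sketch of the multiplication entry point above,
 * going through the public API; NID_secp384r1 reaches this backend only on
 * builds where it is enabled (error handling omitted, and Q, n, m stand for
 * an arbitrary point and scalars):
 *
 *     EC_GROUP *grp = EC_GROUP_new_by_curve_name(NID_secp384r1);
 *     EC_POINT *res = EC_POINT_new(grp);
 *     BN_CTX *bnctx = BN_CTX_new();
 *     EC_POINT_mul(grp, res, n, Q, m, bnctx);   // res = n*G + m*Q
 *     ...
 *     EC_POINT_free(res);
 *     EC_GROUP_free(grp);
 *     BN_CTX_free(bnctx);
 */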

int ossl_ec_GFp_nistp384_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP384_PRE_COMP *pre = NULL;
    int i, j;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    felem tmp_felems[16];
#ifndef FIPS_MODULE
    BN_CTX *new_ctx = NULL;
#endif

    /* throw away old precomputation */
    EC_pre_comp_free(group);

#ifndef FIPS_MODULE
    if (ctx == NULL)
        ctx = new_ctx = BN_CTX_new();
#endif
    if (ctx == NULL)
        return 0;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    if (y == NULL)
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp384_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp384_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp384_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    if ((!BN_to_felem(pre->g_pre_comp[1][0], group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[1][1], group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[1][2], group->generator->Z)))
        goto err;
    /* compute 2^95*G, 2^190*G, 2^285*G */
    for (i = 1; i <= 4; i <<= 1) {
        point_double(pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2],
                     pre->g_pre_comp[i][0], pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 94; ++j) {
            point_double(pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2],
                         pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2]);
        }
    }
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^95*G + 2^190*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1], pre->g_pre_comp[6][2],
              pre->g_pre_comp[4][0], pre->g_pre_comp[4][1], pre->g_pre_comp[4][2], 0,
              pre->g_pre_comp[2][0], pre->g_pre_comp[2][1], pre->g_pre_comp[2][2]);
    /* 2^95*G + 2^285*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1], pre->g_pre_comp[10][2],
              pre->g_pre_comp[8][0], pre->g_pre_comp[8][1], pre->g_pre_comp[8][2], 0,
              pre->g_pre_comp[2][0], pre->g_pre_comp[2][1], pre->g_pre_comp[2][2]);
    /* 2^190*G + 2^285*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
              pre->g_pre_comp[8][0], pre->g_pre_comp[8][1], pre->g_pre_comp[8][2], 0,
              pre->g_pre_comp[4][0], pre->g_pre_comp[4][1], pre->g_pre_comp[4][2]);
    /* 2^95*G + 2^190*G + 2^285*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1], pre->g_pre_comp[14][2],
              pre->g_pre_comp[12][0], pre->g_pre_comp[12][1], pre->g_pre_comp[12][2], 0,
              pre->g_pre_comp[2][0], pre->g_pre_comp[2][1], pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i) {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2 * i + 1][0], pre->g_pre_comp[2 * i + 1][1], pre->g_pre_comp[2 * i + 1][2],
                  pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2], 0,
                  pre->g_pre_comp[1][0], pre->g_pre_comp[1][1], pre->g_pre_comp[1][2]);
    }
    make_points_affine(15, &(pre->g_pre_comp[1]), tmp_felems);

 done:
    SETPRECOMP(group, nistp384, pre);
    ret = 1;
    pre = NULL;
 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
#ifndef FIPS_MODULE
    BN_CTX_free(new_ctx);
#endif
    ossl_ec_nistp384_pre_comp_free(pre);
    return ret;
}

int ossl_ec_GFp_nistp384_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, nistp384);
}
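
/*
 * Usage sketch for the two hooks above via the public wrappers, assuming a
 * P-384 group |grp| and BN_CTX |bnctx| created as in the earlier sketch
 * (error handling omitted):
 *
 *     if (!EC_GROUP_have_precompute_mult(grp))
 *         EC_GROUP_precompute_mult(grp, bnctx);
 *     // subsequent generator multiplications reuse pre->g_pre_comp
 */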