mini-gmp.c
/* mini-gmp, a minimalistic implementation of a GNU GMP subset.

   Contributed to the GNU project by Niels Möller

   Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
   Free Software Foundation, Inc.

   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

/* NOTE: All functions in this file which are not declared in
   mini-gmp.h are internal, and are not intended to be compatible
   with either GMP or future versions of mini-gmp. */

/* Much of the material copied from GMP files, including: gmp-impl.h,
   longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
   mpn/generic/lshift.c, mpn/generic/mul_1.c,
   mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
   mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
   mpn/generic/submul_1.c. */

#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "mini-gmp.h"
/* Macros */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

#define GMP_LIMB_MAX (~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = x; \
    assert (__cy == 0); \
  } while (0)

#define gmp_clz(count, x) do { \
    mp_limb_t __clz_x = (x); \
    unsigned __clz_c; \
    for (__clz_c = 0; \
         (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
         __clz_c += 8) \
      __clz_x <<= 8; \
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
      __clz_x <<= 1; \
    (count) = __clz_c; \
  } while (0)

#define gmp_ctz(count, x) do { \
    mp_limb_t __ctz_x = (x); \
    unsigned __ctz_c = 0; \
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
    (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
  } while (0)
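
/* Illustrative sketch (not part of the GMP sources, left unused):
   gmp_clz scans from the top byte down, so it requires x != 0;
   gmp_ctz isolates the lowest set bit with (x & -x) and derives the
   trailing-zero count from gmp_clz.  A minimal self-check using only
   the macros above: */
static void
gmp_count_bits_demo (void)
{
  unsigned c;
  gmp_clz (c, (mp_limb_t) 1);   /* only bit 0 set */
  assert (c == GMP_LIMB_BITS - 1);
  gmp_ctz (c, (mp_limb_t) 12);  /* 0b1100: two trailing zeros */
  assert (c == 2);
}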
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)

#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __x; \
  } while (0)

#define gmp_umul_ppmm(w1, w0, u, v) \
  do { \
    mp_limb_t __x0, __x1, __x2, __x3; \
    unsigned __ul, __vl, __uh, __vh; \
    mp_limb_t __u = (u), __v = (v); \
    \
    __ul = __u & GMP_LLIMB_MASK; \
    __uh = __u >> (GMP_LIMB_BITS / 2); \
    __vl = __v & GMP_LLIMB_MASK; \
    __vh = __v >> (GMP_LIMB_BITS / 2); \
    \
    __x0 = (mp_limb_t) __ul * __vl; \
    __x1 = (mp_limb_t) __ul * __vh; \
    __x2 = (mp_limb_t) __uh * __vl; \
    __x3 = (mp_limb_t) __uh * __vh; \
    \
    __x1 += __x0 >> (GMP_LIMB_BITS / 2); /* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
    (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
  } while (0)
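
/* Illustrative sketch (not part of the GMP sources, left unused):
   gmp_umul_ppmm computes the full two-limb product by splitting each
   operand into half-limbs, forming the four partial products, and
   propagating the single possible carry into the high word.  A tiny
   check with a product that fits in the low limb: */
static void
gmp_umul_ppmm_demo (void)
{
  mp_limb_t hi, lo;
  gmp_umul_ppmm (hi, lo, (mp_limb_t) 7, (mp_limb_t) 9);
  assert (hi == 0 && lo == 63);
}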
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
  do { \
    mp_limb_t _qh, _ql, _r, _mask; \
    gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
    _r = (nl) - _qh * (d); \
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
    _qh += _mask; \
    _r += _mask & (d); \
    if (_r >= (d)) \
      { \
        _r -= (d); \
        _qh++; \
      } \
    \
    (r) = _r; \
    (q) = _qh; \
  } while (0)
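
/* Illustrative sketch (not part of the GMP sources, left unused):
   this macro divides the two-limb value <nh, nl> by a single
   normalized limb d (high bit set), given the precomputed inverse
   di = floor((B^2 - 1) / d) - B, where B is the limb base.  For
   d = GMP_LIMB_HIGHBIT = B/2 that inverse is exactly GMP_LIMB_MAX,
   which lets us check the macro without computing an inverse: */
static void
gmp_udiv_qrnnd_preinv_demo (void)
{
  mp_limb_t q, r;
  /* Divide 1*B + 5 by d = B/2: quotient 2, remainder 5. */
  gmp_udiv_qrnnd_preinv (q, r, (mp_limb_t) 1, (mp_limb_t) 5,
                         GMP_LIMB_HIGHBIT, GMP_LIMB_MAX);
  assert (q == 2 && r == 5);
}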
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
  do { \
    mp_limb_t _q0, _t1, _t0, _mask; \
    gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
    \
    /* Compute the two most significant limbs of n - q'd */ \
    (r1) = (n1) - (d1) * (q); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
    gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
    (q)++; \
    \
    /* Conditionally adjust q and the remainders */ \
    _mask = - (mp_limb_t) ((r1) >= _q0); \
    (q) += _mask; \
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1)) \
      { \
        if ((r1) > (d1) || (r0) >= (d0)) \
          { \
            (q)++; \
            gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
          } \
      } \
  } while (0)

/* Swap macros. */
#define MP_LIMB_T_SWAP(x, y) \
  do { \
    mp_limb_t __mp_limb_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_limb_t_swap__tmp; \
  } while (0)

#define MP_SIZE_T_SWAP(x, y) \
  do { \
    mp_size_t __mp_size_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_size_t_swap__tmp; \
  } while (0)

#define MP_BITCNT_T_SWAP(x,y) \
  do { \
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_bitcnt_t_swap__tmp; \
  } while (0)

#define MP_PTR_SWAP(x, y) \
  do { \
    mp_ptr __mp_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_ptr_swap__tmp; \
  } while (0)

#define MP_SRCPTR_SWAP(x, y) \
  do { \
    mp_srcptr __mp_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_srcptr_swap__tmp; \
  } while (0)

#define MPN_PTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_PTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)

#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_SRCPTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)

#define MPZ_PTR_SWAP(x, y) \
  do { \
    mpz_ptr __mpz_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_ptr_swap__tmp; \
  } while (0)

#define MPZ_SRCPTR_SWAP(x, y) \
  do { \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_srcptr_swap__tmp; \
  } while (0)
/* Memory allocation and other helper functions. */
static void
gmp_die (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort ();
}

static void *
gmp_default_alloc (size_t size)
{
  void *p;

  assert (size > 0);

  p = malloc (size);
  if (!p)
    gmp_die ("gmp_default_alloc: Virtual memory exhausted.");

  return p;
}

static void *
gmp_default_realloc (void *old, size_t old_size, size_t new_size)
{
  mp_ptr p;

  p = realloc (old, new_size);

  if (!p)
    gmp_die ("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}

static void
gmp_default_free (void *p, size_t size)
{
  free (p);
}

static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
void
mp_get_memory_functions (void *(**alloc_func) (size_t),
                         void *(**realloc_func) (void *, size_t, size_t),
                         void (**free_func) (void *, size_t))
{
  if (alloc_func)
    *alloc_func = gmp_allocate_func;

  if (realloc_func)
    *realloc_func = gmp_reallocate_func;

  if (free_func)
    *free_func = gmp_free_func;
}

void
mp_set_memory_functions (void *(*alloc_func) (size_t),
                         void *(*realloc_func) (void *, size_t, size_t),
                         void (*free_func) (void *, size_t))
{
  if (!alloc_func)
    alloc_func = gmp_default_alloc;
  if (!realloc_func)
    realloc_func = gmp_default_realloc;
  if (!free_func)
    free_func = gmp_default_free;

  gmp_allocate_func = alloc_func;
  gmp_reallocate_func = realloc_func;
  gmp_free_func = free_func;
}
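
/* Illustrative sketch (not part of the GMP sources, left unused):
   callers can route all of mini-gmp's allocation through their own
   hooks.  A minimal counting allocator; the names below are made up
   for the example.  Note that passing NULL for a hook reinstalls the
   corresponding default: */
static size_t gmp_demo_alloc_count = 0;

static void *
gmp_demo_counting_alloc (size_t size)
{
  gmp_demo_alloc_count++;
  return malloc (size); /* a real hook should handle NULL like gmp_default_alloc */
}

static void
gmp_demo_install_hooks (void)
{
  /* Replace only the allocator; keep the default realloc and free. */
  mp_set_memory_functions (gmp_demo_counting_alloc, NULL, NULL);
}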
#define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
#define gmp_free(p) ((*gmp_free_func) ((p), 0))

static mp_ptr
gmp_xalloc_limbs (mp_size_t size)
{
  return gmp_xalloc (size * sizeof (mp_limb_t));
}

static mp_ptr
gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
{
  assert (size > 0);
  return (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
}

/* MPN interface */

void
mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
{
  mp_size_t i;
  for (i = 0; i < n; i++)
    d[i] = s[i];
}

void
mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
{
  while (n-- > 0)
    d[n] = s[n];
}

int
mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  for (; n > 0; n--)
    {
      if (ap[n-1] < bp[n-1])
        return -1;
      else if (ap[n-1] > bp[n-1])
        return 1;
    }
  return 0;
}

static int
mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  if (an > bn)
    return 1;
  else if (an < bn)
    return -1;
  else
    return mpn_cmp (ap, bp, an);
}

static mp_size_t
mpn_normalized_size (mp_srcptr xp, mp_size_t n)
{
  for (; n > 0 && xp[n-1] == 0; n--)
    ;
  return n;
}

#define mpn_zero_p(xp, n) (mpn_normalized_size ((xp), (n)) == 0)
mp_limb_t
mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
{
  mp_size_t i;

  assert (n > 0);

  for (i = 0; i < n; i++)
    {
      mp_limb_t r = ap[i] + b;
      /* Carry out */
      b = (r < b);
      rp[i] = r;
    }
  return b;
}

mp_limb_t
mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  mp_size_t i;
  mp_limb_t cy;

  for (i = 0, cy = 0; i < n; i++)
    {
      mp_limb_t a, b, r;
      a = ap[i]; b = bp[i];
      r = a + cy;
      cy = (r < cy);
      r += b;
      cy += (r < b);
      rp[i] = r;
    }
  return cy;
}

mp_limb_t
mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  mp_limb_t cy;

  assert (an >= bn);

  cy = mpn_add_n (rp, ap, bp, bn);
  if (an > bn)
    cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
  return cy;
}
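
/* Illustrative sketch (not part of the GMP sources, left unused):
   the mpn add routines work on raw limb arrays, least significant
   limb first; the caller supplies the sizes (an >= bn for mpn_add)
   and the return value is the carry out of the top limb: */
static void
mpn_add_demo (void)
{
  mp_limb_t a[2] = { GMP_LIMB_MAX, 0 }; /* the value B - 1 */
  mp_limb_t b[1] = { 1 };
  mp_limb_t r[2];
  mp_limb_t cy = mpn_add (r, a, 2, b, 1); /* (B - 1) + 1 = B */
  assert (cy == 0 && r[0] == 0 && r[1] == 1);
}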
mp_limb_t
mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
{
  mp_size_t i;

  assert (n > 0);

  for (i = 0; i < n; i++)
    {
      mp_limb_t a = ap[i];
      /* Carry out */
      mp_limb_t cy = a < b;
      rp[i] = a - b;
      b = cy;
    }
  return b;
}

mp_limb_t
mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  mp_size_t i;
  mp_limb_t cy;

  for (i = 0, cy = 0; i < n; i++)
    {
      mp_limb_t a, b;
      a = ap[i]; b = bp[i];
      b += cy;
      cy = (b < cy);
      cy += (a < b);
      rp[i] = a - b;
    }
  return cy;
}

mp_limb_t
mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
{
  mp_limb_t cy;

  assert (an >= bn);

  cy = mpn_sub_n (rp, ap, bp, bn);
  if (an > bn)
    cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
  return cy;
}
mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      lpl += cl;
      cl = (lpl < cl) + hpl;

      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      lpl += cl;
      cl = (lpl < cl) + hpl;

      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      lpl += cl;
      cl = (lpl < cl) + hpl;

      rl = *rp;
      lpl = rl - lpl;
      cl += lpl > rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

mp_limb_t
mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
{
  assert (un >= vn);
  assert (vn >= 1);

  /* We first multiply by the low order limb. This result can be
     stored, not added, to rp. We also avoid a loop for zeroing this
     way. */

  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
  rp += 1, vp += 1, vn -= 1;

  /* Now accumulate the product of up[] and the next higher limb from
     vp[]. */

  while (vn >= 1)
    {
      rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
      rp += 1, vp += 1, vn -= 1;
    }
  return rp[un - 1];
}
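
/* Illustrative sketch (not part of the GMP sources, left unused): as
   the comments above describe, mpn_mul is schoolbook multiplication;
   the first partial row is written with mpn_mul_1 and every later row
   is accumulated with mpn_addmul_1, one limb of vp at a time.  The
   result area rp must hold un + vn limbs.  A 1x1 limb check: */
static void
mpn_mul_demo (void)
{
  mp_limb_t u[1] = { 7 };
  mp_limb_t v[1] = { 9 };
  mp_limb_t r[2];
  mpn_mul (r, u, 1, v, 1);
  assert (r[0] == 63 && r[1] == 0);
}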
void
mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  mpn_mul (rp, ap, n, bp, n);
}

void
mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
{
  mpn_mul (rp, ap, n, ap, n);
}

mp_limb_t
mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_size_t i;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  up += n;
  rp += n;

  tnc = GMP_LIMB_BITS - cnt;
  low_limb = *--up;
  retval = low_limb >> tnc;
  high_limb = (low_limb << cnt);

  for (i = n - 1; i != 0; i--)
    {
      low_limb = *--up;
      *--rp = high_limb | (low_limb >> tnc);
      high_limb = (low_limb << cnt);
    }
  *--rp = high_limb;

  return retval;
}

mp_limb_t
mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_size_t i;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  tnc = GMP_LIMB_BITS - cnt;
  high_limb = *up++;
  retval = (high_limb << tnc);
  low_limb = high_limb >> cnt;

  for (i = n - 1; i != 0; i--)
    {
      high_limb = *up++;
      *rp++ = low_limb | (high_limb << tnc);
      low_limb = high_limb >> cnt;
    }
  *rp = low_limb;

  return retval;
}
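
/* Illustrative sketch (not part of the GMP sources, left unused):
   both shift routines require 0 < cnt < GMP_LIMB_BITS and return the
   bits shifted out of the array, placed at the opposite end of the
   returned limb: */
static void
mpn_shift_demo (void)
{
  mp_limb_t u[1] = { GMP_LIMB_HIGHBIT | 1 };
  mp_limb_t r[1];
  mp_limb_t out = mpn_lshift (r, u, 1, 1);
  assert (out == 1 && r[0] == 2);       /* high bit out, low bit doubled */
  out = mpn_rshift (u, u, 1, 1);
  assert (out == GMP_LIMB_HIGHBIT       /* shifted-out low bit, at the top */
          && u[0] == GMP_LIMB_HIGHBIT >> 1);
}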
/* MPN division interface. */
mp_limb_t
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
{
  mp_limb_t r, p, m;
  unsigned ul, uh;
  unsigned ql, qh;

  /* First, do a 2/1 inverse. */
  /* The inverse m is defined as floor( (B^2 - 1) / u1 ) - B, so that 0 <
   * B^2 - (B + m) u1 <= u1 */
  assert (u1 >= GMP_LIMB_HIGHBIT);

  ul = u1 & GMP_LLIMB_MASK;
  uh = u1 >> (GMP_LIMB_BITS / 2);

  qh = ~u1 / uh;
  r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;

  p = (mp_limb_t) qh * ul;
  /* Adjustment steps taken from udiv_qrnnd_c */
  if (r < p)
    {
      qh--;
      r += u1;
      if (r >= u1) /* i.e. we didn't get carry when adding to r */
        if (r < p)
          {
            qh--;
            r += u1;
          }
    }
  r -= p;

  /* Do a 3/2 division (with half limb size) */
  p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
  ql = (p >> (GMP_LIMB_BITS / 2)) + 1;

  /* By the 3/2 method, we don't need the high half limb. */
  r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;

  if (r >= (p << (GMP_LIMB_BITS / 2)))
    {
      ql--;
      r += u1;
    }
  m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
  if (r >= u1)
    {
      m++;
      r -= u1;
    }

  if (u0 > 0)
    {
      mp_limb_t th, tl;
      r = ~r;
      r += u0;
      if (r < u0)
        {
          m--;
          if (r >= u1)
            {
              m--;
              r -= u1;
            }
          r -= u1;
        }
      gmp_umul_ppmm (th, tl, u0, m);
      r += th;
      if (r < th)
        {
          m--;
          if (r > u1 || (r == u1 && tl > u0))
            m--;
        }
    }

  return m;
}
#define mpn_invert_limb(x) mpn_invert_3by2 ((x), 0)

struct gmp_div_inverse
{
  /* Normalization shift count. */
  unsigned shift;
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
  mp_limb_t d1, d0;
  /* Inverse, for 2/1 or 3/2. */
  mp_limb_t di;
};

static void
mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
{
  unsigned shift;

  assert (d > 0);
  gmp_clz (shift, d);
  inv->shift = shift;
  inv->d1 = d << shift;
  inv->di = mpn_invert_limb (inv->d1);
}

static void
mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
                     mp_limb_t d1, mp_limb_t d0)
{
  unsigned shift;

  assert (d1 > 0);
  gmp_clz (shift, d1);
  inv->shift = shift;
  if (shift > 0)
    {
      d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
      d0 <<= shift;
    }
  inv->d1 = d1;
  inv->d0 = d0;
  inv->di = mpn_invert_3by2 (d1, d0);
}

static void
mpn_div_qr_invert (struct gmp_div_inverse *inv,
                   mp_srcptr dp, mp_size_t dn)
{
  assert (dn > 0);

  if (dn == 1)
    mpn_div_qr_1_invert (inv, dp[0]);
  else if (dn == 2)
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
  else
    {
      unsigned shift;
      mp_limb_t d1, d0;

      d1 = dp[dn-1];
      d0 = dp[dn-2];
      assert (d1 > 0);
      gmp_clz (shift, d1);
      inv->shift = shift;
      if (shift > 0)
        {
          d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
          d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
        }
      inv->d1 = d1;
      inv->d0 = d0;
      inv->di = mpn_invert_3by2 (d1, d0);
    }
}
/* Not matching current public gmp interface, rather corresponding to
   the sbpi1_div_* functions. */
static mp_limb_t
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
                     const struct gmp_div_inverse *inv)
{
  mp_limb_t d, di;
  mp_limb_t r;
  mp_ptr tp = NULL;

  if (inv->shift > 0)
    {
      tp = gmp_xalloc_limbs (nn);
      r = mpn_lshift (tp, np, nn, inv->shift);
      np = tp;
    }
  else
    r = 0;

  d = inv->d1;
  di = inv->di;
  while (nn-- > 0)
    {
      mp_limb_t q;

      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
      if (qp)
        qp[nn] = q;
    }
  if (inv->shift > 0)
    gmp_free (tp);

  return r >> inv->shift;
}

static mp_limb_t
mpn_div_qr_1 (mp_ptr qp, mp_srcptr np, mp_size_t nn, mp_limb_t d)
{
  assert (d > 0);

  /* Special case for powers of two. */
  if (d > 1 && (d & (d-1)) == 0)
    {
      unsigned shift;
      mp_limb_t r = np[0] & (d-1);
      gmp_ctz (shift, d);
      if (qp)
        mpn_rshift (qp, np, nn, shift);

      return r;
    }
  else
    {
      struct gmp_div_inverse inv;
      mpn_div_qr_1_invert (&inv, d);
      return mpn_div_qr_1_preinv (qp, np, nn, &inv);
    }
}

static void
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
                     const struct gmp_div_inverse *inv)
{
  unsigned shift;
  mp_size_t i;
  mp_limb_t d1, d0, di, r1, r0;
  mp_ptr tp;

  assert (nn >= 2);
  shift = inv->shift;
  d1 = inv->d1;
  d0 = inv->d0;
  di = inv->di;
  if (shift > 0)
    {
      tp = gmp_xalloc_limbs (nn);
      r1 = mpn_lshift (tp, np, nn, shift);
      np = tp;
    }
  else
    r1 = 0;

  r0 = np[nn - 1];

  for (i = nn - 2; i >= 0; i--)
    {
      mp_limb_t n0, q;
      n0 = np[i];
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);

      if (qp)
        qp[i] = q;
    }

  if (shift > 0)
    {
      assert ((r0 << (GMP_LIMB_BITS - shift)) == 0);
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
      r1 >>= shift;

      gmp_free (tp);
    }

  rp[1] = r1;
  rp[0] = r0;
}

#if 0
static void
mpn_div_qr_2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
              mp_limb_t d1, mp_limb_t d0)
{
  struct gmp_div_inverse inv;
  assert (nn >= 2);

  mpn_div_qr_2_invert (&inv, d1, d0);
  mpn_div_qr_2_preinv (qp, rp, np, nn, &inv);
}
#endif
static void
mpn_div_qr_pi1 (mp_ptr qp,
                mp_ptr np, mp_size_t nn, mp_limb_t n1,
                mp_srcptr dp, mp_size_t dn,
                mp_limb_t dinv)
{
  mp_size_t i;

  mp_limb_t d1, d0;
  mp_limb_t cy, cy1;
  mp_limb_t q;

  assert (dn > 2);
  assert (nn >= dn);

  d1 = dp[dn - 1];
  d0 = dp[dn - 2];

  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
  /* Iteration variable is the index of the q limb.
   *
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
   * by <d1, d0, dp[dn-3], ..., dp[0] >
   */
  for (i = nn - dn; i >= 0; i--)
    {
      mp_limb_t n0 = np[dn-1+i];

      if (n1 == d1 && n0 == d0)
        {
          q = GMP_LIMB_MAX;
          mpn_submul_1 (np+i, dp, dn, q);
          n1 = np[dn-1+i];      /* update n1, last loop's value will now be invalid */
        }
      else
        {
          gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);

          cy = mpn_submul_1 (np + i, dp, dn-2, q);

          cy1 = n0 < cy;
          n0 = n0 - cy;
          cy = n1 < cy1;
          n1 = n1 - cy1;
          np[dn-2+i] = n0;

          if (cy != 0)
            {
              n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
              q--;
            }
        }

      if (qp)
        qp[i] = q;
    }

  np[dn - 1] = n1;
}
static void
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
                   mp_srcptr dp, mp_size_t dn,
                   const struct gmp_div_inverse *inv)
{
  assert (dn > 0);
  assert (nn >= dn);

  if (dn == 1)
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
  else if (dn == 2)
    mpn_div_qr_2_preinv (qp, np, np, nn, inv);
  else
    {
      mp_limb_t nh;
      unsigned shift;

      assert (inv->d1 == dp[dn-1]);
      assert (inv->d0 == dp[dn-2]);
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);

      shift = inv->shift;
      if (shift > 0)
        nh = mpn_lshift (np, np, nn, shift);
      else
        nh = 0;

      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);

      if (shift > 0)
        gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
    }
}

static void
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
{
  struct gmp_div_inverse inv;
  mp_ptr tp = NULL;

  assert (dn > 0);
  assert (nn >= dn);

  mpn_div_qr_invert (&inv, dp, dn);
  if (dn > 2 && inv.shift > 0)
    {
      tp = gmp_xalloc_limbs (dn);
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
      dp = tp;
    }
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
  if (tp)
    gmp_free (tp);
}
/* MPN base conversion. */
static unsigned
mpn_base_power_of_two_p (unsigned b)
{
  switch (b)
    {
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    case 16: return 4;
    case 32: return 5;
    case 64: return 6;
    case 128: return 7;
    case 256: return 8;
    default: return 0;
    }
}

struct mpn_base_info
{
  /* bb is the largest power of the base which fits in one limb, and
     exp is the corresponding exponent. */
  unsigned exp;
  mp_limb_t bb;
};

static void
mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
{
  mp_limb_t m;
  mp_limb_t p;
  unsigned exp;

  m = GMP_LIMB_MAX / b;
  for (exp = 1, p = b; p <= m; exp++)
    p *= b;

  info->exp = exp;
  info->bb = p;
}
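
/* Illustrative sketch (not part of the GMP sources, left unused):
   for base 10 with 64-bit limbs this finds exp = 19 and bb = 10^19,
   the largest power of ten that fits in one limb; base conversion
   then peels off exp digits per division.  A limb-size-independent
   check: */
static void
mpn_get_base_info_demo (void)
{
  struct mpn_base_info info;
  mpn_get_base_info (&info, 10);
  /* bb is a power of ten, and one more factor of 10 would overflow. */
  assert (info.bb > GMP_LIMB_MAX / 10);
}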
static mp_bitcnt_t
mpn_limb_size_in_base_2 (mp_limb_t u)
{
  unsigned shift;

  assert (u > 0);
  gmp_clz (shift, u);
  return GMP_LIMB_BITS - shift;
}

static size_t
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
{
  unsigned char mask;
  size_t sn, j;
  mp_size_t i;
  int shift;

  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
        + bits - 1) / bits;
  mask = (1U << bits) - 1;

  for (i = 0, j = sn, shift = 0; j-- > 0;)
    {
      unsigned char digit = up[i] >> shift;

      shift += bits;

      if (shift >= GMP_LIMB_BITS && ++i < un)
        {
          shift -= GMP_LIMB_BITS;
          digit |= up[i] << (bits - shift);
        }
      sp[j] = digit & mask;
    }
  return sn;
}

/* We generate digits from the least significant end, and reverse at
   the end. */
static size_t
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
                  const struct gmp_div_inverse *binv)
{
  mp_size_t i;
  for (i = 0; w > 0; i++)
    {
      mp_limb_t h, l, r;

      h = w >> (GMP_LIMB_BITS - binv->shift);
      l = w << binv->shift;

      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
      assert ( (r << (GMP_LIMB_BITS - binv->shift)) == 0);
      r >>= binv->shift;

      sp[i] = r;
    }
  return i;
}

static size_t
mpn_get_str_other (unsigned char *sp,
                   int base, const struct mpn_base_info *info,
                   mp_ptr up, mp_size_t un)
{
  struct gmp_div_inverse binv;
  size_t sn;
  size_t i;

  mpn_div_qr_1_invert (&binv, base);

  sn = 0;

  if (un > 1)
    {
      struct gmp_div_inverse bbinv;
      mpn_div_qr_1_invert (&bbinv, info->bb);

      do
        {
          mp_limb_t w;
          size_t done;
          w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
          un -= (up[un-1] == 0);
          done = mpn_limb_get_str (sp + sn, w, &binv);

          for (sn += done; done < info->exp; done++)
            sp[sn++] = 0;
        }
      while (un > 1);
    }
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);

  /* Reverse order */
  for (i = 0; 2*i + 1 < sn; i++)
    {
      unsigned char t = sp[i];
      sp[i] = sp[sn - i - 1];
      sp[sn - i - 1] = t;
    }

  return sn;
}

size_t
mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
{
  unsigned bits;

  assert (un > 0);
  assert (up[un-1] > 0);

  bits = mpn_base_power_of_two_p (base);
  if (bits)
    return mpn_get_str_bits (sp, bits, up, un);
  else
    {
      struct mpn_base_info info;

      mpn_get_base_info (&info, base);
      return mpn_get_str_other (sp, base, &info, up, un);
    }
}
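
/* Illustrative sketch (not part of the GMP sources, left unused):
   note that up is mp_ptr, not mp_srcptr; for bases that are not
   powers of two the input is destroyed by the repeated divisions.
   Digits come out as raw values (0..15 for base 16), most
   significant first, not as ASCII.  A one-limb check: */
static void
mpn_get_str_demo (void)
{
  mp_limb_t u[1] = { 255 };
  unsigned char digits[GMP_LIMB_BITS]; /* ample room for one limb */
  size_t sn = mpn_get_str (digits, 16, u, 1);
  assert (sn == 2 && digits[0] == 15 && digits[1] == 15); /* 0xff */
}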
static mp_size_t
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
                  unsigned bits)
{
  mp_size_t rn;
  size_t j;
  unsigned shift;

  for (j = sn, rn = 0, shift = 0; j-- > 0; )
    {
      if (shift == 0)
        {
          rp[rn++] = sp[j];
          shift += bits;
        }
      else
        {
          rp[rn-1] |= (mp_limb_t) sp[j] << shift;
          shift += bits;
          if (shift >= GMP_LIMB_BITS)
            {
              shift -= GMP_LIMB_BITS;
              if (shift > 0)
                rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
            }
        }
    }
  rn = mpn_normalized_size (rp, rn);
  return rn;
}

static mp_size_t
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
                   mp_limb_t b, const struct mpn_base_info *info)
{
  mp_size_t rn;
  mp_limb_t w;
  unsigned first;
  unsigned k;
  size_t j;

  first = 1 + (sn - 1) % info->exp;

  j = 0;
  w = sp[j++];
  for (k = 1; k < first; k++)
    w = w * b + sp[j++];

  rp[0] = w;

  for (rn = (w > 0); j < sn;)
    {
      mp_limb_t cy;

      w = sp[j++];
      for (k = 1; k < info->exp; k++)
        w = w * b + sp[j++];

      cy = mpn_mul_1 (rp, rp, rn, info->bb);
      cy += mpn_add_1 (rp, rp, rn, w);
      if (cy > 0)
        rp[rn++] = cy;
    }
  assert (j == sn);

  return rn;
}

mp_size_t
mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
{
  unsigned bits;

  if (sn == 0)
    return 0;

  bits = mpn_base_power_of_two_p (base);
  if (bits)
    return mpn_set_str_bits (rp, sp, sn, bits);
  else
    {
      struct mpn_base_info info;

      mpn_get_base_info (&info, base);
      return mpn_set_str_other (rp, sp, sn, base, &info);
    }
}
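
/* Illustrative sketch (not part of the GMP sources, left unused):
   mpn_set_str is the inverse direction; it consumes raw digit
   values, most significant first, and returns the limb count.  The
   caller guarantees sn > 0 and enough room at rp: */
static void
mpn_set_str_demo (void)
{
  const unsigned char digits[3] = { 1, 2, 3 };
  mp_limb_t r[3];
  mp_size_t rn = mpn_set_str (r, digits, 3, 10);
  assert (rn == 1 && r[0] == 123);
}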
/* MPZ interface */
void
mpz_init (mpz_t r)
{
  r->_mp_alloc = 1;
  r->_mp_size = 0;
  r->_mp_d = gmp_xalloc_limbs (1);
}

/* The utility of this function is a bit limited, since many functions
   assign the result variable using mpz_swap. */
void
mpz_init2 (mpz_t r, mp_bitcnt_t bits)
{
  mp_size_t rn;

  bits -= (bits != 0);          /* Round down, except if 0 */
  rn = 1 + bits / GMP_LIMB_BITS;

  r->_mp_alloc = rn;
  r->_mp_size = 0;
  r->_mp_d = gmp_xalloc_limbs (rn);
}

void
mpz_clear (mpz_t r)
{
  gmp_free (r->_mp_d);
}
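
/* Illustrative sketch (not part of the GMP sources, left unused):
   the basic mpz lifecycle; every mpz_t must be initialized before
   use and cleared afterwards to return its limb buffer: */
static void
mpz_lifecycle_demo (void)
{
  mpz_t x;
  mpz_init (x);         /* starts out as 0 with one allocated limb */
  mpz_set_ui (x, 42);
  assert (mpz_get_ui (x) == 42);
  mpz_clear (x);        /* releases x->_mp_d */
}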
static void *
mpz_realloc (mpz_t r, mp_size_t size)
{
  size = GMP_MAX (size, 1);

  r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
  r->_mp_alloc = size;

  if (GMP_ABS (r->_mp_size) > size)
    r->_mp_size = 0;

  return r->_mp_d;
}

/* Realloc for an mpz_t Z if it has less than N limbs. */
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
                          ? mpz_realloc(z,n) \
                          : (z)->_mp_d)
/* MPZ assignment and basic conversions. */
void
mpz_set_si (mpz_t r, signed long int x)
{
  if (x >= 0)
    mpz_set_ui (r, x);
  else /* (x < 0) */
    {
      r->_mp_size = -1;
      r->_mp_d[0] = GMP_NEG_CAST (unsigned long int, x);
    }
}

void
mpz_set_ui (mpz_t r, unsigned long int x)
{
  if (x > 0)
    {
      r->_mp_size = 1;
      r->_mp_d[0] = x;
    }
  else
    r->_mp_size = 0;
}

void
mpz_set (mpz_t r, const mpz_t x)
{
  /* Allow the NOP r == x */
  if (r != x)
    {
      mp_size_t n;
      mp_ptr rp;

      n = GMP_ABS (x->_mp_size);
      rp = MPZ_REALLOC (r, n);

      mpn_copyi (rp, x->_mp_d, n);
      r->_mp_size = x->_mp_size;
    }
}

void
mpz_init_set_si (mpz_t r, signed long int x)
{
  mpz_init (r);
  mpz_set_si (r, x);
}

void
mpz_init_set_ui (mpz_t r, unsigned long int x)
{
  mpz_init (r);
  mpz_set_ui (r, x);
}

void
mpz_init_set (mpz_t r, const mpz_t x)
{
  mpz_init (r);
  mpz_set (r, x);
}

int
mpz_fits_slong_p (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  if (us == 0)
    return 1;
  else if (us == 1)
    return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
  else if (us == -1)
    return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
  else
    return 0;
}

int
mpz_fits_ulong_p (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  return us == 0 || us == 1;
}

long int
mpz_get_si (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  if (us > 0)
    return (long) (u->_mp_d[0] & ~GMP_LIMB_HIGHBIT);
  else if (us < 0)
    return (long) (- u->_mp_d[0] | GMP_LIMB_HIGHBIT);
  else
    return 0;
}

unsigned long int
mpz_get_ui (const mpz_t u)
{
  return u->_mp_size == 0 ? 0 : u->_mp_d[0];
}

size_t
mpz_size (const mpz_t u)
{
  return GMP_ABS (u->_mp_size);
}

mp_limb_t
mpz_getlimbn (const mpz_t u, mp_size_t n)
{
  if (n >= 0 && n < GMP_ABS (u->_mp_size))
    return u->_mp_d[n];
  else
    return 0;
}

/* Conversions and comparison to double. */
void
mpz_set_d (mpz_t r, double x)
{
  int sign;
  mp_ptr rp;
  mp_size_t rn, i;
  double B;
  double Bi;
  mp_limb_t f;

  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
     zero or infinity. */
  if (x == 0.0 || x != x || x == x * 0.5)
    {
      r->_mp_size = 0;
      return;
    }

  if (x < 0.0)
    {
      x = - x;
      sign = 1;
    }
  else
    sign = 0;

  if (x < 1.0)
    {
      r->_mp_size = 0;
      return;
    }
  B = 2.0 * (double) GMP_LIMB_HIGHBIT;
  Bi = 1.0 / B;
  for (rn = 1; x >= B; rn++)
    x *= Bi;

  rp = MPZ_REALLOC (r, rn);

  f = (mp_limb_t) x;
  x -= f;
  assert (x < 1.0);
  rp[rn-1] = f;
  for (i = rn-1; i-- > 0; )
    {
      x = B * x;
      f = (mp_limb_t) x;
      x -= f;
      assert (x < 1.0);
      rp[i] = f;
    }

  r->_mp_size = sign ? - rn : rn;
}
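
/* Illustrative sketch (not part of the GMP sources, left unused):
   mpz_set_d extracts limbs from the scaled double, so the conversion
   truncates toward zero; NaN, infinities and |x| < 1 all give 0: */
static void
mpz_set_d_demo (void)
{
  mpz_t r;
  mpz_init (r);
  mpz_set_d (r, 3.9);
  assert (mpz_get_si (r) == 3);
  mpz_set_d (r, -3.9);
  assert (mpz_get_si (r) == -3);
  mpz_clear (r);
}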
void
mpz_init_set_d (mpz_t r, double x)
{
  mpz_init (r);
  mpz_set_d (r, x);
}

double
mpz_get_d (const mpz_t u)
{
  mp_size_t un;
  double x;
  double B = 2.0 * (double) GMP_LIMB_HIGHBIT;

  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    return 0.0;

  x = u->_mp_d[--un];
  while (un > 0)
    x = B*x + u->_mp_d[--un];

  if (u->_mp_size < 0)
    x = -x;

  return x;
}

int
mpz_cmpabs_d (const mpz_t x, double d)
{
  mp_size_t xn;
  double B, Bi;
  mp_size_t i;

  xn = x->_mp_size;
  d = GMP_ABS (d);

  if (xn != 0)
    {
      xn = GMP_ABS (xn);

      B = 2.0 * (double) GMP_LIMB_HIGHBIT;
      Bi = 1.0 / B;

      /* Scale d so it can be compared with the top limb. */
      for (i = 1; i < xn; i++)
        d *= Bi;

      if (d >= B)
        return -1;

      /* Compare floor(d) to top limb, subtract and cancel when equal. */
      for (i = xn; i-- > 0;)
        {
          mp_limb_t f, xl;

          f = (mp_limb_t) d;
          xl = x->_mp_d[i];
          if (xl > f)
            return 1;
          else if (xl < f)
            return -1;
          d = B * (d - f);
        }
    }
  return - (d > 0.0);
}

int
mpz_cmp_d (const mpz_t x, double d)
{
  if (x->_mp_size < 0)
    {
      if (d >= 0.0)
        return -1;
      else
        return -mpz_cmpabs_d (x, d);
    }
  else
    {
      if (d < 0.0)
        return 1;
      else
        return mpz_cmpabs_d (x, d);
    }
}

/* MPZ comparisons and the like. */
int
mpz_sgn (const mpz_t u)
{
  mp_size_t usize = u->_mp_size;

  if (usize > 0)
    return 1;
  else if (usize < 0)
    return -1;
  else
    return 0;
}

int
mpz_cmp_si (const mpz_t u, long v)
{
  mp_size_t usize = u->_mp_size;

  if (usize < -1)
    return -1;
  else if (v >= 0)
    return mpz_cmp_ui (u, v);
  else if (usize >= 0)
    return 1;
  else /* usize == -1 */
    {
      mp_limb_t ul = u->_mp_d[0];
      if ((mp_limb_t)GMP_NEG_CAST (unsigned long int, v) < ul)
        return -1;
      else if ( (mp_limb_t)GMP_NEG_CAST (unsigned long int, v) > ul)
        return 1;
    }
  return 0;
}

int
mpz_cmp_ui (const mpz_t u, unsigned long v)
{
  mp_size_t usize = u->_mp_size;

  if (usize > 1)
    return 1;
  else if (usize < 0)
    return -1;
  else
    {
      mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
      if (ul > v)
        return 1;
      else if (ul < v)
        return -1;
    }
  return 0;
}

int
mpz_cmp (const mpz_t a, const mpz_t b)
{
  mp_size_t asize = a->_mp_size;
  mp_size_t bsize = b->_mp_size;

  if (asize > bsize)
    return 1;
  else if (asize < bsize)
    return -1;
  else if (asize > 0)
    return mpn_cmp (a->_mp_d, b->_mp_d, asize);
  else if (asize < 0)
    return -mpn_cmp (a->_mp_d, b->_mp_d, -asize);
  else
    return 0;
}

int
mpz_cmpabs_ui (const mpz_t u, unsigned long v)
{
  mp_size_t un = GMP_ABS (u->_mp_size);
  mp_limb_t ul;

  if (un > 1)
    return 1;

  ul = (un == 1) ? u->_mp_d[0] : 0;

  if (ul > v)
    return 1;
  else if (ul < v)
    return -1;
  else
    return 0;
}

int
mpz_cmpabs (const mpz_t u, const mpz_t v)
{
  return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
                   v->_mp_d, GMP_ABS (v->_mp_size));
}

void
mpz_abs (mpz_t r, const mpz_t u)
{
  if (r != u)
    mpz_set (r, u);

  r->_mp_size = GMP_ABS (r->_mp_size);
}

void
mpz_neg (mpz_t r, const mpz_t u)
{
  if (r != u)
    mpz_set (r, u);

  r->_mp_size = -r->_mp_size;
}

void
mpz_swap (mpz_t u, mpz_t v)
{
  MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
  MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
  MP_PTR_SWAP (u->_mp_d, v->_mp_d);
}
/* MPZ addition and subtraction */

/* Adds to the absolute value. Returns new size, but doesn't store it. */
static mp_size_t
mpz_abs_add_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an;
  mp_ptr rp;
  mp_limb_t cy;

  an = GMP_ABS (a->_mp_size);
  if (an == 0)
    {
      r->_mp_d[0] = b;
      return b > 0;
    }

  rp = MPZ_REALLOC (r, an + 1);

  cy = mpn_add_1 (rp, a->_mp_d, an, b);
  rp[an] = cy;
  an += (cy > 0);

  return an;
}

/* Subtract from the absolute value. Returns new size (or -1 on underflow),
   but doesn't store it. */
static mp_size_t
mpz_abs_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_ptr rp = MPZ_REALLOC (r, an);

  if (an == 0)
    {
      rp[0] = b;
      return -(b > 0);
    }
  else if (an == 1 && a->_mp_d[0] < b)
    {
      rp[0] = b - a->_mp_d[0];
      return -1;
    }
  else
    {
      gmp_assert_nocarry (mpn_sub_1 (rp, a->_mp_d, an, b));
      return mpn_normalized_size (rp, an);
    }
}

void
mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  if (a->_mp_size >= 0)
    r->_mp_size = mpz_abs_add_ui (r, a, b);
  else
    r->_mp_size = -mpz_abs_sub_ui (r, a, b);
}

void
mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  if (a->_mp_size < 0)
    r->_mp_size = -mpz_abs_add_ui (r, a, b);
  else
    r->_mp_size = mpz_abs_sub_ui (r, a, b);
}

void
mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
{
  if (b->_mp_size < 0)
    r->_mp_size = mpz_abs_add_ui (r, b, a);
  else
    r->_mp_size = -mpz_abs_sub_ui (r, b, a);
}

static mp_size_t
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  mp_size_t rn;
  mp_ptr rp;
  mp_limb_t cy;

  rn = GMP_MAX (an, bn);
  rp = MPZ_REALLOC (r, rn + 1);
  if (an >= bn)
    cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
  else
    cy = mpn_add (rp, b->_mp_d, bn, a->_mp_d, an);

  rp[rn] = cy;

  return rn + (cy > 0);
}

static mp_size_t
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  int cmp;
  mp_ptr rp;

  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
  if (cmp > 0)
    {
      rp = MPZ_REALLOC (r, an);
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
      return mpn_normalized_size (rp, an);
    }
  else if (cmp < 0)
    {
      rp = MPZ_REALLOC (r, bn);
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
      return -mpn_normalized_size (rp, bn);
    }
  else
    return 0;
}

void
mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t rn;

  if ( (a->_mp_size ^ b->_mp_size) >= 0)
    rn = mpz_abs_add (r, a, b);
  else
    rn = mpz_abs_sub (r, a, b);

  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
}

void
mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t rn;

  if ( (a->_mp_size ^ b->_mp_size) >= 0)
    rn = mpz_abs_sub (r, a, b);
  else
    rn = mpz_abs_add (r, a, b);

  r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
}

/* MPZ multiplication */
void
mpz_mul_si (mpz_t r, const mpz_t u, long int v)
{
  if (v < 0)
    {
      mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
      mpz_neg (r, r);
    }
  else
    mpz_mul_ui (r, u, (unsigned long int) v);
}

void
mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
{
  mp_size_t un;
  mpz_t t;
  mp_ptr tp;
  mp_limb_t cy;

  un = GMP_ABS (u->_mp_size);

  if (un == 0 || v == 0)
    {
      r->_mp_size = 0;
      return;
    }

  mpz_init2 (t, (un + 1) * GMP_LIMB_BITS);

  tp = t->_mp_d;
  cy = mpn_mul_1 (tp, u->_mp_d, un, v);
  tp[un] = cy;

  t->_mp_size = un + (cy > 0);
  if (u->_mp_size < 0)
    t->_mp_size = - t->_mp_size;

  mpz_swap (r, t);
  mpz_clear (t);
}

void
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
{
  int sign;
  mp_size_t un, vn, rn;
  mpz_t t;
  mp_ptr tp;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);

  if (un == 0 || vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  sign = (u->_mp_size ^ v->_mp_size) < 0;

  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);

  tp = t->_mp_d;
  if (un >= vn)
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
  else
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);

  rn = un + vn;
  rn -= tp[rn-1] == 0;

  t->_mp_size = sign ? - rn : rn;
  mpz_swap (r, t);
  mpz_clear (t);
}

void
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
{
  mp_size_t un, rn;
  mp_size_t limbs;
  unsigned shift;
  mp_ptr rp;

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    {
      r->_mp_size = 0;
      return;
    }

  limbs = bits / GMP_LIMB_BITS;
  shift = bits % GMP_LIMB_BITS;

  rn = un + limbs + (shift > 0);
  rp = MPZ_REALLOC (r, rn);
  if (shift > 0)
    {
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
      rp[rn-1] = cy;
      rn -= (cy == 0);
    }
  else
    mpn_copyd (rp + limbs, u->_mp_d, un);

  while (limbs > 0)
    rp[--limbs] = 0;

  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
}

/* MPZ division */
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };

/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
static int
mpz_div_qr (mpz_t q, mpz_t r,
            const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, ds, nn, dn, qs;
  ns = n->_mp_size;
  ds = d->_mp_size;

  if (ds == 0)
    gmp_die ("mpz_div_qr: Divide by zero.");

  if (ns == 0)
    {
      if (q)
        q->_mp_size = 0;
      if (r)
        r->_mp_size = 0;
      return 0;
    }

  nn = GMP_ABS (ns);
  dn = GMP_ABS (ds);

  qs = ds ^ ns;

  if (nn < dn)
    {
      if (mode == GMP_DIV_CEIL && qs >= 0)
        {
          /* q = 1, r = n - d */
          if (r)
            mpz_sub (r, n, d);
          if (q)
            mpz_set_ui (q, 1);
        }
      else if (mode == GMP_DIV_FLOOR && qs < 0)
        {
          /* q = -1, r = n + d */
          if (r)
            mpz_add (r, n, d);
          if (q)
            mpz_set_si (q, -1);
        }
      else
        {
          /* q = 0, r = n */
          if (r)
            mpz_set (r, n);
          if (q)
            q->_mp_size = 0;
        }
      return 1;
    }
  else
    {
      mp_ptr np, qp;
      mp_size_t qn, rn;
      mpz_t tq, tr;

      mpz_init (tr);
      mpz_set (tr, n);
      np = tr->_mp_d;

      qn = nn - dn + 1;

      if (q)
        {
          mpz_init2 (tq, qn * GMP_LIMB_BITS);
          qp = tq->_mp_d;
        }
      else
        qp = NULL;

      mpn_div_qr (qp, np, nn, d->_mp_d, dn);

      if (qp)
        {
          qn -= (qp[qn-1] == 0);

          tq->_mp_size = qs < 0 ? -qn : qn;
        }
      rn = mpn_normalized_size (np, dn);
      tr->_mp_size = ns < 0 ? - rn : rn;

      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
        {
          if (q)
            mpz_sub_ui (tq, tq, 1);
          if (r)
            mpz_add (tr, tr, d);
        }
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
        {
          if (q)
            mpz_add_ui (tq, tq, 1);
          if (r)
            mpz_sub (tr, tr, d);
        }

      if (q)
        {
          mpz_swap (tq, q);
          mpz_clear (tq);
        }
      if (r)
        mpz_swap (tr, r);

      mpz_clear (tr);

      return rn != 0;
    }
}
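
/* Illustrative summary (editorial, not part of the library): the three
   rounding modes agree when d divides n, and differ only in how a
   non-zero remainder is attributed.  For n = 7, d = 2:

     GMP_DIV_TRUNC:  q =  3, r =  1   (round toward zero)
     GMP_DIV_FLOOR:  q =  3, r =  1   (round toward -infinity)
     GMP_DIV_CEIL:   q =  4, r = -1   (round toward +infinity)

   and for n = -7, d = 2:

     GMP_DIV_TRUNC:  q = -3, r = -1
     GMP_DIV_FLOOR:  q = -4, r =  1
     GMP_DIV_CEIL:   q = -3, r = -1

   In every mode n = q d + r with |r| < |d|. */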

void
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
}

void
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
}

void
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
}

void
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
}

void
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
}

void
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
{
  if (d->_mp_size >= 0)
    mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
  else
    mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
}

static void
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
                enum mpz_div_round_mode mode)
{
  mp_size_t un, qn;
  mp_size_t limb_cnt;
  mp_ptr qp;
  mp_limb_t adjust;

  un = u->_mp_size;
  if (un == 0)
    {
      q->_mp_size = 0;
      return;
    }
  limb_cnt = bit_index / GMP_LIMB_BITS;
  qn = GMP_ABS (un) - limb_cnt;
  bit_index %= GMP_LIMB_BITS;

  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
    /* Note: Below, the final indexing at limb_cnt is valid because at
       that point we have qn > 0. */
    adjust = (qn <= 0
              || !mpn_zero_p (u->_mp_d, limb_cnt)
              || (u->_mp_d[limb_cnt]
                  & (((mp_limb_t) 1 << bit_index) - 1)));
  else
    adjust = 0;

  if (qn <= 0)
    qn = 0;
  else
    {
      qp = MPZ_REALLOC (q, qn);

      if (bit_index != 0)
        {
          mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
          qn -= qp[qn - 1] == 0;
        }
      else
        {
          mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
        }
    }

  q->_mp_size = qn;

  mpz_add_ui (q, q, adjust);
  if (un < 0)
    mpz_neg (q, q);
}

static void
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
                enum mpz_div_round_mode mode)
{
  mp_size_t us, un, rn;
  mp_ptr rp;
  mp_limb_t mask;

  us = u->_mp_size;
  if (us == 0 || bit_index == 0)
    {
      r->_mp_size = 0;
      return;
    }
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  assert (rn > 0);

  rp = MPZ_REALLOC (r, rn);
  un = GMP_ABS (us);

  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);

  if (rn > un)
    {
      /* Quotient (with truncation) is zero, and remainder is
         non-zero */
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
        {
          /* Have to negate and sign extend. */
          mp_size_t i;
          mp_limb_t cy;

          for (cy = 1, i = 0; i < un; i++)
            {
              mp_limb_t s = ~u->_mp_d[i] + cy;
              cy = s < cy;
              rp[i] = s;
            }
          assert (cy == 0);
          for (; i < rn - 1; i++)
            rp[i] = GMP_LIMB_MAX;

          rp[rn-1] = mask;
          us = -us;
        }
      else
        {
          /* Just copy */
          if (r != u)
            mpn_copyi (rp, u->_mp_d, un);

          rn = un;
        }
    }
  else
    {
      if (r != u)
        mpn_copyi (rp, u->_mp_d, rn - 1);

      rp[rn-1] = u->_mp_d[rn-1] & mask;

      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
        {
          /* If r != 0, compute 2^{bit_count} - r. */
          mp_size_t i;

          for (i = 0; i < rn && rp[i] == 0; i++)
            ;
          if (i < rn)
            {
              /* r > 0, need to flip sign. */
              rp[i] = ~rp[i] + 1;
              for (i++; i < rn; i++)
                rp[i] = ~rp[i];

              rp[rn-1] &= mask;

              /* us is not used for anything else, so we can modify it
                 here to indicate flipped sign. */
              us = -us;
            }
        }
    }
  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = us < 0 ? -rn : rn;
}

void
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
}

void
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

void
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
}

void
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
}

void
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

void
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
}

void
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
{
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
}

int
mpz_divisible_p (const mpz_t n, const mpz_t d)
{
  return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}

static unsigned long
mpz_div_qr_ui (mpz_t q, mpz_t r,
               const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, qn;
  mp_ptr qp;
  mp_limb_t rl;
  mp_size_t rs;

  ns = n->_mp_size;
  if (ns == 0)
    {
      if (q)
        q->_mp_size = 0;
      if (r)
        r->_mp_size = 0;
      return 0;
    }

  qn = GMP_ABS (ns);
  if (q)
    qp = MPZ_REALLOC (q, qn);
  else
    qp = NULL;

  rl = mpn_div_qr_1 (qp, n->_mp_d, qn, d);
  assert (rl < d);

  rs = rl > 0;
  rs = (ns < 0) ? -rs : rs;

  if (rl > 0 && ( (mode == GMP_DIV_FLOOR && ns < 0)
                  || (mode == GMP_DIV_CEIL && ns >= 0)))
    {
      if (q)
        gmp_assert_nocarry (mpn_add_1 (qp, qp, qn, 1));
      rl = d - rl;
      rs = -rs;
    }

  if (r)
    {
      r->_mp_d[0] = rl;
      r->_mp_size = rs;
    }
  if (q)
    {
      qn -= (qp[qn-1] == 0);
      assert (qn == 0 || qp[qn-1] > 0);

      q->_mp_size = (ns < 0) ? - qn : qn;
    }

  return rl;
}

unsigned long
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
}

unsigned long
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
}

unsigned long
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
}

unsigned long
mpz_cdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
}

unsigned long
mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

void
mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
}

int
mpz_divisible_ui_p (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}

/* GCD */
static mp_limb_t
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
{
  unsigned shift;

  assert ( (u | v) > 0);

  if (u == 0)
    return v;
  else if (v == 0)
    return u;

  gmp_ctz (shift, u | v);

  u >>= shift;
  v >>= shift;

  if ( (u & 1) == 0)
    MP_LIMB_T_SWAP (u, v);

  while ( (v & 1) == 0)
    v >>= 1;

  while (u != v)
    {
      if (u > v)
        {
          u -= v;
          do
            u >>= 1;
          while ( (u & 1) == 0);
        }
      else
        {
          v -= u;
          do
            v >>= 1;
          while ( (v & 1) == 0);
        }
    }
  return u << shift;
}
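
/* Worked example (editorial sketch, not part of the library): for
   mpn_gcd_11 (12, 18), u | v = 30 has one trailing zero bit, so
   shift = 1 and u = 6, v = 9.  u is even, so the operands are swapped
   (u = 9, v = 6) and v is reduced to its odd part 3.  The
   subtract-and-shift loop then gives u = (9 - 3) >> 1 = 3 = v, and the
   result is 3 << 1 = 6 = gcd (12, 18). */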

unsigned long
mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
{
  mp_size_t un;

  if (v == 0)
    {
      if (g)
        mpz_abs (g, u);
    }
  else
    {
      un = GMP_ABS (u->_mp_size);
      if (un != 0)
        v = mpn_gcd_11 (mpn_div_qr_1 (NULL, u->_mp_d, un, v), v);

      if (g)
        mpz_set_ui (g, v);
    }

  return v;
}

static mp_bitcnt_t
mpz_make_odd (mpz_t r, const mpz_t u)
{
  mp_size_t un, rn, i;
  mp_ptr rp;
  unsigned shift;

  un = GMP_ABS (u->_mp_size);
  assert (un > 0);

  for (i = 0; u->_mp_d[i] == 0; i++)
    ;
  gmp_ctz (shift, u->_mp_d[i]);

  rn = un - i;
  rp = MPZ_REALLOC (r, rn);
  if (shift > 0)
    {
      mpn_rshift (rp, u->_mp_d + i, rn, shift);
      rn -= (rp[rn-1] == 0);
    }
  else
    mpn_copyi (rp, u->_mp_d + i, rn);

  r->_mp_size = rn;
  return i * GMP_LIMB_BITS + shift;
}

void
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv;
  mp_bitcnt_t uz, vz, gz;

  if (u->_mp_size == 0)
    {
      mpz_abs (g, v);
      return;
    }
  if (v->_mp_size == 0)
    {
      mpz_abs (g, u);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);

  uz = mpz_make_odd (tu, u);
  vz = mpz_make_odd (tv, v);
  gz = GMP_MIN (uz, vz);

  if (tu->_mp_size < tv->_mp_size)
    mpz_swap (tu, tv);

  mpz_tdiv_r (tu, tu, tv);
  if (tu->_mp_size == 0)
    {
      mpz_swap (g, tv);
    }
  else
    for (;;)
      {
        int c;

        mpz_make_odd (tu, tu);
        c = mpz_cmp (tu, tv);
        if (c == 0)
          {
            mpz_swap (g, tu);
            break;
          }
        if (c < 0)
          mpz_swap (tu, tv);

        if (tv->_mp_size == 1)
          {
            mp_limb_t vl = tv->_mp_d[0];
            mp_limb_t ul = mpz_tdiv_ui (tu, vl);
            mpz_set_ui (g, mpn_gcd_11 (ul, vl));
            break;
          }
        mpz_sub (tu, tu, tv);
      }
  mpz_clear (tu);
  mpz_clear (tv);
  mpz_mul_2exp (g, g, gz);
}

void
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv, s0, s1, t0, t1;
  mp_bitcnt_t uz, vz, gz;
  mp_bitcnt_t power;

  if (u->_mp_size == 0)
    {
      /* g = 0 u + sgn(v) v */
      signed long sign = mpz_sgn (v);
      mpz_abs (g, v);
      if (s)
        mpz_set_ui (s, 0);
      if (t)
        mpz_set_si (t, sign);
      return;
    }

  if (v->_mp_size == 0)
    {
      /* g = sgn(u) u + 0 v */
      signed long sign = mpz_sgn (u);
      mpz_abs (g, u);
      if (s)
        mpz_set_si (s, sign);
      if (t)
        mpz_set_ui (t, 0);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);
  mpz_init (s0);
  mpz_init (s1);
  mpz_init (t0);
  mpz_init (t1);

  uz = mpz_make_odd (tu, u);
  vz = mpz_make_odd (tv, v);
  gz = GMP_MIN (uz, vz);

  uz -= gz;
  vz -= gz;

  /* Cofactors corresponding to odd gcd. gz handled later. */
  if (tu->_mp_size < tv->_mp_size)
    {
      mpz_swap (tu, tv);
      MPZ_SRCPTR_SWAP (u, v);
      MPZ_PTR_SWAP (s, t);
      MP_BITCNT_T_SWAP (uz, vz);
    }

  /* Maintain
   *
   *   u = t0 tu + t1 tv
   *   v = s0 tu + s1 tv
   *
   * where u and v denote the inputs with common factors of two
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
   *
   *   2^p tu =  s1 u - t1 v
   *   2^p tv = -s0 u + t0 v
   */

  /* After initial division, tu = q tv + tu', we have
   *
   *   u = 2^uz (tu' + q tv)
   *   v = 2^vz tv
   *
   * or
   *
   *   t0 = 2^uz, t1 = 2^uz q
   *   s0 = 0,    s1 = 2^vz
   */

  mpz_setbit (t0, uz);
  mpz_tdiv_qr (t1, tu, tu, tv);
  mpz_mul_2exp (t1, t1, uz);

  mpz_setbit (s1, vz);
  power = uz + vz;

  if (tu->_mp_size > 0)
    {
      mp_bitcnt_t shift;
      shift = mpz_make_odd (tu, tu);
      mpz_mul_2exp (t0, t0, shift);
      mpz_mul_2exp (s0, s0, shift);
      power += shift;

      for (;;)
        {
          int c;
          c = mpz_cmp (tu, tv);
          if (c == 0)
            break;

          if (c < 0)
            {
              /* tv = tv' + tu
               *
               * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
               * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */

              mpz_sub (tv, tv, tu);
              mpz_add (t0, t0, t1);
              mpz_add (s0, s0, s1);

              shift = mpz_make_odd (tv, tv);
              mpz_mul_2exp (t1, t1, shift);
              mpz_mul_2exp (s1, s1, shift);
            }
          else
            {
              mpz_sub (tu, tu, tv);
              mpz_add (t1, t0, t1);
              mpz_add (s1, s0, s1);

              shift = mpz_make_odd (tu, tu);
              mpz_mul_2exp (t0, t0, shift);
              mpz_mul_2exp (s0, s0, shift);
            }
          power += shift;
        }
    }

  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
     cofactors. */

  mpz_mul_2exp (tv, tv, gz);
  mpz_neg (s0, s0);

  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
     adjust cofactors, we need u / g and v / g */

  mpz_divexact (s1, v, tv);
  mpz_abs (s1, s1);
  mpz_divexact (t1, u, tv);
  mpz_abs (t1, t1);

  while (power-- > 0)
    {
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
        {
          mpz_sub (s0, s0, s1);
          mpz_add (t0, t0, t1);
        }
      mpz_divexact_ui (s0, s0, 2);
      mpz_divexact_ui (t0, t0, 2);
    }

  /* Arrange so that |s| < |u| / 2g */
  mpz_add (s1, s0, s1);
  if (mpz_cmpabs (s0, s1) > 0)
    {
      mpz_swap (s0, s1);
      mpz_sub (t0, t0, t1);
    }

  if (u->_mp_size < 0)
    mpz_neg (s0, s0);
  if (v->_mp_size < 0)
    mpz_neg (t0, t0);

  mpz_swap (g, tv);
  if (s)
    mpz_swap (s, s0);
  if (t)
    mpz_swap (t, t0);

  mpz_clear (tu);
  mpz_clear (tv);
  mpz_clear (s0);
  mpz_clear (s1);
  mpz_clear (t0);
  mpz_clear (t1);
}
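
/* Usage sketch (illustrative, not part of mini-gmp): mpz_gcdext
   computes g = gcd (u, v) together with cofactors satisfying
   g = s u + t v; s or t may be NULL when not needed.

     mpz_t g, s, t, u, v;
     mpz_init (g); mpz_init (s); mpz_init (t);
     mpz_init_set_ui (u, 240);
     mpz_init_set_ui (v, 46);
     mpz_gcdext (g, s, t, u, v);

   Afterwards g = 2 and 240 s + 46 t = 2; one such pair is s = 14,
   t = -73, and the normalization above keeps |s| < |u| / 2g. */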

void
mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
{
  mpz_t g;

  if (u->_mp_size == 0 || v->_mp_size == 0)
    {
      r->_mp_size = 0;
      return;
    }

  mpz_init (g);

  mpz_gcd (g, u, v);
  mpz_divexact (g, u, g);
  mpz_mul (r, g, v);

  mpz_clear (g);
  mpz_abs (r, r);
}

void
mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
{
  if (v == 0 || u->_mp_size == 0)
    {
      r->_mp_size = 0;
      return;
    }

  v /= mpz_gcd_ui (NULL, u, v);
  mpz_mul_ui (r, u, v);

  mpz_abs (r, r);
}

int
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
{
  mpz_t g, tr;
  int invertible;

  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
    return 0;

  mpz_init (g);
  mpz_init (tr);

  mpz_gcdext (g, tr, NULL, u, m);
  invertible = (mpz_cmp_ui (g, 1) == 0);

  if (invertible)
    {
      if (tr->_mp_size < 0)
        {
          if (m->_mp_size >= 0)
            mpz_add (tr, tr, m);
          else
            mpz_sub (tr, tr, m);
        }
      mpz_swap (r, tr);
    }

  mpz_clear (g);
  mpz_clear (tr);
  return invertible;
}
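
/* Usage note (illustrative): mpz_invert returns non-zero iff the
   inverse exists, i.e. iff gcd (u, m) = 1, and stores the result only
   in that case.  E.g. the inverse of 3 modulo 7 is 5, since
   3 * 5 = 15 = 1 (mod 7); 2 has no inverse modulo 4, so mpz_invert
   returns 0 and leaves r unchanged. */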

/* Higher level operations (sqrt, pow and root) */

void
mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
{
  unsigned long bit;
  mpz_t tr;

  mpz_init_set_ui (tr, 1);

  for (bit = GMP_ULONG_HIGHBIT; bit > 0; bit >>= 1)
    {
      mpz_mul (tr, tr, tr);
      if (e & bit)
        mpz_mul (tr, tr, b);
    }
  mpz_swap (r, tr);
  mpz_clear (tr);
}

void
mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
{
  mpz_t b;
  mpz_init_set_ui (b, blimb);
  mpz_pow_ui (r, b, e);
  mpz_clear (b);
}

void
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
{
  mpz_t tr;
  mpz_t base;
  mp_size_t en, mn;
  mp_srcptr mp;
  struct gmp_div_inverse minv;
  unsigned shift;
  mp_ptr tp = NULL;

  en = GMP_ABS (e->_mp_size);
  mn = GMP_ABS (m->_mp_size);
  if (mn == 0)
    gmp_die ("mpz_powm: Zero modulo.");

  if (en == 0)
    {
      mpz_set_ui (r, 1);
      return;
    }

  mp = m->_mp_d;
  mpn_div_qr_invert (&minv, mp, mn);
  shift = minv.shift;

  if (shift > 0)
    {
      /* To avoid shifts, we do all our reductions, except the final
         one, using a *normalized* m. */
      minv.shift = 0;

      tp = gmp_xalloc_limbs (mn);
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
      mp = tp;
    }

  mpz_init (base);

  if (e->_mp_size < 0)
    {
      if (!mpz_invert (base, b, m))
        gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
    }
  else
    {
      mp_size_t bn;
      mpz_abs (base, b);

      bn = base->_mp_size;
      if (bn >= mn)
        {
          mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
          bn = mn;
        }

      /* We have reduced the absolute value. Now take care of the
         sign. Note that we get zero represented non-canonically as
         m. */
      if (b->_mp_size < 0)
        {
          mp_ptr bp = MPZ_REALLOC (base, mn);
          gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
          bn = mn;
        }
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
    }
  mpz_init_set_ui (tr, 1);

  while (en-- > 0)
    {
      mp_limb_t w = e->_mp_d[en];
      mp_limb_t bit;

      for (bit = GMP_LIMB_HIGHBIT; bit > 0; bit >>= 1)
        {
          mpz_mul (tr, tr, tr);
          if (w & bit)
            mpz_mul (tr, tr, base);
          if (tr->_mp_size > mn)
            {
              mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
              tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
            }
        }
    }

  /* Final reduction */
  if (tr->_mp_size >= mn)
    {
      minv.shift = shift;
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
    }
  if (tp)
    gmp_free (tp);

  mpz_swap (r, tr);
  mpz_clear (tr);
  mpz_clear (base);
}

void
mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
{
  mpz_t e;
  mpz_init_set_ui (e, elimb);
  mpz_powm (r, b, e, m);
  mpz_clear (e);
}
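
/* Editorial note: the main loop above is plain left-to-right binary
   (square-and-multiply) exponentiation, reducing whenever the
   intermediate result exceeds mn limbs.  Conceptually, for
   3^13 mod 7 with 13 = 1101 in binary, the residue evolves
   1 -> 3 -> 6 -> 1 -> 3, so the result is 3. */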

/* x=trunc(y^(1/z)), r=y-x^z */
void
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
{
  int sgn;
  mpz_t t, u;

  sgn = y->_mp_size < 0;
  if (sgn && (z & 1) == 0)
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
  if (z == 0)
    gmp_die ("mpz_rootrem: Zeroth root.");

  if (mpz_cmpabs_ui (y, 1) <= 0) {
    mpz_set (x, y);
    if (r)
      r->_mp_size = 0;
    return;
  }

  mpz_init (t);
  mpz_init (u);
  mpz_setbit (t, mpz_sizeinbase (y, 2) / z + 1);

  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
    do {
      mpz_swap (u, t);                  /* u = x */
      mpz_tdiv_q (t, y, u);             /* t = y/x */
      mpz_add (t, t, u);                /* t = y/x + x */
      mpz_tdiv_q_2exp (t, t, 1);        /* x'= (y/x + x)/2 */
    } while (mpz_cmpabs (t, u) < 0);    /* |x'| < |x| */
  else /* z != 2 */ {
    mpz_t v;

    mpz_init (v);
    if (sgn)
      mpz_neg (t, t);

    do {
      mpz_swap (u, t);                  /* u = x */
      mpz_pow_ui (t, u, z - 1);         /* t = x^(z-1) */
      mpz_tdiv_q (t, y, t);             /* t = y/x^(z-1) */
      mpz_mul_ui (v, u, z - 1);         /* v = x*(z-1) */
      mpz_add (t, t, v);                /* t = y/x^(z-1) + x*(z-1) */
      mpz_tdiv_q_ui (t, t, z);          /* x'=(y/x^(z-1) + x*(z-1))/z */
    } while (mpz_cmpabs (t, u) < 0);    /* |x'| < |x| */

    mpz_clear (v);
  }

  if (r) {
    mpz_pow_ui (t, u, z);
    mpz_sub (r, y, t);
  }
  mpz_swap (x, u);
  mpz_clear (u);
  mpz_clear (t);
}

int
mpz_root (mpz_t x, const mpz_t y, unsigned long z)
{
  int res;
  mpz_t r;

  mpz_init (r);
  mpz_rootrem (x, r, y, z);
  res = r->_mp_size == 0;
  mpz_clear (r);

  return res;
}

/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
void
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
{
  mpz_rootrem (s, r, u, 2);
}

void
mpz_sqrt (mpz_t s, const mpz_t u)
{
  mpz_rootrem (s, NULL, u, 2);
}
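
/* Worked example (illustrative): mpz_rootrem with y = 100, z = 3 runs
   the Newton-style iteration x' = (y / x^(z-1) + (z-1) x) / z down
   from a power-of-two overestimate until it stops decreasing, ending
   at x = 4 and r = 100 - 4^3 = 36. */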

/* Combinatorics */

void
mpz_fac_ui (mpz_t x, unsigned long n)
{
  if (n < 2) {
    mpz_set_ui (x, 1);
    return;
  }
  mpz_set_ui (x, n);
  for (;--n > 1;)
    mpz_mul_ui (x, x, n);
}

void
mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
{
  mpz_t t;

  if (k > n) {
    r->_mp_size = 0;
    return;
  }
  mpz_fac_ui (r, n);
  mpz_init (t);
  mpz_fac_ui (t, k);
  mpz_divexact (r, r, t);
  mpz_fac_ui (t, n - k);
  mpz_divexact (r, r, t);
  mpz_clear (t);
}

/* Logical operations and bit manipulation. */

/* Numbers are treated as if represented in two's complement (and
   infinitely sign extended). For negative values we get the two's
   complement from -x = ~x + 1, where ~ is bitwise complement.
   Negation transforms

     xxxx10...0

   into

     yyyy10...0

   where yyyy is the bitwise complement of xxxx. So least significant
   bits, up to and including the first one bit, are unchanged, and
   the more significant bits are all complemented.

   To change a bit from zero to one in a negative number, subtract the
   corresponding power of two from the absolute value. This can never
   underflow. To change a bit from one to zero, add the corresponding
   power of two, and this might overflow. E.g., if x = -001111, the
   two's complement is 110001. Clearing the least significant bit, we
   get two's complement 110000, and -010000. */

int
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t limb_index;
  unsigned shift;
  mp_size_t ds;
  mp_size_t dn;
  mp_limb_t w;
  int bit;

  ds = d->_mp_size;
  dn = GMP_ABS (ds);
  limb_index = bit_index / GMP_LIMB_BITS;
  if (limb_index >= dn)
    return ds < 0;

  shift = bit_index % GMP_LIMB_BITS;
  w = d->_mp_d[limb_index];
  bit = (w >> shift) & 1;

  if (ds < 0)
    {
      /* d < 0. Check if any of the bits below is set: If so, our bit
         must be complemented. */
      if (shift > 0 && (w << (GMP_LIMB_BITS - shift)) > 0)
        return bit ^ 1;
      while (limb_index-- > 0)
        if (d->_mp_d[limb_index] > 0)
          return bit ^ 1;
    }
  return bit;
}

static void
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_limb_t bit;
  mp_ptr dp;

  dn = GMP_ABS (d->_mp_size);

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  if (limb_index >= dn)
    {
      mp_size_t i;
      /* The bit should be set outside of the end of the number.
         We have to increase the size of the number. */
      dp = MPZ_REALLOC (d, limb_index + 1);

      dp[limb_index] = bit;
      for (i = dn; i < limb_index; i++)
        dp[i] = 0;
      dn = limb_index + 1;
    }
  else
    {
      mp_limb_t cy;

      dp = d->_mp_d;

      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
      if (cy > 0)
        {
          dp = MPZ_REALLOC (d, dn + 1);
          dp[dn++] = cy;
        }
    }

  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}

static void
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_ptr dp;
  mp_limb_t bit;

  dn = GMP_ABS (d->_mp_size);
  dp = d->_mp_d;

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  assert (limb_index < dn);

  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
                                 dn - limb_index, bit));
  dn -= (dp[dn-1] == 0);
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}

void
mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
{
  if (!mpz_tstbit (d, bit_index))
    {
      if (d->_mp_size >= 0)
        mpz_abs_add_bit (d, bit_index);
      else
        mpz_abs_sub_bit (d, bit_index);
    }
}

void
mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
{
  if (mpz_tstbit (d, bit_index))
    {
      if (d->_mp_size >= 0)
        mpz_abs_sub_bit (d, bit_index);
      else
        mpz_abs_add_bit (d, bit_index);
    }
}

void
mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
{
  if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
    mpz_abs_sub_bit (d, bit_index);
  else
    mpz_abs_add_bit (d, bit_index);
}
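
/* Example (illustrative): for negative operands these functions work
   on the two's complement view described above.  With d = -6
   (two's complement ...11010), mpz_setbit (d, 0) subtracts 2^0 from
   the absolute value, giving -5 = ...11011, and mpz_clrbit (d, 1)
   adds 2^1 to it, giving -8 = ...11000. */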

void
mpz_com (mpz_t r, const mpz_t u)
{
  mpz_neg (r, u);
  mpz_sub_ui (r, r, 1);
}

void
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc & vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is positive, higher limbs don't matter. */
  rn = vx ? un : vn;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  for (i = 0; i < vn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul & vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  assert (vc == 0);

  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul & vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}

void
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc | vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is negative, by sign extension higher limbs
     don't matter. */
  rn = vx ? vn : un;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  for (i = 0; i < vn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul | vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  assert (vc == 0);

  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul | vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}

void
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc ^ vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  rp = MPZ_REALLOC (r, un + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  for (i = 0; i < vn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = (ul ^ vl ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  assert (vc == 0);

  for (; i < un; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = (ul ^ ux) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[un++] = rc;
  else
    un = mpn_normalized_size (rp, un);

  r->_mp_size = rx ? -un : un;
}

static unsigned
gmp_popcount_limb (mp_limb_t x)
{
  unsigned c;

  /* Do 16 bits at a time, to avoid limb-sized constants. */
  for (c = 0; x > 0; x >>= 16)
    {
      unsigned w = ((x >> 1) & 0x5555) + (x & 0x5555);
      w = ((w >> 2) & 0x3333) + (w & 0x3333);
      w = ((w >> 4) & 0x0f0f) + (w & 0x0f0f);
      w = (w >> 8) + (w & 0x00ff);

      c += w;
    }
  return c;
}

mp_bitcnt_t
mpz_popcount (const mpz_t u)
{
  mp_size_t un, i;
  mp_bitcnt_t c;

  un = u->_mp_size;

  if (un < 0)
    return ~(mp_bitcnt_t) 0;

  for (c = 0, i = 0; i < un; i++)
    c += gmp_popcount_limb (u->_mp_d[i]);

  return c;
}
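
/* Editorial note: gmp_popcount_limb is a standard SWAR bit count on
   16-bit chunks: adjacent bits are summed in pairs, then nibbles,
   then bytes.  E.g. x = 0xbeef = 1011111011101111 in binary has
   3 + 3 + 3 + 4 = 13 one bits. */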

mp_bitcnt_t
mpz_hamdist (const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_limb_t uc, vc, ul, vl, comp;
  mp_srcptr up, vp;
  mp_bitcnt_t c;

  un = u->_mp_size;
  vn = v->_mp_size;

  if ( (un ^ vn) < 0)
    return ~(mp_bitcnt_t) 0;

  if (un < 0)
    {
      assert (vn < 0);
      un = -un;
      vn = -vn;
      uc = vc = 1;
      comp = - (mp_limb_t) 1;
    }
  else
    uc = vc = comp = 0;

  up = u->_mp_d;
  vp = v->_mp_d;

  if (un < vn)
    MPN_SRCPTR_SWAP (up, un, vp, vn);

  for (i = 0, c = 0; i < vn; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ comp) + vc;
      vc = vl < vc;

      c += gmp_popcount_limb (ul ^ vl);
    }
  assert (vc == 0);

  for (; i < un; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      c += gmp_popcount_limb (ul ^ comp);
    }

  return c;
}

mp_bitcnt_t
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux, uc;
  unsigned cnt;

  up = u->_mp_d;
  us = u->_mp_size;
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* Past the end there are no 1 bits for u >= 0, or an immediate 1 bit
     for u < 0. Notice this test picks up any u == 0 too. */
  if (i >= un)
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);

  if (us < 0)
    {
      ux = GMP_LIMB_MAX;
      uc = mpn_zero_p (up, i);
    }
  else
    ux = uc = 0;

  limb = (ux ^ up[i]) + uc;
  uc = limb < uc;

  /* Mask to 0 all bits before starting_bit, thus ignoring them. */
  limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));

  while (limb == 0)
    {
      i++;
      if (i == un)
        {
          assert (uc == 0);
          /* For the u > 0 case, this can happen only for the first
             masked limb. For the u < 0 case, it happens when the
             highest limbs of the absolute value are all ones. */
          return (us >= 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
        }
      limb = (ux ^ up[i]) + uc;
      uc = limb < uc;
    }
  gmp_ctz (cnt, limb);
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
}

mp_bitcnt_t
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux, uc;
  unsigned cnt;

  up = u->_mp_d;
  us = u->_mp_size;
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
     u<0. Notice this test picks up all cases of u==0 too. */
  if (i >= un)
    return (us >= 0 ? starting_bit : ~(mp_bitcnt_t) 0);

  if (us < 0)
    {
      ux = GMP_LIMB_MAX;
      uc = mpn_zero_p (up, i);
    }
  else
    ux = uc = 0;

  limb = (ux ^ up[i]) + uc;
  uc = limb < uc;

  /* Mask to 1 all bits before starting_bit, thus ignoring them. */
  limb |= ((mp_limb_t) 1 << (starting_bit % GMP_LIMB_BITS)) - 1;

  while (limb == GMP_LIMB_MAX)
    {
      i++;
      if (i == un)
        {
          assert (uc == 0);
          return (us >= 0 ? un * GMP_LIMB_BITS : ~(mp_bitcnt_t) 0);
        }
      limb = (ux ^ up[i]) + uc;
      uc = limb < uc;
    }
  gmp_ctz (cnt, ~limb);
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
}
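
/* Example (illustrative): for u = 20 = 10100 in binary,
   mpz_scan1 (u, 0) = 2 and mpz_scan1 (u, 3) = 4, while
   mpz_scan0 (u, 2) = 3.  Scanning a non-negative u past its most
   significant end finds only an infinite run of 0 bits, hence the
   ~(mp_bitcnt_t) 0 ("not found") return value above. */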

/* MPZ base conversion. */

size_t
mpz_sizeinbase (const mpz_t u, int base)
{
  mp_size_t un;
  mp_srcptr up;
  mp_ptr tp;
  mp_bitcnt_t bits;
  struct gmp_div_inverse bi;
  size_t ndigits;

  assert (base >= 2);
  assert (base <= 36);

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    return 1;

  up = u->_mp_d;

  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
  switch (base)
    {
    case 2:
      return bits;
    case 4:
      return (bits + 1) / 2;
    case 8:
      return (bits + 2) / 3;
    case 16:
      return (bits + 3) / 4;
    case 32:
      return (bits + 4) / 5;
      /* FIXME: Do something more clever for the common case of base
         10. */
    }

  tp = gmp_xalloc_limbs (un);
  mpn_copyi (tp, up, un);
  mpn_div_qr_1_invert (&bi, base);

  for (ndigits = 0; un > 0; ndigits++)
    {
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
      un -= (tp[un-1] == 0);
    }
  gmp_free (tp);
  return ndigits;
}

char *
mpz_get_str (char *sp, int base, const mpz_t u)
{
  unsigned bits;
  const char *digits;
  mp_size_t un;
  size_t i, sn;

  if (base >= 0)
    {
      digits = "0123456789abcdefghijklmnopqrstuvwxyz";
    }
  else
    {
      base = -base;
      digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    }
  if (base <= 1)
    base = 10;
  if (base > 36)
    return NULL;

  sn = 1 + mpz_sizeinbase (u, base);
  if (!sp)
    sp = gmp_xalloc (1 + sn);
  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    {
      sp[0] = '0';
      sp[1] = '\0';
      return sp;
    }

  i = 0;

  if (u->_mp_size < 0)
    sp[i++] = '-';

  bits = mpn_base_power_of_two_p (base);

  if (bits)
    /* Not modified in this case. */
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
  else
    {
      struct mpn_base_info info;
      mp_ptr tp;

      mpn_get_base_info (&info, base);
      tp = gmp_xalloc_limbs (un);
      mpn_copyi (tp, u->_mp_d, un);

      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
      gmp_free (tp);
    }

  for (; i < sn; i++)
    sp[i] = digits[(unsigned char) sp[i]];

  sp[sn] = '\0';
  return sp;
}

int
mpz_set_str (mpz_t r, const char *sp, int base)
{
  unsigned bits;
  mp_size_t rn, alloc;
  mp_ptr rp;
  size_t sn;
  size_t dn;
  int sign;
  unsigned char *dp;

  assert (base == 0 || (base >= 2 && base <= 36));

  while (isspace( (unsigned char) *sp))
    sp++;

  if (*sp == '-')
    {
      sign = 1;
      sp++;
    }
  else
    sign = 0;

  if (base == 0)
    {
      if (*sp == '0')
        {
          sp++;
          if (*sp == 'x' || *sp == 'X')
            {
              base = 16;
              sp++;
            }
          else if (*sp == 'b' || *sp == 'B')
            {
              base = 2;
              sp++;
            }
          else
            base = 8;
        }
      else
        base = 10;
    }

  sn = strlen (sp);
  dp = gmp_xalloc (sn + (sn == 0));

  for (dn = 0; *sp; sp++)
    {
      unsigned digit;

      if (isspace ((unsigned char) *sp))
        continue;
      if (*sp >= '0' && *sp <= '9')
        digit = *sp - '0';
      else if (*sp >= 'a' && *sp <= 'z')
        digit = *sp - 'a' + 10;
      else if (*sp >= 'A' && *sp <= 'Z')
        digit = *sp - 'A' + 10;
      else
        digit = base; /* fail */

      if (digit >= base)
        {
          gmp_free (dp);
          r->_mp_size = 0;
          return -1;
        }

      dp[dn++] = digit;
    }

  bits = mpn_base_power_of_two_p (base);

  if (bits > 0)
    {
      alloc = (sn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_bits (rp, dp, dn, bits);
    }
  else
    {
      struct mpn_base_info info;
      mpn_get_base_info (&info, base);
      alloc = (sn + info.exp - 1) / info.exp;
      rp = MPZ_REALLOC (r, alloc);
      rn = mpn_set_str_other (rp, dp, dn, base, &info);
    }
  assert (rn <= alloc);
  gmp_free (dp);

  r->_mp_size = sign ? - rn : rn;

  return 0;
}

int
mpz_init_set_str (mpz_t r, const char *sp, int base)
{
  mpz_init (r);
  return mpz_set_str (r, sp, base);
}
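
/* Usage sketch (illustrative): with base = 0 the prefix selects the
   base, as in strtol; "0x1f" parses as 31, "0b101" as 5, and "017"
   as octal 15.

     mpz_t r;
     int ret = mpz_init_set_str (r, "0x1f", 0);

   ret is 0 on success, and -1 (with r set to zero) on a malformed
   string. */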

size_t
mpz_out_str (FILE *stream, int base, const mpz_t x)
{
  char *str;
  size_t len;

  str = mpz_get_str (NULL, base, x);
  len = strlen (str);
  len = fwrite (str, 1, len, stream);
  gmp_free (str);
  return len;
}

static int
gmp_detect_endian (void)
{
  static const int i = 1;
  const unsigned char *p = (const unsigned char *) &i;
  if (*p == 1)
    /* Little endian */
    return -1;
  else
    /* Big endian */
    return 1;
}

/* Import and export. Does not support nails. */
void
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
            size_t nails, const void *src)
{
  const unsigned char *p;
  ptrdiff_t word_step;
  mp_ptr rp;
  mp_size_t rn;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  size_t bytes;
  /* The index where the limb should be stored, when completed. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at the least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
        {
          limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
          if (bytes == sizeof(mp_limb_t))
            {
              rp[i++] = limb;
              bytes = 0;
              limb = 0;
            }
        }
    }
  if (bytes > 0)
    rp[i++] = limb;
  assert (i == rn);

  r->_mp_size = mpn_normalized_size (rp, i);
}

void *
mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
            size_t nails, const mpz_t u)
{
  unsigned char *p;
  ptrdiff_t word_step;
  size_t count, k;
  mp_size_t un;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes left to do in this limb. */
  size_t bytes;
  /* The index where the limb was read. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_export: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);
  assert (size > 0 || u->_mp_size == 0);

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    {
      if (countp)
        *countp = 0;
      return r;
    }

  /* Count bytes in top limb. */
  for (limb = u->_mp_d[un-1], k = 0; limb > 0; k++, limb >>= CHAR_BIT)
    ;

  assert (k > 0);

  count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;

  if (!r)
    r = gmp_xalloc (count * size);

  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) r;

  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at the least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
        {
          if (bytes == 0)
            {
              if (i < un)
                limb = u->_mp_d[i++];
              bytes = sizeof (mp_limb_t);
            }
          *p = limb;
          limb >>= CHAR_BIT;
          bytes--;
        }
    }

  assert (i == un);
  assert (k == count);

  if (countp)
    *countp = count;

  return r;
}
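
/* Round-trip sketch (illustrative): exporting the absolute value of u
   as big-endian bytes and importing them back recovers |u|; the sign
   is not preserved by this pair of functions.

     size_t count;
     unsigned char *bytes = mpz_export (NULL, &count, 1, 1, 1, 0, u);
     mpz_t v;
     mpz_init (v);
     mpz_import (v, count, 1, 1, 1, 0, bytes);

   Here order = 1 (most significant word first), size = 1 byte per
   word, endian = 1 and nails = 0; afterwards v equals |u|. */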