# x86_64-mont.pl
  1. #!/usr/bin/env perl
  2. # ====================================================================
  3. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  4. # project. The module is, however, dual licensed under OpenSSL and
  5. # CRYPTOGAMS licenses depending on where you obtain it. For further
  6. # details see http://www.openssl.org/~appro/cryptogams/.
  7. # ====================================================================
  8. # October 2005.
  9. #
  10. # Montgomery multiplication routine for x86_64. While it gives modest
  11. # 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
  12. # than twice, >2x, as fast. Most common rsa1024 sign is improved by
  13. # respectful 50%. It remains to be seen if loop unrolling and
  14. # dedicated squaring routine can provide further improvement...
  15. # July 2011.
  16. #
  17. # Add dedicated squaring procedure. Performance improvement varies
  18. # from platform to platform, but in average it's ~5%/15%/25%/33%
  19. # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
  20. # August 2011.
  21. #
  22. # Unroll and modulo-schedule inner loops in such manner that they
  23. # are "fallen through" for input lengths of 8, which is critical for
  24. # 1024-bit RSA *sign*. Average performance improvement in comparison
  25. # to *initial* version of this module from 2005 is ~0%/30%/40%/45%
  26. # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
  27. $flavour = shift;
  28. $output = shift;
  29. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  30. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  31. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  32. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  33. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  34. die "can't locate x86_64-xlate.pl";
  35. open STDOUT,"| $^X $xlate $flavour $output";
# int bn_mul_mont(
$rp="%rdi"; # BN_ULONG *rp,
$ap="%rsi"; # const BN_ULONG *ap,
$bp="%rdx"; # const BN_ULONG *bp,
$np="%rcx"; # const BN_ULONG *np,
$n0="%r8"; # const BN_ULONG *n0,
$num="%r9"; # int num);
# Scratch registers for the classic word-by-word Montgomery loop:
# $lo0/$hi0 accumulate ap[j]*bp[i], $hi1 accumulates np[j]*m1,
# $i/$j are the outer/inner loop counters, $m0=bp[i], $m1=tp[0]*n0.
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";
# Generic bn_mul_mont entry point. It dispatches to the 4x-unrolled or
# the dedicated squaring routine when num is a multiple of 4 and >= 8;
# otherwise it falls through to the straightforward one-word loop below.
# Scratch tp[] lives on the stack: 8*(num+2) bytes, 1024-aligned, with
# the original %rsp saved at tp[num+1] (see .Lmul_body comments).
$code=<<___;
.text
.globl bn_mul_mont
.type bn_mul_mont,\@function,6
.align 16
bn_mul_mont:
test \$3,${num}d
jnz .Lmul_enter
cmp \$8,${num}d
jb .Lmul_enter
cmp $ap,$bp
jne .Lmul4x_enter
jmp .Lsqr4x_enter
.align 16
.Lmul_enter:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov ${num}d,${num}d
lea 2($num),%r10
mov %rsp,%r11
neg %r10
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul_body:
mov $bp,%r12 # reassign $bp
___
# From here on $bp lives in %r12: %rdx (the incoming bp) is clobbered
# by every mulq, so bp had to be moved to a preserved scratch register.
$bp="%r12";
$code.=<<___;
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mov ($ap),%rax
xor $i,$i # i=0
xor $j,$j # j=0
mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$lo0
mov ($np),%rax
imulq $lo0,$m1 # "tp[0]"*n0
mov %rdx,$hi0
mulq $m1 # np[0]*m1
add %rax,$lo0 # discarded
mov 8($ap),%rax
adc \$0,%rdx
mov %rdx,$hi1
lea 1($j),$j # j++
jmp .L1st_enter
.align 16
.L1st:
add %rax,$hi1
mov ($ap,$j,8),%rax
adc \$0,%rdx
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
mov $lo0,$hi0
adc \$0,%rdx
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
.L1st_enter:
mulq $m0 # ap[j]*bp[0]
add %rax,$hi0
mov ($np,$j,8),%rax
adc \$0,%rdx
lea 1($j),$j # j++
mov %rdx,$lo0
mulq $m1 # np[j]*m1
cmp $num,$j
jne .L1st
add %rax,$hi1
mov ($ap),%rax # ap[0]
adc \$0,%rdx
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
mov $lo0,$hi0
xor %rdx,%rdx
add $hi0,$hi1
adc \$0,%rdx
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
lea 1($i),$i # i++
jmp .Louter
.align 16
.Louter:
mov ($bp,$i,8),$m0 # m0=bp[i]
xor $j,$j # j=0
mov $n0,$m1
mov (%rsp),$lo0
mulq $m0 # ap[0]*bp[i]
add %rax,$lo0 # ap[0]*bp[i]+tp[0]
mov ($np),%rax
adc \$0,%rdx
imulq $lo0,$m1 # tp[0]*n0
mov %rdx,$hi0
mulq $m1 # np[0]*m1
add %rax,$lo0 # discarded
mov 8($ap),%rax
adc \$0,%rdx
mov 8(%rsp),$lo0 # tp[1]
mov %rdx,$hi1
lea 1($j),$j # j++
jmp .Linner_enter
.align 16
.Linner:
add %rax,$hi1
mov ($ap,$j,8),%rax
adc \$0,%rdx
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov (%rsp,$j,8),$lo0
adc \$0,%rdx
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
.Linner_enter:
mulq $m0 # ap[j]*bp[i]
add %rax,$hi0
mov ($np,$j,8),%rax
adc \$0,%rdx
add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
mov %rdx,$hi0
adc \$0,$hi0
lea 1($j),$j # j++
mulq $m1 # np[j]*m1
cmp $num,$j
jne .Linner
add %rax,$hi1
mov ($ap),%rax # ap[0]
adc \$0,%rdx
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov (%rsp,$j,8),$lo0
adc \$0,%rdx
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$hi1
xor %rdx,%rdx
add $hi0,$hi1
adc \$0,%rdx
add $lo0,$hi1 # pull upmost overflow bit
adc \$0,%rdx
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
lea 1($i),$i # i++
cmp $num,$i
jl .Louter
xor $i,$i # i=0 and clear CF!
mov (%rsp),%rax # tp[0]
lea (%rsp),$ap # borrow ap for tp
mov $num,$j # j=num
jmp .Lsub
.align 16
.Lsub: sbb ($np,$i,8),%rax
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 8($ap,$i,8),%rax # tp[i+1]
lea 1($i),$i # i++
dec $j # doesnn't affect CF!
jnz .Lsub
sbb \$0,%rax # handle upmost overflow bit
xor $i,$i
and %rax,$ap
not %rax
mov $rp,$np
and %rax,$np
mov $num,$j # j=num
or $np,$ap # ap=borrow?tp:rp
.align 16
.Lcopy: # copy or in-place refresh
mov ($ap,$i,8),%rax
mov $i,(%rsp,$i,8) # zap temporary vector
mov %rax,($rp,$i,8) # rp[i]=tp[i]
lea 1($i),$i
sub \$1,$j
jnz .Lcopy
mov 8(%rsp,$num,8),%rsi # restore %rsp
mov \$1,%rax
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lmul_epilogue:
ret
.size bn_mul_mont,.-bn_mul_mont
___
{{{
# 4x modulo-scheduled variant, used when num%4==0 && num>=8 and ap!=bp.
# @A[] holds the rotating ap[j]*bp[i] partial products, @N[] the
# rotating np[j]*m1 partial products (note @N reuses %rdi=$rp, which is
# why $rp is stashed at tp[num+2] in the prologue below).
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type bn_mul4x_mont,\@function,6
.align 16
bn_mul4x_mont:
.Lmul4x_enter:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov ${num}d,${num}d
lea 4($num),%r10
mov %rsp,%r11
neg %r10
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul4x_body:
mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
mov %rdx,%r12 # reassign $bp
___
# As in bn_mul_mont: bp moves to %r12 because mulq clobbers %rdx.
$bp="%r12";
$code.=<<___;
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mov ($ap),%rax
xor $i,$i # i=0
xor $j,$j # j=0
mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$A[0]
mov ($np),%rax
imulq $A[0],$m1 # "tp[0]"*n0
mov %rdx,$A[1]
mulq $m1 # np[0]*m1
add %rax,$A[0] # discarded
mov 8($ap),%rax
adc \$0,%rdx
mov %rdx,$N[1]
mulq $m0
add %rax,$A[1]
mov 8($np),%rax
adc \$0,%rdx
mov %rdx,$A[0]
mulq $m1
add %rax,$N[1]
mov 16($ap),%rax
adc \$0,%rdx
add $A[1],$N[1]
lea 4($j),$j # j++
adc \$0,%rdx
mov $N[1],(%rsp)
mov %rdx,$N[0]
jmp .L1st4x
.align 16
.L1st4x:
mulq $m0 # ap[j]*bp[0]
add %rax,$A[0]
mov -16($np,$j,8),%rax
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov -8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[0]
add %rax,$A[1]
mov -8($np,$j,8),%rax
adc \$0,%rdx
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov ($ap,$j,8),%rax
adc \$0,%rdx
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
mulq $m0 # ap[j]*bp[0]
add %rax,$A[0]
mov ($np,$j,8),%rax
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov 8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[0]
add %rax,$A[1]
mov 8($np,$j,8),%rax
adc \$0,%rdx
lea 4($j),$j # j++
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov -16($ap,$j,8),%rax
adc \$0,%rdx
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
cmp $num,$j
jl .L1st4x
mulq $m0 # ap[j]*bp[0]
add %rax,$A[0]
mov -16($np,$j,8),%rax
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov -8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[0]
add %rax,$A[1]
mov -8($np,$j,8),%rax
adc \$0,%rdx
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov ($ap),%rax # ap[0]
adc \$0,%rdx
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
adc \$0,%rdx
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
xor $N[1],$N[1]
add $A[0],$N[0]
adc \$0,$N[1]
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
lea 1($i),$i # i++
.align 4
.Louter4x:
mov ($bp,$i,8),$m0 # m0=bp[i]
xor $j,$j # j=0
mov (%rsp),$A[0]
mov $n0,$m1
mulq $m0 # ap[0]*bp[i]
add %rax,$A[0] # ap[0]*bp[i]+tp[0]
mov ($np),%rax
adc \$0,%rdx
imulq $A[0],$m1 # tp[0]*n0
mov %rdx,$A[1]
mulq $m1 # np[0]*m1
add %rax,$A[0] # "$N[0]", discarded
mov 8($ap),%rax
adc \$0,%rdx
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[i]
add %rax,$A[1]
mov 8($np),%rax
adc \$0,%rdx
add 8(%rsp),$A[1] # +tp[1]
adc \$0,%rdx
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov 16($ap),%rax
adc \$0,%rdx
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
lea 4($j),$j # j+=2
adc \$0,%rdx
mov $N[1],(%rsp) # tp[j-1]
mov %rdx,$N[0]
jmp .Linner4x
.align 16
.Linner4x:
mulq $m0 # ap[j]*bp[i]
add %rax,$A[0]
mov -16($np,$j,8),%rax
adc \$0,%rdx
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov -8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0]
adc \$0,%rdx
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[i]
add %rax,$A[1]
mov -8($np,$j,8),%rax
adc \$0,%rdx
add -8(%rsp,$j,8),$A[1]
adc \$0,%rdx
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov ($ap,$j,8),%rax
adc \$0,%rdx
add $A[1],$N[1]
adc \$0,%rdx
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
mulq $m0 # ap[j]*bp[i]
add %rax,$A[0]
mov ($np,$j,8),%rax
adc \$0,%rdx
add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov 8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0]
adc \$0,%rdx
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[i]
add %rax,$A[1]
mov 8($np,$j,8),%rax
adc \$0,%rdx
add 8(%rsp,$j,8),$A[1]
adc \$0,%rdx
lea 4($j),$j # j++
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov -16($ap,$j,8),%rax
adc \$0,%rdx
add $A[1],$N[1]
adc \$0,%rdx
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
cmp $num,$j
jl .Linner4x
mulq $m0 # ap[j]*bp[i]
add %rax,$A[0]
mov -16($np,$j,8),%rax
adc \$0,%rdx
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
adc \$0,%rdx
mov %rdx,$A[1]
mulq $m1 # np[j]*m1
add %rax,$N[0]
mov -8($ap,$j,8),%rax
adc \$0,%rdx
add $A[0],$N[0]
adc \$0,%rdx
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[1]
mulq $m0 # ap[j]*bp[i]
add %rax,$A[1]
mov -8($np,$j,8),%rax
adc \$0,%rdx
add -8(%rsp,$j,8),$A[1]
adc \$0,%rdx
lea 1($i),$i # i++
mov %rdx,$A[0]
mulq $m1 # np[j]*m1
add %rax,$N[1]
mov ($ap),%rax # ap[0]
adc \$0,%rdx
add $A[1],$N[1]
adc \$0,%rdx
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
xor $N[1],$N[1]
add $A[0],$N[0]
adc \$0,$N[1]
add (%rsp,$num,8),$N[0] # pull upmost overflow bit
adc \$0,$N[1]
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
cmp $num,$i
jl .Louter4x
___
{
# Tail: conditional subtraction of np (4 words per iteration, borrow
# carried in CF), then a branch-free select between tp and rp via the
# and/not/and/or mask dance, and an SSE2 copy that simultaneously zaps
# the temporary vector on the stack.
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
mov 16(%rsp,$num,8),$rp # restore $rp
mov 0(%rsp),@ri[0] # tp[0]
pxor %xmm0,%xmm0
mov 8(%rsp),@ri[1] # tp[1]
shr \$2,$num # num/=4
lea (%rsp),$ap # borrow ap for tp
xor $i,$i # i=0 and clear CF!
sub 0($np),@ri[0]
mov 16($ap),@ri[2] # tp[2]
mov 24($ap),@ri[3] # tp[3]
sbb 8($np),@ri[1]
lea -1($num),$j # j=num/4-1
jmp .Lsub4x
.align 16
.Lsub4x:
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 16($np,$i,8),@ri[2]
mov 32($ap,$i,8),@ri[0] # tp[i+1]
mov 40($ap,$i,8),@ri[1]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 32($np,$i,8),@ri[0]
mov 48($ap,$i,8),@ri[2]
mov 56($ap,$i,8),@ri[3]
sbb 40($np,$i,8),@ri[1]
lea 4($i),$i # i++
dec $j # doesnn't affect CF!
jnz .Lsub4x
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 32($ap,$i,8),@ri[0] # load overflow bit
sbb 16($np,$i,8),@ri[2]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb \$0,@ri[0] # handle upmost overflow bit
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
xor $i,$i # i=0
and @ri[0],$ap
not @ri[0]
mov $rp,$np
and @ri[0],$np
lea -1($num),$j
or $np,$ap # ap=borrow?tp:rp
movdqu ($ap),%xmm1
movdqa %xmm0,(%rsp)
movdqu %xmm1,($rp)
jmp .Lcopy4x
.align 16
.Lcopy4x: # copy or in-place refresh
movdqu 16($ap,$i),%xmm2
movdqu 32($ap,$i),%xmm1
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
movdqa %xmm0,32(%rsp,$i)
movdqu %xmm1,32($rp,$i)
lea 32($i),$i
dec $j
jnz .Lcopy4x
shl \$2,$num
movdqu 16($ap,$i),%xmm2
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
___
}
# Common epilogue: recover the caller's %rsp saved at tp[num+1] and
# restore the six callee-saved registers pushed in the prologue.
$code.=<<___;
mov 8(%rsp,$num,8),%rsi # restore %rsp
mov \$1,%rax
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lmul4x_epilogue:
ret
.size bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
  608. {{{
  609. ######################################################################
  610. # void bn_sqr4x_mont(
  611. my $rptr="%rdi"; # const BN_ULONG *rptr,
  612. my $aptr="%rsi"; # const BN_ULONG *aptr,
  613. my $bptr="%rdx"; # not used
  614. my $nptr="%rcx"; # const BN_ULONG *nptr,
  615. my $n0 ="%r8"; # const BN_ULONG *n0);
  616. my $num ="%r9"; # int num, has to be divisible by 4 and
  617. # not less than 8
  618. my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
  619. my @A0=("%r10","%r11");
  620. my @A1=("%r12","%r13");
  621. my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
  622. $code.=<<___;
  623. .type bn_sqr4x_mont,\@function,6
  624. .align 16
  625. bn_sqr4x_mont:
  626. .Lsqr4x_enter:
  627. push %rbx
  628. push %rbp
  629. push %r12
  630. push %r13
  631. push %r14
  632. push %r15
  633. shl \$3,${num}d # convert $num to bytes
  634. xor %r10,%r10
  635. mov %rsp,%r11 # put aside %rsp
  636. sub $num,%r10 # -$num
  637. mov ($n0),$n0 # *n0
  638. lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
  639. and \$-1024,%rsp # minimize TLB usage
  640. ##############################################################
  641. # Stack layout
  642. #
  643. # +0 saved $num, used in reduction section
  644. # +8 &t[2*$num], used in reduction section
  645. # +32 saved $rptr
  646. # +40 saved $nptr
  647. # +48 saved *n0
  648. # +56 saved %rsp
  649. # +64 t[2*$num]
  650. #
  651. mov $rptr,32(%rsp) # save $rptr
  652. mov $nptr,40(%rsp)
  653. mov $n0, 48(%rsp)
  654. mov %r11, 56(%rsp) # save original %rsp
  655. .Lsqr4x_body:
  656. ##############################################################
  657. # Squaring part:
  658. #
  659. # a) multiply-n-add everything but a[i]*a[i];
  660. # b) shift result of a) by 1 to the left and accumulate
  661. # a[i]*a[i] products;
  662. #
  663. lea 32(%r10),$i # $i=-($num-32)
  664. lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
  665. mov $num,$j # $j=$num
  666. # comments apply to $num==8 case
  667. mov -32($aptr,$i),$a0 # a[0]
  668. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  669. mov -24($aptr,$i),%rax # a[1]
  670. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  671. mov -16($aptr,$i),$ai # a[2]
  672. mov %rax,$a1
  673. mul $a0 # a[1]*a[0]
  674. mov %rax,$A0[0] # a[1]*a[0]
  675. mov $ai,%rax # a[2]
  676. mov %rdx,$A0[1]
  677. mov $A0[0],-24($tptr,$i) # t[1]
  678. xor $A0[0],$A0[0]
  679. mul $a0 # a[2]*a[0]
  680. add %rax,$A0[1]
  681. mov $ai,%rax
  682. adc %rdx,$A0[0]
  683. mov $A0[1],-16($tptr,$i) # t[2]
  684. lea -16($i),$j # j=-16
  685. mov 8($aptr,$j),$ai # a[3]
  686. mul $a1 # a[2]*a[1]
  687. mov %rax,$A1[0] # a[2]*a[1]+t[3]
  688. mov $ai,%rax
  689. mov %rdx,$A1[1]
  690. xor $A0[1],$A0[1]
  691. add $A1[0],$A0[0]
  692. lea 16($j),$j
  693. adc \$0,$A0[1]
  694. mul $a0 # a[3]*a[0]
  695. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  696. mov $ai,%rax
  697. adc %rdx,$A0[1]
  698. mov $A0[0],-8($tptr,$j) # t[3]
  699. jmp .Lsqr4x_1st
  700. .align 16
  701. .Lsqr4x_1st:
  702. mov ($aptr,$j),$ai # a[4]
  703. xor $A1[0],$A1[0]
  704. mul $a1 # a[3]*a[1]
  705. add %rax,$A1[1] # a[3]*a[1]+t[4]
  706. mov $ai,%rax
  707. adc %rdx,$A1[0]
  708. xor $A0[0],$A0[0]
  709. add $A1[1],$A0[1]
  710. adc \$0,$A0[0]
  711. mul $a0 # a[4]*a[0]
  712. add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
  713. mov $ai,%rax # a[3]
  714. adc %rdx,$A0[0]
  715. mov $A0[1],($tptr,$j) # t[4]
  716. mov 8($aptr,$j),$ai # a[5]
  717. xor $A1[1],$A1[1]
  718. mul $a1 # a[4]*a[3]
  719. add %rax,$A1[0] # a[4]*a[3]+t[5]
  720. mov $ai,%rax
  721. adc %rdx,$A1[1]
  722. xor $A0[1],$A0[1]
  723. add $A1[0],$A0[0]
  724. lea 16($j),$j
  725. adc \$0,$A0[1]
  726. mul $a0 # a[5]*a[2]
  727. add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
  728. mov $ai,%rax
  729. adc %rdx,$A0[1]
  730. mov $A0[0],-8($tptr,$j) # t[5]
  731. mov ($aptr,$j),$ai # a[6]
  732. xor $A1[0],$A1[0]
  733. mul $a1 # a[5]*a[3]
  734. add %rax,$A1[1] # a[5]*a[3]+t[6]
  735. mov $ai,%rax
  736. adc %rdx,$A1[0]
  737. xor $A0[0],$A0[0]
  738. add $A1[1],$A0[1]
  739. adc \$0,$A0[0]
  740. mul $a0 # a[6]*a[2]
  741. add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
  742. mov $ai,%rax # a[3]
  743. adc %rdx,$A0[0]
  744. mov $A0[1],($tptr,$j) # t[6]
  745. mov 8($aptr,$j),$ai # a[7]
  746. xor $A1[1],$A1[1]
  747. mul $a1 # a[6]*a[5]
  748. add %rax,$A1[0] # a[6]*a[5]+t[7]
  749. mov $ai,%rax
  750. adc %rdx,$A1[1]
  751. xor $A0[1],$A0[1]
  752. add $A1[0],$A0[0]
  753. lea 16($j),$j
  754. adc \$0,$A0[1]
  755. mul $a0 # a[7]*a[4]
  756. add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
  757. mov $ai,%rax
  758. adc %rdx,$A0[1]
  759. mov $A0[0],-8($tptr,$j) # t[7]
  760. cmp \$0,$j
  761. jne .Lsqr4x_1st
  762. xor $A1[0],$A1[0]
  763. add $A0[1],$A1[1]
  764. adc \$0,$A1[0]
  765. mul $a1 # a[7]*a[5]
  766. add %rax,$A1[1]
  767. adc %rdx,$A1[0]
  768. mov $A1[1],($tptr) # t[8]
  769. lea 16($i),$i
  770. mov $A1[0],8($tptr) # t[9]
  771. jmp .Lsqr4x_outer
  772. .align 16
  773. .Lsqr4x_outer: # comments apply to $num==6 case
  774. mov -32($aptr,$i),$a0 # a[0]
  775. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  776. mov -24($aptr,$i),%rax # a[1]
  777. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  778. mov -16($aptr,$i),$ai # a[2]
  779. mov %rax,$a1
  780. mov -24($tptr,$i),$A0[0] # t[1]
  781. xor $A0[1],$A0[1]
  782. mul $a0 # a[1]*a[0]
  783. add %rax,$A0[0] # a[1]*a[0]+t[1]
  784. mov $ai,%rax # a[2]
  785. adc %rdx,$A0[1]
  786. mov $A0[0],-24($tptr,$i) # t[1]
  787. xor $A0[0],$A0[0]
  788. add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
  789. adc \$0,$A0[0]
  790. mul $a0 # a[2]*a[0]
  791. add %rax,$A0[1]
  792. mov $ai,%rax
  793. adc %rdx,$A0[0]
  794. mov $A0[1],-16($tptr,$i) # t[2]
  795. lea -16($i),$j # j=-16
  796. xor $A1[0],$A1[0]
  797. mov 8($aptr,$j),$ai # a[3]
  798. xor $A1[1],$A1[1]
  799. add 8($tptr,$j),$A1[0]
  800. adc \$0,$A1[1]
  801. mul $a1 # a[2]*a[1]
  802. add %rax,$A1[0] # a[2]*a[1]+t[3]
  803. mov $ai,%rax
  804. adc %rdx,$A1[1]
  805. xor $A0[1],$A0[1]
  806. add $A1[0],$A0[0]
  807. adc \$0,$A0[1]
  808. mul $a0 # a[3]*a[0]
  809. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  810. mov $ai,%rax
  811. adc %rdx,$A0[1]
  812. mov $A0[0],8($tptr,$j) # t[3]
  813. lea 16($j),$j
  814. jmp .Lsqr4x_inner
  815. .align 16
  816. .Lsqr4x_inner:
  817. mov ($aptr,$j),$ai # a[4]
  818. xor $A1[0],$A1[0]
  819. add ($tptr,$j),$A1[1]
  820. adc \$0,$A1[0]
  821. mul $a1 # a[3]*a[1]
  822. add %rax,$A1[1] # a[3]*a[1]+t[4]
  823. mov $ai,%rax
  824. adc %rdx,$A1[0]
  825. xor $A0[0],$A0[0]
  826. add $A1[1],$A0[1]
  827. adc \$0,$A0[0]
  828. mul $a0 # a[4]*a[0]
  829. add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
  830. mov $ai,%rax # a[3]
  831. adc %rdx,$A0[0]
  832. mov $A0[1],($tptr,$j) # t[4]
  833. mov 8($aptr,$j),$ai # a[5]
  834. xor $A1[1],$A1[1]
  835. add 8($tptr,$j),$A1[0]
  836. adc \$0,$A1[1]
  837. mul $a1 # a[4]*a[3]
  838. add %rax,$A1[0] # a[4]*a[3]+t[5]
  839. mov $ai,%rax
  840. adc %rdx,$A1[1]
  841. xor $A0[1],$A0[1]
  842. add $A1[0],$A0[0]
  843. lea 16($j),$j # j++
  844. adc \$0,$A0[1]
  845. mul $a0 # a[5]*a[2]
  846. add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
  847. mov $ai,%rax
  848. adc %rdx,$A0[1]
  849. mov $A0[0],-8($tptr,$j) # t[5]
  850. cmp \$0,$j
  851. jne .Lsqr4x_inner
  852. xor $A1[0],$A1[0]
  853. add $A0[1],$A1[1]
  854. adc \$0,$A1[0]
  855. mul $a1 # a[5]*a[3]
  856. add %rax,$A1[1]
  857. adc %rdx,$A1[0]
  858. mov $A1[1],($tptr) # t[6]
  859. mov $A1[0],8($tptr) # t[7]
  860. add \$16,$i
  861. jnz .Lsqr4x_outer
  862. # comments apply to $num==4 case
  863. mov -32($aptr),$a0 # a[0]
  864. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  865. mov -24($aptr),%rax # a[1]
  866. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  867. mov -16($aptr),$ai # a[2]
  868. mov %rax,$a1
  869. mov -24($tptr),$A0[0] # t[1]
  870. xor $A0[1],$A0[1]
  871. mul $a0 # a[1]*a[0]
  872. add %rax,$A0[0] # a[1]*a[0]+t[1]
  873. mov $ai,%rax # a[2]
  874. adc %rdx,$A0[1]
  875. mov $A0[0],-24($tptr) # t[1]
  876. xor $A0[0],$A0[0]
  877. add -16($tptr),$A0[1] # a[2]*a[0]+t[2]
  878. adc \$0,$A0[0]
  879. mul $a0 # a[2]*a[0]
  880. add %rax,$A0[1]
  881. mov $ai,%rax
  882. adc %rdx,$A0[0]
  883. mov $A0[1],-16($tptr) # t[2]
  884. xor $A1[0],$A1[0]
  885. mov -8($aptr),$ai # a[3]
  886. xor $A1[1],$A1[1]
  887. add -8($tptr),$A1[0]
  888. adc \$0,$A1[1]
  889. mul $a1 # a[2]*a[1]
  890. add %rax,$A1[0] # a[2]*a[1]+t[3]
  891. mov $ai,%rax
  892. adc %rdx,$A1[1]
  893. xor $A0[1],$A0[1]
  894. add $A1[0],$A0[0]
  895. adc \$0,$A0[1]
  896. mul $a0 # a[3]*a[0]
  897. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  898. mov $ai,%rax
  899. adc %rdx,$A0[1]
  900. mov $A0[0],-8($tptr) # t[3]
  901. xor $A1[0],$A1[0]
  902. add $A0[1],$A1[1]
  903. adc \$0,$A1[0]
  904. mul $a1 # a[3]*a[1]
  905. add %rax,$A1[1]
  906. mov -16($aptr),%rax # a[2]
  907. adc %rdx,$A1[0]
  908. mov $A1[1],($tptr) # t[4]
  909. mov $A1[0],8($tptr) # t[5]
  910. mul $ai # a[2]*a[3]
  911. ___
{
# Doubling pass of the squaring: the off-diagonal cross-products accumulated
# in t[] are shifted left by one bit and the diagonal squares a[i]^2 are
# added in, four result words per loop iteration.  $shift carries the bit
# shifted out of the previous word pair; $carry preserves the addition carry
# across the mul (mul clobbers the flags, hence the neg/sbb save-restore).
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
$code.=<<___;
	add	\$16,$i
	xor	$shift,$shift
	sub	$num,$i			# $i=16-$num
	xor	$carry,$carry

	add	$A1[0],%rax		# t[5]
	adc	\$0,%rdx
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]

	mov	-16($aptr,$i),%rax	# a[0]
	lea	64(%rsp,$num,2),$tptr
	xor	$A0[0],$A0[0]		# t[0]
	mov	-24($tptr,$i,2),$A0[1]	# t[1]

	# peel the first two word pairs before entering the main loop
	# NOTE(review): $j appears to be zero here, so the lea computes a
	# plain doubling — confirm against the function prologue (not in view)
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]
	lea	16($i),$i
	mov	$S[3],-40($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add

.align	16
.Lsqr4x_shift_n_add:
	# main loop: two double-word groups per iteration, $i advances by 32
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	mov	$S[3],-8($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],0($tptr,$i,2)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],8($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],16($tptr,$i,2)
	adc	%rdx,$S[3]
	mov	$S[3],24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	add	\$32,$i
	jnz	.Lsqr4x_shift_n_add

	# tail: last two word pairs, a[i+1] prefetch no longer needed
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	adc	%rdx,$S[1]

	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	adc	%rax,$S[2]
	adc	%rdx,$S[3]
	mov	$S[2],-16($tptr)
	mov	$S[3],-8($tptr)
___
}
  1060. ##############################################################
  1061. # Montgomery reduction part, "word-by-word" algorithm.
  1062. #
{
# Word-by-word Montgomery reduction of the double-width value in t[].
# For each group of limbs a multiplier m = t[0]*n0 mod 2^64 is chosen so
# that t + m*n has its low limb equal to zero; two such reductions (m0, m1)
# are interleaved per outer iteration.  Lines tagged "modsched" are hoisted
# out of their natural position (modulo scheduling) to hide mul latency.
my ($topbit,$nptr)=("%rbp",$aptr);
my ($m0,$m1)=($a0,$a1);
my @Ni=("%rbx","%r9");
$code.=<<___;
	mov	40(%rsp),$nptr		# restore $nptr
	mov	48(%rsp),$n0		# restore *n0
	xor	$j,$j
	mov	$num,0(%rsp)		# save $num
	sub	$num,$j			# $j=-$num
	mov	64(%rsp),$A0[0]		# t[0]		# modsched #
	mov	$n0,$m0			#		# modsched #
	lea	64(%rsp,$num,2),%rax	# end of t[] buffer
	lea	64(%rsp,$num),$tptr	# end of t[] window
	mov	%rax,8(%rsp)		# save end of t[] buffer
	lea	($nptr,$num),$nptr	# end of n[] buffer
	xor	$topbit,$topbit		# $topbit=0

	mov	0($nptr,$j),%rax	# n[0]		# modsched #
	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	imulq	$A0[0],$m0		# m0=t[0]*n0	# modsched #
	mov	%rax,$Ni[0]		#		# modsched #
	jmp	.Lsqr4x_mont_outer

.align	16
.Lsqr4x_mont_outer:
	xor	$A0[1],$A0[1]
	mul	$m0			# n[0]*m0
	add	%rax,$A0[0]		# n[0]*m0+t[0]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]
	mov	$n0,$m1

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[1]*m0
	add	%rax,$A0[1]		# n[1]*m0+t[1]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]

	# second multiplier is derived from the already-reduced t[1]
	imulq	$A0[1],$m1

	mov	16($nptr,$j),$Ni[0]	# n[2]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[0]*m1
	add	%rax,$A1[0]		# n[0]*m1+"t[1]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[1]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[2]*m0
	add	%rax,$A0[0]		# n[2]*m0+t[2]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[3]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[1]*m1
	add	%rax,$A1[1]		# n[1]*m1+"t[2]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[2]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[3]*m0
	add	%rax,$A0[1]		# n[3]*m0+t[3]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	jmp	.Lsqr4x_mont_inner

.align	16
.Lsqr4x_mont_inner:
	# inner loop: four n[] limbs per iteration, m0 and m1 chains interleaved
	mov	($nptr,$j),$Ni[0]	# n[4]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[2]*m1
	add	%rax,$A1[0]		# n[2]*m1+"t[3]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr,$j)	# "t[3]"

	xor	$A0[1],$A0[1]
	add	($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[4]*m0
	add	%rax,$A0[0]		# n[4]*m0+t[4]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	8($nptr,$j),$Ni[1]	# n[5]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[3]*m1
	add	%rax,$A1[1]		# n[3]*m1+"t[4]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr,$j)	# "t[4]"

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[5]*m0
	add	%rax,$A0[1]		# n[5]*m0+t[5]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]

	mov	16($nptr,$j),$Ni[0]	# n[6]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[4]*m1
	add	%rax,$A1[0]		# n[4]*m1+"t[5]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[5]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[6]*m0
	add	%rax,$A0[0]		# n[6]*m0+t[6]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[7]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[5]*m1
	add	%rax,$A1[1]		# n[5]*m1+"t[6]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[6]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[7]*m0
	add	%rax,$A0[1]		# n[7]*m0+t[7]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	cmp	\$0,$j
	jne	.Lsqr4x_mont_inner

	# outer-loop tail: finish m1 chain, fold in $topbit carry from the
	# previous round, and pre-compute m0 for the next round (modsched)
	sub	0(%rsp),$j		# $j=-$num	# modsched #
	mov	$n0,$m0			#		# modsched #

	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[6]*m1
	add	%rax,$A1[0]		# n[6]*m1+"t[7]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr)	# "t[7]"

	xor	$A0[1],$A0[1]
	add	($tptr),$A0[0]		# +t[8]
	adc	\$0,$A0[1]
	mov	0($nptr,$j),$Ni[0]	# n[0]		# modsched #
	add	$topbit,$A0[0]
	adc	\$0,$A0[1]

	imulq	16($tptr,$j),$m0	# m0=t[0]*n0	# modsched #
	xor	$A1[0],$A1[0]
	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	add	$A0[0],$A1[1]
	mov	16($tptr,$j),$A0[0]	# t[0]		# modsched #
	adc	\$0,$A1[0]
	mul	$m1			# n[7]*m1
	add	%rax,$A1[1]		# n[7]*m1+"t[8]"
	mov	$Ni[0],%rax		#		# modsched #
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr)		# "t[8]"

	xor	$topbit,$topbit
	add	8($tptr),$A1[0]		# +t[9]
	adc	$topbit,$topbit
	add	$A0[1],$A1[0]
	lea	16($tptr),$tptr		# "t[$num]>>128"
	adc	\$0,$topbit
	mov	$A1[0],-8($tptr)	# "t[9]"
	cmp	8(%rsp),$tptr		# are we done?
	jb	.Lsqr4x_mont_outer

	mov	0(%rsp),$num		# restore $num
	mov	$topbit,($tptr)		# save $topbit
___
}
  1244. ##############################################################
  1245. # Post-condition, 4x unrolled copy from bn_mul_mont
  1246. #
{
# Post-condition: compute tp - np with borrow propagation, then select the
# correct result without a data-dependent branch (the and/not/or mask on the
# two pointers picks tp on borrow, rp otherwise), copy it to rp[], and zap
# the temporary vector on the stack.  4x unrolled, borrowed from bn_mul_mont.
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
$code.=<<___;
	mov	64(%rsp,$num),@ri[0]	# tp[0]
	lea	64(%rsp,$num),$tptr	# upper half of t[2*$num] holds result
	mov	40(%rsp),$nptr		# restore $nptr
	shr	\$5,$num		# num/4
	mov	8($tptr),@ri[1]		# t[1]
	xor	$i,$i			# i=0 and clear CF!

	mov	32(%rsp),$rptr		# restore $rptr
	sub	0($nptr),@ri[0]
	mov	16($tptr),@ri[2]	# t[2]
	mov	24($tptr),@ri[3]	# t[3]
	sbb	8($nptr),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsqr4x_sub
.align	16
.Lsqr4x_sub:
	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($nptr,$i,8),@ri[2]
	mov	32($tptr,$i,8),@ri[0]	# tp[i+1]
	mov	40($tptr,$i,8),@ri[1]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($nptr,$i,8),@ri[0]
	mov	48($tptr,$i,8),@ri[2]
	mov	56($tptr,$i,8),@ri[3]
	sbb	40($nptr,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsqr4x_sub

	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($tptr,$i,8),@ri[0]	# load overflow bit
	sbb	16($nptr,$i,8),@ri[2]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	# borrow mask is now all-ones (keep tp) or all-zeros (keep rp)
	and	@ri[0],$tptr
	not	@ri[0]
	mov	$rptr,$nptr
	and	@ri[0],$nptr
	lea	-1($num),$j
	or	$nptr,$tptr		# tp=borrow?tp:rp

	pxor	%xmm0,%xmm0
	lea	64(%rsp,$num,8),$nptr
	movdqu	($tptr),%xmm1
	lea	($nptr,$num,8),$nptr
	movdqa	%xmm0,64(%rsp)		# zap lower half of temporary vector
	movdqa	%xmm0,($nptr)		# zap upper half of temporary vector
	movdqu	%xmm1,($rptr)
	jmp	.Lsqr4x_copy
.align	16
.Lsqr4x_copy:				# copy or in-place refresh
	movdqu	16($tptr,$i),%xmm2
	movdqu	32($tptr,$i),%xmm1
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,96(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqa	%xmm0,32($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
	movdqu	%xmm1,32($rptr,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lsqr4x_copy

	movdqu	16($tptr,$i),%xmm2
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
___
}
# Epilogue: return 1 in %rax, restore the six callee-saved registers from
# the save area recorded at 56(%rsp) by the prologue, and unwind the frame.
$code.=<<___;
	mov	56(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lsqr4x_epilogue:
	ret
.size	bn_sqr4x_mont,.-bn_sqr4x_mont
___
  1337. }}}
# Identification string embedded in the object file (the \@ keeps Perl from
# interpolating the address inside the heredoc).
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___
  1342. # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
  1343. # CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
# Win64 structured-exception-handling support: two unwind handlers plus the
# .pdata/.xdata tables that bind them to the three exported functions.
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	# HandlerData[] holds image-relative prologue-end/epilogue labels;
	# outside that range the faulting frame has nothing to restore
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	# same idea as mul_handler, but bn_sqr4x_mont keeps its saved-%rsp at a
	# fixed slot (56(%rax)), so the body/epilogue labels are hard-coded
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lsqr4x_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lsqr4x_epilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	56(%rax),%rax		# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq
	# the rep movsq above copies 154 qwords of CONTEXT into ContextRecord

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr4x_mont
	.rva	.LSEH_end_bn_sqr4x_mont
	.rva	.LSEH_info_bn_sqr4x_mont

.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
___
}
  1492. print $code;
  1493. close STDOUT;