# x86_64-mont.pl
  1. #!/usr/bin/env perl
  2. # ====================================================================
  3. # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
  4. # project. The module is, however, dual licensed under OpenSSL and
  5. # CRYPTOGAMS licenses depending on where you obtain it. For further
  6. # details see http://www.openssl.org/~appro/cryptogams/.
  7. # ====================================================================
  8. # October 2005.
  9. #
  10. # Montgomery multiplication routine for x86_64. While it gives modest
  11. # 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
  12. # than twice, >2x, as fast. Most common rsa1024 sign is improved by
  13. # respectful 50%. It remains to be seen if loop unrolling and
  14. # dedicated squaring routine can provide further improvement...
  15. # July 2011.
  16. #
  17. # Add dedicated squaring procedure. Performance improvement varies
  18. # from platform to platform, but in average it's ~5%/15%/25%/33%
  19. # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
  20. # August 2011.
  21. #
  22. # Unroll and modulo-schedule inner loops in such manner that they
  23. # are "fallen through" for input lengths of 8, which is critical for
  24. # 1024-bit RSA *sign*. Average performance improvement in comparison
  25. # to *initial* version of this module from 2005 is ~0%/30%/40%/45%
  26. # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
  27. $flavour = shift;
  28. $output = shift;
  29. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  30. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  31. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  32. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  33. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  34. die "can't locate x86_64-xlate.pl";
  35. open STDOUT,"| $^X $xlate $flavour $output";
  36. # int bn_mul_mont(
  37. $rp="%rdi"; # BN_ULONG *rp,
  38. $ap="%rsi"; # const BN_ULONG *ap,
  39. $bp="%rdx"; # const BN_ULONG *bp,
  40. $np="%rcx"; # const BN_ULONG *np,
  41. $n0="%r8"; # const BN_ULONG *n0,
  42. $num="%r9"; # int num);
  43. $lo0="%r10";
  44. $hi0="%r11";
  45. $hi1="%r13";
  46. $i="%r14";
  47. $j="%r15";
  48. $m0="%rbx";
  49. $m1="%rbp";
  50. $code=<<___;
  51. .text
  52. .globl bn_mul_mont
  53. .type bn_mul_mont,\@function,6
  54. .align 16
  55. bn_mul_mont:
  56. test \$3,${num}d
  57. jnz .Lmul_enter
  58. cmp \$8,${num}d
  59. jb .Lmul_enter
  60. cmp $ap,$bp
  61. jne .Lmul4x_enter
  62. jmp .Lsqr4x_enter
  63. .align 16
  64. .Lmul_enter:
  65. push %rbx
  66. push %rbp
  67. push %r12
  68. push %r13
  69. push %r14
  70. push %r15
  71. mov ${num}d,${num}d
  72. lea 2($num),%r10
  73. mov %rsp,%r11
  74. neg %r10
  75. lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
  76. and \$-1024,%rsp # minimize TLB usage
  77. mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
  78. .Lmul_body:
  79. mov $bp,%r12 # reassign $bp
  80. ___
  81. $bp="%r12";
  82. $code.=<<___;
  83. mov ($n0),$n0 # pull n0[0] value
  84. mov ($bp),$m0 # m0=bp[0]
  85. mov ($ap),%rax
  86. xor $i,$i # i=0
  87. xor $j,$j # j=0
  88. mov $n0,$m1
  89. mulq $m0 # ap[0]*bp[0]
  90. mov %rax,$lo0
  91. mov ($np),%rax
  92. imulq $lo0,$m1 # "tp[0]"*n0
  93. mov %rdx,$hi0
  94. mulq $m1 # np[0]*m1
  95. add %rax,$lo0 # discarded
  96. mov 8($ap),%rax
  97. adc \$0,%rdx
  98. mov %rdx,$hi1
  99. lea 1($j),$j # j++
  100. jmp .L1st_enter
  101. .align 16
  102. .L1st:
  103. add %rax,$hi1
  104. mov ($ap,$j,8),%rax
  105. adc \$0,%rdx
  106. add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
  107. mov $lo0,$hi0
  108. adc \$0,%rdx
  109. mov $hi1,-16(%rsp,$j,8) # tp[j-1]
  110. mov %rdx,$hi1
  111. .L1st_enter:
  112. mulq $m0 # ap[j]*bp[0]
  113. add %rax,$hi0
  114. mov ($np,$j,8),%rax
  115. adc \$0,%rdx
  116. lea 1($j),$j # j++
  117. mov %rdx,$lo0
  118. mulq $m1 # np[j]*m1
  119. cmp $num,$j
  120. jne .L1st
  121. add %rax,$hi1
  122. mov ($ap),%rax # ap[0]
  123. adc \$0,%rdx
  124. add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
  125. adc \$0,%rdx
  126. mov $hi1,-16(%rsp,$j,8) # tp[j-1]
  127. mov %rdx,$hi1
  128. mov $lo0,$hi0
  129. xor %rdx,%rdx
  130. add $hi0,$hi1
  131. adc \$0,%rdx
  132. mov $hi1,-8(%rsp,$num,8)
  133. mov %rdx,(%rsp,$num,8) # store upmost overflow bit
  134. lea 1($i),$i # i++
  135. jmp .Louter
  136. .align 16
  137. .Louter:
  138. mov ($bp,$i,8),$m0 # m0=bp[i]
  139. xor $j,$j # j=0
  140. mov $n0,$m1
  141. mov (%rsp),$lo0
  142. mulq $m0 # ap[0]*bp[i]
  143. add %rax,$lo0 # ap[0]*bp[i]+tp[0]
  144. mov ($np),%rax
  145. adc \$0,%rdx
  146. imulq $lo0,$m1 # tp[0]*n0
  147. mov %rdx,$hi0
  148. mulq $m1 # np[0]*m1
  149. add %rax,$lo0 # discarded
  150. mov 8($ap),%rax
  151. adc \$0,%rdx
  152. mov 8(%rsp),$lo0 # tp[1]
  153. mov %rdx,$hi1
  154. lea 1($j),$j # j++
  155. jmp .Linner_enter
  156. .align 16
  157. .Linner:
  158. add %rax,$hi1
  159. mov ($ap,$j,8),%rax
  160. adc \$0,%rdx
  161. add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
  162. mov (%rsp,$j,8),$lo0
  163. adc \$0,%rdx
  164. mov $hi1,-16(%rsp,$j,8) # tp[j-1]
  165. mov %rdx,$hi1
  166. .Linner_enter:
  167. mulq $m0 # ap[j]*bp[i]
  168. add %rax,$hi0
  169. mov ($np,$j,8),%rax
  170. adc \$0,%rdx
  171. add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
  172. mov %rdx,$hi0
  173. adc \$0,$hi0
  174. lea 1($j),$j # j++
  175. mulq $m1 # np[j]*m1
  176. cmp $num,$j
  177. jne .Linner
  178. add %rax,$hi1
  179. mov ($ap),%rax # ap[0]
  180. adc \$0,%rdx
  181. add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
  182. mov (%rsp,$j,8),$lo0
  183. adc \$0,%rdx
  184. mov $hi1,-16(%rsp,$j,8) # tp[j-1]
  185. mov %rdx,$hi1
  186. xor %rdx,%rdx
  187. add $hi0,$hi1
  188. adc \$0,%rdx
  189. add $lo0,$hi1 # pull upmost overflow bit
  190. adc \$0,%rdx
  191. mov $hi1,-8(%rsp,$num,8)
  192. mov %rdx,(%rsp,$num,8) # store upmost overflow bit
  193. lea 1($i),$i # i++
  194. cmp $num,$i
  195. jl .Louter
  196. xor $i,$i # i=0 and clear CF!
  197. mov (%rsp),%rax # tp[0]
  198. lea (%rsp),$ap # borrow ap for tp
  199. mov $num,$j # j=num
  200. jmp .Lsub
  201. .align 16
  202. .Lsub: sbb ($np,$i,8),%rax
  203. mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
  204. mov 8($ap,$i,8),%rax # tp[i+1]
  205. lea 1($i),$i # i++
  206. dec $j # doesnn't affect CF!
  207. jnz .Lsub
  208. sbb \$0,%rax # handle upmost overflow bit
  209. xor $i,$i
  210. and %rax,$ap
  211. not %rax
  212. mov $rp,$np
  213. and %rax,$np
  214. mov $num,$j # j=num
  215. or $np,$ap # ap=borrow?tp:rp
  216. .align 16
  217. .Lcopy: # copy or in-place refresh
  218. mov ($ap,$i,8),%rax
  219. mov $i,(%rsp,$i,8) # zap temporary vector
  220. mov %rax,($rp,$i,8) # rp[i]=tp[i]
  221. lea 1($i),$i
  222. sub \$1,$j
  223. jnz .Lcopy
  224. mov 8(%rsp,$num,8),%rsi # restore %rsp
  225. mov \$1,%rax
  226. mov (%rsi),%r15
  227. mov 8(%rsi),%r14
  228. mov 16(%rsi),%r13
  229. mov 24(%rsi),%r12
  230. mov 32(%rsi),%rbp
  231. mov 40(%rsi),%rbx
  232. lea 48(%rsi),%rsp
  233. .Lmul_epilogue:
  234. ret
  235. .size bn_mul_mont,.-bn_mul_mont
  236. ___
  237. {{{
  238. my @A=("%r10","%r11");
  239. my @N=("%r13","%rdi");
  240. $code.=<<___;
  241. .type bn_mul4x_mont,\@function,6
  242. .align 16
  243. bn_mul4x_mont:
  244. .Lmul4x_enter:
  245. push %rbx
  246. push %rbp
  247. push %r12
  248. push %r13
  249. push %r14
  250. push %r15
  251. mov ${num}d,${num}d
  252. lea 4($num),%r10
  253. mov %rsp,%r11
  254. neg %r10
  255. lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
  256. and \$-1024,%rsp # minimize TLB usage
  257. mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
  258. .Lmul4x_body:
  259. mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
  260. mov %rdx,%r12 # reassign $bp
  261. ___
  262. $bp="%r12";
  263. $code.=<<___;
  264. mov ($n0),$n0 # pull n0[0] value
  265. mov ($bp),$m0 # m0=bp[0]
  266. mov ($ap),%rax
  267. xor $i,$i # i=0
  268. xor $j,$j # j=0
  269. mov $n0,$m1
  270. mulq $m0 # ap[0]*bp[0]
  271. mov %rax,$A[0]
  272. mov ($np),%rax
  273. imulq $A[0],$m1 # "tp[0]"*n0
  274. mov %rdx,$A[1]
  275. mulq $m1 # np[0]*m1
  276. add %rax,$A[0] # discarded
  277. mov 8($ap),%rax
  278. adc \$0,%rdx
  279. mov %rdx,$N[1]
  280. mulq $m0
  281. add %rax,$A[1]
  282. mov 8($np),%rax
  283. adc \$0,%rdx
  284. mov %rdx,$A[0]
  285. mulq $m1
  286. add %rax,$N[1]
  287. mov 16($ap),%rax
  288. adc \$0,%rdx
  289. add $A[1],$N[1]
  290. lea 4($j),$j # j++
  291. adc \$0,%rdx
  292. mov $N[1],(%rsp)
  293. mov %rdx,$N[0]
  294. jmp .L1st4x
  295. .align 16
  296. .L1st4x:
  297. mulq $m0 # ap[j]*bp[0]
  298. add %rax,$A[0]
  299. mov -16($np,$j,8),%rax
  300. adc \$0,%rdx
  301. mov %rdx,$A[1]
  302. mulq $m1 # np[j]*m1
  303. add %rax,$N[0]
  304. mov -8($ap,$j,8),%rax
  305. adc \$0,%rdx
  306. add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
  307. adc \$0,%rdx
  308. mov $N[0],-24(%rsp,$j,8) # tp[j-1]
  309. mov %rdx,$N[1]
  310. mulq $m0 # ap[j]*bp[0]
  311. add %rax,$A[1]
  312. mov -8($np,$j,8),%rax
  313. adc \$0,%rdx
  314. mov %rdx,$A[0]
  315. mulq $m1 # np[j]*m1
  316. add %rax,$N[1]
  317. mov ($ap,$j,8),%rax
  318. adc \$0,%rdx
  319. add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
  320. adc \$0,%rdx
  321. mov $N[1],-16(%rsp,$j,8) # tp[j-1]
  322. mov %rdx,$N[0]
  323. mulq $m0 # ap[j]*bp[0]
  324. add %rax,$A[0]
  325. mov ($np,$j,8),%rax
  326. adc \$0,%rdx
  327. mov %rdx,$A[1]
  328. mulq $m1 # np[j]*m1
  329. add %rax,$N[0]
  330. mov 8($ap,$j,8),%rax
  331. adc \$0,%rdx
  332. add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
  333. adc \$0,%rdx
  334. mov $N[0],-8(%rsp,$j,8) # tp[j-1]
  335. mov %rdx,$N[1]
  336. mulq $m0 # ap[j]*bp[0]
  337. add %rax,$A[1]
  338. mov 8($np,$j,8),%rax
  339. adc \$0,%rdx
  340. lea 4($j),$j # j++
  341. mov %rdx,$A[0]
  342. mulq $m1 # np[j]*m1
  343. add %rax,$N[1]
  344. mov -16($ap,$j,8),%rax
  345. adc \$0,%rdx
  346. add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
  347. adc \$0,%rdx
  348. mov $N[1],-32(%rsp,$j,8) # tp[j-1]
  349. mov %rdx,$N[0]
  350. cmp $num,$j
  351. jl .L1st4x
  352. mulq $m0 # ap[j]*bp[0]
  353. add %rax,$A[0]
  354. mov -16($np,$j,8),%rax
  355. adc \$0,%rdx
  356. mov %rdx,$A[1]
  357. mulq $m1 # np[j]*m1
  358. add %rax,$N[0]
  359. mov -8($ap,$j,8),%rax
  360. adc \$0,%rdx
  361. add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
  362. adc \$0,%rdx
  363. mov $N[0],-24(%rsp,$j,8) # tp[j-1]
  364. mov %rdx,$N[1]
  365. mulq $m0 # ap[j]*bp[0]
  366. add %rax,$A[1]
  367. mov -8($np,$j,8),%rax
  368. adc \$0,%rdx
  369. mov %rdx,$A[0]
  370. mulq $m1 # np[j]*m1
  371. add %rax,$N[1]
  372. mov ($ap),%rax # ap[0]
  373. adc \$0,%rdx
  374. add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
  375. adc \$0,%rdx
  376. mov $N[1],-16(%rsp,$j,8) # tp[j-1]
  377. mov %rdx,$N[0]
  378. xor $N[1],$N[1]
  379. add $A[0],$N[0]
  380. adc \$0,$N[1]
  381. mov $N[0],-8(%rsp,$j,8)
  382. mov $N[1],(%rsp,$j,8) # store upmost overflow bit
  383. lea 1($i),$i # i++
  384. .align 4
  385. .Louter4x:
  386. mov ($bp,$i,8),$m0 # m0=bp[i]
  387. xor $j,$j # j=0
  388. mov (%rsp),$A[0]
  389. mov $n0,$m1
  390. mulq $m0 # ap[0]*bp[i]
  391. add %rax,$A[0] # ap[0]*bp[i]+tp[0]
  392. mov ($np),%rax
  393. adc \$0,%rdx
  394. imulq $A[0],$m1 # tp[0]*n0
  395. mov %rdx,$A[1]
  396. mulq $m1 # np[0]*m1
  397. add %rax,$A[0] # "$N[0]", discarded
  398. mov 8($ap),%rax
  399. adc \$0,%rdx
  400. mov %rdx,$N[1]
  401. mulq $m0 # ap[j]*bp[i]
  402. add %rax,$A[1]
  403. mov 8($np),%rax
  404. adc \$0,%rdx
  405. add 8(%rsp),$A[1] # +tp[1]
  406. adc \$0,%rdx
  407. mov %rdx,$A[0]
  408. mulq $m1 # np[j]*m1
  409. add %rax,$N[1]
  410. mov 16($ap),%rax
  411. adc \$0,%rdx
  412. add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
  413. lea 4($j),$j # j+=2
  414. adc \$0,%rdx
  415. mov $N[1],(%rsp) # tp[j-1]
  416. mov %rdx,$N[0]
  417. jmp .Linner4x
  418. .align 16
  419. .Linner4x:
  420. mulq $m0 # ap[j]*bp[i]
  421. add %rax,$A[0]
  422. mov -16($np,$j,8),%rax
  423. adc \$0,%rdx
  424. add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
  425. adc \$0,%rdx
  426. mov %rdx,$A[1]
  427. mulq $m1 # np[j]*m1
  428. add %rax,$N[0]
  429. mov -8($ap,$j,8),%rax
  430. adc \$0,%rdx
  431. add $A[0],$N[0]
  432. adc \$0,%rdx
  433. mov $N[0],-24(%rsp,$j,8) # tp[j-1]
  434. mov %rdx,$N[1]
  435. mulq $m0 # ap[j]*bp[i]
  436. add %rax,$A[1]
  437. mov -8($np,$j,8),%rax
  438. adc \$0,%rdx
  439. add -8(%rsp,$j,8),$A[1]
  440. adc \$0,%rdx
  441. mov %rdx,$A[0]
  442. mulq $m1 # np[j]*m1
  443. add %rax,$N[1]
  444. mov ($ap,$j,8),%rax
  445. adc \$0,%rdx
  446. add $A[1],$N[1]
  447. adc \$0,%rdx
  448. mov $N[1],-16(%rsp,$j,8) # tp[j-1]
  449. mov %rdx,$N[0]
  450. mulq $m0 # ap[j]*bp[i]
  451. add %rax,$A[0]
  452. mov ($np,$j,8),%rax
  453. adc \$0,%rdx
  454. add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
  455. adc \$0,%rdx
  456. mov %rdx,$A[1]
  457. mulq $m1 # np[j]*m1
  458. add %rax,$N[0]
  459. mov 8($ap,$j,8),%rax
  460. adc \$0,%rdx
  461. add $A[0],$N[0]
  462. adc \$0,%rdx
  463. mov $N[0],-8(%rsp,$j,8) # tp[j-1]
  464. mov %rdx,$N[1]
  465. mulq $m0 # ap[j]*bp[i]
  466. add %rax,$A[1]
  467. mov 8($np,$j,8),%rax
  468. adc \$0,%rdx
  469. add 8(%rsp,$j,8),$A[1]
  470. adc \$0,%rdx
  471. lea 4($j),$j # j++
  472. mov %rdx,$A[0]
  473. mulq $m1 # np[j]*m1
  474. add %rax,$N[1]
  475. mov -16($ap,$j,8),%rax
  476. adc \$0,%rdx
  477. add $A[1],$N[1]
  478. adc \$0,%rdx
  479. mov $N[1],-32(%rsp,$j,8) # tp[j-1]
  480. mov %rdx,$N[0]
  481. cmp $num,$j
  482. jl .Linner4x
  483. mulq $m0 # ap[j]*bp[i]
  484. add %rax,$A[0]
  485. mov -16($np,$j,8),%rax
  486. adc \$0,%rdx
  487. add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
  488. adc \$0,%rdx
  489. mov %rdx,$A[1]
  490. mulq $m1 # np[j]*m1
  491. add %rax,$N[0]
  492. mov -8($ap,$j,8),%rax
  493. adc \$0,%rdx
  494. add $A[0],$N[0]
  495. adc \$0,%rdx
  496. mov $N[0],-24(%rsp,$j,8) # tp[j-1]
  497. mov %rdx,$N[1]
  498. mulq $m0 # ap[j]*bp[i]
  499. add %rax,$A[1]
  500. mov -8($np,$j,8),%rax
  501. adc \$0,%rdx
  502. add -8(%rsp,$j,8),$A[1]
  503. adc \$0,%rdx
  504. lea 1($i),$i # i++
  505. mov %rdx,$A[0]
  506. mulq $m1 # np[j]*m1
  507. add %rax,$N[1]
  508. mov ($ap),%rax # ap[0]
  509. adc \$0,%rdx
  510. add $A[1],$N[1]
  511. adc \$0,%rdx
  512. mov $N[1],-16(%rsp,$j,8) # tp[j-1]
  513. mov %rdx,$N[0]
  514. xor $N[1],$N[1]
  515. add $A[0],$N[0]
  516. adc \$0,$N[1]
  517. add (%rsp,$num,8),$N[0] # pull upmost overflow bit
  518. adc \$0,$N[1]
  519. mov $N[0],-8(%rsp,$j,8)
  520. mov $N[1],(%rsp,$j,8) # store upmost overflow bit
  521. cmp $num,$i
  522. jl .Louter4x
  523. ___
  524. {
  525. my @ri=("%rax","%rdx",$m0,$m1);
  526. $code.=<<___;
  527. mov 16(%rsp,$num,8),$rp # restore $rp
  528. mov 0(%rsp),@ri[0] # tp[0]
  529. pxor %xmm0,%xmm0
  530. mov 8(%rsp),@ri[1] # tp[1]
  531. shr \$2,$num # num/=4
  532. lea (%rsp),$ap # borrow ap for tp
  533. xor $i,$i # i=0 and clear CF!
  534. sub 0($np),@ri[0]
  535. mov 16($ap),@ri[2] # tp[2]
  536. mov 24($ap),@ri[3] # tp[3]
  537. sbb 8($np),@ri[1]
  538. lea -1($num),$j # j=num/4-1
  539. jmp .Lsub4x
  540. .align 16
  541. .Lsub4x:
  542. mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
  543. mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
  544. sbb 16($np,$i,8),@ri[2]
  545. mov 32($ap,$i,8),@ri[0] # tp[i+1]
  546. mov 40($ap,$i,8),@ri[1]
  547. sbb 24($np,$i,8),@ri[3]
  548. mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
  549. mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
  550. sbb 32($np,$i,8),@ri[0]
  551. mov 48($ap,$i,8),@ri[2]
  552. mov 56($ap,$i,8),@ri[3]
  553. sbb 40($np,$i,8),@ri[1]
  554. lea 4($i),$i # i++
  555. dec $j # doesnn't affect CF!
  556. jnz .Lsub4x
  557. mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
  558. mov 32($ap,$i,8),@ri[0] # load overflow bit
  559. sbb 16($np,$i,8),@ri[2]
  560. mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
  561. sbb 24($np,$i,8),@ri[3]
  562. mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
  563. sbb \$0,@ri[0] # handle upmost overflow bit
  564. mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
  565. xor $i,$i # i=0
  566. and @ri[0],$ap
  567. not @ri[0]
  568. mov $rp,$np
  569. and @ri[0],$np
  570. lea -1($num),$j
  571. or $np,$ap # ap=borrow?tp:rp
  572. movdqu ($ap),%xmm1
  573. movdqa %xmm0,(%rsp)
  574. movdqu %xmm1,($rp)
  575. jmp .Lcopy4x
  576. .align 16
  577. .Lcopy4x: # copy or in-place refresh
  578. movdqu 16($ap,$i),%xmm2
  579. movdqu 32($ap,$i),%xmm1
  580. movdqa %xmm0,16(%rsp,$i)
  581. movdqu %xmm2,16($rp,$i)
  582. movdqa %xmm0,32(%rsp,$i)
  583. movdqu %xmm1,32($rp,$i)
  584. lea 32($i),$i
  585. dec $j
  586. jnz .Lcopy4x
  587. shl \$2,$num
  588. movdqu 16($ap,$i),%xmm2
  589. movdqa %xmm0,16(%rsp,$i)
  590. movdqu %xmm2,16($rp,$i)
  591. ___
  592. }
  593. $code.=<<___;
  594. mov 8(%rsp,$num,8),%rsi # restore %rsp
  595. mov \$1,%rax
  596. mov (%rsi),%r15
  597. mov 8(%rsi),%r14
  598. mov 16(%rsi),%r13
  599. mov 24(%rsi),%r12
  600. mov 32(%rsi),%rbp
  601. mov 40(%rsi),%rbx
  602. lea 48(%rsi),%rsp
  603. .Lmul4x_epilogue:
  604. ret
  605. .size bn_mul4x_mont,.-bn_mul4x_mont
  606. ___
  607. }}}
  608. {{{
  609. ######################################################################
  610. # void bn_sqr4x_mont(
  611. my $rptr="%rdi"; # const BN_ULONG *rptr,
  612. my $aptr="%rsi"; # const BN_ULONG *aptr,
  613. my $bptr="%rdx"; # not used
  614. my $nptr="%rcx"; # const BN_ULONG *nptr,
  615. my $n0 ="%r8"; # const BN_ULONG *n0);
  616. my $num ="%r9"; # int num, has to be divisible by 4 and
  617. # not less than 8
  618. my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
  619. my @A0=("%r10","%r11");
  620. my @A1=("%r12","%r13");
  621. my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
  622. $code.=<<___;
  623. .type bn_sqr4x_mont,\@function,6
  624. .align 16
  625. bn_sqr4x_mont:
  626. .Lsqr4x_enter:
  627. push %rbx
  628. push %rbp
  629. push %r12
  630. push %r13
  631. push %r14
  632. push %r15
  633. shl \$3,${num}d # convert $num to bytes
  634. xor %r10,%r10
  635. mov %rsp,%r11 # put aside %rsp
  636. sub $num,%r10 # -$num
  637. mov ($n0),$n0 # *n0
  638. lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
  639. and \$-1024,%rsp # minimize TLB usage
  640. ##############################################################
  641. # Stack layout
  642. #
  643. # +0 saved $num, used in reduction section
  644. # +8 &t[2*$num], used in reduction section
  645. # +32 saved $rptr
  646. # +40 saved $nptr
  647. # +48 saved *n0
  648. # +56 saved %rsp
  649. # +64 t[2*$num]
  650. #
  651. mov $rptr,32(%rsp) # save $rptr
  652. mov $nptr,40(%rsp)
  653. mov $n0, 48(%rsp)
  654. mov %r11, 56(%rsp) # save original %rsp
  655. .Lsqr4x_body:
  656. ##############################################################
  657. # Squaring part:
  658. #
  659. # a) multiply-n-add everything but a[i]*a[i];
  660. # b) shift result of a) by 1 to the left and accumulate
  661. # a[i]*a[i] products;
  662. #
  663. lea 32(%r10),$i # $i=-($num-32)
  664. lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
  665. mov $num,$j # $j=$num
  666. # comments apply to $num==8 case
  667. mov -32($aptr,$i),$a0 # a[0]
  668. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  669. mov -24($aptr,$i),%rax # a[1]
  670. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  671. mov -16($aptr,$i),$ai # a[2]
  672. mov %rax,$a1
  673. mul $a0 # a[1]*a[0]
  674. mov %rax,$A0[0] # a[1]*a[0]
  675. mov $ai,%rax # a[2]
  676. mov %rdx,$A0[1]
  677. mov $A0[0],-24($tptr,$i) # t[1]
  678. xor $A0[0],$A0[0]
  679. mul $a0 # a[2]*a[0]
  680. add %rax,$A0[1]
  681. mov $ai,%rax
  682. adc %rdx,$A0[0]
  683. mov $A0[1],-16($tptr,$i) # t[2]
  684. lea -16($i),$j # j=-16
  685. mov 8($aptr,$j),$ai # a[3]
  686. mul $a1 # a[2]*a[1]
  687. mov %rax,$A1[0] # a[2]*a[1]+t[3]
  688. mov $ai,%rax
  689. mov %rdx,$A1[1]
  690. xor $A0[1],$A0[1]
  691. add $A1[0],$A0[0]
  692. lea 16($j),$j
  693. adc \$0,$A0[1]
  694. mul $a0 # a[3]*a[0]
  695. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  696. mov $ai,%rax
  697. adc %rdx,$A0[1]
  698. mov $A0[0],-8($tptr,$j) # t[3]
  699. jmp .Lsqr4x_1st
  700. .align 16
  701. .Lsqr4x_1st:
  702. mov ($aptr,$j),$ai # a[4]
  703. xor $A1[0],$A1[0]
  704. mul $a1 # a[3]*a[1]
  705. add %rax,$A1[1] # a[3]*a[1]+t[4]
  706. mov $ai,%rax
  707. adc %rdx,$A1[0]
  708. xor $A0[0],$A0[0]
  709. add $A1[1],$A0[1]
  710. adc \$0,$A0[0]
  711. mul $a0 # a[4]*a[0]
  712. add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
  713. mov $ai,%rax # a[3]
  714. adc %rdx,$A0[0]
  715. mov $A0[1],($tptr,$j) # t[4]
  716. mov 8($aptr,$j),$ai # a[5]
  717. xor $A1[1],$A1[1]
  718. mul $a1 # a[4]*a[3]
  719. add %rax,$A1[0] # a[4]*a[3]+t[5]
  720. mov $ai,%rax
  721. adc %rdx,$A1[1]
  722. xor $A0[1],$A0[1]
  723. add $A1[0],$A0[0]
  724. adc \$0,$A0[1]
  725. mul $a0 # a[5]*a[2]
  726. add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
  727. mov $ai,%rax
  728. adc %rdx,$A0[1]
  729. mov $A0[0],8($tptr,$j) # t[5]
  730. mov 16($aptr,$j),$ai # a[6]
  731. xor $A1[0],$A1[0]
  732. mul $a1 # a[5]*a[3]
  733. add %rax,$A1[1] # a[5]*a[3]+t[6]
  734. mov $ai,%rax
  735. adc %rdx,$A1[0]
  736. xor $A0[0],$A0[0]
  737. add $A1[1],$A0[1]
  738. adc \$0,$A0[0]
  739. mul $a0 # a[6]*a[2]
  740. add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
  741. mov $ai,%rax # a[3]
  742. adc %rdx,$A0[0]
  743. mov $A0[1],16($tptr,$j) # t[6]
  744. mov 24($aptr,$j),$ai # a[7]
  745. xor $A1[1],$A1[1]
  746. mul $a1 # a[6]*a[5]
  747. add %rax,$A1[0] # a[6]*a[5]+t[7]
  748. mov $ai,%rax
  749. adc %rdx,$A1[1]
  750. xor $A0[1],$A0[1]
  751. add $A1[0],$A0[0]
  752. lea 32($j),$j
  753. adc \$0,$A0[1]
  754. mul $a0 # a[7]*a[4]
  755. add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
  756. mov $ai,%rax
  757. adc %rdx,$A0[1]
  758. mov $A0[0],-8($tptr,$j) # t[7]
  759. cmp \$0,$j
  760. jne .Lsqr4x_1st
  761. xor $A1[0],$A1[0]
  762. add $A0[1],$A1[1]
  763. adc \$0,$A1[0]
  764. mul $a1 # a[7]*a[5]
  765. add %rax,$A1[1]
  766. adc %rdx,$A1[0]
  767. mov $A1[1],($tptr) # t[8]
  768. lea 16($i),$i
  769. mov $A1[0],8($tptr) # t[9]
  770. jmp .Lsqr4x_outer
  771. .align 16
  772. .Lsqr4x_outer: # comments apply to $num==6 case
  773. mov -32($aptr,$i),$a0 # a[0]
  774. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  775. mov -24($aptr,$i),%rax # a[1]
  776. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  777. mov -16($aptr,$i),$ai # a[2]
  778. mov %rax,$a1
  779. mov -24($tptr,$i),$A0[0] # t[1]
  780. xor $A0[1],$A0[1]
  781. mul $a0 # a[1]*a[0]
  782. add %rax,$A0[0] # a[1]*a[0]+t[1]
  783. mov $ai,%rax # a[2]
  784. adc %rdx,$A0[1]
  785. mov $A0[0],-24($tptr,$i) # t[1]
  786. xor $A0[0],$A0[0]
  787. add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
  788. adc \$0,$A0[0]
  789. mul $a0 # a[2]*a[0]
  790. add %rax,$A0[1]
  791. mov $ai,%rax
  792. adc %rdx,$A0[0]
  793. mov $A0[1],-16($tptr,$i) # t[2]
  794. lea -16($i),$j # j=-16
  795. xor $A1[0],$A1[0]
  796. mov 8($aptr,$j),$ai # a[3]
  797. xor $A1[1],$A1[1]
  798. add 8($tptr,$j),$A1[0]
  799. adc \$0,$A1[1]
  800. mul $a1 # a[2]*a[1]
  801. add %rax,$A1[0] # a[2]*a[1]+t[3]
  802. mov $ai,%rax
  803. adc %rdx,$A1[1]
  804. xor $A0[1],$A0[1]
  805. add $A1[0],$A0[0]
  806. adc \$0,$A0[1]
  807. mul $a0 # a[3]*a[0]
  808. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  809. mov $ai,%rax
  810. adc %rdx,$A0[1]
  811. mov $A0[0],8($tptr,$j) # t[3]
  812. lea 16($j),$j
  813. jmp .Lsqr4x_inner
  814. .align 16
  815. .Lsqr4x_inner:
  816. mov ($aptr,$j),$ai # a[4]
  817. xor $A1[0],$A1[0]
  818. add ($tptr,$j),$A1[1]
  819. adc \$0,$A1[0]
  820. mul $a1 # a[3]*a[1]
  821. add %rax,$A1[1] # a[3]*a[1]+t[4]
  822. mov $ai,%rax
  823. adc %rdx,$A1[0]
  824. xor $A0[0],$A0[0]
  825. add $A1[1],$A0[1]
  826. adc \$0,$A0[0]
  827. mul $a0 # a[4]*a[0]
  828. add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
  829. mov $ai,%rax # a[3]
  830. adc %rdx,$A0[0]
  831. mov $A0[1],($tptr,$j) # t[4]
  832. mov 8($aptr,$j),$ai # a[5]
  833. xor $A1[1],$A1[1]
  834. add 8($tptr,$j),$A1[0]
  835. adc \$0,$A1[1]
  836. mul $a1 # a[4]*a[3]
  837. add %rax,$A1[0] # a[4]*a[3]+t[5]
  838. mov $ai,%rax
  839. adc %rdx,$A1[1]
  840. xor $A0[1],$A0[1]
  841. add $A1[0],$A0[0]
  842. lea 16($j),$j # j++
  843. adc \$0,$A0[1]
  844. mul $a0 # a[5]*a[2]
  845. add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
  846. mov $ai,%rax
  847. adc %rdx,$A0[1]
  848. mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
  849. cmp \$0,$j
  850. jne .Lsqr4x_inner
  851. xor $A1[0],$A1[0]
  852. add $A0[1],$A1[1]
  853. adc \$0,$A1[0]
  854. mul $a1 # a[5]*a[3]
  855. add %rax,$A1[1]
  856. adc %rdx,$A1[0]
  857. mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
  858. mov $A1[0],8($tptr) # t[7], "preloaded t[3]" below
  859. add \$16,$i
  860. jnz .Lsqr4x_outer
  861. # comments apply to $num==4 case
  862. mov -32($aptr),$a0 # a[0]
  863. lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
  864. mov -24($aptr),%rax # a[1]
  865. lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
  866. mov -16($aptr),$ai # a[2]
  867. mov %rax,$a1
  868. xor $A0[1],$A0[1]
  869. mul $a0 # a[1]*a[0]
  870. add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
  871. mov $ai,%rax # a[2]
  872. adc %rdx,$A0[1]
  873. mov $A0[0],-24($tptr) # t[1]
  874. xor $A0[0],$A0[0]
  875. add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
  876. adc \$0,$A0[0]
  877. mul $a0 # a[2]*a[0]
  878. add %rax,$A0[1]
  879. mov $ai,%rax
  880. adc %rdx,$A0[0]
  881. mov $A0[1],-16($tptr) # t[2]
  882. mov -8($aptr),$ai # a[3]
  883. mul $a1 # a[2]*a[1]
  884. add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
  885. mov $ai,%rax
  886. adc \$0,%rdx
  887. xor $A0[1],$A0[1]
  888. add $A1[0],$A0[0]
  889. mov %rdx,$A1[1]
  890. adc \$0,$A0[1]
  891. mul $a0 # a[3]*a[0]
  892. add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
  893. mov $ai,%rax
  894. adc %rdx,$A0[1]
  895. mov $A0[0],-8($tptr) # t[3]
  896. xor $A1[0],$A1[0]
  897. add $A0[1],$A1[1]
  898. adc \$0,$A1[0]
  899. mul $a1 # a[3]*a[1]
  900. add %rax,$A1[1]
  901. mov -16($aptr),%rax # a[2]
  902. adc %rdx,$A1[0]
  903. mov $A1[1],($tptr) # t[4]
  904. mov $A1[0],8($tptr) # t[5]
  905. mul $ai # a[2]*a[3]
  906. ___
{
# Squaring, phase 2: "shift-and-add".  The cross-product half of the
# result accumulated in t[] by the code above is doubled (shifted left
# by one bit across the whole 2*num-word vector) while the diagonal
# squares a[i]*a[i] are added in, four result words per loop iteration.
#
# Register roles (aliased onto registers that are free by this point):
#   $shift - bit 63 of the previous odd word, merged into the next word
#            pair via lea (shift,reg,2) = reg<<1 | shift;
#   $carry - preserves CF of the addition chain across mul (which
#            clobbers the flags): saved with sbb, restored with neg.
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
$code.=<<___;
	add	\$16,$i
	xor	$shift,$shift
	sub	$num,$i			# $i=16-$num
	xor	$carry,$carry
	add	$A1[0],%rax		# t[5]
	adc	\$0,%rdx
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]
	mov	-16($aptr,$i),%rax	# a[0]
	lea	64(%rsp,$num,2),$tptr
	xor	$A0[0],$A0[0]		# t[0]
	mov	-24($tptr,$i,2),$A0[1]	# t[1]
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]
	lea	16($i),$i
	mov	$S[3],-40($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add

.align	16
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr,$i,2)
	adc	%rdx,$S[1]
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr,$i,2)
	adc	%rdx,$S[3]
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	mov	$S[3],-8($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],0($tptr,$i,2)
	adc	%rdx,$S[1]
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],8($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr,$i,2),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr,$i,2),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[2]
	mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],16($tptr,$i,2)
	adc	%rdx,$S[3]
	mov	$S[3],24($tptr,$i,2)
	sbb	$carry,$carry		# mov cf,$carry
	add	\$32,$i
	jnz	.Lsqr4x_shift_n_add

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	adc	%rax,$S[0]
	mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	adc	%rdx,$S[1]
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	shr	\$63,$A0[0]
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	shr	\$63,$A0[1]
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	adc	%rax,$S[2]
	adc	%rdx,$S[3]
	mov	$S[2],-16($tptr)
	mov	$S[3],-8($tptr)
___
}
  1055. ##############################################################
  1056. # Montgomery reduction part, "word-by-word" algorithm.
  1057. #
{
# Montgomery reduction of the 2*num-word vector t[], word-by-word:
# each pass adds a multiple of the modulus n[] chosen (via the n0
# factor) so that the bottom word of the window cancels, then the
# window slides up.  Two passes with multipliers m0 and m1 are
# interleaved per outer iteration, so the window advances two words
# (16 bytes) at a time.  Instructions tagged "# modsched #" have been
# software-pipelined across iteration boundaries from their natural
# position.  CF is never live across mul: partial sums are re-based
# with xor/add/adc \$0 sequences instead.
my ($topbit,$nptr)=("%rbp",$aptr);
my ($m0,$m1)=($a0,$a1);
my @Ni=("%rbx","%r9");
$code.=<<___;
	mov	40(%rsp),$nptr		# restore $nptr
	mov	48(%rsp),$n0		# restore *n0
	xor	$j,$j
	mov	$num,0(%rsp)		# save $num
	sub	$num,$j			# $j=-$num
	mov	64(%rsp),$A0[0]		# t[0]		# modsched #
	mov	$n0,$m0			#		# modsched #
	lea	64(%rsp,$num,2),%rax	# end of t[] buffer
	lea	64(%rsp,$num),$tptr	# end of t[] window
	mov	%rax,8(%rsp)		# save end of t[] buffer
	lea	($nptr,$num),$nptr	# end of n[] buffer
	xor	$topbit,$topbit		# $topbit=0

	mov	0($nptr,$j),%rax	# n[0]		# modsched #
	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	imulq	$A0[0],$m0		# m0=t[0]*n0	# modsched #
	mov	%rax,$Ni[0]		#		# modsched #
	jmp	.Lsqr4x_mont_outer

.align	16
.Lsqr4x_mont_outer:
	xor	$A0[1],$A0[1]
	mul	$m0			# n[0]*m0
	add	%rax,$A0[0]		# n[0]*m0+t[0]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]
	mov	$n0,$m1

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[1]*m0
	add	%rax,$A0[1]		# n[1]*m0+t[1]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]

	imulq	$A0[1],$m1		# second multiplier from updated t[1]

	mov	16($nptr,$j),$Ni[0]	# n[2]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[0]*m1
	add	%rax,$A1[0]		# n[0]*m1+"t[1]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[1]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[2]*m0
	add	%rax,$A0[0]		# n[2]*m0+t[2]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[3]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[1]*m1
	add	%rax,$A1[1]		# n[1]*m1+"t[2]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[2]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[3]*m0
	add	%rax,$A0[1]		# n[3]*m0+t[3]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	jmp	.Lsqr4x_mont_inner

.align	16
.Lsqr4x_mont_inner:
	mov	($nptr,$j),$Ni[0]	# n[4]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[2]*m1
	add	%rax,$A1[0]		# n[2]*m1+"t[3]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr,$j)	# "t[3]"

	xor	$A0[1],$A0[1]
	add	($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[4]*m0
	add	%rax,$A0[0]		# n[4]*m0+t[4]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	8($nptr,$j),$Ni[1]	# n[5]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[3]*m1
	add	%rax,$A1[1]		# n[3]*m1+"t[4]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr,$j)	# "t[4]"

	xor	$A0[0],$A0[0]
	add	8($tptr,$j),$A0[1]
	adc	\$0,$A0[0]
	mul	$m0			# n[5]*m0
	add	%rax,$A0[1]		# n[5]*m0+t[5]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]

	mov	16($nptr,$j),$Ni[0]	# n[6]
	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[4]*m1
	add	%rax,$A1[0]		# n[4]*m1+"t[5]"
	mov	$Ni[0],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],8($tptr,$j)	# "t[5]"

	xor	$A0[1],$A0[1]
	add	16($tptr,$j),$A0[0]
	adc	\$0,$A0[1]
	mul	$m0			# n[6]*m0
	add	%rax,$A0[0]		# n[6]*m0+t[6]
	mov	$Ni[1],%rax
	adc	%rdx,$A0[1]

	mov	24($nptr,$j),$Ni[1]	# n[7]
	xor	$A1[0],$A1[0]
	add	$A0[0],$A1[1]
	adc	\$0,$A1[0]
	mul	$m1			# n[5]*m1
	add	%rax,$A1[1]		# n[5]*m1+"t[6]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[0]
	mov	$A1[1],16($tptr,$j)	# "t[6]"

	xor	$A0[0],$A0[0]
	add	24($tptr,$j),$A0[1]
	lea	32($j),$j
	adc	\$0,$A0[0]
	mul	$m0			# n[7]*m0
	add	%rax,$A0[1]		# n[7]*m0+t[7]
	mov	$Ni[0],%rax
	adc	%rdx,$A0[0]
	cmp	\$0,$j
	jne	.Lsqr4x_mont_inner

	sub	0(%rsp),$j		# $j=-$num	# modsched #
	mov	$n0,$m0			#		# modsched #

	xor	$A1[1],$A1[1]
	add	$A0[1],$A1[0]
	adc	\$0,$A1[1]
	mul	$m1			# n[6]*m1
	add	%rax,$A1[0]		# n[6]*m1+"t[7]"
	mov	$Ni[1],%rax
	adc	%rdx,$A1[1]
	mov	$A1[0],-8($tptr)	# "t[7]"

	xor	$A0[1],$A0[1]
	add	($tptr),$A0[0]		# +t[8]
	adc	\$0,$A0[1]
	mov	0($nptr,$j),$Ni[0]	# n[0]		# modsched #
	add	$topbit,$A0[0]
	adc	\$0,$A0[1]

	imulq	16($tptr,$j),$m0	# m0=t[0]*n0	# modsched #

	xor	$A1[0],$A1[0]
	mov	8($nptr,$j),$Ni[1]	# n[1]		# modsched #
	add	$A0[0],$A1[1]
	mov	16($tptr,$j),$A0[0]	# t[0]		# modsched #
	adc	\$0,$A1[0]
	mul	$m1			# n[7]*m1
	add	%rax,$A1[1]		# n[7]*m1+"t[8]"
	mov	$Ni[0],%rax		#		# modsched #
	adc	%rdx,$A1[0]
	mov	$A1[1],($tptr)		# "t[8]"

	xor	$topbit,$topbit
	add	8($tptr),$A1[0]		# +t[9]
	adc	$topbit,$topbit
	add	$A0[1],$A1[0]
	lea	16($tptr),$tptr		# "t[$num]>>128"
	adc	\$0,$topbit
	mov	$A1[0],-8($tptr)	# "t[9]"
	cmp	8(%rsp),$tptr		# are we done?
	jb	.Lsqr4x_mont_outer

	mov	0(%rsp),$num		# restore $num
	mov	$topbit,($tptr)		# save $topbit
___
}
  1239. ##############################################################
  1240. # Post-condition, 4x unrolled copy from bn_mul_mont
  1241. #
{
# Post-condition, 4x-unrolled subtract/copy from bn_mul_mont: compute
# rp[] = tp[] - np[] word by word, then use the final borrow (combined
# with the overflow word above tp[]) to select, branch-free via
# and/not/or masking ("tp=borrow?tp:rp"), which of the two vectors is
# copied back to rp[].  The copy loop simultaneously zaps both halves
# of the temporary vector on the stack — presumably so no intermediate
# values survive the call (NOTE(review): intent inferred from the
# "zap" comments; confirm against upstream history).
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
$code.=<<___;
	mov	64(%rsp,$num),@ri[0]	# tp[0]
	lea	64(%rsp,$num),$tptr	# upper half of t[2*$num] holds result
	mov	40(%rsp),$nptr		# restore $nptr
	shr	\$5,$num		# num/4
	mov	8($tptr),@ri[1]		# t[1]
	xor	$i,$i			# i=0 and clear CF!

	mov	32(%rsp),$rptr		# restore $rptr
	sub	0($nptr),@ri[0]
	mov	16($tptr),@ri[2]	# t[2]
	mov	24($tptr),@ri[3]	# t[3]
	sbb	8($nptr),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsqr4x_sub

.align	16
.Lsqr4x_sub:
	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($nptr,$i,8),@ri[2]
	mov	32($tptr,$i,8),@ri[0]	# tp[i+1]
	mov	40($tptr,$i,8),@ri[1]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($nptr,$i,8),@ri[0]
	mov	48($tptr,$i,8),@ri[2]
	mov	56($tptr,$i,8),@ri[3]
	sbb	40($nptr,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsqr4x_sub

	mov	@ri[0],0($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($tptr,$i,8),@ri[0]	# load overflow bit
	sbb	16($nptr,$i,8),@ri[2]
	mov	@ri[1],8($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rptr,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$tptr		# mask is all-ones iff borrow
	not	@ri[0]
	mov	$rptr,$nptr
	and	@ri[0],$nptr
	lea	-1($num),$j
	or	$nptr,$tptr		# tp=borrow?tp:rp

	pxor	%xmm0,%xmm0
	lea	64(%rsp,$num,8),$nptr
	movdqu	($tptr),%xmm1
	lea	($nptr,$num,8),$nptr
	movdqa	%xmm0,64(%rsp)		# zap lower half of temporary vector
	movdqa	%xmm0,($nptr)		# zap upper half of temporary vector
	movdqu	%xmm1,($rptr)
	jmp	.Lsqr4x_copy

.align	16
.Lsqr4x_copy:				# copy or in-place refresh
	movdqu	16($tptr,$i),%xmm2
	movdqu	32($tptr,$i),%xmm1
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,96(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqa	%xmm0,32($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
	movdqu	%xmm1,32($rptr,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lsqr4x_copy

	movdqu	16($tptr,$i),%xmm2
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
___
}
# Epilogue of bn_sqr4x_mont: reload the saved %rsp from the fixed slot
# at 56(%rsp), restore the six callee-saved registers from the save
# area below it, return 1 in %rax (bn_mul_mont-family success value).
$code.=<<___;
	mov	56(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lsqr4x_epilogue:
	ret
.size	bn_sqr4x_mont,.-bn_sqr4x_mont
___
  1332. }}}
# Embed the CRYPTOGAMS attribution string in the generated object.
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___
  1337. # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
  1338. # CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Win64 structured-exception-handling support: two language-specific
# unwind handlers plus .pdata/.xdata tables.  mul_handler locates the
# saved stack pointer via the $num argument spilled in the frame;
# sqr_handler reads it from the fixed slot at 56(%rsp).  Both fall
# through to a common tail that completes the unwind via
# RtlVirtualUnwind and returns ExceptionContinueSearch.
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lsqr4x_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lsqr4x_epilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	56(%rax),%rax		# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr4x_mont
	.rva	.LSEH_end_bn_sqr4x_mont
	.rva	.LSEH_info_bn_sqr4x_mont

.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
___
}
  1487. print $code;
  1488. close STDOUT;