#!/usr/bin/env perl
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common case, rsa1024 sign, is improved
# by a respectable 50%. It remains to be seen whether loop unrolling
# and a dedicated squaring routine can provide further improvement...
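#
# For reference, the word-serial Montgomery algorithm the routines below
# implement (a rough sketch of the textbook method, not part of the build;
# n0 denotes -np[0]^-1 mod 2^64 as precomputed by the caller):
#
#	for (i=0; i<num; i++) {
#		t[] += ap[] * bp[i];		# num+1-word accumulator
#		m    = (t[0] * n0) mod 2^64;
#		t[] += m * np[];		# forces t[0] to zero
#		t[] >>= 64;			# exact division by 2^64
#	}
#	if (t[] >= np[]) t[] -= np[];		# final conditional subtraction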

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule the inner loops in such a manner that
# they are "fallen through" for input lengths of 8, which is critical
# for 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.23);
}

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.10);
}

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$addx = ($1>=12);
}

if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
	$addx = ($ver>=3.03);
}
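
# The thresholds above mirror what each toolchain can assemble: to the
# best of our knowledge, GNU as 2.23, NASM 2.10, MASM (ml64) 12 and
# LLVM/clang 3.3 are the first releases able to encode ADCX/ADOX and
# MULX. If none matches, $addx stays false and only the plain MUL code
# paths are emitted.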

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
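#
# In C terms the contract is rp[0..num-1] = ap[]*bp[]*2^(-64*num) mod np[],
# with n0[0] = -np[0]^-1 mod 2^64 precomputed by the caller; the constant
# return value 1 signals that the assembly path handled the operation.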
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
				$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter

.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
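#
# In C terms the select above is (a sketch; after the sbb, %rax is
# all-ones exactly when tp-np borrowed):
#
#	mask = %rax;				# 0 or all-ones
#	src  = (tp & mask) | (rp & ~mask);	# tp if borrow, else rp
#
# so .Lcopy below either copies the unreduced tp back or refreshes rp
# in place, and zaps the temporary vector either way.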
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
				$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}

{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___ if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num
	##############################################################
	# Ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	#
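	# To illustrate: out-of-order cores disambiguate loads against
	# earlier stores using only the low 12 address bits, so a frame
	# whose offset within the 4KB page collides with $aptr would make
	# every store to the frame look like a potential alias of a
	# pending $aptr load and serialize the inner loop. Displacing
	# %rsp by the distance computed below avoids that false
	# dependency.
	#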
	lea	-64(%rsp,$num,4),%r11
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,4),%r10	# 4096-frame-4*$num
	lea	-64(%rsp,$num,4),%rsp	# alloca(frame+4*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rsp
.Lsqr8x_sp_done:
	and	\$-64,%rsp
	mov	$num,%r10
	neg	$num

	lea	64(%rsp,$num,2),%r11	# copy of modulus
	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.Lsqr8x_body:

	mov	$num,$i
	movq	%r11, %xmm2		# save pointer to modulus copy
	shr	\$3+2,$i
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	jmp	.Lsqr8x_copy_n

.align	32
.Lsqr8x_copy_n:
	movq	8*0($nptr),%xmm0
	movq	8*1($nptr),%xmm1
	movq	8*2($nptr),%xmm3
	movq	8*3($nptr),%xmm4
	lea	8*4($nptr),$nptr
	movdqa	%xmm0,16*0(%r11)
	movdqa	%xmm1,16*1(%r11)
	movdqa	%xmm3,16*2(%r11)
	movdqa	%xmm4,16*3(%r11)
	lea	16*4(%r11),%r11
	dec	$i
	jnz	.Lsqr8x_copy_n

	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	and	\$0x80100,%eax
	cmp	\$0x80100,%eax
	jne	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module

	pxor	%xmm0,%xmm0
	lea	48(%rsp),%rax
	lea	64(%rsp,$num,2),%rdx
	shr	\$3+2,$num
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_zero

.align	32
.Lsqr8x_zero:
	movdqa	%xmm0,16*0(%rax)	# wipe t
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	lea	16*4(%rax),%rax
	movdqa	%xmm0,16*0(%rdx)	# wipe n
	movdqa	%xmm0,16*1(%rdx)
	movdqa	%xmm0,16*2(%rdx)
	movdqa	%xmm0,16*3(%rdx)
	lea	16*4(%rdx),%rdx
	dec	$num
	jnz	.Lsqr8x_zero

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.Lmulx4x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	lea	($bp,$num),%r10
	and	\$-128,%rsp
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	saved %rsp
	# +48	inner counter
	# +56
	# +64	tmp[num+1]
	#
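	# To illustrate the counter at +48: $num is already in bytes,
	# so the "shr \$5" below turns it into the number of 32-byte
	# (4-limb) columns, and the "sub \$1" biases it for the dec/jnz
	# inner loops, whose first column is handled by the loop
	# prologues.
	#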
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
	mov	$num,48(%rsp)		# inner counter
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
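
# A note on the technique used below (informational): MULX takes the
# multiplicand in %rdx and writes the 128-bit product to two arbitrary
# registers without touching flags, while ADCX and ADOX add with carry
# through CF and OF respectively. That allows two independent carry
# chains to run interleaved, e.g. (a sketch):
#
#	mulx	8(%rsi),%rax,%r14	# flags untouched
#	adcx	%rax,%r11		# chain #1 via CF
#	adox	%r15,%r12		# chain #2 via OF
#
# which is what keeps both multiplier and adder ports busy in the
# loops below.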
$code.=<<___;
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx 2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)
	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	.byte	0x67,0x67
	mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor $zero,$zero # cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	$zero,%r12
	adcx	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	.byte	0x67
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor $zero,$zero # cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	adox	-2*8($tptr),%r12
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)
	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	mov	-8($nptr),$mi
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	sub	%r14,$mi		# compare top-most words
	sbb	$mi,$mi
	or	$mi,%r15

	neg	$num
	xor	%rdx,%rdx
	mov	32(%rsp),$rptr		# restore rp
	lea	64(%rsp),$tptr

	pxor	%xmm0,%xmm0
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	neg	%r8
	jmp	.Lmulx4x_sub_entry

.align	32
.Lmulx4x_sub:
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	not	%r8
.Lmulx4x_sub_entry:
	mov	2*8($nptr,$num),%r10
	not	%r9
	and	%r15,%r8
	mov	3*8($nptr,$num),%r11
	not	%r10
	and	%r15,%r9
	not	%r11
	and	%r15,%r10
	and	%r15,%r11

	neg	%rdx			# mov %rdx,%cf
	adc	0*8($tptr),%r8
	adc	1*8($tptr),%r9
	movdqa	%xmm0,($tptr)
	adc	2*8($tptr),%r10
	adc	3*8($tptr),%r11
	movdqa	%xmm0,16($tptr)
	lea	4*8($tptr),$tptr
	sbb	%rdx,%rdx		# mov %cf,%rdx

	mov	%r8,0*8($rptr)
	mov	%r9,1*8($rptr)
	mov	%r10,2*8($rptr)
	mov	%r11,3*8($rptr)
	lea	4*8($rptr),$rptr

	add	\$32,$num
	jnz	.Lmulx4x_sub

	mov	40(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;