# rc4-x86_64.pl
  1. #!/usr/bin/env perl
  2. #
  3. # ====================================================================
  4. # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
  5. # project. The module is, however, dual licensed under OpenSSL and
  6. # CRYPTOGAMS licenses depending on where you obtain it. For further
  7. # details see http://www.openssl.org/~appro/cryptogams/.
  8. # ====================================================================
  9. #
  10. # July 2004
  11. #
  12. # 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
  13. # "hand-coded assembler"] doesn't stand for the whole improvement
  14. # coefficient. It turned out that eliminating RC4_CHAR from config
  15. # line results in ~40% improvement (yes, even for C implementation).
  16. # Presumably it has everything to do with AMD cache architecture and
  17. # RAW or whatever penalties. Once again! The module *requires* config
  18. # line *without* RC4_CHAR! As for coding "secret," I bet on partial
  19. # register arithmetics. For example instead of 'inc %r8; and $255,%r8'
  20. # I simply 'inc %r8b'. Even though optimization manual discourages
  21. # to operate on partial registers, it turned out to be the best bet.
  22. # At least for AMD... How IA32E would perform remains to be seen...
  23. # November 2004
  24. #
  25. # As was shown by Marc Bevand reordering of couple of load operations
  26. # results in even higher performance gain of 3.3x:-) At least on
  27. # Opteron... For reference, 1x in this case is RC4_CHAR C-code
  28. # compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
  29. # Latter means that if you want to *estimate* what to expect from
  30. # *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
  31. # November 2004
  32. #
  33. # Intel P4 EM64T core was found to run the AMD64 code really slow...
  34. # The only way to achieve comparable performance on P4 was to keep
  35. # RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
  36. # compose blended code, which would perform even within 30% marginal
  37. # on either AMD and Intel platforms, I implement both cases. See
  38. # rc4_skey.c for further details...
  39. # April 2005
  40. #
  41. # P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
  42. # those with add/sub results in 50% performance improvement of folded
  43. # loop...
  44. # May 2005
  45. #
  46. # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
  47. # performance by >30% [unlike P4 32-bit case that is]. But this is
  48. # provided that loads are reordered even more aggressively! Both code
  49. # pathes, AMD64 and EM64T, reorder loads in essentially same manner
  50. # as my IA-64 implementation. On Opteron this resulted in modest 5%
  51. # improvement [I had to test it], while final Intel P4 performance
  52. # achieves respectful 432MBps on 2.8GHz processor now. For reference.
  53. # If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
  54. # RC4_INT code-path. While if executed on Opteron, it's only 25%
  55. # slower than the RC4_INT one [meaning that if CPU µ-arch detection
  56. # is not implemented, then this final RC4_CHAR code-path should be
  57. # preferred, as it provides better *all-round* performance].
  58. # March 2007
  59. #
  60. # Intel Core2 was observed to perform poorly on both code paths:-( It
  61. # apparently suffers from some kind of partial register stall, which
  62. # occurs in 64-bit mode only [as virtually identical 32-bit loop was
  63. # observed to outperform 64-bit one by almost 50%]. Adding two movzb to
  64. # cloop1 boosts its performance by 80%! This loop appears to be optimal
  65. # fit for Core2 and therefore the code was modified to skip cloop8 on
  66. # this CPU.
  67. # May 2010
  68. #
  69. # Intel Westmere was observed to perform suboptimally. Adding yet
  70. # another movzb to cloop1 improved performance by almost 50%! Core2
  71. # performance is improved too, but nominally...
  72. # May 2011
  73. #
  74. # The only code path that was not modified is P4-specific one. Non-P4
  75. # Intel code path optimization is heavily based on submission by Maxim
  76. # Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
# some of the ideas even in an attempt to optimize the original RC4_INT
  78. # code path... Current performance in cycles per processed byte (less
  79. # is better) and improvement coefficients relative to previous
  80. # version of this module are:
  81. #
  82. # Opteron 5.3/+0%(*)
  83. # P4 6.5
  84. # Core2 6.2/+15%(**)
  85. # Westmere 4.2/+60%
  86. # Sandy Bridge 4.2/+120%
  87. # Atom 9.3/+80%
  88. #
  89. # (*) But corresponding loop has less instructions, which should have
  90. # positive effect on upcoming Bulldozer, which has one less ALU.
  91. # For reference, Intel code runs at 6.8 cpb rate on Opteron.
  92. # (**) Note that Core2 result is ~15% lower than corresponding result
  93. # for 32-bit code, meaning that it's possible to improve it,
  94. # but more than likely at the cost of the others (see rc4-586.pl
  95. # to get the idea)...
  96. $flavour = shift;
  97. $output = shift;
  98. if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
  99. $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
  100. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  101. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  102. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  103. die "can't locate x86_64-xlate.pl";
  104. open OUT,"| \"$^X\" $xlate $flavour $output";
  105. *STDOUT=*OUT;
  106. $dat="%rdi"; # arg1
  107. $len="%rsi"; # arg2
  108. $inp="%rdx"; # arg3
  109. $out="%rcx"; # arg4
{
# RC4(RC4_KEY *key,size_t len,const u8 *inp,u8 *out) — the stream body.
# Everything inside this bare block only appends assembly text to $code.
# Prologue: return immediately on zero length, save callee-saved
# registers, and move len/inp/out out of the argument registers (the
# inner loops reuse %rsi/%rdx/%rcx for round state).
$code=<<___;
.text
.extern	OPENSSL_ia32cap_P
.globl	RC4
.type	RC4,\@function,4
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	ret
.Lentry:
	push	%rbx
	push	%r12
	push	%r13
.Lprologue:
	mov	$len,%r11
	mov	$inp,%r12
	mov	$out,%r13
___
# Rebind the Perl-level names to the registers the arguments were just
# copied into; @XX/@TX are rotated below to interleave rounds.
my $len="%r11";		# reassign input arguments
my $inp="%r12";
my $out="%r13";
my @XX=("%r10","%rsi");
my @TX=("%rax","%rbx");
my $YY="%rcx";
my $TY="%rdx";
# Load key->x/key->y (the two dwords preceding the S array) and
# dispatch: RC4_CHAR schedules (tagged S[256]==-1 by set_key) go to
# .LRC4_CHAR, Intel CPUs (ia32cap bit 30) to .Lintel, everything else
# falls through to the AMD-tuned 8x RC4_INT loop.  Note $out is rebased
# to $out-$inp so a single pointer serves both buffers; .Loop8_warmup
# runs single rounds until the position is 8-byte aligned.
$code.=<<___;
	xor	$XX[0],$XX[0]
	xor	$YY,$YY
	lea	8($dat),$dat
	mov	-8($dat),$XX[0]#b
	mov	-4($dat),$YY#b
	cmpl	\$-1,256($dat)
	je	.LRC4_CHAR
	mov	OPENSSL_ia32cap_P(%rip),%r8d
	xor	$TX[1],$TX[1]
	inc	$XX[0]#b
	sub	$XX[0],$TX[1]
	sub	$inp,$out
	movl	($dat,$XX[0],4),$TX[0]#d
	test	\$-16,$len
	jz	.Lloop1
	bt	\$30,%r8d	# Intel CPU?
	jc	.Lintel
	and	\$7,$TX[1]
	lea	1($XX[0]),$XX[1]
	jz	.Loop8
	sub	$TX[1],$len
.Loop8_warmup:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$TX[1]
	jnz	.Loop8_warmup
	lea	1($XX[0]),$XX[1]
	jmp	.Loop8
.align	16
.Loop8:
___
# One RC4 round per iteration, 8x unrolled; key-stream bytes collect in
# %r8, rotated right by 8 each round and flushed as one qword below.
for ($i=0;$i<8;$i++) {
$code.=<<___ if ($i==7);
	add	\$8,$XX[1]#b
___
$code.=<<___;
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	`4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d
	ror	\$8,%r8		# ror is redundant when $i=0
	movl	$TY#d,4*$i($dat,$XX[0],4)
	add	$TX[0]#b,$TY#b
	movb	($dat,$TY,4),%r8b
___
push(@TX,shift(@TX)); #push(@XX,shift(@XX));	# "rotate" registers
}
# 8x loop epilogue (xor the gathered qword with input, store, loop),
# then the Intel entry: .Loop16_warmup runs single rounds until the
# position is 16-byte aligned before falling into .Loop16.
$code.=<<___;
	add	\$8,$XX[0]#b
	ror	\$8,%r8
	sub	\$8,$len
	xor	($inp),%r8
	mov	%r8,($out,$inp)
	lea	8($inp),$inp
	test	\$-8,$len
	jnz	.Loop8
	cmp	\$0,$len
	jne	.Lloop1
	jmp	.Lexit
.align	16
.Lintel:
	test	\$-32,$len
	jz	.Lloop1
	and	\$15,$TX[1]
	jz	.Loop16_is_hot
	sub	$TX[1],$len
.Loop16_warmup:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$TX[1]
	jnz	.Loop16_warmup
	mov	$YY,$TX[1]
	xor	$YY,$YY
	mov	$TX[1]#b,$YY#b
.Loop16_is_hot:
	lea	($dat,$XX[0],4),$XX[1]
___
# Emit one round of the 16x interleaved Intel loop.  $i==-1 produces the
# pre-loop prologue (folded onto round 0 via $j); $i==15 emits the
# wrap-around tail.  Gathered key-stream values accumulate as words in
# %xmm0 (even rounds) / %xmm1 (odd rounds) via pinsrw; on round 0 the
# previous block is combined with 16 input bytes (%xmm2) via
# psllq/pxor and flushed with movdqu.
sub RC4_loop {
my $i=shift;
my $j=$i<0?0:$i;
my $xmm="%xmm".($j&1);		# alternate gather target per round
	$code.="	add	\$16,$XX[0]#b\n"	if ($i==15);
	$code.="	movdqu	($inp),%xmm2\n"		if ($i==15);
	$code.="	add	$TX[0]#b,$YY#b\n"	if ($i<=0);
	$code.="	movl	($dat,$YY,4),$TY#d\n";
	$code.="	pxor	%xmm0,%xmm2\n"		if ($i==0);
	$code.="	psllq	\$8,%xmm1\n"		if ($i==0);
	$code.="	pxor	$xmm,$xmm\n"		if ($i<=1);
	$code.="	movl	$TX[0]#d,($dat,$YY,4)\n";
	$code.="	add	$TY#b,$TX[0]#b\n";
	$code.="	movl	`4*($j+1)`($XX[1]),$TX[1]#d\n"	if ($i<15);
	$code.="	movz	$TX[0]#b,$TX[0]#d\n";
	$code.="	movl	$TY#d,4*$j($XX[1])\n";
	$code.="	pxor	%xmm1,%xmm2\n"		if ($i==0);
	$code.="	lea	($dat,$XX[0],4),$XX[1]\n"	if ($i==15);
	$code.="	add	$TX[1]#b,$YY#b\n"	if ($i<15);
	$code.="	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n";
	$code.="	movdqu	%xmm2,($out,$inp)\n"	if ($i==0);
	$code.="	lea	16($inp),$inp\n"	if ($i==0);
	$code.="	movl	($XX[1]),$TX[1]#d\n"	if ($i==15);
}
RC4_loop(-1);	# software-pipelined prologue: start round 0 before the loop
$code.=<<___;
	jmp	.Loop16_enter
.align	16
.Loop16:
___
# Rounds 0..15; .Loop16_enter (placed before round 1) skips round 0's
# store of the previous iteration's output on the very first pass.
for ($i=0;$i<16;$i++) {
$code.=".Loop16_enter:\n"	if ($i==1);
	RC4_loop($i);
	push(@TX,shift(@TX)); 	# "rotate" registers
}
# .Loop16 epilogue (flush the final 16 gathered bytes), the generic
# byte-at-a-time .Lloop1 tail shared by all int paths, and the
# RC4_CHAR entry with the 8x char-loop header (loads 2x4 input bytes
# into %r8d/%r9d).
$code.=<<___;
	mov	$YY,$TX[1]
	xor	$YY,$YY			# keyword to partial register
	sub	\$16,$len
	mov	$TX[1]#b,$YY#b
	test	\$-16,$len
	jnz	.Loop16
	psllq	\$8,%xmm1
	pxor	%xmm0,%xmm2
	pxor	%xmm1,%xmm2
	movdqu	%xmm2,($out,$inp)
	lea	16($inp),$inp
	cmp	\$0,$len
	jne	.Lloop1
	jmp	.Lexit
.align	16
.Lloop1:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit
.align	16
.LRC4_CHAR:
	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	jmp	.Lcloop8
.align	16
.Lcloop8:
	mov	($inp),%r8d
	mov	4($inp),%r9d
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
# First four char rounds: output bytes accumulate in %r8d.  The
# cmp/jne pair emulates cmov (re-reading S[x] when y==x after the swap)
# because real cmov is slow on the targeted CPUs.
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%r8b
	ror	\$8,%r8d
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
# Second four char rounds: identical, but accumulate into %r9d.
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%r9b
	ror	\$8,%r9d
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
# Store the two gathered dwords and loop while >=8 bytes remain.
$code.=<<___;
	lea	-8($len),$len
	mov	%r8d,($out)
	lea	8($inp),$inp
	mov	%r9d,4($out)
	lea	8($out),$out
	test	\$-8,$len
	jnz	.Lcloop8
	cmp	\$0,$len
	jne	.Lcloop1
	jmp	.Lexit
___
# Single-byte RC4_CHAR tail, then the common exit: write x-1 and y back
# into the key structure and restore the three saved registers.
$code.=<<___;
.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	$YY#b,$YY#d
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	$TY#b,$TY#d
	movzb	$XX[0]#b,$XX[0]#d
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit
.align	16
.Lexit:
	sub	\$1,$XX[0]#b
	movl	$XX[0]#d,-8($dat)
	movl	$YY#d,-4($dat)
	mov	(%rsp),%r13
	mov	8(%rsp),%r12
	mov	16(%rsp),%rbx
	add	\$24,%rsp
.Lepilogue:
	ret
.size	RC4,.-RC4
___
}
$idx="%r8";
$ido="%r9";
# private_RC4_set_key(RC4_KEY *key,int len,const u8 *data): standard RC4
# key schedule with two table layouts — 32-bit S entries (.Lw* loops) by
# default, byte entries (.Lc* loops) when ia32cap bit 20 (P4) is set.
# The char variant tags the schedule with S[256]=-1 so RC4() above
# selects .LRC4_CHAR.  Both variants wrap the data index via the
# negative-length/cmovz (resp. jnz) trick.  RC4_options() returns a
# string naming the code path the current CPU will take, by offsetting
# into the .Lopts string table.
$code.=<<___;
.globl	private_RC4_set_key
.type	private_RC4_set_key,\@function,3
.align	16
private_RC4_set_key:
	lea	8($dat),$dat
	lea	($inp,$len),$inp
	neg	$len
	mov	$len,%rcx
	xor	%eax,%eax
	xor	$ido,$ido
	xor	%r10,%r10
	xor	%r11,%r11
	mov	OPENSSL_ia32cap_P(%rip),$idx#d
	bt	\$20,$idx#d	# RC4_CHAR?
	jc	.Lc1stloop
	jmp	.Lw1stloop
.align	16
.Lw1stloop:
	mov	%eax,($dat,%rax,4)
	add	\$1,%al
	jnc	.Lw1stloop
	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lw2ndloop:
	mov	($dat,$ido,4),%r10d
	add	($inp,$len,1),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx,4),%r11d
	cmovz	%rcx,$len
	mov	%r10d,($dat,$idx,4)
	mov	%r11d,($dat,$ido,4)
	add	\$1,$ido#b
	jnc	.Lw2ndloop
	jmp	.Lexit_key
.align	16
.Lc1stloop:
	mov	%al,($dat,%rax)
	add	\$1,%al
	jnc	.Lc1stloop
	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lc2ndloop:
	mov	($dat,$ido),%r10b
	add	($inp,$len),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx),%r11b
	jnz	.Lcnowrap
	mov	%rcx,$len
.Lcnowrap:
	mov	%r10b,($dat,$idx)
	mov	%r11b,($dat,$ido)
	add	\$1,$ido#b
	jnc	.Lc2ndloop
	movl	\$-1,256($dat)
.align	16
.Lexit_key:
	xor	%eax,%eax
	mov	%eax,-8($dat)
	mov	%eax,-4($dat)
	ret
.size	private_RC4_set_key,.-private_RC4_set_key
.globl	RC4_options
.type	RC4_options,\@abi-omnipotent
.align	16
RC4_options:
	lea	.Lopts(%rip),%rax
	mov	OPENSSL_ia32cap_P(%rip),%edx
	bt	\$20,%edx
	jc	.L8xchar
	bt	\$30,%edx
	jnc	.Ldone
	add	\$25,%rax
	ret
.L8xchar:
	add	\$12,%rax
.Ldone:
	ret
.align	64
.Lopts:
.asciz	"rc4(8x,int)"
.asciz	"rc4(8x,char)"
.asciz	"rc4(16x,int)"
.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
.size	RC4_options,.-RC4_options
___
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Win64-only: structured-exception handlers so the OS unwinder can walk
# through RC4 (recovering the three pushed registers) and
# private_RC4_set_key (leaf, only rsi/rdi fix-up), plus the
# .pdata/.xdata tables that register them.
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	stream_se_handler,\@abi-omnipotent
.align	16
stream_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue
	mov	152($context),%rax	# pull context->Rsp
	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue
	lea	24(%rax),%rax
	mov	-8(%rax),%rbx
	mov	-16(%rax),%r12
	mov	-24(%rax),%r13
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	jmp	.Lcommon_seh_exit
.size	stream_se_handler,.-stream_se_handler
.type	key_se_handler,\@abi-omnipotent
.align	16
key_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp
	mov	152($context),%rax	# pull context->Rsp
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
.Lcommon_seh_exit:
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq
	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)
	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	key_se_handler,.-key_se_handler
.section	.pdata
.align	4
.rva	.LSEH_begin_RC4
.rva	.LSEH_end_RC4
.rva	.LSEH_info_RC4
.rva	.LSEH_begin_private_RC4_set_key
.rva	.LSEH_end_private_RC4_set_key
.rva	.LSEH_info_private_RC4_set_key
.section	.xdata
.align	8
.LSEH_info_RC4:
	.byte	9,0,0,0
	.rva	stream_se_handler
.LSEH_info_private_RC4_set_key:
	.byte	9,0,0,0
	.rva	key_se_handler
___
}
  602. sub reg_part {
  603. my ($reg,$conv)=@_;
  604. if ($reg =~ /%r[0-9]+/) { $reg .= $conv; }
  605. elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; }
  606. elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; }
  607. elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; }
  608. return $reg;
  609. }
  610. $code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
  611. $code =~ s/\`([^\`]*)\`/eval $1/gem;
  612. print $code;
  613. close STDOUT;