#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# July 2004
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...
#
# November 2004
#
# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
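# For instance, a hypothetical 2.2GHz Opteron would be estimated at
# 54*3.3*2.2 ~= 392MBps.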
#
# November 2004
#
# Intel P4 EM64T core was found to run the AMD64 code really slow...
# The only way to achieve comparable performance on P4 was to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code, which would perform even within a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...
#
# April 2005
#
# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
# those with add/sub results in a 50% performance improvement of the
# folded loop...
#
# May 2005
#
# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this is
# provided that loads are reordered even more aggressively! Both code
# paths, AMD64 and EM64T, reorder loads in essentially the same manner
# as my IA-64 implementation. On Opteron this resulted in a modest 5%
# improvement [I had to test it], while final Intel P4 performance
# now achieves a respectable 432MBps on a 2.8GHz processor. For
# reference, if executed on Xeon, the current RC4_CHAR code path is
# 2.7x faster than the RC4_INT code path, while if executed on Opteron
# it's only 25% slower than the RC4_INT one [meaning that if CPU
# µ-arch detection is not implemented, then this final RC4_CHAR code
# path should be preferred, as it provides better *all-round*
# performance].
#
# March 2007
#
# Intel Core2 was observed to perform poorly on both code paths:-( It
# apparently suffers from some kind of partial register stall, which
# occurs in 64-bit mode only [as a virtually identical 32-bit loop was
# observed to outperform the 64-bit one by almost 50%]. Adding two
# movzb to cloop1 boosts its performance by 80%! This loop appears to
# be an optimal fit for Core2, and the code was therefore modified to
# skip cloop8 on this CPU.
#
# May 2010
#
# Intel Westmere was observed to perform suboptimally. Adding yet
# another movzb to cloop1 improved performance by almost 50%! Core2
# performance is improved too, but only nominally...
#
# May 2011
#
# The only code path that was not modified is the P4-specific one.
# The non-P4 Intel code path optimization is heavily based on a
# submission by Maxim Perminov, Maxim Locktyukhin and Jim Guilford of
# Intel. I've used some of the ideas even in an attempt to optimize
# the original RC4_INT code path... Current performance in cycles per
# processed byte (less is better) and improvement coefficients
# relative to the previous version of this module are:
#
#	Opteron		5.3/+0%
#	P4		6.5
#	Core2		6.2/+15%(*)
#	Westmere	4.2/+60%
#	Sandy Bridge	4.2/+120%
#	Atom		9.3/+80%
#
# (*)	Note that the Core2 result is ~15% lower than the corresponding
#	result for 32-bit code, meaning that it's possible to improve
#	it, but more than likely at the cost of the others (see
#	rc4-586.pl to get the idea)...

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output";
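
# Everything printed to STDOUT from here on is piped through the
# perlasm translator selected above, which converts this "perlized"
# assembly into the assembler syntax requested by $flavour.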

$dat="%rdi";	# arg1
$len="%rsi";	# arg2
$inp="%rdx";	# arg3
$out="%rcx";	# arg4

{
$code=<<___;
.text
.extern	OPENSSL_ia32cap_P

.globl	RC4
.type	RC4,\@function,4
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	ret
.Lentry:
	push	%rbx
	push	%r12
	push	%r13
.Lprologue:
	mov	$len,%r11
	mov	$inp,%r12
	mov	$out,%r13
___
my $len="%r11";		# reassign input arguments
my $inp="%r12";
my $out="%r13";
my @XX=("%r10","%rsi");
my @TX=("%rax","%rbx");
my $YY="%rcx";
my $TY="%rdx";
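
# For reference, the loops below implement the textbook RC4 PRGA; one
# iteration, as a C-like sketch (S is the 256-entry state array, x and
# y the two indices, all index arithmetic modulo 256):
#
#	x  = x + 1;
#	tx = S[x];
#	y  = y + tx;
#	ty = S[y];
#	S[y] = tx; S[x] = ty;
#	*out++ = *inp++ ^ S[(tx + ty) & 0xff];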
$code.=<<___;
	xor	$XX[0],$XX[0]
	xor	$YY,$YY
	lea	8($dat),$dat
	mov	-8($dat),$XX[0]#b
	mov	-4($dat),$YY#b
	cmpl	\$-1,256($dat)
	je	.LRC4_CHAR
	mov	OPENSSL_ia32cap_P(%rip),%r8d
	xor	$TX[1],$TX[1]
	inc	$XX[0]#b
	sub	$XX[0],$TX[1]
	sub	$inp,$out
	movl	($dat,$XX[0],4),$TX[0]#d
	test	\$-16,$len
	jz	.Lloop1
	bt	\$30,%r8d	# Intel CPU?
	jc	.Lintel
	and	\$7,$TX[1]
	lea	1($XX[0]),$XX[1]
	jz	.Loop8
	sub	$TX[1],$len
.Loop8_warmup:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$TX[1]
	jnz	.Loop8_warmup

	lea	1($XX[0]),$XX[1]
	jmp	.Loop8
.align	16
.Loop8:
___
for ($i=0;$i<8;$i++) {
$code.=<<___ if ($i==7);
	add	\$8,$XX[1]#b
___
$code.=<<___;
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	`4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d
	ror	\$8,%r8		# ror is redundant when $i=0
	movl	$TY#d,4*$i($dat,$XX[0],4)
	add	$TX[0]#b,$TY#b
	movb	($dat,$TY,4),%r8b
___
push(@TX,shift(@TX)); #push(@XX,shift(@XX));	# "rotate" registers
}
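
# Each of the eight iterations above deposits one keystream byte into
# %r8b and rotates %r8 by 8 bits, so by the time the loop body below
# runs, %r8 holds eight keystream bytes that can be XORed against
# eight input bytes at once.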
$code.=<<___;
	add	\$8,$XX[0]#b
	ror	\$8,%r8
	sub	\$8,$len

	xor	($inp),%r8
	mov	%r8,($out,$inp)
	lea	8($inp),$inp

	test	\$-8,$len
	jnz	.Loop8
	cmp	\$0,$len
	jne	.Lloop1
	jmp	.Lexit

.align	16
.Lintel:
	test	\$-32,$len
	jz	.Lloop1
	and	\$15,$TX[1]
	jz	.Loop16_is_hot
	sub	$TX[1],$len
.Loop16_warmup:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$TX[1]
	jnz	.Loop16_warmup

	mov	$YY,$TX[1]
	xor	$YY,$YY
	mov	$TX[1]#b,$YY#b

.Loop16_is_hot:
	lea	($dat,$XX[0],4),$XX[1]
___
sub RC4_loop {
  my $i=shift;
  my $j=$i<0?0:$i;
  my $xmm="%xmm".($j&1);
   $code.="	add	\$16,$XX[0]#b\n"		if ($i==15);
   $code.="	movdqu	($inp),%xmm2\n"			if ($i==15);
   $code.="	add	$TX[0]#b,$YY#b\n"		if ($i<=0);
   $code.="	movl	($dat,$YY,4),$TY#d\n";
   $code.="	pxor	%xmm0,%xmm2\n"			if ($i==0);
   $code.="	psllq	\$8,%xmm1\n"			if ($i==0);
   $code.="	pxor	$xmm,$xmm\n"			if ($i<=1);
   $code.="	movl	$TX[0]#d,($dat,$YY,4)\n";
   $code.="	add	$TY#b,$TX[0]#b\n";
   $code.="	movl	`4*($j+1)`($XX[1]),$TX[1]#d\n"	if ($i<15);
   $code.="	movz	$TX[0]#b,$TX[0]#d\n";
   $code.="	movl	$TY#d,4*$j($XX[1])\n";
   $code.="	pxor	%xmm1,%xmm2\n"			if ($i==0);
   $code.="	lea	($dat,$XX[0],4),$XX[1]\n"	if ($i==15);
   $code.="	add	$TX[1]#b,$YY#b\n"		if ($i<15);
   $code.="	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n";
   $code.="	movdqu	%xmm2,($out,$inp)\n"		if ($i==0);
   $code.="	lea	16($inp),$inp\n"		if ($i==0);
   $code.="	movl	($XX[1]),$TX[1]#d\n"		if ($i==15);
}
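
# In the 16x interleave generated below, pinsrw gathers each
# iteration's keystream byte (the zero-extended word at S[tx+ty]) into
# a word lane of %xmm0 or %xmm1, even iterations into one register and
# odd into the other; psllq/pxor then merge them with the 16 input
# bytes loaded by movdqu into %xmm2, and a single movdqu stores the 16
# output bytes.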
RC4_loop(-1);
$code.=<<___;
	jmp	.Loop16_enter
.align	16
.Loop16:
___

for ($i=0;$i<16;$i++) {
    $code.=".Loop16_enter:\n"		if ($i==1);
	RC4_loop($i);
	push(@TX,shift(@TX)); 		# "rotate" registers
}
$code.=<<___;
	mov	$YY,$TX[1]
	xor	$YY,$YY			# clear upper bits of $YY, dodging partial register stall
	sub	\$16,$len
	mov	$TX[1]#b,$YY#b
	test	\$-16,$len
	jnz	.Loop16

	psllq	\$8,%xmm1
	pxor	%xmm0,%xmm2
	pxor	%xmm1,%xmm2
	movdqu	%xmm2,($out,$inp)
	lea	16($inp),$inp

	cmp	\$0,$len
	jne	.Lloop1
	jmp	.Lexit

.align	16
.Lloop1:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	movb	$TY#b,($out,$inp)
	lea	1($inp),$inp
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit

.align	16
.LRC4_CHAR:
	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	jmp	.Lcloop8
.align	16
.Lcloop8:
	mov	($inp),%r8d
	mov	4($inp),%r9d
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
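# A branch is used instead of cmov below: when $YY happens to equal
# $XX[1], the store of $TX[0] to ($dat,$YY) has just clobbered the
# value pre-loaded into $TX[1], so $TX[1] must be refreshed from
# $TX[0]. cmov would be the natural tool for this, but it is slow on
# Intel P4.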
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%r8b
	ror	\$8,%r8d
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i		# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%r9b
	ror	\$8,%r9d
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	lea	-8($len),$len
	mov	%r8d,($out)
	lea	8($inp),$inp
	mov	%r9d,4($out)
	lea	8($out),$out

	test	\$-8,$len
	jnz	.Lcloop8
	cmp	\$0,$len
	jne	.Lcloop1
	jmp	.Lexit
___
$code.=<<___;
.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	$YY#b,$YY#d
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	$TY#b,$TY#d
	movzb	$XX[0]#b,$XX[0]#d
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit

.align	16
.Lexit:
	sub	\$1,$XX[0]#b
	movl	$XX[0]#d,-8($dat)
	movl	$YY#d,-4($dat)

	mov	(%rsp),%r13
	mov	8(%rsp),%r12
	mov	16(%rsp),%rbx
	add	\$24,%rsp
.Lepilogue:
	ret
.size	RC4,.-RC4
___
}

$idx="%r8";
$ido="%r9";
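
# RC4_set_key implements the textbook RC4 KSA; a C-like sketch of what
# the loops below compute (the w* labels cover the RC4_INT layout, the
# c* labels the RC4_CHAR layout):
#
#	for (i = 0; i < 256; i++) S[i] = i;
#	for (i = j = 0; i < 256; i++) {
#		j = (j + S[i] + key[i % keylen]) & 0xff;
#		swap(S[i], S[j]);
#	}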
$code.=<<___;
.globl	RC4_set_key
.type	RC4_set_key,\@function,3
.align	16
RC4_set_key:
	lea	8($dat),$dat
	lea	($inp,$len),$inp
	neg	$len
	mov	$len,%rcx
	xor	%eax,%eax
	xor	$ido,$ido
	xor	%r10,%r10
	xor	%r11,%r11

	mov	OPENSSL_ia32cap_P(%rip),$idx#d
	bt	\$20,$idx#d	# RC4_CHAR?
	jc	.Lc1stloop
	jmp	.Lw1stloop

.align	16
.Lw1stloop:
	mov	%eax,($dat,%rax,4)
	add	\$1,%al
	jnc	.Lw1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lw2ndloop:
	mov	($dat,$ido,4),%r10d
	add	($inp,$len,1),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx,4),%r11d
	cmovz	%rcx,$len
	mov	%r10d,($dat,$idx,4)
	mov	%r11d,($dat,$ido,4)
	add	\$1,$ido#b
	jnc	.Lw2ndloop
	jmp	.Lexit_key

.align	16
.Lc1stloop:
	mov	%al,($dat,%rax)
	add	\$1,%al
	jnc	.Lc1stloop

	xor	$ido,$ido
	xor	$idx,$idx
.align	16
.Lc2ndloop:
	mov	($dat,$ido),%r10b
	add	($inp,$len),$idx#b
	add	%r10b,$idx#b
	add	\$1,$len
	mov	($dat,$idx),%r11b
	jnz	.Lcnowrap
	mov	%rcx,$len
.Lcnowrap:
	mov	%r10b,($dat,$idx)
	mov	%r11b,($dat,$ido)
	add	\$1,$ido#b
	jnc	.Lc2ndloop
	movl	\$-1,256($dat)

.align	16
.Lexit_key:
	xor	%eax,%eax
	mov	%eax,-8($dat)
	mov	%eax,-4($dat)
	ret
.size	RC4_set_key,.-RC4_set_key

.globl	RC4_options
.type	RC4_options,\@abi-omnipotent
.align	16
RC4_options:
	lea	.Lopts(%rip),%rax
	mov	OPENSSL_ia32cap_P(%rip),%edx
	bt	\$20,%edx
	jc	.L8xchar
	bt	\$30,%edx
	jnc	.Ldone
	add	\$25,%rax
	ret
.L8xchar:
	add	\$12,%rax
.Ldone:
	ret
.align	64
.Lopts:
.asciz	"rc4(8x,int)"
.asciz	"rc4(8x,char)"
.asciz	"rc4(16x,int)"
.asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
.size	RC4_options,.-RC4_options
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	stream_se_handler,\@abi-omnipotent
.align	16
stream_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	24(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%r12
	mov	-24(%rax),%r13
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	jmp	.Lcommon_seh_exit
.size	stream_se_handler,.-stream_se_handler

.type	key_se_handler,\@abi-omnipotent
.align	16
key_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	152($context),%rax	# pull context->Rsp
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

.Lcommon_seh_exit:
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	key_se_handler,.-key_se_handler

.section	.pdata
.align	4
.rva	.LSEH_begin_RC4
.rva	.LSEH_end_RC4
.rva	.LSEH_info_RC4

.rva	.LSEH_begin_RC4_set_key
.rva	.LSEH_end_RC4_set_key
.rva	.LSEH_info_RC4_set_key

.section	.xdata
.align	8
.LSEH_info_RC4:
	.byte	9,0,0,0
	.rva	stream_se_handler
.LSEH_info_RC4_set_key:
	.byte	9,0,0,0
	.rva	key_se_handler
___
}

sub reg_part {
my ($reg,$conv)=@_;
    if ($reg =~ /%r[0-9]+/)	{ $reg .= $conv; }
    elsif ($conv eq "b")	{ $reg =~ s/%[er]([^x]+)x?/%$1l/;	}
    elsif ($conv eq "w")	{ $reg =~ s/%[er](.+)/%$1/;		}
    elsif ($conv eq "d")	{ $reg =~ s/%[er](.+)/%e$1/;		}
    return $reg;
}
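
# reg_part maps a 64-bit register name plus a #b/#w/#d suffix to the
# corresponding partial register, e.g. "%rax"/"b" -> "%al",
# "%rsi"/"d" -> "%esi", "%r8"/"b" -> "%r8b". The first substitution
# below applies it to every "reg#conv" occurrence in $code; the second
# evaluates embedded Perl expressions such as `4*($i==7?-1:$i)`.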
$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
$code =~ s/\`([^\`]*)\`/eval $1/gem;

print $code;

close STDOUT;