#! /usr/bin/env perl
# Copyright 2011-2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# September 2011
#
# Assembler helpers for Padlock engine. See e_padlock-x86.pl for
# details.
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../crypto/perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;

$code=".text\n";

%PADLOCK_PREFETCH=(ecb=>128, cbc=>64, ctr32=>32);	# prefetch errata
$PADLOCK_CHUNK=512;	# Must be a power of 2 between 32 and 2^20
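
# A note on chunking: for misaligned buffers the generated code bounces
# data through an on-stack buffer of at most $PADLOCK_CHUNK bytes. The
# first pass covers len%PADLOCK_CHUNK bytes (or a full chunk when len is
# already a multiple), so every later pass is exactly $PADLOCK_CHUNK bytes;
# e.g. len=1312 splits as 288+512+512. The %PADLOCK_PREFETCH values bound
# how far the affected xcrypt modes may prefetch past their input, and the
# tail logic below keeps such prefetches from crossing into an unmapped
# page.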
$ctx="%rdx";
$out="%rdi";
$inp="%rsi";
$len="%rcx";
$chunk="%rbx";

($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") :	# Win64 order
                                 ("%rdi","%rsi","%rdx","%rcx");	# Unix order
$code.=<<___;
.globl	padlock_capability
.type	padlock_capability,\@abi-omnipotent
.align	16
padlock_capability:
	mov	%rbx,%r8
	xor	%eax,%eax
	cpuid
	xor	%eax,%eax
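	# The immediates below are the CPUID vendor strings packed at build
	# time, byte-reversed because the registers hold them little-endian:
	# unpack("H*",'tneC') gives 0x746e6543, i.e. %ebx for "CentaurHauls",
	# and the .Lzhaoxin triple matches the "  Shanghai  " vendor string.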
	cmp	\$`"0x".unpack("H*",'tneC')`,%ebx
	jne	.Lzhaoxin
	cmp	\$`"0x".unpack("H*",'Hrua')`,%edx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'slua')`,%ecx
	jne	.Lnoluck
	jmp	.LzhaoxinEnd
.Lzhaoxin:
	cmp	\$`"0x".unpack("H*",'hS  ')`,%ebx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'hgna')`,%edx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'  ia')`,%ecx
	jne	.Lnoluck
.LzhaoxinEnd:
	mov	\$0xC0000000,%eax
	cpuid
	mov	%eax,%edx
	xor	%eax,%eax
	cmp	\$0xC0000001,%edx
	jb	.Lnoluck
	mov	\$0xC0000001,%eax
	cpuid
	mov	%edx,%eax
	and	\$0xffffffef,%eax
	or	\$0x10,%eax		# set Nano bit#4
.Lnoluck:
	mov	%r8,%rbx
	ret
.size	padlock_capability,.-padlock_capability

.globl	padlock_key_bswap
.type	padlock_key_bswap,\@abi-omnipotent,0
.align	16
padlock_key_bswap:
	mov	240($arg1),%edx
	inc	%edx
	shl	\$2,%edx
.Lbswap_loop:
	mov	($arg1),%eax
	bswap	%eax
	mov	%eax,($arg1)
	lea	4($arg1),$arg1
	sub	\$1,%edx
	jnz	.Lbswap_loop
	ret
.size	padlock_key_bswap,.-padlock_key_bswap
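
	# Roughly equivalent C, assuming the OpenSSL AES_KEY layout (the
	# round count lives at byte offset 240, after the rd_key words;
	# bswap32 stands in for any 32-bit byte-swap helper):
	#
	#	void padlock_key_bswap(AES_KEY *key)
	#	{
	#		unsigned int i, n = 4*(key->rounds + 1);
	#		for (i = 0; i < n; i++)
	#			key->rd_key[i] = bswap32(key->rd_key[i]);
	#	}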
.globl	padlock_verify_context
.type	padlock_verify_context,\@abi-omnipotent
.align	16
padlock_verify_context:
	mov	$arg1,$ctx
	pushf
	lea	.Lpadlock_saved_context(%rip),%rax
	call	_padlock_verify_ctx
	lea	8(%rsp),%rsp
	ret
.size	padlock_verify_context,.-padlock_verify_context

.type	_padlock_verify_ctx,\@abi-omnipotent
.align	16
_padlock_verify_ctx:
	mov	8(%rsp),%r8
	bt	\$30,%r8
	jnc	.Lverified
	cmp	(%rax),$ctx
	je	.Lverified
	pushf
	popf
.Lverified:
	mov	$ctx,(%rax)
	ret
.size	_padlock_verify_ctx,.-_padlock_verify_ctx

.globl	padlock_reload_key
.type	padlock_reload_key,\@abi-omnipotent
.align	16
padlock_reload_key:
	pushf
	popf
	ret
.size	padlock_reload_key,.-padlock_reload_key

.globl	padlock_aes_block
.type	padlock_aes_block,\@function,3
.align	16
padlock_aes_block:
	mov	%rbx,%r8
	mov	\$1,$len
	lea	32($ctx),%rbx		# key
	lea	16($ctx),$ctx		# control word
	.byte	0xf3,0x0f,0xa7,0xc8	# rep xcryptecb
	mov	%r8,%rbx
	ret
.size	padlock_aes_block,.-padlock_aes_block
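
	# xcrypt uses a fixed register convention: %rsi/%rdi are source and
	# destination, %rcx the block count, %rbx the key schedule, %rdx the
	# control word and, for chained modes, %rax the IV pointer. The
	# routine above runs a single 16-byte ECB operation under that
	# convention.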
.globl	padlock_xstore
.type	padlock_xstore,\@function,2
.align	16
padlock_xstore:
	mov	%esi,%edx
	.byte	0x0f,0xa7,0xc0		# xstore
	ret
.size	padlock_xstore,.-padlock_xstore

.globl	padlock_sha1_oneshot
.type	padlock_sha1_oneshot,\@function,3
.align	16
padlock_sha1_oneshot:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	mov	16(%rdi),%eax
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	mov	%eax,16(%rsp)
	xor	%rax,%rax
	.byte	0xf3,0x0f,0xa6,0xc8	# rep xsha1
	movaps	(%rsp),%xmm0
	mov	16(%rsp),%eax
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	mov	%eax,16(%rdx)
	ret
.size	padlock_sha1_oneshot,.-padlock_sha1_oneshot
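
	# %rax=0 ahead of rep xsha* apparently tells the hash engine to pad
	# and finalize the message itself; the _blocks variants below instead
	# pass %rax=-1 so that only whole blocks are hashed and the caller
	# does its own padding.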
.globl	padlock_sha1_blocks
.type	padlock_sha1_blocks,\@function,3
.align	16
padlock_sha1_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	mov	16(%rdi),%eax
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	mov	%eax,16(%rsp)
	mov	\$-1,%rax
	.byte	0xf3,0x0f,0xa6,0xc8	# rep xsha1
	movaps	(%rsp),%xmm0
	mov	16(%rsp),%eax
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	mov	%eax,16(%rdx)
	ret
.size	padlock_sha1_blocks,.-padlock_sha1_blocks

.globl	padlock_sha256_oneshot
.type	padlock_sha256_oneshot,\@function,3
.align	16
padlock_sha256_oneshot:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	xor	%rax,%rax
	.byte	0xf3,0x0f,0xa6,0xd0	# rep xsha256
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	ret
.size	padlock_sha256_oneshot,.-padlock_sha256_oneshot

.globl	padlock_sha256_blocks
.type	padlock_sha256_blocks,\@function,3
.align	16
padlock_sha256_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	mov	\$-1,%rax
	.byte	0xf3,0x0f,0xa6,0xd0	# rep xsha256
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	ret
.size	padlock_sha256_blocks,.-padlock_sha256_blocks

.globl	padlock_sha512_blocks
.type	padlock_sha512_blocks,\@function,3
.align	16
padlock_sha512_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movups	32(%rdi),%xmm2
	movups	48(%rdi),%xmm3
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	movaps	%xmm2,32(%rsp)
	movaps	%xmm3,48(%rsp)
	.byte	0xf3,0x0f,0xa6,0xe0	# rep xsha512
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	movaps	32(%rsp),%xmm2
	movaps	48(%rsp),%xmm3
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	movups	%xmm2,32(%rdx)
	movups	%xmm3,48(%rdx)
	ret
.size	padlock_sha512_blocks,.-padlock_sha512_blocks
___

sub generate_mode {
my ($mode,$opcode) = @_;
# int padlock_$mode_encrypt(void *out, const void *inp,
#		struct padlock_cipher_data *ctx, size_t len);
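#
# Returns 1 on success, or 0 when ctx is not 16-byte aligned or len is not
# a multiple of the AES block size (the .L${mode}_abort path below).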
$code.=<<___;
.globl	padlock_${mode}_encrypt
.type	padlock_${mode}_encrypt,\@function,4
.align	16
padlock_${mode}_encrypt:
	push	%rbp
	push	%rbx

	xor	%eax,%eax
	test	\$15,$ctx
	jnz	.L${mode}_abort
	test	\$15,$len
	jnz	.L${mode}_abort
	lea	.Lpadlock_saved_context(%rip),%rax
	pushf
	cld
	call	_padlock_verify_ctx
	lea	16($ctx),$ctx		# control word
	xor	%eax,%eax
	xor	%ebx,%ebx
	testl	\$`1<<5`,($ctx)		# align bit in control word
	jnz	.L${mode}_aligned
	test	\$0x0f,$out
	setz	%al			# !out_misaligned
	test	\$0x0f,$inp
	setz	%bl			# !inp_misaligned
	test	%ebx,%eax
	jnz	.L${mode}_aligned
	neg	%rax
	mov	\$$PADLOCK_CHUNK,$chunk
	not	%rax			# out_misaligned?-1:0
	lea	(%rsp),%rbp
	cmp	$chunk,$len
	cmovc	$len,$chunk		# chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len
	and	$chunk,%rax		# out_misaligned?chunk:0
	mov	$len,$chunk
	neg	%rax
	and	\$$PADLOCK_CHUNK-1,$chunk	# chunk%=PADLOCK_CHUNK
	lea	(%rax,%rbp),%rsp
	mov	\$$PADLOCK_CHUNK,%rax
	cmovz	%rax,$chunk		# chunk=chunk?:PADLOCK_CHUNK
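	#
	# At this point %rbp keeps the original %rsp, and when the output is
	# misaligned %rsp has been dropped by up to one chunk to serve as an
	# aligned bounce buffer; $chunk holds the size of the first, possibly
	# short, pass (len%PADLOCK_CHUNK, or a full chunk when that is zero).
	#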
___
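# ctr32 only: the counter handling trusts the engine to carry only within
# the low 16 bits, so $chunk is capped to keep one rep xcryptctr from
# stepping the big-endian counter across a chunk boundary; the carry into
# the upper bits is applied by hand after each pass (.Lctr32_no_carry).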
$code.=<<___ if ($mode eq "ctr32");
.L${mode}_reenter:
	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	neg	%eax
	and	\$`$PADLOCK_CHUNK/16-1`,%eax
	mov	\$$PADLOCK_CHUNK,$chunk
	shl	\$4,%eax
	cmovz	$chunk,%rax
	cmp	%rax,$len
	cmova	%rax,$chunk		# don't let counter cross PADLOCK_CHUNK
	cmovbe	$len,$chunk
___
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	cmp	$chunk,$len
	ja	.L${mode}_loop
	mov	$inp,%rax		# check if prefetch crosses page
	cmp	%rsp,%rbp
	cmove	$out,%rax
	add	$len,%rax
	neg	%rax
	and	\$0xfff,%rax		# distance to page boundary
	cmp	\$$PADLOCK_PREFETCH{$mode},%rax
	mov	\$-$PADLOCK_PREFETCH{$mode},%rax
	cmovae	$chunk,%rax		# mask=distance<prefetch?-prefetch:-1
	and	%rax,$chunk
	jz	.L${mode}_unaligned_tail
___
$code.=<<___;
	jmp	.L${mode}_loop
.align	16
.L${mode}_loop:
	cmp	$len,$chunk		# ctr32 artefact
	cmova	$len,$chunk		# ctr32 artefact
	mov	$out,%r8		# save parameters
	mov	$inp,%r9
	mov	$len,%r10
	mov	$chunk,$len
	mov	$chunk,%r11
	test	\$0x0f,$out		# out_misaligned
	cmovnz	%rsp,$out
	test	\$0x0f,$inp		# inp_misaligned
	jz	.L${mode}_inp_aligned
	shr	\$3,$len
	.byte	0xf3,0x48,0xa5		# rep movsq
	sub	$chunk,$out
	mov	$chunk,$len
	mov	$out,$inp
.L${mode}_inp_aligned:
	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*
___
$code.=<<___ if ($mode !~ /ecb|ctr/);
	movdqa	(%rax),%xmm0
	movdqa	%xmm0,-16($ctx)		# copy [or refresh] iv
___
$code.=<<___ if ($mode eq "ctr32");
	mov	-4($ctx),%eax		# pull 32-bit counter
	test	\$0xffff0000,%eax
	jnz	.L${mode}_no_carry
	bswap	%eax
	add	\$0x10000,%eax
	bswap	%eax
	mov	%eax,-4($ctx)
.L${mode}_no_carry:
___
$code.=<<___;
	mov	%r8,$out		# restore parameters
	mov	%r11,$chunk
	test	\$0x0f,$out
	jz	.L${mode}_out_aligned
	mov	$chunk,$len
	lea	(%rsp),$inp
	shr	\$3,$len
	.byte	0xf3,0x48,0xa5		# rep movsq
	sub	$chunk,$out
.L${mode}_out_aligned:
	mov	%r9,$inp
	mov	%r10,$len
	add	$chunk,$out
	add	$chunk,$inp
	sub	$chunk,$len
	mov	\$$PADLOCK_CHUNK,$chunk
___
if (!$PADLOCK_PREFETCH{$mode}) {
$code.=<<___;
	jnz	.L${mode}_loop
___
} else {
$code.=<<___;
	jz	.L${mode}_break
	cmp	$chunk,$len
	jae	.L${mode}_loop
___
$code.=<<___ if ($mode eq "ctr32");
	mov	$len,$chunk
	mov	$inp,%rax		# check if prefetch crosses page
	cmp	%rsp,%rbp
	cmove	$out,%rax
	add	$len,%rax
	neg	%rax
	and	\$0xfff,%rax		# distance to page boundary
	cmp	\$$PADLOCK_PREFETCH{$mode},%rax
	mov	\$-$PADLOCK_PREFETCH{$mode},%rax
	cmovae	$chunk,%rax
	and	%rax,$chunk
	jnz	.L${mode}_loop
___
$code.=<<___;
.L${mode}_unaligned_tail:
	xor	%eax,%eax
	cmp	%rsp,%rbp
	cmove	$len,%rax
	mov	$out,%r8		# save parameters
	mov	$len,$chunk
	sub	%rax,%rsp		# alloca
	shr	\$3,$len
	lea	(%rsp),$out
	.byte	0xf3,0x48,0xa5		# rep movsq
	mov	%rsp,$inp
	mov	%r8, $out		# restore parameters
	mov	$chunk,$len
	jmp	.L${mode}_loop
.align	16
.L${mode}_break:
___
}
$code.=<<___;
	cmp	%rbp,%rsp
	je	.L${mode}_done
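	# wipe the on-stack bounce buffer (everything between %rsp and the
	# saved %rbp) before handing the stack back, so no intermediate data
	# is left behind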
	pxor	%xmm0,%xmm0
	lea	(%rsp),%rax
.L${mode}_bzero:
	movaps	%xmm0,(%rax)
	lea	16(%rax),%rax
	cmp	%rax,%rbp
	ja	.L${mode}_bzero

.L${mode}_done:
	lea	(%rbp),%rsp
	jmp	.L${mode}_exit

.align	16
.L${mode}_aligned:
___
$code.=<<___ if ($mode eq "ctr32");
	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	neg	%eax
	and	\$0xffff,%eax
	mov	\$`16*0x10000`,$chunk
	shl	\$4,%eax
	cmovz	$chunk,%rax
	cmp	%rax,$len
	cmova	%rax,$chunk		# don't let counter cross 2^16
	cmovbe	$len,$chunk
	jbe	.L${mode}_aligned_skip

.L${mode}_aligned_loop:
	mov	$len,%r10		# save parameters
	mov	$chunk,$len
	mov	$chunk,%r11

	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len		# len/=AES_BLOCK_SIZE
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*

	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	add	\$0x10000,%eax
	bswap	%eax
	mov	%eax,-4($ctx)

	mov	%r10,$len		# restore parameters
	sub	%r11,$len
	mov	\$`16*0x10000`,$chunk
	jz	.L${mode}_exit
	cmp	$chunk,$len
	jae	.L${mode}_aligned_loop

.L${mode}_aligned_skip:
___
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	lea	($inp,$len),%rbp
	neg	%rbp
	and	\$0xfff,%rbp		# distance to page boundary
	xor	%eax,%eax
	cmp	\$$PADLOCK_PREFETCH{$mode},%rbp
	mov	\$$PADLOCK_PREFETCH{$mode}-1,%rbp
	cmovae	%rax,%rbp
	and	$len,%rbp		# remainder
	sub	%rbp,$len
	jz	.L${mode}_aligned_tail
___
$code.=<<___;
	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len		# len/=AES_BLOCK_SIZE
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*
___
$code.=<<___ if ($mode !~ /ecb|ctr/);
	movdqa	(%rax),%xmm0
	movdqa	%xmm0,-16($ctx)		# copy [or refresh] iv
___
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	test	%rbp,%rbp		# check remainder
	jz	.L${mode}_exit

.L${mode}_aligned_tail:
	mov	$out,%r8
	mov	%rbp,$chunk
	mov	%rbp,$len
	lea	(%rsp),%rbp
	sub	$len,%rsp
	shr	\$3,$len
	lea	(%rsp),$out
	.byte	0xf3,0x48,0xa5		# rep movsq
	lea	(%r8),$out
	lea	(%rsp),$inp
	mov	$chunk,$len
	jmp	.L${mode}_loop
___
$code.=<<___;
.L${mode}_exit:
	mov	\$1,%eax
	lea	8(%rsp),%rsp
.L${mode}_abort:
	pop	%rbx
	pop	%rbp
	ret
.size	padlock_${mode}_encrypt,.-padlock_${mode}_encrypt
___
}
&generate_mode("ecb",0xc8);
&generate_mode("cbc",0xd0);
&generate_mode("cfb",0xe0);
&generate_mode("ofb",0xe8);
&generate_mode("ctr32",0xd8);	# all 64-bit CPUs have working CTR...
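
# The second argument is the ModR/M byte selecting the xcrypt variant, as
# also seen in the hand-coded ECB case above (0xf3,0x0f,0xa7,0xc8 =
# rep xcryptecb): 0xc8 ECB, 0xd0 CBC, 0xd8 CTR, 0xe0 CFB, 0xe8 OFB.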
$code.=<<___;
.asciz	"VIA Padlock x86_64 module, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
.data
.align	8
.Lpadlock_saved_context:
	.quad	0
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;