#! /usr/bin/env perl
# Copyright 2011-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# September 2011
#
# Assembler helpers for the Padlock engine. See e_padlock-x86.pl for
# further details.

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../crypto/perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;

$code=".text\n";

%PADLOCK_PREFETCH=(ecb=>128, cbc=>64, ctr32=>32);	# prefetch errata
$PADLOCK_CHUNK=512;	# Must be a power of 2 between 32 and 2^20
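# Input is processed in chunks of at most $PADLOCK_CHUNK bytes. The
# chunk size bounds the on-stack bounce buffer used for misaligned
# buffers, and %PADLOCK_PREFETCH records how far each mode's hardware
# prefetch can overshoot the end of a buffer (the errata worked around
# further below).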
$ctx="%rdx";
$out="%rdi";
$inp="%rsi";
$len="%rcx";
$chunk="%rbx";

($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") :	# Win64 order
                                 ("%rdi","%rsi","%rdx","%rcx");	# Unix order

$code.=<<___;
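# padlock_capability() probes for a PadLock SP: it first matches the
# CPUID vendor string against "CentaurHauls" (VIA) or "  Shanghai  "
# (Zhaoxin) - the immediates below are 4-byte fragments of those
# strings, byte-reversed into little-endian dword constants - and then
# reads the extended feature flags from CPUID leaf 0xC0000001.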
.globl	padlock_capability
.type	padlock_capability,\@abi-omnipotent
.align	16
padlock_capability:
	mov	%rbx,%r8
	xor	%eax,%eax
	cpuid
	xor	%eax,%eax
	cmp	\$`"0x".unpack("H*",'tneC')`,%ebx
	jne	.Lzhaoxin
	cmp	\$`"0x".unpack("H*",'Hrua')`,%edx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'slua')`,%ecx
	jne	.Lnoluck
	jmp	.LzhaoxinEnd
.Lzhaoxin:
	cmp	\$`"0x".unpack("H*",'hS  ')`,%ebx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'hgna')`,%edx
	jne	.Lnoluck
	cmp	\$`"0x".unpack("H*",'  ia')`,%ecx
	jne	.Lnoluck
.LzhaoxinEnd:
	mov	\$0xC0000000,%eax
	cpuid
	mov	%eax,%edx
	xor	%eax,%eax
	cmp	\$0xC0000001,%edx
	jb	.Lnoluck
	mov	\$0xC0000001,%eax
	cpuid
	mov	%edx,%eax
	and	\$0xffffffef,%eax
	or	\$0x10,%eax		# set Nano bit#4
.Lnoluck:
	mov	%r8,%rbx
	ret
.size	padlock_capability,.-padlock_capability
.globl	padlock_key_bswap
.type	padlock_key_bswap,\@abi-omnipotent,0
.align	16
padlock_key_bswap:
	mov	240($arg1),%edx
.Lbswap_loop:
	mov	($arg1),%eax
	bswap	%eax
	mov	%eax,($arg1)
	lea	4($arg1),$arg1
	sub	\$1,%edx
	jnz	.Lbswap_loop
	ret
.size	padlock_key_bswap,.-padlock_key_bswap
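
# The PadLock SP caches the last loaded key material and reloads it
# only when it observes a write to EFLAGS. _padlock_verify_ctx compares
# the supplied context against the one recorded in
# .Lpadlock_saved_context and, when they differ (or when the marker bit
# is set in the caller-saved EFLAGS image), forces a key reload with a
# dummy pushf/popf sequence; padlock_reload_key does so unconditionally.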
.globl	padlock_verify_context
.type	padlock_verify_context,\@abi-omnipotent
.align	16
padlock_verify_context:
	mov	$arg1,$ctx
	pushf
	lea	.Lpadlock_saved_context(%rip),%rax
	call	_padlock_verify_ctx
	lea	8(%rsp),%rsp
	ret
.size	padlock_verify_context,.-padlock_verify_context

.type	_padlock_verify_ctx,\@abi-omnipotent
.align	16
_padlock_verify_ctx:
	mov	8(%rsp),%r8
	bt	\$30,%r8
	jnc	.Lverified
	cmp	(%rax),$ctx
	je	.Lverified
	pushf
	popf
.Lverified:
	mov	$ctx,(%rax)
	ret
.size	_padlock_verify_ctx,.-_padlock_verify_ctx

.globl	padlock_reload_key
.type	padlock_reload_key,\@abi-omnipotent
.align	16
padlock_reload_key:
	pushf
	popf
	ret
.size	padlock_reload_key,.-padlock_reload_key
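
# padlock_aes_block(out, inp, ctx) encrypts a single 16-byte block via
# "rep xcryptecb", hand-encoded below because older assemblers do not
# know the PadLock mnemonics: the control word sits at ctx+16, the
# round keys at ctx+32, and the block count is 1.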
.globl	padlock_aes_block
.type	padlock_aes_block,\@function,3
.align	16
padlock_aes_block:
	mov	%rbx,%r8
	mov	\$1,$len
	lea	32($ctx),%rbx		# key
	lea	16($ctx),$ctx		# control word
	.byte	0xf3,0x0f,0xa7,0xc8	# rep xcryptecb
	mov	%r8,%rbx
	ret
.size	padlock_aes_block,.-padlock_aes_block
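
# padlock_xstore(dst, edx) exposes the PadLock RNG: xstore deposits
# random bytes at the destination, with the quality-factor/mode bits
# taken from the second argument, and returns the status word in %eax.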
.globl	padlock_xstore
.type	padlock_xstore,\@function,2
.align	16
padlock_xstore:
	mov	%esi,%edx
	.byte	0x0f,0xa7,0xc0		# xstore
	ret
.size	padlock_xstore,.-padlock_xstore
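
# The SHA helpers below copy the caller's digest into an aligned
# scratch area on the stack (movaps needs 16-byte alignment; the extra
# 8 bytes re-align the stack after the call), run the hash instruction
# over the input, then copy the digest back out. The value loaded into
# the accumulator register distinguishes the variants: 0 requests
# one-shot operation, where the hardware appends the final padding
# itself, while -1 processes complete 64-byte blocks without
# finalization, so that hashing can be continued.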
.globl	padlock_sha1_oneshot
.type	padlock_sha1_oneshot,\@function,3
.align	16
padlock_sha1_oneshot:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	mov	16(%rdi),%eax
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	mov	%eax,16(%rsp)
	xor	%rax,%rax
	.byte	0xf3,0x0f,0xa6,0xc8	# rep xsha1
	movaps	(%rsp),%xmm0
	mov	16(%rsp),%eax
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	mov	%eax,16(%rdx)
	ret
.size	padlock_sha1_oneshot,.-padlock_sha1_oneshot

.globl	padlock_sha1_blocks
.type	padlock_sha1_blocks,\@function,3
.align	16
padlock_sha1_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	mov	16(%rdi),%eax
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	mov	%eax,16(%rsp)
	mov	\$-1,%rax
	.byte	0xf3,0x0f,0xa6,0xc8	# rep xsha1
	movaps	(%rsp),%xmm0
	mov	16(%rsp),%eax
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	mov	%eax,16(%rdx)
	ret
.size	padlock_sha1_blocks,.-padlock_sha1_blocks

.globl	padlock_sha256_oneshot
.type	padlock_sha256_oneshot,\@function,3
.align	16
padlock_sha256_oneshot:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	xor	%rax,%rax
	.byte	0xf3,0x0f,0xa6,0xd0	# rep xsha256
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	ret
.size	padlock_sha256_oneshot,.-padlock_sha256_oneshot

.globl	padlock_sha256_blocks
.type	padlock_sha256_blocks,\@function,3
.align	16
padlock_sha256_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	mov	\$-1,%rax
	.byte	0xf3,0x0f,0xa6,0xd0	# rep xsha256
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	ret
.size	padlock_sha256_blocks,.-padlock_sha256_blocks
.globl	padlock_sha512_blocks
.type	padlock_sha512_blocks,\@function,3
.align	16
padlock_sha512_blocks:
	mov	%rdx,%rcx
	mov	%rdi,%rdx		# put aside %rdi
	movups	(%rdi),%xmm0		# copy-in context
	sub	\$128+8,%rsp
	movups	16(%rdi),%xmm1
	movups	32(%rdi),%xmm2
	movups	48(%rdi),%xmm3
	movaps	%xmm0,(%rsp)
	mov	%rsp,%rdi
	movaps	%xmm1,16(%rsp)
	movaps	%xmm2,32(%rsp)
	movaps	%xmm3,48(%rsp)
	.byte	0xf3,0x0f,0xa6,0xe0	# rep xsha512
	movaps	(%rsp),%xmm0
	movaps	16(%rsp),%xmm1
	movaps	32(%rsp),%xmm2
	movaps	48(%rsp),%xmm3
	add	\$128+8,%rsp
	movups	%xmm0,(%rdx)		# copy-out context
	movups	%xmm1,16(%rdx)
	movups	%xmm2,32(%rdx)
	movups	%xmm3,48(%rdx)
	ret
.size	padlock_sha512_blocks,.-padlock_sha512_blocks
___
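
# generate_mode() emits one padlock_${mode}_encrypt routine per cipher
# mode. Every routine follows the same plan: check that $ctx and $len
# are 16-byte aligned, then either run "rep xcrypt*" directly on the
# caller's buffers (aligned path) or bounce the data through an
# on-stack buffer of at most $PADLOCK_CHUNK bytes (misaligned path).
# ctr32 gets extra care so that the hardware's 32-bit big-endian
# counter never wraps within a single invocation.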
sub generate_mode {
my ($mode,$opcode) = @_;
# int padlock_$mode_encrypt(void *out, const void *inp,
#		struct padlock_cipher_data *ctx, size_t len);
$code.=<<___;
.globl	padlock_${mode}_encrypt
.type	padlock_${mode}_encrypt,\@function,4
.align	16
padlock_${mode}_encrypt:
	push	%rbp
	push	%rbx

	xor	%eax,%eax
	test	\$15,$ctx
	jnz	.L${mode}_abort
	test	\$15,$len
	jnz	.L${mode}_abort
	lea	.Lpadlock_saved_context(%rip),%rax
	pushf
	cld
	call	_padlock_verify_ctx
	lea	16($ctx),$ctx		# control word
	xor	%eax,%eax
	xor	%ebx,%ebx
	testl	\$`1<<5`,($ctx)		# align bit in control word
	jnz	.L${mode}_aligned
	test	\$0x0f,$out
	setz	%al			# !out_misaligned
	test	\$0x0f,$inp
	setz	%bl			# !inp_misaligned
	test	%ebx,%eax
	jnz	.L${mode}_aligned
	neg	%rax
	mov	\$$PADLOCK_CHUNK,$chunk
	not	%rax			# out_misaligned?-1:0
	lea	(%rsp),%rbp
	cmp	$chunk,$len
	cmovc	$len,$chunk		# chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len
	and	$chunk,%rax		# out_misaligned?chunk:0
	mov	$len,$chunk
	neg	%rax
	and	\$$PADLOCK_CHUNK-1,$chunk	# chunk%=PADLOCK_CHUNK
	lea	(%rax,%rbp),%rsp
	mov	\$$PADLOCK_CHUNK,%rax
	cmovz	%rax,$chunk		# chunk=chunk?:PADLOCK_CHUNK
___
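# ctr32 only: trim the first chunk so that the big-endian counter at
# -4($ctx) cannot cross a PADLOCK_CHUNK-block boundary inside a single
# "rep xcryptctr"; the carry into the counter's upper half is
# propagated by hand after each chunk (.L${mode}_no_carry below).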
$code.=<<___ if ($mode eq "ctr32");
.L${mode}_reenter:
	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	neg	%eax
	and	\$`$PADLOCK_CHUNK/16-1`,%eax
	mov	\$$PADLOCK_CHUNK,$chunk
	shl	\$4,%eax
	cmovz	$chunk,%rax
	cmp	%rax,$len
	cmova	%rax,$chunk		# don't let counter cross PADLOCK_CHUNK
	cmovbe	$len,$chunk
___
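# Prefetch-errata workaround: if processing the tail in place would let
# the hardware prefetch read past the end of the buffer and across a
# page boundary (possibly faulting on an unmapped page), route the tail
# through the on-stack buffer instead (.L${mode}_unaligned_tail).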
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	cmp	$chunk,$len
	ja	.L${mode}_loop
	mov	$inp,%rax		# check if prefetch crosses page
	cmp	%rsp,%rbp
	cmove	$out,%rax
	add	$len,%rax
	neg	%rax
	and	\$0xfff,%rax		# distance to page boundary
	cmp	\$$PADLOCK_PREFETCH{$mode},%rax
	mov	\$-$PADLOCK_PREFETCH{$mode},%rax
	cmovae	$chunk,%rax		# mask=distance<prefetch?-prefetch:-1
	and	%rax,$chunk
	jz	.L${mode}_unaligned_tail
___
$code.=<<___;
	jmp	.L${mode}_loop
.align	16
.L${mode}_loop:
	cmp	$len,$chunk		# ctr32 artefact
	cmova	$len,$chunk		# ctr32 artefact
	mov	$out,%r8		# save parameters
	mov	$inp,%r9
	mov	$len,%r10
	mov	$chunk,$len
	mov	$chunk,%r11
	test	\$0x0f,$out		# out_misaligned
	cmovnz	%rsp,$out
	test	\$0x0f,$inp		# inp_misaligned
	jz	.L${mode}_inp_aligned
	shr	\$3,$len
	.byte	0xf3,0x48,0xa5		# rep movsq
	sub	$chunk,$out
	mov	$chunk,$len
	mov	$out,$inp
.L${mode}_inp_aligned:
	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*
___
$code.=<<___ if ($mode !~ /ecb|ctr/);
	movdqa	(%rax),%xmm0
	movdqa	%xmm0,-16($ctx)		# copy [or refresh] iv
___
$code.=<<___ if ($mode eq "ctr32");
	mov	-4($ctx),%eax		# pull 32-bit counter
	test	\$0xffff0000,%eax
	jnz	.L${mode}_no_carry
	bswap	%eax
	add	\$0x10000,%eax
	bswap	%eax
	mov	%eax,-4($ctx)
.L${mode}_no_carry:
___
$code.=<<___;
	mov	%r8,$out		# restore parameters
	mov	%r11,$chunk
	test	\$0x0f,$out
	jz	.L${mode}_out_aligned
	mov	$chunk,$len
	lea	(%rsp),$inp
	shr	\$3,$len
	.byte	0xf3,0x48,0xa5		# rep movsq
	sub	$chunk,$out
.L${mode}_out_aligned:
	mov	%r9,$inp
	mov	%r10,$len
	add	$chunk,$out
	add	$chunk,$inp
	sub	$chunk,$len
	mov	\$$PADLOCK_CHUNK,$chunk
___
if (!$PADLOCK_PREFETCH{$mode}) {
$code.=<<___;
	jnz	.L${mode}_loop
___
} else {
$code.=<<___;
	jz	.L${mode}_break
	cmp	$chunk,$len
	jae	.L${mode}_loop
___
$code.=<<___ if ($mode eq "ctr32");
	mov	$len,$chunk
	mov	$inp,%rax		# check if prefetch crosses page
	cmp	%rsp,%rbp
	cmove	$out,%rax
	add	$len,%rax
	neg	%rax
	and	\$0xfff,%rax		# distance to page boundary
	cmp	\$$PADLOCK_PREFETCH{$mode},%rax
	mov	\$-$PADLOCK_PREFETCH{$mode},%rax
	cmovae	$chunk,%rax
	and	%rax,$chunk
	jnz	.L${mode}_loop
___
$code.=<<___;
.L${mode}_unaligned_tail:
	xor	%eax,%eax
	cmp	%rsp,%rbp
	cmove	$len,%rax
	mov	$out,%r8		# save parameters
	mov	$len,$chunk
	sub	%rax,%rsp		# alloca
	shr	\$3,$len
	lea	(%rsp),$out
	.byte	0xf3,0x48,0xa5		# rep movsq
	mov	%rsp,$inp
	mov	%r8,$out		# restore parameters
	mov	$chunk,$len
	jmp	.L${mode}_loop

.align	16
.L${mode}_break:
___
}
$code.=<<___;
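	# wipe the on-stack bounce buffer, which may hold plaintext copies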
	cmp	%rbp,%rsp
	je	.L${mode}_done

	pxor	%xmm0,%xmm0
	lea	(%rsp),%rax
.L${mode}_bzero:
	movaps	%xmm0,(%rax)
	lea	16(%rax),%rax
	cmp	%rax,%rbp
	ja	.L${mode}_bzero

.L${mode}_done:
	lea	(%rbp),%rsp
	jmp	.L${mode}_exit

.align	16
.L${mode}_aligned:
___
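# Aligned ctr32 path: process at most 2^16 blocks per "rep xcryptctr"
# so the counter's low 16 bits never wrap mid-instruction, propagating
# the carry into the upper half manually between iterations.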
$code.=<<___ if ($mode eq "ctr32");
	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	neg	%eax
	and	\$0xffff,%eax
	mov	\$`16*0x10000`,$chunk
	shl	\$4,%eax
	cmovz	$chunk,%rax
	cmp	%rax,$len
	cmova	%rax,$chunk		# don't let counter cross 2^16
	cmovbe	$len,$chunk
	jbe	.L${mode}_aligned_skip

.L${mode}_aligned_loop:
	mov	$len,%r10		# save parameters
	mov	$chunk,$len
	mov	$chunk,%r11

	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len		# len/=AES_BLOCK_SIZE
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*

	mov	-4($ctx),%eax		# pull 32-bit counter
	bswap	%eax
	add	\$0x10000,%eax
	bswap	%eax
	mov	%eax,-4($ctx)

	mov	%r10,$len		# restore parameters
	sub	%r11,$len
	mov	\$`16*0x10000`,$chunk
	jz	.L${mode}_exit
	cmp	$chunk,$len
	jae	.L${mode}_aligned_loop

.L${mode}_aligned_skip:
___
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	lea	($inp,$len),%rbp
	neg	%rbp
	and	\$0xfff,%rbp		# distance to page boundary
	xor	%eax,%eax
	cmp	\$$PADLOCK_PREFETCH{$mode},%rbp
	mov	\$$PADLOCK_PREFETCH{$mode}-1,%rbp
	cmovae	%rax,%rbp
	and	$len,%rbp		# remainder
	sub	%rbp,$len
	jz	.L${mode}_aligned_tail
___
$code.=<<___;
	lea	-16($ctx),%rax		# ivp
	lea	16($ctx),%rbx		# key
	shr	\$4,$len		# len/=AES_BLOCK_SIZE
	.byte	0xf3,0x0f,0xa7,$opcode	# rep xcrypt*
___
$code.=<<___ if ($mode !~ /ecb|ctr/);
	movdqa	(%rax),%xmm0
	movdqa	%xmm0,-16($ctx)		# copy [or refresh] iv
___
$code.=<<___ if ($PADLOCK_PREFETCH{$mode});
	test	%rbp,%rbp		# check remainder
	jz	.L${mode}_exit

.L${mode}_aligned_tail:
	mov	$out,%r8
	mov	%rbp,$chunk
	mov	%rbp,$len
	lea	(%rsp),%rbp
	sub	$len,%rsp		# alloca
	shr	\$3,$len
	lea	(%rsp),$out
	.byte	0xf3,0x48,0xa5		# rep movsq
	lea	(%r8),$out
	lea	(%rsp),$inp
	mov	$chunk,$len
	jmp	.L${mode}_loop
___
$code.=<<___;
.L${mode}_exit:
	mov	\$1,%eax
	lea	8(%rsp),%rsp
.L${mode}_abort:
	pop	%rbx
	pop	%rbp
	ret
.size	padlock_${mode}_encrypt,.-padlock_${mode}_encrypt
___
}
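
# The second argument is the ModR/M byte of the corresponding PadLock
# instruction (0xf3 0x0f 0xa7 /r): 0xc8 = xcryptecb, 0xd0 = xcryptcbc,
# 0xd8 = xcryptctr, 0xe0 = xcryptcfb, 0xe8 = xcryptofb.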
&generate_mode("ecb",0xc8);
&generate_mode("cbc",0xd0);
&generate_mode("cfb",0xe0);
&generate_mode("ofb",0xe8);
&generate_mode("ctr32",0xd8);	# all 64-bit CPUs have working CTR...

$code.=<<___;
.asciz	"VIA Padlock x86_64 module, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
.data
.align	8
.Lpadlock_saved_context:
.quad	0
___
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;