# sha1-x86_64.pl
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# sha1_block procedure for x86_64.
#
# It was brought to my attention that on EM64T compiler-generated code
# was far behind 32-bit assembler implementation. This is unlike on
# Opteron where compiler-generated code was only 15% behind 32-bit
# assembler, which originally made it hard to motivate the effort.
# There was suggestion to mechanically translate 32-bit code, but I
# dismissed it, reasoning that x86_64 offers enough register bank
# capacity to fully utilize SHA-1 parallelism. Therefore this fresh
# implementation:-) However! While 64-bit code does perform better
# on Opteron, I failed to beat 32-bit assembler on EM64T core. Well,
# x86_64 does offer larger *addressable* bank, but out-of-order core
# reaches for even more registers through dynamic aliasing, and EM64T
# core must have managed to run-time optimize even 32-bit code just as
# good as 64-bit one. Performance improvement is summarized in the
# following table:
#
#		gcc 3.4		32-bit asm	cycles/byte
# Opteron	+45%		+20%		6.8
# Xeon P4	+65%		+0%		9.9
# Core2		+60%		+10%		7.0
  31. $output=shift;
  32. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
  33. ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
  34. ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
  35. die "can't locate x86_64-xlate.pl";
  36. open STDOUT,"| $^X $xlate $output";
# Incoming arguments per the x86_64 SysV calling convention.
$ctx="%rdi";	# 1st arg: SHA_CTX pointer
$inp="%rsi";	# 2nd arg: input data pointer
$num="%rdx";	# 3rd arg: number of 64-byte blocks
# reassign arguments in order to produce more compact code
$ctx="%r8";
$inp="%r9";
$num="%r10";

$xi="%eax";	# scratch: current message-schedule word W[i]
$t0="%ebx";	# scratch: round-function accumulator
$t1="%ecx";	# scratch: second accumulator (used in Maj rounds only)
$A="%edx";
$B="%esi";
$C="%edi";
$D="%ebp";
$E="%r11d";
$T="%r12d";	# sixth name: lets the round result rotate through @V
		# without explicit register-to-register moves
@V=($A,$B,$C,$D,$E,$T);
# Emit the function prologue for $func: save callee-saved registers
# (%rbx/%rbp/%r12 are all clobbered by the round code), move the three
# arguments into their reassigned registers, carve out a 16-word
# message-schedule area on a 64-byte-aligned stack (original %rsp is
# preserved in the slot at 16*4(%rsp)), and load the five chaining
# values A..E from the context.
sub PROLOGUE {
my $func=shift;
$code.=<<___;
.globl $func
.type $func,\@function,3
.align 16
$func:
push %rbx
push %rbp
push %r12
mov %rsp,%rax
mov %rdi,$ctx # reassigned argument
sub \$`8+16*4`,%rsp
mov %rsi,$inp # reassigned argument
and \$-64,%rsp
mov %rdx,$num # reassigned argument
mov %rax,`16*4`(%rsp)
mov 0($ctx),$A
mov 4($ctx),$B
mov 8($ctx),$C
mov 12($ctx),$D
mov 16($ctx),$E
___
}
# Emit the matching epilogue: restore the caller's %rsp from the slot
# the prologue stored it in, pop the saved callee-saved registers in
# reverse order, and return.
sub EPILOGUE {
my $func=shift;
$code.=<<___;
mov `16*4`(%rsp),%rsp
pop %r12
pop %rbp
pop %rbx
ret
.size $func,.-$func
___
}
# Rounds 0..19.  Round function F = (b & (c ^ d)) ^ d -- the "Ch"
# select function -- with constant K = 0x5a827999.  Each round
# accumulates $f = rol(a,5) + F + e + K + W[i]; the caller rotates @V
# so that $f serves as the next round's "e".  When $host is defined
# the bswap of input words is suppressed (input presumably already in
# big-endian order -- NOTE(review): confirm against callers).
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e,$f,$host)=@_;
my $j=$i+1;
# Round 0 only: fetch, byte-swap and stash the very first input word.
$code.=<<___ if ($i==0);
mov `4*$i`($inp),$xi
`"bswap $xi" if(!defined($host))`
mov $xi,`4*$i`(%rsp)
___
# Rounds 0..14: round arithmetic on W[i] interleaved with loading,
# swapping and storing the *next* input word W[j].
$code.=<<___ if ($i<15);
lea 0x5a827999($xi,$e),$f
mov $c,$t0
mov `4*$j`($inp),$xi
mov $a,$e
xor $d,$t0
`"bswap $xi" if(!defined($host))`
rol \$5,$e
and $b,$t0
mov $xi,`4*$j`(%rsp)
add $e,$f
xor $d,$t0
rol \$30,$b
add $t0,$f
___
# Rounds 15..19: message-schedule expansion, interleaved with the round:
# W[j%16] = rol(W[j%16] ^ W[(j+2)%16] ^ W[(j+8)%16] ^ W[(j+13)%16], 1)
# (the standard recurrence with indices taken mod 16).
$code.=<<___ if ($i>=15);
lea 0x5a827999($xi,$e),$f
mov `4*($j%16)`(%rsp),$xi
mov $c,$t0
mov $a,$e
xor `4*(($j+2)%16)`(%rsp),$xi
xor $d,$t0
rol \$5,$e
xor `4*(($j+8)%16)`(%rsp),$xi
and $b,$t0
add $e,$f
xor `4*(($j+13)%16)`(%rsp),$xi
xor $d,$t0
rol \$30,$b
add $t0,$f
rol \$1,$xi
mov $xi,`4*($j%16)`(%rsp)
___
}
# Rounds 20..39 AND 60..79 (both use the parity function F = b^c^d);
# the constant is chosen by round index: 0x6ed9eba1 for 20..39,
# 0xca62c1d6 for 60..79.
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
my $K=($i<40)?0x6ed9eba1:0xca62c1d6;
# All but the very last round: parity round with schedule expansion.
$code.=<<___ if ($i<79);
lea $K($xi,$e),$f
mov `4*($j%16)`(%rsp),$xi
mov $c,$t0
mov $a,$e
xor `4*(($j+2)%16)`(%rsp),$xi
xor $b,$t0
rol \$5,$e
xor `4*(($j+8)%16)`(%rsp),$xi
xor $d,$t0
add $e,$f
xor `4*(($j+13)%16)`(%rsp),$xi
rol \$30,$b
add $t0,$f
rol \$1,$xi
___
# Schedule words computed in rounds 76..78 are never read again, so
# skip the store for those rounds.
$code.=<<___ if ($i<76);
mov $xi,`4*($j%16)`(%rsp)
___
# Round 79: final round -- no schedule load or expansion at all.
$code.=<<___ if ($i==79);
lea $K($xi,$e),$f
mov $c,$t0
mov $a,$e
xor $b,$t0
rol \$5,$e
xor $d,$t0
add $e,$f
rol \$30,$b
add $t0,$f
___
}
# Rounds 40..59.  Round function is the majority function computed as
# Maj(b,c,d) = (b & c) | ((b | c) & d), using both scratch registers
# ($t0 for b&c, $t1 for (b|c)&d), with constant K = 0x8f1bbcdc.
# Schedule expansion is interleaved exactly as in the other round
# bodies.
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e,$f)=@_;
my $j=$i+1;
$code.=<<___;
lea 0x8f1bbcdc($xi,$e),$f
mov `4*($j%16)`(%rsp),$xi
mov $b,$t0
mov $b,$t1
xor `4*(($j+2)%16)`(%rsp),$xi
mov $a,$e
and $c,$t0
xor `4*(($j+8)%16)`(%rsp),$xi
or $c,$t1
rol \$5,$e
xor `4*(($j+13)%16)`(%rsp),$xi
and $d,$t1
add $e,$f
rol \$1,$xi
or $t1,$t0
rol \$30,$b
mov $xi,`4*($j%16)`(%rsp)
add $t0,$f
___
}
  190. $code=".text\n";
  191. &PROLOGUE("sha1_block_data_order");
  192. $code.=".align 4\n.Lloop:\n";
  193. for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
  194. for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
  195. for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
  196. for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
  197. $code.=<<___;
  198. add 0($ctx),$E
  199. add 4($ctx),$T
  200. add 8($ctx),$A
  201. add 12($ctx),$B
  202. add 16($ctx),$C
  203. mov $E,0($ctx)
  204. mov $T,4($ctx)
  205. mov $A,8($ctx)
  206. mov $B,12($ctx)
  207. mov $C,16($ctx)
  208. xchg $E,$A # mov $E,$A
  209. xchg $T,$B # mov $T,$B
  210. xchg $E,$C # mov $A,$C
  211. xchg $T,$D # mov $B,$D
  212. # mov $C,$E
  213. lea `16*4`($inp),$inp
  214. sub \$1,$num
  215. jnz .Lloop
  216. ___
  217. &EPILOGUE("sha1_block_data_order");
  218. $code.=<<___;
  219. .asciz "SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
  220. ___
  221. ####################################################################
  222. $code =~ s/\`([^\`]*)\`/eval $1/gem;
  223. print $code;
  224. close STDOUT;