armv4-gf2m.pl

#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication
# used in bn_gf2m.c. It is a fairly mechanical port from C for the
# time being... Except that it has two code paths: pure integer
# code suitable for any ARMv4 and later CPU, and NEON code suitable
# for ARMv7. The pure integer 1x1 multiplication subroutine runs in
# ~45 cycles on a dual-issue core such as Cortex A8, which is ~50%
# faster than compiler-generated code. For ECDH and ECDSA verify (but
# not for ECDSA sign) it means a 25%-45% improvement depending on key
# length, more for longer keys. Even though NEON 1x1 multiplication
# runs in even fewer cycles, ~30, the improvement is measurable only
# on longer keys. One has to optimize code elsewhere to get the NEON
# glow...

$flavour = shift;
if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate ) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}

sub Dlo()	{ shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
sub Dhi()	{ shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
sub Q()		{ shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }

$code=<<___;
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.fpu	neon
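
@ 32x32->64-bit carry-less (polynomial) multiplication built from
@ byte-wise vmull.p8 products: the 32-bit word to multiply is expected
@ in the low half of d16, the other 32-bit word replicated in both
@ halves of d17 (see the caller below), and the 64-bit product is
@ returned in d0.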
.type	mul_1x1_neon,%function
.align	5
mul_1x1_neon:
	vshl.u64	`&Dlo("q1")`,d16,#8	@ q1-q3 are slided $a
	vmull.p8	`&Q("d0")`,d16,d17	@ a·bb
	vshl.u64	`&Dlo("q2")`,d16,#16
	vmull.p8	q1,`&Dlo("q1")`,d17	@ a<<8·bb
	vshl.u64	`&Dlo("q3")`,d16,#24
	vmull.p8	q2,`&Dlo("q2")`,d17	@ a<<16·bb
	vshr.u64	`&Dlo("q1")`,#8
	vmull.p8	q3,`&Dlo("q3")`,d17	@ a<<24·bb
	vshl.u64	`&Dhi("q1")`,#24
	veor		d0,`&Dlo("q1")`
	vshr.u64	`&Dlo("q2")`,#16
	veor		d0,`&Dhi("q1")`
	vshl.u64	`&Dhi("q2")`,#16
	veor		d0,`&Dlo("q2")`
	vshr.u64	`&Dlo("q3")`,#24
	veor		d0,`&Dhi("q2")`
	vshl.u64	`&Dhi("q3")`,#8
	veor		d0,`&Dlo("q3")`
	veor		d0,`&Dhi("q3")`
	bx	lr
.size	mul_1x1_neon,.-mul_1x1_neon
#endif
___
################
# private interface to mul_1x1_ialu
#
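# mul_1x1_ialu computes the 64-bit carry-less product of two 32-bit
# words: a is taken in $a (r1), b in $b (r0), and the result is
# returned in $hi:$lo (r4:r5). It builds an 8-entry table of the low
# 30 bits of a multiplied by 0..7 on the stack, consumes b three bits
# at a time, and folds the two masked-off top bits of a back in with
# conditional eors.
#
# The subroutine below is an illustrative reference model of that
# scheme; it is not used by this module and assumes a Perl built with
# 64-bit integers.

sub ref_mul_1x1 {
    my ($aa,$bb)=@_;
    my $lo30=$aa&0x3fffffff;		# low 30 bits of a, as in the code below
    my @tab=(0,$lo30,$lo30<<1,$lo30^($lo30<<1),
	     $lo30<<2,$lo30^($lo30<<2),($lo30<<1)^($lo30<<2),
	     $lo30^($lo30<<1)^($lo30<<2));
    my $r=0;
    for (my $i=0;$i<32;$i+=3) {		# consume b three bits at a time
	$r ^= $tab[($bb>>$i)&7]<<$i;
    }
    $r ^= $bb<<30 if ($aa&(1<<30));	# fold the masked-off top bits of a back in
    $r ^= $bb<<31 if ($aa&(1<<31));
    return $r;				# 64-bit carry-less product
}
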
  73. $a="r1";
  74. $b="r0";
  75. ($a0,$a1,$a2,$a12,$a4,$a14)=
  76. ($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
  77. $mask="r12";
  78. $code.=<<___;
  79. .type mul_1x1_ialu,%function
  80. .align 5
  81. mul_1x1_ialu:
  82. mov $a0,#0
  83. bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
  84. str $a0,[sp,#0] @ tab[0]=0
  85. add $a2,$a1,$a1 @ a2=a1<<1
  86. str $a1,[sp,#4] @ tab[1]=a1
  87. eor $a12,$a1,$a2 @ a1^a2
  88. str $a2,[sp,#8] @ tab[2]=a2
  89. mov $a4,$a1,lsl#2 @ a4=a1<<2
  90. str $a12,[sp,#12] @ tab[3]=a1^a2
  91. eor $a14,$a1,$a4 @ a1^a4
  92. str $a4,[sp,#16] @ tab[4]=a4
  93. eor $a0,$a2,$a4 @ a2^a4
  94. str $a14,[sp,#20] @ tab[5]=a1^a4
  95. eor $a12,$a12,$a4 @ a1^a2^a4
  96. str $a0,[sp,#24] @ tab[6]=a2^a4
  97. and $i0,$mask,$b,lsl#2
  98. str $a12,[sp,#28] @ tab[7]=a1^a2^a4
  99. and $i1,$mask,$b,lsr#1
  100. ldr $lo,[sp,$i0] @ tab[b & 0x7]
  101. and $i0,$mask,$b,lsr#4
  102. ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
  103. and $i1,$mask,$b,lsr#7
  104. ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
  105. eor $lo,$lo,$t1,lsl#3 @ stall
  106. mov $hi,$t1,lsr#29
  107. ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
  108. and $i0,$mask,$b,lsr#10
  109. eor $lo,$lo,$t0,lsl#6
  110. eor $hi,$hi,$t0,lsr#26
  111. ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
  112. and $i1,$mask,$b,lsr#13
  113. eor $lo,$lo,$t1,lsl#9
  114. eor $hi,$hi,$t1,lsr#23
  115. ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
  116. and $i0,$mask,$b,lsr#16
  117. eor $lo,$lo,$t0,lsl#12
  118. eor $hi,$hi,$t0,lsr#20
  119. ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
  120. and $i1,$mask,$b,lsr#19
  121. eor $lo,$lo,$t1,lsl#15
  122. eor $hi,$hi,$t1,lsr#17
  123. ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
  124. and $i0,$mask,$b,lsr#22
  125. eor $lo,$lo,$t0,lsl#18
  126. eor $hi,$hi,$t0,lsr#14
  127. ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
  128. and $i1,$mask,$b,lsr#25
  129. eor $lo,$lo,$t1,lsl#21
  130. eor $hi,$hi,$t1,lsr#11
  131. ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
  132. tst $a,#1<<30
  133. and $i0,$mask,$b,lsr#28
  134. eor $lo,$lo,$t0,lsl#24
  135. eor $hi,$hi,$t0,lsr#8
  136. ldr $t0,[sp,$i0] @ tab[b >> 30 ]
  137. eorne $lo,$lo,$b,lsl#30
  138. eorne $hi,$hi,$b,lsr#2
  139. tst $a,#1<<31
  140. eor $lo,$lo,$t1,lsl#27
  141. eor $hi,$hi,$t1,lsr#5
  142. eorne $lo,$lo,$b,lsl#31
  143. eorne $hi,$hi,$b,lsr#1
  144. eor $lo,$lo,$t0,lsl#30
  145. eor $hi,$hi,$t0,lsr#2
  146. mov pc,lr
  147. .size mul_1x1_ialu,.-mul_1x1_ialu
  148. ___
  149. ################
  150. # void bn_GF2m_mul_2x2(BN_ULONG *r,
  151. # BN_ULONG a1,BN_ULONG a0,
  152. # BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
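#
# Both code paths use one level of Karatsuba: writing a=a1·x^32+a0 and
# b=b1·x^32+b0 over GF(2)[x] (where "+" is xor),
#
#	a·b = a1·b1·x^64 + ((a1+a0)·(b1+b0) + a1·b1 + a0·b0)·x^32 + a0·b0
#
# so only three 1x1 multiplications are required.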
($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));

$code.=<<___;
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_ARCH__>=7
	ldr	r12,.LOPENSSL_armcap
.Lpic:	ldr	r12,[pc,r12]
#ifdef	__APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#1
	beq	.Lialu

	veor	$A1,$A1
#ifdef	__APPLE__
	vmov	$B1,r3,r3		@ two copies of b1
#else
	vmov.32	$B1,r3,r3		@ two copies of b1
#endif
	vmov.32	${A1}[0],r1		@ a1
	veor	$A0,$A0
	vld1.32	${B0}[],[sp,:32]	@ two copies of b0
	vmov.32	${A0}[0],r2		@ a0
	mov	r12,lr

	vmov	d16,$A1
	vmov	d17,$B1
	bl	mul_1x1_neon		@ a1·b1
	vmov	$A1B1,d0

	vmov	d16,$A0
	vmov	d17,$B0
	bl	mul_1x1_neon		@ a0·b0
	vmov	$A0B0,d0

	veor	d16,$A0,$A1
	veor	d17,$B0,$B1
	veor	$A0,$A0B0,$A1B1
	bl	mul_1x1_neon		@ (a0+a1)·(b0+b1)

	veor	d0,$A0			@ (a0+a1)·(b0+b1)-a0·b0-a1·b1
	vshl.u64	d1,d0,#32
	vshr.u64	d0,d0,#32
	veor	$A0B0,d1
	veor	$A1B1,d0
	vst1.32	{${A0B0}[0]},[r0,:32]!
	vst1.32	{${A0B0}[1]},[r0,:32]!
	vst1.32	{${A1B1}[0]},[r0,:32]!
	vst1.32	{${A1B1}[1]},[r0,:32]
	bx	r12
.align	4
.Lialu:
#endif
___
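# Generic integer path: the same three 1x1 products are computed with
# mul_1x1_ialu; the eor triplets below are xor-swaps that exchange
# a1<->a0 (r1,r2) and b1<->b0 (r0,r3) between the calls.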
$ret="r10";	# reassigned 1st argument
$code.=<<___;
	stmdb	sp!,{r4-r10,lr}
	mov	$ret,r0			@ reassign 1st argument
	mov	$b,r3			@ $b=b1
	ldr	r3,[sp,#32]		@ load b0
	mov	$mask,#7<<2
	sub	sp,sp,#32		@ allocate tab[8]

	bl	mul_1x1_ialu		@ a1·b1
	str	$lo,[$ret,#8]
	str	$hi,[$ret,#12]

	eor	$b,$b,r3		@ flip b0 and b1
	eor	$a,$a,r2		@ flip a0 and a1
	eor	r3,r3,$b
	eor	r2,r2,$a
	eor	$b,$b,r3
	eor	$a,$a,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	$lo,[$ret]
	str	$hi,[$ret,#4]

	eor	$a,$a,r2
	eor	$b,$b,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
___
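# The eor chain below folds the Karatsuba middle term,
# (a1+a0)·(b1+b0) + a1·b1 + a0·b0, into the two middle result words
# r[1] and r[2].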
@r=map("r$_",(6..9));
$code.=<<___;
	ldmia	$ret,{@r[0]-@r[3]}
	eor	$lo,$lo,$hi
	eor	$hi,$hi,@r[1]
	eor	$lo,$lo,@r[0]
	eor	$hi,$hi,@r[2]
	eor	$lo,$lo,@r[3]
	eor	$hi,$hi,@r[3]
	str	$hi,[$ret,#8]
	eor	$lo,$lo,$hi
	add	sp,sp,#32		@ destroy tab[8]
	str	$lo,[$ret,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-(.Lpic+8)
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	5

.comm	OPENSSL_armcap_P,4,4
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
print $code;
close STDOUT;	# enforce flush