#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.82 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on the compiler's ability to inline small functions. The compiler also
# disables interrupts for some reason, thus making interrupt response
# time dependent on input length. This module on the other hand is free
# from such a limitation.
  27. $output=pop and open STDOUT,">$output";
  28. ($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
  29. ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
  30. ($D0,$D1,$D2,$D3)= ("A9","B9","A11","B11");
  31. ($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
  32. ($THREE,$R0b,$S2a)=("B7","B5","A5");
  33. $code.=<<___;
  34. .text
  35. .if .ASSEMBLER_VERSION<7000000
  36. .asg 0,__TI_EABI__
  37. .endif
  38. .if __TI_EABI__
  39. .asg poly1305_init,_poly1305_init
  40. .asg poly1305_blocks,_poly1305_blocks
  41. .asg poly1305_emit,_poly1305_emit
  42. .endif
  43. .asg B3,RA
  44. .asg A15,FP
  45. .asg B15,SP
  46. .if .LITTLE_ENDIAN
  47. .asg MV,SWAP2
  48. .asg MV.L,SWAP4
  49. .endif
  50. .global _poly1305_init
  51. _poly1305_init:
  52. .asmfunc
  53. LDNDW *${INPB}[0],B17:B16 ; load key material
  54. LDNDW *${INPB}[1],A17:A16
  55. || ZERO B9:B8
  56. || MVK -1,B0
  57. STDW B9:B8,*${CTXA}[0] ; initialize h1:h0
  58. || SHRU B0,4,B0 ; 0x0fffffff
  59. || MVK -4,B1
  60. STDW B9:B8,*${CTXA}[1] ; initialize h3:h2
  61. || AND B0,B1,B1 ; 0x0ffffffc
  62. STW B8,*${CTXA}[4] ; initialize h4
  63. .if .BIG_ENDIAN
  64. SWAP2 B16,B17
  65. || SWAP2 B17,B16
  66. SWAP2 A16,A17
  67. || SWAP2 A17,A16
  68. SWAP4 B16,B16
  69. || SWAP4 A16,A16
  70. SWAP4 B17,B17
  71. || SWAP4 A17,A17
  72. .endif
  73. AND B16,B0,B20 ; r0 = key[0] & 0x0fffffff
  74. || AND B17,B1,B22 ; r1 = key[1] & 0x0ffffffc
  75. || EXTU B17,4,6,B16 ; r1>>2
  76. AND A16,B1,B21 ; r2 = key[2] & 0x0ffffffc
  77. || AND A17,B1,A23 ; r3 = key[3] & 0x0ffffffc
  78. || BNOP RA
  79. SHRU B21,2,B18
  80. || ADD B22,B16,B16 ; s1 = r1 + r1>>2
  81. STDW B21:B20,*${CTXA}[3] ; save r2:r0
  82. || ADD B21,B18,B18 ; s2 = r2 + r2>>2
  83. || SHRU A23,2,B17
  84. || MV A23,B23
  85. STDW B23:B22,*${CTXA}[4] ; save r3:r1
  86. || ADD B23,B17,B19 ; s3 = r3 + r3>>2
  87. || ADD B23,B17,B17 ; s3 = r3 + r3>>2
  88. STDW B17:B16,*${CTXA}[5] ; save s3:s1
  89. STDW B19:B18,*${CTXA}[6] ; save s3:s2
  90. || ZERO A4 ; return 0
  91. .endasmfunc
  92. .global _poly1305_blocks
  93. .align 32
  94. _poly1305_blocks:
  95. .asmfunc stack_usage(40)
  96. SHRU $LEN,4,A2 ; A2 is loop counter, number of blocks
  97. [!A2] BNOP RA ; no data
  98. || [A2] STW FP,*SP--(40) ; save frame pointer and alloca(40)
  99. || [A2] MV SP,FP
  100. [A2] STDW B13:B12,*SP[4] ; ABI says so
  101. || [A2] MV $CTXA,$S3b ; borrow $S3b
  102. [A2] STDW B11:B10,*SP[3]
  103. || [A2] STDW A13:A12,*FP[-3]
  104. [A2] STDW A11:A10,*FP[-4]
  105. || [A2] LDDW *${S3b}[0],B25:B24 ; load h1:h0
  106. [A2] LDNW *${INPB}++[4],$D0 ; load inp[0]
  107. [A2] LDNW *${INPB}[-3],$D1 ; load inp[1]
  108. LDDW *${CTXA}[1],B29:B28 ; load h3:h2, B28 is h2
  109. LDNW *${INPB}[-2],$D2 ; load inp[2]
  110. LDNW *${INPB}[-1],$D3 ; load inp[3]
  111. LDDW *${CTXA}[3],$R2:$R0 ; load r2:r0
  112. || LDDW *${S3b}[4],$R3:$R1 ; load r3:r1
  113. || SWAP2 $D0,$D0
  114. LDDW *${CTXA}[5],$S3:$S1 ; load s3:s1
  115. || LDDW *${S3b}[6],$S3b:$S2 ; load s3:s2
  116. || SWAP4 $D0,$D0
  117. || SWAP2 $D1,$D1
  118. ADDU $D0,B24,$D0:$H0 ; h0+=inp[0]
  119. || ADD $D0,B24,B27 ; B-copy of h0+inp[0]
  120. || SWAP4 $D1,$D1
  121. ADDU $D1,B25,$D1:$H1 ; h1+=inp[1]
  122. || MVK 3,$THREE
  123. || SWAP2 $D2,$D2
  124. LDW *${CTXA}[4],$H4 ; load h4
  125. || SWAP4 $D2,$D2
  126. || MV B29,B30 ; B30 is h3
  127. MV $R0,$R0b
  128. loop?:
  129. MPY32U $H0,$R0,A17:A16
  130. || MPY32U B27,$R1,B17:B16 ; MPY32U $H0,$R1,B17:B16
  131. || ADDU $D0,$D1:$H1,B25:B24 ; ADDU $D0,$D1:$H1,$D1:$H1
  132. || ADDU $D2,B28,$D2:$H2 ; h2+=inp[2]
  133. || SWAP2 $D3,$D3
  134. MPY32U $H0,$R2,A19:A18
  135. || MPY32U B27,$R3,B19:B18 ; MPY32U $H0,$R3,B19:B18
  136. || ADD $D0,$H1,A24 ; A-copy of B24
  137. || SWAP4 $D3,$D3
  138. || [A2] SUB A2,1,A2 ; decrement loop counter
  139. MPY32U A24,$S3,A21:A20 ; MPY32U $H1,$S3,A21:A20
  140. || MPY32U B24,$R0b,B21:B20 ; MPY32U $H1,$R0,B21:B20
  141. || ADDU B25,$D2:$H2,$D2:$H2 ; ADDU $D1,$D2:$H2,$D2:$H2
  142. || ADDU $D3,B30,$D3:$H3 ; h3+=inp[3]
  143. || ADD B25,$H2,B25 ; B-copy of $H2
  144. MPY32U A24,$R1,A23:A22 ; MPY32U $H1,$R1,A23:A22
  145. || MPY32U B24,$R2,B23:B22 ; MPY32U $H1,$R2,B23:B22
  146. MPY32U $H2,$S2,A25:A24
  147. || MPY32U B25,$S3b,B25:B24 ; MPY32U $H2,$S3,B25:B24
  148. || ADDU $D2,$D3:$H3,$D3:$H3
  149. || ADD $PADBIT,$H4,$H4 ; h4+=padbit
  150. MPY32U $H2,$R0,A27:A26
  151. || MPY32U $H2,$R1,B27:B26
  152. || ADD $D3,$H4,$H4
  153. || MV $S2,$S2a
  154. MPY32U $H3,$S1,A29:A28
  155. || MPY32U $H3,$S2,B29:B28
  156. || ADD A21,A17,A21 ; start accumulating "d3:d0"
  157. || ADD B21,B17,B21
  158. || ADDU A20,A16,A17:A16
  159. || ADDU B20,B16,B17:B16
  160. || [A2] LDNW *${INPB}++[4],$D0 ; load inp[0]
  161. MPY32U $H3,$S3,A31:A30
  162. || MPY32U $H3,$R0b,B31:B30
  163. || ADD A23,A19,A23
  164. || ADD B23,B19,B23
  165. || ADDU A22,A18,A19:A18
  166. || ADDU B22,B18,B19:B18
  167. || [A2] LDNW *${INPB}[-3],$D1 ; load inp[1]
  168. MPY32 $H4,$S1,B20
  169. || MPY32 $H4,$S2a,A20
  170. || ADD A25,A21,A21
  171. || ADD B25,B21,B21
  172. || ADDU A24,A17:A16,A17:A16
  173. || ADDU B24,B17:B16,B17:B16
  174. || [A2] LDNW *${INPB}[-2],$D2 ; load inp[2]
  175. MPY32 $H4,$S3b,B22
  176. || ADD A27,A23,A23
  177. || ADD B27,B23,B23
  178. || ADDU A26,A19:A18,A19:A18
  179. || ADDU B26,B19:B18,B19:B18
  180. || [A2] LDNW *${INPB}[-1],$D3 ; load inp[3]
  181. MPY32 $H4,$R0b,$H4
  182. || ADD A29,A21,A21 ; final hi("d0")
  183. || ADD B29,B21,B21 ; final hi("d1")
  184. || ADDU A28,A17:A16,A17:A16 ; final lo("d0")
  185. || ADDU B28,B17:B16,B17:B16
  186. ADD A31,A23,A23 ; final hi("d2")
  187. || ADD B31,B23,B23 ; final hi("d3")
  188. || ADDU A30,A19:A18,A19:A18
  189. || ADDU B30,B19:B18,B19:B18
  190. ADDU B20,B17:B16,B17:B16 ; final lo("d1")
  191. || ADDU A20,A19:A18,A19:A18 ; final lo("d2")
  192. ADDU B22,B19:B18,B19:B18 ; final lo("d3")
  193. || ADD A17,A21,A21 ; "flatten" "d3:d0"
  194. MV A19,B29 ; move to avoid cross-path stalls
  195. ADDU A21,B17:B16,B27:B26 ; B26 is h1
  196. ADD B21,B27,B27
  197. || DMV B29,A18,B29:B28 ; move to avoid cross-path stalls
  198. ADDU B27,B29:B28,B29:B28 ; B28 is h2
  199. || [A2] SWAP2 $D0,$D0
  200. ADD A23,B29,B29
  201. || [A2] SWAP4 $D0,$D0
  202. ADDU B29,B19:B18,B31:B30 ; B30 is h3
  203. ADD B23,B31,B31
  204. || MV A16,B24 ; B24 is h0
  205. || [A2] SWAP2 $D1,$D1
  206. ADD B31,$H4,$H4
  207. || [A2] SWAP4 $D1,$D1
  208. SHRU $H4,2,B16 ; last reduction step
  209. || AND $H4,$THREE,$H4
  210. ADDAW B16,B16,B16 ; 5*(h4>>2)
  211. || [A2] BNOP loop?
  212. ADDU B24,B16,B25:B24 ; B24 is h0
  213. || [A2] SWAP2 $D2,$D2
  214. ADDU B26,B25,B27:B26 ; B26 is h1
  215. || [A2] SWAP4 $D2,$D2
  216. ADDU B28,B27,B29:B28 ; B28 is h2
  217. || [A2] ADDU $D0,B24,$D0:$H0 ; h0+=inp[0]
  218. || [A2] ADD $D0,B24,B27 ; B-copy of h0+inp[0]
  219. ADDU B30,B29,B31:B30 ; B30 is h3
  220. ADD B31,$H4,$H4
  221. || [A2] ADDU $D1,B26,$D1:$H1 ; h1+=inp[1]
  222. ;;===== branch to loop? is taken here
  223. LDDW *FP[-4],A11:A10 ; ABI says so
  224. LDDW *FP[-3],A13:A12
  225. || LDDW *SP[3],B11:B10
  226. LDDW *SP[4],B13:B12
  227. || MV B26,B25
  228. || BNOP RA
  229. LDW *++SP(40),FP ; restore frame pointer
  230. || MV B30,B29
  231. STDW B25:B24,*${CTXA}[0] ; save h1:h0
  232. STDW B29:B28,*${CTXA}[1] ; save h3:h2
  233. STW $H4,*${CTXA}[4] ; save h4
  234. NOP 1
  235. .endasmfunc
  236. ___
  237. {
  238. my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);
  239. $code.=<<___;
  240. .global _poly1305_emit
  241. .align 32
  242. _poly1305_emit:
  243. .asmfunc
  244. LDDW *${CTXA}[0],A17:A16 ; load h1:h0
  245. LDDW *${CTXA}[1],A19:A18 ; load h3:h2
  246. LDW *${CTXA}[4],A20 ; load h4
  247. MV $NONCEA,$NONCEB
  248. MVK 5,A22 ; compare to modulus
  249. ADDU A16,A22,A23:A22
  250. || LDW *${NONCEA}[0],A8
  251. || LDW *${NONCEB}[1],B8
  252. ADDU A17,A23,A25:A24
  253. || LDW *${NONCEA}[2],A9
  254. || LDW *${NONCEB}[3],B9
  255. ADDU A19,A25,A27:A26
  256. ADDU A19,A27,A29:A28
  257. ADD A20,A29,A29
  258. SHRU A29,2,A2 ; check for overflow in 130-th bit
  259. [A2] MV A22,A16 ; select
  260. || [A2] MV A24,A17
  261. [A2] MV A26,A18
  262. || [A2] MV A28,A19
  263. || ADDU A8,A16,A23:A22 ; accumulate nonce
  264. ADDU B8,A17,A25:A24
  265. || SWAP2 A22,A22
  266. ADDU A23,A25:A24,A25:A24
  267. ADDU A9,A18,A27:A26
  268. || SWAP2 A24,A24
  269. ADDU A25,A27:A26,A27:A26
  270. || ADD B9,A19,A28
  271. ADD A27,A28,A28
  272. || SWAP2 A26,A26
  273. .if .BIG_ENDIAN
  274. SWAP2 A28,A28
  275. || SWAP4 A22,A22
  276. || SWAP4 A24,B24
  277. SWAP4 A26,A26
  278. SWAP4 A28,A28
  279. || MV B24,A24
  280. .endif
  281. BNOP RA,1
  282. STNW A22,*${MAC}[0] ; write the result
  283. STNW A24,*${MAC}[1]
  284. STNW A26,*${MAC}[2]
  285. STNW A28,*${MAC}[3]
  286. .endasmfunc
  287. ___
  288. }
  289. $code.=<<___;
  290. .sect .const
  291. .cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
  292. .align 4
  293. ___
  294. print $code;