poly1305_ieee754.c

/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This module is meant to be used as template for non-x87 floating-
 * point assembly modules. The template itself is x86_64-specific
 * though, as it was debugged on x86_64. So the implementer would
 * have to recognize platform-specific parts, UxTOy and inline asm,
 * and act accordingly.
 *
 * Huh? x86_64-specific code as template for non-x87? Note seven, which
 * is not a typo, but a reference to 80-bit precision. This module on
 * the other hand relies on 64-bit precision operations, which are the
 * default for x86_64 code. And since we are at it, just for the sense
 * of it, large-block performance in cycles per processed byte for
 * *this* code is:
 *
 *                      gcc-4.8     icc-15.0    clang-3.4(*)
 *
 * Westmere             4.96        5.09        4.37
 * Sandy Bridge         4.95        4.90        4.17
 * Haswell              4.92        4.87        3.78
 * Bulldozer            4.67        4.49        4.68
 * VIA Nano             7.07        7.05        5.98
 * Silvermont           10.6        9.61        12.6
 *
 * (*)  clang managed to discover parallelism and deployed SIMD;
 *
 * And for a range of other platforms with unspecified gcc versions:
 *
 * Freescale e300       12.5
 * PPC74x0              10.8
 * POWER6               4.92
 * POWER7               4.50
 * POWER8               4.10
 *
 * z10                  11.2
 * z196+                7.30
 *
 * UltraSPARC III       16.0
 * SPARC T4             16.1
 */

#if !(defined(__GNUC__) && __GNUC__>=2)
# error "this is gcc-specific template"
#endif

#include <stdlib.h>

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef union { double d; u64 u; } elem64;

#define TWO(p)          ((double)(1ULL<<(p)))
#define TWO0            TWO(0)
#define TWO32           TWO(32)
#define TWO64           (TWO32*TWO(32))
#define TWO96           (TWO64*TWO(32))
#define TWO130          (TWO96*TWO(34))

#define EXP(p)          ((1023ULL+(p))<<52)
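
/*
 * EXP(p) is the IEEE-754 bit pattern of the double 2^p.  OR-ing a
 * 32-bit value v into the low mantissa bits of EXP(52+k) yields the
 * double 2^(52+k) + v*2^k exactly, and subtracting 2^(52+k) afterwards
 * recovers v*2^k.  This is how base-2^32 limbs travel between byte
 * strings and floating-point values throughout this file.
 */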

#if defined(__x86_64__) || (defined(__PPC__) && defined(__LITTLE_ENDIAN__))
# define U8TOU32(p) (*(const u32 *)(p))
# define U32TO8(p,v) (*(u32 *)(p) = (v))
#elif defined(__PPC__) || defined(__POWERPC__)
# define U8TOU32(p) ({u32 ret; asm ("lwbrx %0,0,%1":"=r"(ret):"b"(p)); ret; })
# define U32TO8(p,v) asm ("stwbrx %0,0,%1"::"r"(v),"b"(p):"memory")
#elif defined(__s390x__)
# define U8TOU32(p) ({u32 ret; asm ("lrv %0,%1":"=d"(ret):"m"(*(u32 *)(p))); ret; })
# define U32TO8(p,v) asm ("strv %1,%0":"=m"(*(u32 *)(p)):"d"(v))
#endif

#ifndef U8TOU32
# define U8TOU32(p) ((u32)(p)[0]     | (u32)(p)[1]<<8 | \
                     (u32)(p)[2]<<16 | (u32)(p)[3]<<24  )
#endif
#ifndef U32TO8
# define U32TO8(p,v) ((p)[0] = (u8)(v),       (p)[1] = (u8)((v)>>8), \
                      (p)[2] = (u8)((v)>>16), (p)[3] = (u8)((v)>>24) )
#endif
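
/*
 * h[] holds the 130-bit accumulator as four base-2^32 limbs, each kept
 * in "biased" form, i.e. with 2^(52+32*i) added so that the limb sits
 * in the mantissa bits.  r[] holds the clamped key split further into
 * 16-bit half-limbs (even index = low half, odd index = high half),
 * and s[] holds the upper three r limbs pre-multiplied by 5/2^130 for
 * the modular wrap-around.
 */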
typedef struct {
    elem64 h[4];
    double r[8];
    double s[6];
} poly1305_internal;

/* "round toward zero (truncate), mask all exceptions" */
#if defined(__x86_64__)
static const u32 mxcsr = 0x7f80;
#elif defined(__PPC__) || defined(__POWERPC__)
static const u64 one = 1;
#elif defined(__s390x__)
static const u32 fpc = 1;
#elif defined(__sparc__)
static const u64 fsr = 1ULL<<30;
#elif defined(__mips__)
static const u32 fcsr = 1;
#else
#error "unrecognized platform"
#endif
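
/*
 * The splitting idioms below, (x + 2^52*2^k) - 2^52*2^k, are written
 * for truncation: the addition simply drops the bits of x below 2^k
 * instead of possibly rounding them up, so the low part recovered by
 * the subsequent subtraction stays non-negative.
 */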

int poly1305_init(void *ctx, const unsigned char key[16])
{
    poly1305_internal *st = (poly1305_internal *) ctx;
    elem64 r0, r1, r2, r3;

    /* h = 0, biased */
#if 0
    st->h[0].d = TWO(52)*TWO0;
    st->h[1].d = TWO(52)*TWO32;
    st->h[2].d = TWO(52)*TWO64;
    st->h[3].d = TWO(52)*TWO96;
#else
    st->h[0].u = EXP(52+0);
    st->h[1].u = EXP(52+32);
    st->h[2].u = EXP(52+64);
    st->h[3].u = EXP(52+96);
#endif

    if (key) {
        /*
         * set "truncate" rounding mode
         */
#if defined(__x86_64__)
        u32 mxcsr_orig;

        asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
        asm volatile ("ldmxcsr %0"::"m"(mxcsr));
#elif defined(__PPC__) || defined(__POWERPC__)
        double fpscr_orig, fpscr = *(double *)&one;

        asm volatile ("mffs %0":"=f"(fpscr_orig));
        asm volatile ("mtfsf 255,%0"::"f"(fpscr));
#elif defined(__s390x__)
        u32 fpc_orig;

        asm volatile ("stfpc %0":"=m"(fpc_orig));
        asm volatile ("lfpc %0"::"m"(fpc));
#elif defined(__sparc__)
        u64 fsr_orig;

        asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
        asm volatile ("ldx %0,%%fsr"::"m"(fsr));
#elif defined(__mips__)
        u32 fcsr_orig;

        asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
        asm volatile ("ctc1 %0,$31"::"r"(fcsr));
#endif

        /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
        r0.u = EXP(52+0)  | (U8TOU32(&key[0])  & 0x0fffffff);
        r1.u = EXP(52+32) | (U8TOU32(&key[4])  & 0x0ffffffc);
        r2.u = EXP(52+64) | (U8TOU32(&key[8])  & 0x0ffffffc);
        r3.u = EXP(52+96) | (U8TOU32(&key[12]) & 0x0ffffffc);

        st->r[0] = r0.d - TWO(52)*TWO0;
        st->r[2] = r1.d - TWO(52)*TWO32;
        st->r[4] = r2.d - TWO(52)*TWO64;
        st->r[6] = r3.d - TWO(52)*TWO96;
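
        /*
         * 2^130 is congruent to 5 modulo p = 2^130-5, so partial
         * products that would spill past 2^130 are folded back by
         * multiplying with r*5/2^130 instead of r; s[] holds those
         * pre-scaled copies of the upper three r limbs.  The clamp
         * above clears their low two bits, which keeps the folded
         * products integral.
         */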
        st->s[0] = st->r[2] * (5.0/TWO130);
        st->s[2] = st->r[4] * (5.0/TWO130);
        st->s[4] = st->r[6] * (5.0/TWO130);

        /*
         * base 2^32 -> base 2^16
         */
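        /*
         * Each limb is split into 16-bit halves: adding and then
         * subtracting a constant whose unit in the last place is 2^16
         * at the limb's own scale truncates the limb to a multiple of
         * 2^16, which becomes the "high" half (odd index); subtracting
         * that from the original leaves the "low" half (even index).
         * With at most 16 significant bits per r/s multiplicand, the
         * partial products in poly1305_blocks stay well within the
         * 53-bit double significand.
         */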
        st->r[1] = (st->r[0] + TWO(52)*TWO(16)*TWO0) -
                    TWO(52)*TWO(16)*TWO0;
        st->r[0] -= st->r[1];
        st->r[3] = (st->r[2] + TWO(52)*TWO(16)*TWO32) -
                    TWO(52)*TWO(16)*TWO32;
        st->r[2] -= st->r[3];
        st->r[5] = (st->r[4] + TWO(52)*TWO(16)*TWO64) -
                    TWO(52)*TWO(16)*TWO64;
        st->r[4] -= st->r[5];
        st->r[7] = (st->r[6] + TWO(52)*TWO(16)*TWO96) -
                    TWO(52)*TWO(16)*TWO96;
        st->r[6] -= st->r[7];

        st->s[1] = (st->s[0] + TWO(52)*TWO(16)*TWO0/TWO96) -
                    TWO(52)*TWO(16)*TWO0/TWO96;
        st->s[0] -= st->s[1];
        st->s[3] = (st->s[2] + TWO(52)*TWO(16)*TWO32/TWO96) -
                    TWO(52)*TWO(16)*TWO32/TWO96;
        st->s[2] -= st->s[3];
        st->s[5] = (st->s[4] + TWO(52)*TWO(16)*TWO64/TWO96) -
                    TWO(52)*TWO(16)*TWO64/TWO96;
        st->s[4] -= st->s[5];

        /*
         * restore original FPU control register
         */
#if defined(__x86_64__)
        asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
#elif defined(__PPC__) || defined(__POWERPC__)
        asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
#elif defined(__s390x__)
        asm volatile ("lfpc %0"::"m"(fpc_orig));
#elif defined(__sparc__)
        asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
#elif defined(__mips__)
        asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
#endif
    }

    return 0;
}

void poly1305_blocks(void *ctx, const unsigned char *inp, size_t len,
                     int padbit)
{
    poly1305_internal *st = (poly1305_internal *)ctx;
    elem64 in0, in1, in2, in3;
    u64 pad = (u64)padbit<<32;
    double x0, x1, x2, x3;
    double h0lo, h0hi, h1lo, h1hi, h2lo, h2hi, h3lo, h3hi;
    double c0lo, c0hi, c1lo, c1hi, c2lo, c2hi, c3lo, c3hi;
    const double r0lo = st->r[0];
    const double r0hi = st->r[1];
    const double r1lo = st->r[2];
    const double r1hi = st->r[3];
    const double r2lo = st->r[4];
    const double r2hi = st->r[5];
    const double r3lo = st->r[6];
    const double r3hi = st->r[7];
    const double s1lo = st->s[0];
    const double s1hi = st->s[1];
    const double s2lo = st->s[2];
    const double s2hi = st->s[3];
    const double s3lo = st->s[4];
    const double s3hi = st->s[5];

    /*
     * set "truncate" rounding mode
     */
#if defined(__x86_64__)
    u32 mxcsr_orig;

    asm volatile ("stmxcsr %0":"=m"(mxcsr_orig));
    asm volatile ("ldmxcsr %0"::"m"(mxcsr));
#elif defined(__PPC__) || defined(__POWERPC__)
    double fpscr_orig, fpscr = *(double *)&one;

    asm volatile ("mffs %0":"=f"(fpscr_orig));
    asm volatile ("mtfsf 255,%0"::"f"(fpscr));
#elif defined(__s390x__)
    u32 fpc_orig;

    asm volatile ("stfpc %0":"=m"(fpc_orig));
    asm volatile ("lfpc %0"::"m"(fpc));
#elif defined(__sparc__)
    u64 fsr_orig;

    asm volatile ("stx %%fsr,%0":"=m"(fsr_orig));
    asm volatile ("ldx %0,%%fsr"::"m"(fsr));
#elif defined(__mips__)
    u32 fcsr_orig;

    asm volatile ("cfc1 %0,$31":"=r"(fcsr_orig));
    asm volatile ("ctc1 %0,$31"::"r"(fcsr));
#endif

    /*
     * load base 2^32 and de-bias
     */
    h0lo = st->h[0].d - TWO(52)*TWO0;
    h1lo = st->h[1].d - TWO(52)*TWO32;
    h2lo = st->h[2].d - TWO(52)*TWO64;
    h3lo = st->h[3].d - TWO(52)*TWO96;
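
    /*
     * For the very first block clang builds simply run the generic
     * loop below with the "hi" halves zeroed, while other compilers
     * pre-load the block here and jump straight to the multiplication
     * step (fast_entry); presumably the goto entry would otherwise get
     * in the way of clang's optimizer.
     */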
#ifdef __clang__
    h0hi = 0;
    h1hi = 0;
    h2hi = 0;
    h3hi = 0;
#else
    in0.u = EXP(52+0)  | U8TOU32(&inp[0]);
    in1.u = EXP(52+32) | U8TOU32(&inp[4]);
    in2.u = EXP(52+64) | U8TOU32(&inp[8]);
    in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;

    x0 = in0.d - TWO(52)*TWO0;
    x1 = in1.d - TWO(52)*TWO32;
    x2 = in2.d - TWO(52)*TWO64;
    x3 = in3.d - TWO(52)*TWO96;

    x0 += h0lo;
    x1 += h1lo;
    x2 += h2lo;
    x3 += h3lo;

    goto fast_entry;
#endif

    do {
        in0.u = EXP(52+0)  | U8TOU32(&inp[0]);
        in1.u = EXP(52+32) | U8TOU32(&inp[4]);
        in2.u = EXP(52+64) | U8TOU32(&inp[8]);
        in3.u = EXP(52+96) | U8TOU32(&inp[12]) | pad;

        x0 = in0.d - TWO(52)*TWO0;
        x1 = in1.d - TWO(52)*TWO32;
        x2 = in2.d - TWO(52)*TWO64;
        x3 = in3.d - TWO(52)*TWO96;

        /*
         * note that there are multiple ways to accumulate input, e.g.
         * one can as well accumulate to h0lo-h1lo-h1hi-h2hi...
         */
        h0lo += x0;
        h0hi += x1;
        h2lo += x2;
        h2hi += x3;

        /*
         * carries that cross 32n-bit (and 130-bit) boundaries
         */
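        /*
         * (h + 2^52*2^(32n)) - 2^52*2^(32n) truncates h to a multiple
         * of 2^(32n), i.e. c picks up the part of h that belongs in
         * the limbs above, while h - c is what stays behind.
         */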
        c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
        c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
        c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
        c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;

        c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
        c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
        c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
        c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;

        /*
         * base 2^48 -> base 2^32 with last reduction step
         */
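        /*
         * Each limb keeps its own low part and absorbs the carry from
         * the limb below; the carry out of the top limb, c3, re-enters
         * at the bottom multiplied by 5/2^130, because 2^130 is
         * congruent to 5 modulo p.
         */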
        x1 = (h1lo - c1lo) + c0lo;
        x2 = (h2lo - c2lo) + c1lo;
        x3 = (h3lo - c3lo) + c2lo;
        x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);

        x1 += (h1hi - c1hi) + c0hi;
        x2 += (h2hi - c2hi) + c1hi;
        x3 += (h3hi - c3hi) + c2hi;
        x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);

#ifndef __clang__
    fast_entry:
#endif
        /*
         * base 2^32 * base 2^16 = base 2^48
         */
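        /*
         * Schoolbook multiplication of the four x limbs by r, with r
         * split into 16-bit halves (the *lo/*hi constants) so that
         * every partial product carries roughly 49 significant bits
         * and fits in double precision; the s1-s3 values stand in for
         * the r limbs whose products would spill past 2^130.
         */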
        h0lo = s3lo * x1 + s2lo * x2 + s1lo * x3 + r0lo * x0;
        h1lo = r0lo * x1 + s3lo * x2 + s2lo * x3 + r1lo * x0;
        h2lo = r1lo * x1 + r0lo * x2 + s3lo * x3 + r2lo * x0;
        h3lo = r2lo * x1 + r1lo * x2 + r0lo * x3 + r3lo * x0;

        h0hi = s3hi * x1 + s2hi * x2 + s1hi * x3 + r0hi * x0;
        h1hi = r0hi * x1 + s3hi * x2 + s2hi * x3 + r1hi * x0;
        h2hi = r1hi * x1 + r0hi * x2 + s3hi * x3 + r2hi * x0;
        h3hi = r2hi * x1 + r1hi * x2 + r0hi * x3 + r3hi * x0;

        inp += 16;
        len -= 16;
    } while (len >= 16);

    /*
     * carries that cross 32n-bit (and 130-bit) boundaries
     */
    c0lo = (h0lo + TWO(52)*TWO32) - TWO(52)*TWO32;
    c1lo = (h1lo + TWO(52)*TWO64) - TWO(52)*TWO64;
    c2lo = (h2lo + TWO(52)*TWO96) - TWO(52)*TWO96;
    c3lo = (h3lo + TWO(52)*TWO130) - TWO(52)*TWO130;

    c0hi = (h0hi + TWO(52)*TWO32) - TWO(52)*TWO32;
    c1hi = (h1hi + TWO(52)*TWO64) - TWO(52)*TWO64;
    c2hi = (h2hi + TWO(52)*TWO96) - TWO(52)*TWO96;
    c3hi = (h3hi + TWO(52)*TWO130) - TWO(52)*TWO130;

    /*
     * base 2^48 -> base 2^32 with last reduction step
     */
    x1 = (h1lo - c1lo) + c0lo;
    x2 = (h2lo - c2lo) + c1lo;
    x3 = (h3lo - c3lo) + c2lo;
    x0 = (h0lo - c0lo) + c3lo * (5.0/TWO130);

    x1 += (h1hi - c1hi) + c0hi;
    x2 += (h2hi - c2hi) + c1hi;
    x3 += (h3hi - c3hi) + c2hi;
    x0 += (h0hi - c0hi) + c3hi * (5.0/TWO130);

    /*
     * store base 2^32, with bias
     */
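    /*
     * Re-biasing puts each limb back into the low mantissa bits of its
     * double, which is what poly1305_emit relies on when it masks off
     * the exponent bits.
     */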
    st->h[1].d = x1 + TWO(52)*TWO32;
    st->h[2].d = x2 + TWO(52)*TWO64;
    st->h[3].d = x3 + TWO(52)*TWO96;
    st->h[0].d = x0 + TWO(52)*TWO0;

    /*
     * restore original FPU control register
     */
#if defined(__x86_64__)
    asm volatile ("ldmxcsr %0"::"m"(mxcsr_orig));
#elif defined(__PPC__) || defined(__POWERPC__)
    asm volatile ("mtfsf 255,%0"::"f"(fpscr_orig));
#elif defined(__s390x__)
    asm volatile ("lfpc %0"::"m"(fpc_orig));
#elif defined(__sparc__)
    asm volatile ("ldx %0,%%fsr"::"m"(fsr_orig));
#elif defined(__mips__)
    asm volatile ("ctc1 %0,$31"::"r"(fcsr_orig));
#endif
}

void poly1305_emit(void *ctx, unsigned char mac[16], const u32 nonce[4])
{
    poly1305_internal *st = (poly1305_internal *) ctx;
    u64 h0, h1, h2, h3, h4;
    u32 g0, g1, g2, g3, g4;
    u64 t;
    u32 mask;

    /*
     * thanks to the bias, masking off the exponent gives an integer result
     */
    h0 = st->h[0].u & 0x000fffffffffffffULL;
    h1 = st->h[1].u & 0x000fffffffffffffULL;
    h2 = st->h[2].u & 0x000fffffffffffffULL;
    h3 = st->h[3].u & 0x000fffffffffffffULL;

    /*
     * can be partially reduced, so reduce...
     */
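    /*
     * h4 holds the bits at 2^128 and above.  Everything at 2^130 and
     * up is folded back as 5*(h4>>2): g4 = h4&-4 is 4*(h4>>2), and
     * g4 += g4>>2 turns that into 5*(h4>>2), which is then added to
     * the bottom limb.
     */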
    h4 = h3>>32; h3 &= 0xffffffffU;
    g4 = h4&-4;
    h4 &= 3;
    g4 += g4>>2;
    h0 += g4;
    h1 += h0>>32; h0 &= 0xffffffffU;
    h2 += h1>>32; h1 &= 0xffffffffU;
    h3 += h2>>32; h2 &= 0xffffffffU;

    /* compute h + -p */
    g0 = (u32)(t = h0 + 5);
    g1 = (u32)(t = h1 + (t >> 32));
    g2 = (u32)(t = h2 + (t >> 32));
    g3 = (u32)(t = h3 + (t >> 32));
    g4 = h4 + (u32)(t >> 32);

    /* if there was carry, select g0-g3 */
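    /*
     * mask is all-ones precisely when h >= p, i.e. when adding 5
     * carried into bit 130 above, so this branchless selection returns
     * h - p in that case and h otherwise, in constant time.
     */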
    mask = 0 - (g4 >> 2);
    g0 &= mask;
    g1 &= mask;
    g2 &= mask;
    g3 &= mask;
    mask = ~mask;
    g0 |= (h0 & mask);
    g1 |= (h1 & mask);
    g2 |= (h2 & mask);
    g3 |= (h3 & mask);

    /* mac = (h + nonce) % (2^128) */
    g0 = (u32)(t = (u64)g0 + nonce[0]);
    g1 = (u32)(t = (u64)g1 + (t >> 32) + nonce[1]);
    g2 = (u32)(t = (u64)g2 + (t >> 32) + nonce[2]);
    g3 = (u32)(t = (u64)g3 + (t >> 32) + nonce[3]);

    U32TO8(mac + 0, g0);
    U32TO8(mac + 4, g1);
    U32TO8(mac + 8, g2);
    U32TO8(mac + 12, g3);
}