hash_md5_sha.c

/* vi: set sw=4 ts=4: */
/*
 * Utility routines.
 *
 * Copyright (C) 2010 Denys Vlasenko
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
#include "libbb.h"

/* gcc 4.2.1 optimizes rotr64 better with inline than with macro
 * (for rotX32, there is no difference). Why? My guess is that
 * macro requires clever common subexpression elimination heuristics
 * in gcc, while inline basically forces it to happen.
 */
//#define rotl32(x,n) (((x) << (n)) | ((x) >> (32 - (n))))
static ALWAYS_INLINE uint32_t rotl32(uint32_t x, unsigned n)
{
	return (x << n) | (x >> (32 - n));
}
//#define rotr32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))
static ALWAYS_INLINE uint32_t rotr32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}
/* rotr64 is needed for sha512 only: */
//#define rotr64(x,n) (((x) >> (n)) | ((x) << (64 - (n))))
static ALWAYS_INLINE uint64_t rotr64(uint64_t x, unsigned n)
{
	return (x >> n) | (x << (64 - n));
}

/* rotl64 only used for sha3 currently */
static ALWAYS_INLINE uint64_t rotl64(uint64_t x, unsigned n)
{
	return (x << n) | (x >> (64 - n));
}
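
/* These are the standard rotate-by-n idioms; most compilers recognize them
 * and emit a single rotate instruction. The shift count must satisfy
 * 0 < n < word width (a full-width shift is undefined in C); all call
 * sites below pass such constants.
 */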

/* Feed data through a temporary buffer.
 * The internal buffer remembers previous data until it has 64
 * bytes worth to pass on.
 */
static void FAST_FUNC common64_hash(md5_ctx_t *ctx, const void *buffer, size_t len)
{
	unsigned bufpos = ctx->total64 & 63;

	ctx->total64 += len;

	while (1) {
		unsigned remaining = 64 - bufpos;
		if (remaining > len)
			remaining = len;
		/* Copy data into aligned buffer */
		memcpy(ctx->wbuffer + bufpos, buffer, remaining);
		len -= remaining;
		buffer = (const char *)buffer + remaining;
		bufpos += remaining;
		/* Clever way to do "if (bufpos != N) break; ... ; bufpos = 0;" */
		bufpos -= 64;
		if (bufpos != 0)
			break;
		/* Buffer is filled up, process it */
		ctx->process_block(ctx);
		/*bufpos = 0; - already is */
	}
}

/* Process the remaining bytes in the buffer */
static void FAST_FUNC common64_end(md5_ctx_t *ctx, int swap_needed)
{
	unsigned bufpos = ctx->total64 & 63;
	/* Pad the buffer to the next 64-byte boundary with 0x80,0,0,0... */
	ctx->wbuffer[bufpos++] = 0x80;

	/* This loop iterates either once or twice, no more, no less */
	while (1) {
		unsigned remaining = 64 - bufpos;
		memset(ctx->wbuffer + bufpos, 0, remaining);
		/* Do we have enough space for the length count? */
		if (remaining >= 8) {
			/* Store the 64-bit counter of bits in the buffer */
			uint64_t t = ctx->total64 << 3;
			if (swap_needed)
				t = bb_bswap_64(t);
			/* wbuffer is suitably aligned for this */
			*(bb__aliased_uint64_t *) (&ctx->wbuffer[64 - 8]) = t;
		}
		ctx->process_block(ctx);
		if (remaining >= 8)
			break;
		bufpos = 0;
	}
}
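
/* For reference, the final 64-byte block produced by common64_end() is laid
 * out the way MD5/SHA-1/SHA-256 require:
 *
 *   | leftover message bytes | 0x80 | 0x00 ... 0x00 | bit length, 8 bytes |
 *
 * If fewer than 8 bytes remain after the 0x80 byte, the length does not fit
 * and a second, all-padding block is emitted (the "once or twice" loop above).
 */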

/*
 * Compute MD5 checksum of strings according to the
 * definition of MD5 in RFC 1321 from April 1992.
 *
 * Written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, 1995.
 *
 * Copyright (C) 1995-1999 Free Software Foundation, Inc.
 * Copyright (C) 2001 Manuel Novoa III
 * Copyright (C) 2003 Glenn L. McGrath
 * Copyright (C) 2003 Erik Andersen
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */

/* 0: fastest, 3: smallest */
#if CONFIG_MD5_SMALL < 0
# define MD5_SMALL 0
#elif CONFIG_MD5_SMALL > 3
# define MD5_SMALL 3
#else
# define MD5_SMALL CONFIG_MD5_SMALL
#endif

/* These are the four functions used in the four steps of the MD5 algorithm
 * and defined in RFC 1321. The first function is a little bit optimized
 * (as found in Colin Plumb's public domain implementation).
 * #define FF(b, c, d) ((b & c) | (~b & d))
 */
#undef FF
#undef FG
#undef FH
#undef FI
#define FF(b, c, d) (d ^ (b & (c ^ d)))
#define FG(b, c, d) FF(d, b, c)
#define FH(b, c, d) (b ^ c ^ d)
#define FI(b, c, d) (c ^ (b | ~d))
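
/* The optimized FF above is equivalent to the RFC form: for each bit,
 * d ^ (b & (c ^ d)) yields c where b is 1 and d where b is 0, which is
 * exactly (b & c) | (~b & d), but needs one fewer operation and no NOT.
 */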

/* Hash a single block, 64 bytes long and 4-byte aligned */
static void FAST_FUNC md5_process_block64(md5_ctx_t *ctx)
{
#if MD5_SMALL > 0
	/* Before we start, a word about the strange constants.
	 * They are defined in RFC 1321 as
	 *   T[i] = (int)(4294967296.0 * fabs(sin(i))), i=1..64
	 */
	static const uint32_t C_array[] = {
		/* round 1 */
		0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
		0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
		0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
		0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
		/* round 2 */
		0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
		0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
		0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
		0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
		/* round 3 */
		0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
		0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
		0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
		0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
		/* round 4 */
		0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
		0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
		0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
		0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391
	};
	static const char P_array[] ALIGN1 = {
# if MD5_SMALL > 1
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 1 */
# endif
		1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, /* 2 */
		5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, /* 3 */
		0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9  /* 4 */
	};
#endif
	uint32_t *words = (void*) ctx->wbuffer;
	uint32_t A = ctx->hash[0];
	uint32_t B = ctx->hash[1];
	uint32_t C = ctx->hash[2];
	uint32_t D = ctx->hash[3];

#if MD5_SMALL >= 2 /* 2 or 3 */
	static const char S_array[] ALIGN1 = {
		7, 12, 17, 22,
		5, 9, 14, 20,
		4, 11, 16, 23,
		6, 10, 15, 21
	};
	const uint32_t *pc;
	const char *pp;
	const char *ps;
	int i;
	uint32_t temp;

	if (BB_BIG_ENDIAN)
		for (i = 0; i < 16; i++)
			words[i] = SWAP_LE32(words[i]);

# if MD5_SMALL == 3
	pc = C_array;
	pp = P_array;
	ps = S_array - 4;

	for (i = 0; i < 64; i++) {
		if ((i & 0x0f) == 0)
			ps += 4;
		temp = A;
		switch (i >> 4) {
		case 0:
			temp += FF(B, C, D);
			break;
		case 1:
			temp += FG(B, C, D);
			break;
		case 2:
			temp += FH(B, C, D);
			break;
		case 3:
			temp += FI(B, C, D);
		}
		temp += words[(int) (*pp++)] + *pc++;
		temp = rotl32(temp, ps[i & 3]);
		temp += B;
		A = D;
		D = C;
		C = B;
		B = temp;
	}
# else /* MD5_SMALL == 2 */
	pc = C_array;
	pp = P_array;
	ps = S_array;

	for (i = 0; i < 16; i++) {
		temp = A + FF(B, C, D) + words[(int) (*pp++)] + *pc++;
		temp = rotl32(temp, ps[i & 3]);
		temp += B;
		A = D;
		D = C;
		C = B;
		B = temp;
	}
	ps += 4;
	for (i = 0; i < 16; i++) {
		temp = A + FG(B, C, D) + words[(int) (*pp++)] + *pc++;
		temp = rotl32(temp, ps[i & 3]);
		temp += B;
		A = D;
		D = C;
		C = B;
		B = temp;
	}
	ps += 4;
	for (i = 0; i < 16; i++) {
		temp = A + FH(B, C, D) + words[(int) (*pp++)] + *pc++;
		temp = rotl32(temp, ps[i & 3]);
		temp += B;
		A = D;
		D = C;
		C = B;
		B = temp;
	}
	ps += 4;
	for (i = 0; i < 16; i++) {
		temp = A + FI(B, C, D) + words[(int) (*pp++)] + *pc++;
		temp = rotl32(temp, ps[i & 3]);
		temp += B;
		A = D;
		D = C;
		C = B;
		B = temp;
	}
# endif
	/* Add checksum to the starting values */
	ctx->hash[0] += A;
	ctx->hash[1] += B;
	ctx->hash[2] += C;
	ctx->hash[3] += D;
#else /* MD5_SMALL == 0 or 1 */
	uint32_t A_save = A;
	uint32_t B_save = B;
	uint32_t C_save = C;
	uint32_t D_save = D;
# if MD5_SMALL == 1
	const uint32_t *pc;
	const char *pp;
	int i;
# endif
	/* First round: using the given function, the context and a constant
	 * the next context is computed. Because the algorithm's processing
	 * unit is a 32-bit word and it is defined to work on words in
	 * little-endian byte order, we may have to change the byte order
	 * before the computation. To reduce the work for the next steps
	 * we store the swapped words in the array WORDS. */
# undef OP
# define OP(a, b, c, d, s, T) \
	do { \
		a += FF(b, c, d) + (*words IF_BIG_ENDIAN(= SWAP_LE32(*words))) + T; \
		words++; \
		a = rotl32(a, s); \
		a += b; \
	} while (0)
	/* Round 1 */
# if MD5_SMALL == 1
	pc = C_array;
	for (i = 0; i < 4; i++) {
		OP(A, B, C, D, 7, *pc++);
		OP(D, A, B, C, 12, *pc++);
		OP(C, D, A, B, 17, *pc++);
		OP(B, C, D, A, 22, *pc++);
	}
# else
	OP(A, B, C, D, 7, 0xd76aa478);
	OP(D, A, B, C, 12, 0xe8c7b756);
	OP(C, D, A, B, 17, 0x242070db);
	OP(B, C, D, A, 22, 0xc1bdceee);
	OP(A, B, C, D, 7, 0xf57c0faf);
	OP(D, A, B, C, 12, 0x4787c62a);
	OP(C, D, A, B, 17, 0xa8304613);
	OP(B, C, D, A, 22, 0xfd469501);
	OP(A, B, C, D, 7, 0x698098d8);
	OP(D, A, B, C, 12, 0x8b44f7af);
	OP(C, D, A, B, 17, 0xffff5bb1);
	OP(B, C, D, A, 22, 0x895cd7be);
	OP(A, B, C, D, 7, 0x6b901122);
	OP(D, A, B, C, 12, 0xfd987193);
	OP(C, D, A, B, 17, 0xa679438e);
	OP(B, C, D, A, 22, 0x49b40821);
# endif
	words -= 16;

	/* For the second to fourth round we have the possibly swapped words
	 * in WORDS. Redefine the macro to take an additional first
	 * argument specifying the function to use. */
# undef OP
# define OP(f, a, b, c, d, k, s, T) \
	do { \
		a += f(b, c, d) + words[k] + T; \
		a = rotl32(a, s); \
		a += b; \
	} while (0)

	/* Round 2 */
# if MD5_SMALL == 1
	pp = P_array;
	for (i = 0; i < 4; i++) {
		OP(FG, A, B, C, D, (int) (*pp++), 5, *pc++);
		OP(FG, D, A, B, C, (int) (*pp++), 9, *pc++);
		OP(FG, C, D, A, B, (int) (*pp++), 14, *pc++);
		OP(FG, B, C, D, A, (int) (*pp++), 20, *pc++);
	}
# else
	OP(FG, A, B, C, D, 1, 5, 0xf61e2562);
	OP(FG, D, A, B, C, 6, 9, 0xc040b340);
	OP(FG, C, D, A, B, 11, 14, 0x265e5a51);
	OP(FG, B, C, D, A, 0, 20, 0xe9b6c7aa);
	OP(FG, A, B, C, D, 5, 5, 0xd62f105d);
	OP(FG, D, A, B, C, 10, 9, 0x02441453);
	OP(FG, C, D, A, B, 15, 14, 0xd8a1e681);
	OP(FG, B, C, D, A, 4, 20, 0xe7d3fbc8);
	OP(FG, A, B, C, D, 9, 5, 0x21e1cde6);
	OP(FG, D, A, B, C, 14, 9, 0xc33707d6);
	OP(FG, C, D, A, B, 3, 14, 0xf4d50d87);
	OP(FG, B, C, D, A, 8, 20, 0x455a14ed);
	OP(FG, A, B, C, D, 13, 5, 0xa9e3e905);
	OP(FG, D, A, B, C, 2, 9, 0xfcefa3f8);
	OP(FG, C, D, A, B, 7, 14, 0x676f02d9);
	OP(FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
# endif

	/* Round 3 */
# if MD5_SMALL == 1
	for (i = 0; i < 4; i++) {
		OP(FH, A, B, C, D, (int) (*pp++), 4, *pc++);
		OP(FH, D, A, B, C, (int) (*pp++), 11, *pc++);
		OP(FH, C, D, A, B, (int) (*pp++), 16, *pc++);
		OP(FH, B, C, D, A, (int) (*pp++), 23, *pc++);
	}
# else
	OP(FH, A, B, C, D, 5, 4, 0xfffa3942);
	OP(FH, D, A, B, C, 8, 11, 0x8771f681);
	OP(FH, C, D, A, B, 11, 16, 0x6d9d6122);
	OP(FH, B, C, D, A, 14, 23, 0xfde5380c);
	OP(FH, A, B, C, D, 1, 4, 0xa4beea44);
	OP(FH, D, A, B, C, 4, 11, 0x4bdecfa9);
	OP(FH, C, D, A, B, 7, 16, 0xf6bb4b60);
	OP(FH, B, C, D, A, 10, 23, 0xbebfbc70);
	OP(FH, A, B, C, D, 13, 4, 0x289b7ec6);
	OP(FH, D, A, B, C, 0, 11, 0xeaa127fa);
	OP(FH, C, D, A, B, 3, 16, 0xd4ef3085);
	OP(FH, B, C, D, A, 6, 23, 0x04881d05);
	OP(FH, A, B, C, D, 9, 4, 0xd9d4d039);
	OP(FH, D, A, B, C, 12, 11, 0xe6db99e5);
	OP(FH, C, D, A, B, 15, 16, 0x1fa27cf8);
	OP(FH, B, C, D, A, 2, 23, 0xc4ac5665);
# endif

	/* Round 4 */
# if MD5_SMALL == 1
	for (i = 0; i < 4; i++) {
		OP(FI, A, B, C, D, (int) (*pp++), 6, *pc++);
		OP(FI, D, A, B, C, (int) (*pp++), 10, *pc++);
		OP(FI, C, D, A, B, (int) (*pp++), 15, *pc++);
		OP(FI, B, C, D, A, (int) (*pp++), 21, *pc++);
	}
# else
	OP(FI, A, B, C, D, 0, 6, 0xf4292244);
	OP(FI, D, A, B, C, 7, 10, 0x432aff97);
	OP(FI, C, D, A, B, 14, 15, 0xab9423a7);
	OP(FI, B, C, D, A, 5, 21, 0xfc93a039);
	OP(FI, A, B, C, D, 12, 6, 0x655b59c3);
	OP(FI, D, A, B, C, 3, 10, 0x8f0ccc92);
	OP(FI, C, D, A, B, 10, 15, 0xffeff47d);
	OP(FI, B, C, D, A, 1, 21, 0x85845dd1);
	OP(FI, A, B, C, D, 8, 6, 0x6fa87e4f);
	OP(FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
	OP(FI, C, D, A, B, 6, 15, 0xa3014314);
	OP(FI, B, C, D, A, 13, 21, 0x4e0811a1);
	OP(FI, A, B, C, D, 4, 6, 0xf7537e82);
	OP(FI, D, A, B, C, 11, 10, 0xbd3af235);
	OP(FI, C, D, A, B, 2, 15, 0x2ad7d2bb);
	OP(FI, B, C, D, A, 9, 21, 0xeb86d391);
# undef OP
# endif
	/* Add checksum to the starting values */
	ctx->hash[0] = A_save + A;
	ctx->hash[1] = B_save + B;
	ctx->hash[2] = C_save + C;
	ctx->hash[3] = D_save + D;
#endif
}
#undef FF
#undef FG
#undef FH
#undef FI

/* Initialize structure containing state of computation.
 * (RFC 1321, 3.3: Step 3)
 */
void FAST_FUNC md5_begin(md5_ctx_t *ctx)
{
	ctx->hash[0] = 0x67452301;
	ctx->hash[1] = 0xefcdab89;
	ctx->hash[2] = 0x98badcfe;
	ctx->hash[3] = 0x10325476;
	ctx->total64 = 0;
	ctx->process_block = md5_process_block64;
}

/* Used also for sha1 and sha256 */
void FAST_FUNC md5_hash(md5_ctx_t *ctx, const void *buffer, size_t len)
{
	common64_hash(ctx, buffer, len);
}

/* Process the remaining bytes in the buffer and put the result from CTX
 * in the first 16 bytes following RESBUF. The result is always in little
 * endian byte order, so that a byte-wise output yields the expected
 * ASCII representation of the message digest.
 */
void FAST_FUNC md5_end(md5_ctx_t *ctx, void *resbuf)
{
	/* MD5 stores total in LE, need to swap on BE arches: */
	common64_end(ctx, /*swap_needed:*/ BB_BIG_ENDIAN);

	/* The MD5 result is in little endian byte order */
	if (BB_BIG_ENDIAN) {
		ctx->hash[0] = SWAP_LE32(ctx->hash[0]);
		ctx->hash[1] = SWAP_LE32(ctx->hash[1]);
		ctx->hash[2] = SWAP_LE32(ctx->hash[2]);
		ctx->hash[3] = SWAP_LE32(ctx->hash[3]);
	}
	memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * 4);
}
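
/*
 * A minimal usage sketch of the three entry points above (illustration
 * only, not part of libbb):
 *
 *	md5_ctx_t ctx;
 *	uint8_t digest[16];
 *	md5_begin(&ctx);
 *	md5_hash(&ctx, "abc", 3);
 *	md5_end(&ctx, digest);
 *
 * digest[] then holds the 16 result bytes; for "abc" that is the RFC 1321
 * test-suite value 900150983cd24fb0d6963f7d28e17f72.
 */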

/*
 * SHA1 part is:
 * Copyright 2007 Rob Landley <rob@landley.net>
 *
 * Based on the public domain SHA-1 in C by Steve Reid <steve@edmweb.com>
 * from http://www.mirrors.wiretapped.net/security/cryptography/hashes/sha1/
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 *
 * ---------------------------------------------------------------------------
 *
 * SHA256 and SHA512 parts are:
 * Released into the Public Domain by Ulrich Drepper <drepper@redhat.com>.
 * Shrank by Denys Vlasenko.
 *
 * ---------------------------------------------------------------------------
 *
 * The best way to test random blocksizes is to go to coreutils/md5_sha1_sum.c
 * and replace "4096" with something like "2000 + time(NULL) % 2097",
 * then rebuild and compare "shaNNNsum bigfile" results.
 */
static void FAST_FUNC sha1_process_block64(sha1_ctx_t *ctx)
{
	static const uint32_t rconsts[] = {
		0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6
	};
	int i, j;
	int cnt;
	uint32_t W[16+16];
	uint32_t a, b, c, d, e;

	/* On-stack work buffer frees up one register in the main loop
	 * which otherwise will be needed to hold ctx pointer */
	for (i = 0; i < 16; i++)
		W[i] = W[i+16] = SWAP_BE32(((uint32_t*)ctx->wbuffer)[i]);

	a = ctx->hash[0];
	b = ctx->hash[1];
	c = ctx->hash[2];
	d = ctx->hash[3];
	e = ctx->hash[4];

	/* 4 rounds of 20 operations each */
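	/* The per-round "work" term computed below corresponds to the round
	 * functions of FIPS 180-2:4.1.1:
	 *   i == 0:      Ch(b,c,d)     = (b & (c ^ d)) ^ d
	 *   i == 2:      Maj(b,c,d)    = ((b | c) & d) | (b & c)
	 *   i == 1 or 3: Parity(b,c,d) = b ^ c ^ d
	 */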
	cnt = 0;
	for (i = 0; i < 4; i++) {
		j = 19;
		do {
			uint32_t work;

			work = c ^ d;
			if (i == 0) {
				work = (work & b) ^ d;
				if (j <= 3)
					goto ge16;
				/* Used to do SWAP_BE32 here, but this
				 * requires ctx (see comment above) */
				work += W[cnt];
			} else {
				if (i == 2)
					work = ((b | c) & d) | (b & c);
				else /* i = 1 or 3 */
					work ^= b;
 ge16:
				W[cnt] = W[cnt+16] = rotl32(W[cnt+13] ^ W[cnt+8] ^ W[cnt+2] ^ W[cnt], 1);
				work += W[cnt];
			}
			work += e + rotl32(a, 5) + rconsts[i];

			/* Rotate by one for next time */
			e = d;
			d = c;
			c = /* b = */ rotl32(b, 30);
			b = a;
			a = work;
			cnt = (cnt + 1) & 15;
		} while (--j >= 0);
	}
	ctx->hash[0] += a;
	ctx->hash[1] += b;
	ctx->hash[2] += c;
	ctx->hash[3] += d;
	ctx->hash[4] += e;
}

/* Constants for SHA512 from FIPS 180-2:4.2.3.
 * SHA256 constants from FIPS 180-2:4.2.2
 * are the most significant half of first 64 elements
 * of the same array.
 */
static const uint64_t sha_K[80] = {
	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
	0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
	0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
	0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
	0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
	0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
	0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
	0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
	0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
	0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
	0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
	0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
	0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
	0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
	0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
	0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
	0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
	0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
	0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
	0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
	0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
	0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
	0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
	0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
	0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
	0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
	0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
	0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
	0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
	0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
	0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
	0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
	0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, /* [64]+ are used for sha512 only */
	0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
	0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
	0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
	0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
	0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
	0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};

#undef Ch
#undef Maj
#undef S0
#undef S1
#undef R0
#undef R1

static void FAST_FUNC sha256_process_block64(sha256_ctx_t *ctx)
{
	unsigned t;
	uint32_t W[64], a, b, c, d, e, f, g, h;
	const uint32_t *words = (uint32_t*) ctx->wbuffer;

	/* Operators defined in FIPS 180-2:4.1.2. */
#define Ch(x, y, z) ((x & y) ^ (~x & z))
#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define S0(x) (rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22))
#define S1(x) (rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25))
#define R0(x) (rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3))
#define R1(x) (rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10))
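	/* In FIPS 180-2 notation: S0/S1 are the "big" Sigma0/Sigma1 functions
	 * used in the compression loop, while R0/R1 are the "small"
	 * sigma0/sigma1 functions used only for the message schedule below.
	 */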
	/* Compute the message schedule according to FIPS 180-2:6.2.2 step 2. */
	for (t = 0; t < 16; ++t)
		W[t] = SWAP_BE32(words[t]);
	for (/*t = 16*/; t < 64; ++t)
		W[t] = R1(W[t - 2]) + W[t - 7] + R0(W[t - 15]) + W[t - 16];

	a = ctx->hash[0];
	b = ctx->hash[1];
	c = ctx->hash[2];
	d = ctx->hash[3];
	e = ctx->hash[4];
	f = ctx->hash[5];
	g = ctx->hash[6];
	h = ctx->hash[7];

	/* The actual computation according to FIPS 180-2:6.2.2 step 3. */
	for (t = 0; t < 64; ++t) {
		/* Need to fetch upper half of sha_K[t]
		 * (I hope compiler is clever enough to just fetch
		 * upper half)
		 */
		uint32_t K_t = sha_K[t] >> 32;
		uint32_t T1 = h + S1(e) + Ch(e, f, g) + K_t + W[t];
		uint32_t T2 = S0(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
	}
#undef Ch
#undef Maj
#undef S0
#undef S1
#undef R0
#undef R1
	/* Add the starting values of the context according to FIPS 180-2:6.2.2
	 * step 4. */
	ctx->hash[0] += a;
	ctx->hash[1] += b;
	ctx->hash[2] += c;
	ctx->hash[3] += d;
	ctx->hash[4] += e;
	ctx->hash[5] += f;
	ctx->hash[6] += g;
	ctx->hash[7] += h;
}

static void FAST_FUNC sha512_process_block128(sha512_ctx_t *ctx)
{
	unsigned t;
	uint64_t W[80];
	/* On i386, having assignments here (not later as sha256 does)
	 * produces 99 bytes smaller code with gcc 4.3.1
	 */
	uint64_t a = ctx->hash[0];
	uint64_t b = ctx->hash[1];
	uint64_t c = ctx->hash[2];
	uint64_t d = ctx->hash[3];
	uint64_t e = ctx->hash[4];
	uint64_t f = ctx->hash[5];
	uint64_t g = ctx->hash[6];
	uint64_t h = ctx->hash[7];
	const uint64_t *words = (uint64_t*) ctx->wbuffer;

	/* Operators defined in FIPS 180-2:4.1.2. */
#define Ch(x, y, z) ((x & y) ^ (~x & z))
#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define S0(x) (rotr64(x, 28) ^ rotr64(x, 34) ^ rotr64(x, 39))
#define S1(x) (rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41))
#define R0(x) (rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7))
#define R1(x) (rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6))

	/* Compute the message schedule according to FIPS 180-2:6.3.2 step 2. */
	for (t = 0; t < 16; ++t)
		W[t] = SWAP_BE64(words[t]);
	for (/*t = 16*/; t < 80; ++t)
		W[t] = R1(W[t - 2]) + W[t - 7] + R0(W[t - 15]) + W[t - 16];

	/* The actual computation according to FIPS 180-2:6.3.2 step 3. */
	for (t = 0; t < 80; ++t) {
		uint64_t T1 = h + S1(e) + Ch(e, f, g) + sha_K[t] + W[t];
		uint64_t T2 = S0(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
	}
#undef Ch
#undef Maj
#undef S0
#undef S1
#undef R0
#undef R1
	/* Add the starting values of the context according to FIPS 180-2:6.3.2
	 * step 4. */
	ctx->hash[0] += a;
	ctx->hash[1] += b;
	ctx->hash[2] += c;
	ctx->hash[3] += d;
	ctx->hash[4] += e;
	ctx->hash[5] += f;
	ctx->hash[6] += g;
	ctx->hash[7] += h;
}

void FAST_FUNC sha1_begin(sha1_ctx_t *ctx)
{
	ctx->hash[0] = 0x67452301;
	ctx->hash[1] = 0xefcdab89;
	ctx->hash[2] = 0x98badcfe;
	ctx->hash[3] = 0x10325476;
	ctx->hash[4] = 0xc3d2e1f0;
	ctx->total64 = 0;
	ctx->process_block = sha1_process_block64;
}

static const uint32_t init256[] = {
	0,
	0,
	0x6a09e667,
	0xbb67ae85,
	0x3c6ef372,
	0xa54ff53a,
	0x510e527f,
	0x9b05688c,
	0x1f83d9ab,
	0x5be0cd19,
};
static const uint32_t init512_lo[] = {
	0,
	0,
	0xf3bcc908,
	0x84caa73b,
	0xfe94f82b,
	0x5f1d36f1,
	0xade682d1,
	0x2b3e6c1f,
	0xfb41bd6b,
	0x137e2179,
};
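
/* The SHA-256 initial hash words (FIPS 180-2:5.3.2) are exactly the upper
 * halves of the SHA-512 initial hash words (FIPS 180-2:5.3.3). That is why
 * init256[] can serve both as the SHA-256 IV and as the "high 32 bits"
 * table for SHA-512, with init512_lo[] supplying the low 32 bits.
 * The two leading zero entries overlay the length counter field(s) of the
 * context and initialize them to zero for free.
 */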

/* Initialize structure containing state of computation.
 * (FIPS 180-2:5.3.2) */
void FAST_FUNC sha256_begin(sha256_ctx_t *ctx)
{
	memcpy(&ctx->total64, init256, sizeof(init256));
	/*ctx->total64 = 0; - done by prepending two 32-bit zeros to init256 */
	ctx->process_block = sha256_process_block64;
}

/* Initialize structure containing state of computation.
 * (FIPS 180-2:5.3.3) */
void FAST_FUNC sha512_begin(sha512_ctx_t *ctx)
{
	int i;
	/* Two extra iterations zero out ctx->total64[2] */
	uint64_t *tp = ctx->total64;
	for (i = 0; i < 2+8; i++)
		tp[i] = ((uint64_t)(init256[i]) << 32) + init512_lo[i];
	/*ctx->total64[0] = ctx->total64[1] = 0; - already done */
}

void FAST_FUNC sha512_hash(sha512_ctx_t *ctx, const void *buffer, size_t len)
{
	unsigned bufpos = ctx->total64[0] & 127;
	unsigned remaining;

	/* First increment the byte count. FIPS 180-2 specifies the possible
	 * length of the file up to 2^128 _bits_.
	 * We compute the number of _bytes_ and convert to bits later. */
	ctx->total64[0] += len;
	if (ctx->total64[0] < len)
		ctx->total64[1]++;
#if 0
	remaining = 128 - bufpos;

	/* Hash whole blocks */
	while (len >= remaining) {
		memcpy(ctx->wbuffer + bufpos, buffer, remaining);
		buffer = (const char *)buffer + remaining;
		len -= remaining;
		remaining = 128;
		bufpos = 0;
		sha512_process_block128(ctx);
	}
	/* Save last, partial block */
	memcpy(ctx->wbuffer + bufpos, buffer, len);
#else
	while (1) {
		remaining = 128 - bufpos;
		if (remaining > len)
			remaining = len;
		/* Copy data into aligned buffer */
		memcpy(ctx->wbuffer + bufpos, buffer, remaining);
		len -= remaining;
		buffer = (const char *)buffer + remaining;
		bufpos += remaining;
		/* Clever way to do "if (bufpos != N) break; ... ; bufpos = 0;" */
		bufpos -= 128;
		if (bufpos != 0)
			break;
		/* Buffer is filled up, process it */
		sha512_process_block128(ctx);
		/*bufpos = 0; - already is */
	}
#endif
}

/* Used also for sha256 */
void FAST_FUNC sha1_end(sha1_ctx_t *ctx, void *resbuf)
{
	unsigned hash_size;

	/* SHA stores total in BE, need to swap on LE arches: */
	common64_end(ctx, /*swap_needed:*/ BB_LITTLE_ENDIAN);

	hash_size = (ctx->process_block == sha1_process_block64) ? 5 : 8;
	/* This way we do not impose alignment constraints on resbuf: */
	if (BB_LITTLE_ENDIAN) {
		unsigned i;
		for (i = 0; i < hash_size; ++i)
			ctx->hash[i] = SWAP_BE32(ctx->hash[i]);
	}
	memcpy(resbuf, ctx->hash, sizeof(ctx->hash[0]) * hash_size);
}

void FAST_FUNC sha512_end(sha512_ctx_t *ctx, void *resbuf)
{
	unsigned bufpos = ctx->total64[0] & 127;

	/* Pad the buffer to the next 128-byte boundary with 0x80,0,0,0... */
	ctx->wbuffer[bufpos++] = 0x80;

	while (1) {
		unsigned remaining = 128 - bufpos;
		memset(ctx->wbuffer + bufpos, 0, remaining);
		if (remaining >= 16) {
			/* Store the 128-bit counter of bits in the buffer in BE format */
			uint64_t t;
			t = ctx->total64[0] << 3;
			t = SWAP_BE64(t);
			*(bb__aliased_uint64_t *) (&ctx->wbuffer[128 - 8]) = t;
			t = (ctx->total64[1] << 3) | (ctx->total64[0] >> 61);
			t = SWAP_BE64(t);
			*(bb__aliased_uint64_t *) (&ctx->wbuffer[128 - 16]) = t;
		}
		sha512_process_block128(ctx);
		if (remaining >= 16)
			break;
		bufpos = 0;
	}
	if (BB_LITTLE_ENDIAN) {
		unsigned i;
		for (i = 0; i < ARRAY_SIZE(ctx->hash); ++i)
			ctx->hash[i] = SWAP_BE64(ctx->hash[i]);
	}
	memcpy(resbuf, ctx->hash, sizeof(ctx->hash));
}
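
/*
 * A minimal usage sketch for the SHA-512 entry points above (illustration
 * only, not part of libbb):
 *
 *	sha512_ctx_t ctx;
 *	uint8_t digest[64];
 *	sha512_begin(&ctx);
 *	sha512_hash(&ctx, "abc", 3);
 *	sha512_end(&ctx, digest);
 *
 * digest[] then holds the 64 bytes of SHA-512("abc") in big-endian order,
 * ready for byte-wise hex printing.
 */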

/*
 * The Keccak sponge function, designed by Guido Bertoni, Joan Daemen,
 * Michael Peeters and Gilles Van Assche. For more information, feedback or
 * questions, please refer to our website: http://keccak.noekeon.org/
 *
 * Implementation by Ronny Van Keer,
 * hereby denoted as "the implementer".
 *
 * To the extent possible under law, the implementer has waived all copyright
 * and related or neighboring rights to the source code in this file.
 * http://creativecommons.org/publicdomain/zero/1.0/
 *
 * Busybox modifications (C) Lauri Kasanen, under the GPLv2.
 */

#if CONFIG_SHA3_SMALL < 0
# define SHA3_SMALL 0
#elif CONFIG_SHA3_SMALL > 1
# define SHA3_SMALL 1
#else
# define SHA3_SMALL CONFIG_SHA3_SMALL
#endif

enum {
	SHA3_IBLK_BYTES = 72, /* 576 bits / 8 */
};
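
/* 72 bytes is the sponge "rate" r = (1600 - 2*512) / 8 bits, i.e. the block
 * size corresponding to a 512-bit Keccak capacity: each absorbed block is
 * XORed into the first 72 bytes of the 200-byte (25 x 64-bit) state.
 */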

/*
 * In the crypto literature this function is usually called Keccak-f().
 */
static void sha3_process_block72(uint64_t *state)
{
	enum { NROUNDS = 24 };
	/* Elements should be 64-bit, but the top half is always zero or 0x80000000.
	 * We encode the 63rd bits in a separate word below.
	 * The same is true for the 31st bits, which lets us use a 16-bit table
	 * instead of a 64-bit one. The speed penalty is lost in the noise.
	 */
	static const uint16_t IOTA_CONST[NROUNDS] = {
		0x0001,
		0x8082,
		0x808a,
		0x8000,
		0x808b,
		0x0001,
		0x8081,
		0x8009,
		0x008a,
		0x0088,
		0x8009,
		0x000a,
		0x808b,
		0x008b,
		0x8089,
		0x8003,
		0x8002,
		0x0080,
		0x800a,
		0x000a,
		0x8081,
		0x8080,
		0x0001,
		0x8008,
	};
	/* bit for CONST[0] is in msb: 0011 0011 0000 0111 1101 1101 */
	const uint32_t IOTA_CONST_bit63 = (uint32_t)(0x3307dd00);
	/* bit for CONST[0] is in msb: 0001 0110 0011 1000 0001 1011 */
	const uint32_t IOTA_CONST_bit31 = (uint32_t)(0x16381b00);

	static const uint8_t ROT_CONST[24] = {
		1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
		27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
	};
	static const uint8_t PI_LANE[24] = {
		10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
		15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
	};
	/*static const uint8_t MOD5[10] = { 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, };*/

	unsigned x, y;
	unsigned round;

	if (BB_BIG_ENDIAN) {
		for (x = 0; x < 25; x++) {
			state[x] = SWAP_LE64(state[x]);
		}
	}

	for (round = 0; round < NROUNDS; ++round) {
		/* Theta */
		{
			uint64_t BC[10];
			for (x = 0; x < 5; ++x) {
				BC[x + 5] = BC[x] = state[x]
					^ state[x + 5] ^ state[x + 10]
					^ state[x + 15] ^ state[x + 20];
			}
			/* Using 2x5 vector above eliminates the need to use
			 * BC[MOD5[x+N]] trick below to fetch BC[(x+N) % 5],
			 * and the code is a bit _smaller_.
			 */
			for (x = 0; x < 5; ++x) {
				uint64_t temp = BC[x + 4] ^ rotl64(BC[x + 1], 1);
				state[x] ^= temp;
				state[x + 5] ^= temp;
				state[x + 10] ^= temp;
				state[x + 15] ^= temp;
				state[x + 20] ^= temp;
			}
		}

		/* Rho Pi */
		if (SHA3_SMALL) {
			uint64_t t1 = state[1];
			for (x = 0; x < 24; ++x) {
				uint64_t t0 = state[PI_LANE[x]];
				state[PI_LANE[x]] = rotl64(t1, ROT_CONST[x]);
				t1 = t0;
			}
		} else {
			/* Especially large benefit for 32-bit arch (75% faster):
			 * 64-bit rotations by non-constant usually are SLOW on those.
			 * We resort to unrolling here.
			 * This optimizes out PI_LANE[] and ROT_CONST[],
			 * but generates 300-500 more bytes of code.
			 */
			uint64_t t0;
			uint64_t t1 = state[1];
#define RhoPi_twice(x) \
	t0 = state[PI_LANE[x ]]; \
	state[PI_LANE[x ]] = rotl64(t1, ROT_CONST[x ]); \
	t1 = state[PI_LANE[x+1]]; \
	state[PI_LANE[x+1]] = rotl64(t0, ROT_CONST[x+1]);
			RhoPi_twice(0); RhoPi_twice(2);
			RhoPi_twice(4); RhoPi_twice(6);
			RhoPi_twice(8); RhoPi_twice(10);
			RhoPi_twice(12); RhoPi_twice(14);
			RhoPi_twice(16); RhoPi_twice(18);
			RhoPi_twice(20); RhoPi_twice(22);
#undef RhoPi_twice
		}

		/* Chi */
		for (y = 0; y <= 20; y += 5) {
			uint64_t BC0, BC1, BC2, BC3, BC4;
			BC0 = state[y + 0];
			BC1 = state[y + 1];
			BC2 = state[y + 2];
			state[y + 0] = BC0 ^ ((~BC1) & BC2);
			BC3 = state[y + 3];
			state[y + 1] = BC1 ^ ((~BC2) & BC3);
			BC4 = state[y + 4];
			state[y + 2] = BC2 ^ ((~BC3) & BC4);
			state[y + 3] = BC3 ^ ((~BC4) & BC0);
			state[y + 4] = BC4 ^ ((~BC0) & BC1);
		}

		/* Iota */
		state[0] ^= IOTA_CONST[round]
			| (uint32_t)((IOTA_CONST_bit31 << round) & 0x80000000)
			| (uint64_t)((IOTA_CONST_bit63 << round) & 0x80000000) << 32;
	}

	if (BB_BIG_ENDIAN) {
		for (x = 0; x < 25; x++) {
			state[x] = SWAP_LE64(state[x]);
		}
	}
}
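
/* The round loop above is the full Keccak-f[1600] permutation: 24 rounds,
 * each applying the theta, rho+pi (combined), chi and iota step mappings to
 * the 5x5 lane state, with iota's 64-bit round constant reassembled from
 * IOTA_CONST[] plus the packed bit-31/bit-63 masks.
 */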

void FAST_FUNC sha3_begin(sha3_ctx_t *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
}

void FAST_FUNC sha3_hash(sha3_ctx_t *ctx, const void *buffer, size_t len)
{
#if SHA3_SMALL
	const uint8_t *data = buffer;
	unsigned bufpos = ctx->bytes_queued;

	while (1) {
		unsigned remaining = SHA3_IBLK_BYTES - bufpos;
		if (remaining > len)
			remaining = len;
		len -= remaining;
		/* XOR data into buffer */
		while (remaining != 0) {
			uint8_t *buf = (uint8_t*)ctx->state;
			buf[bufpos] ^= *data++;
			bufpos++;
			remaining--;
		}
		/* Clever way to do "if (bufpos != N) break; ... ; bufpos = 0;" */
		bufpos -= SHA3_IBLK_BYTES;
		if (bufpos != 0)
			break;
		/* Buffer is filled up, process it */
		sha3_process_block72(ctx->state);
		/*bufpos = 0; - already is */
	}
	ctx->bytes_queued = bufpos + SHA3_IBLK_BYTES;
#else
	/* +50 bytes code size, but a bit faster because of long-sized XORs */
	const uint8_t *data = buffer;
	unsigned bufpos = ctx->bytes_queued;

	/* If already data in queue, continue queuing first */
	while (len != 0 && bufpos != 0) {
		uint8_t *buf = (uint8_t*)ctx->state;
		buf[bufpos] ^= *data++;
		len--;
		bufpos++;
		if (bufpos == SHA3_IBLK_BYTES) {
			bufpos = 0;
			goto do_block;
		}
	}

	/* Absorb complete blocks */
	while (len >= SHA3_IBLK_BYTES) {
		/* XOR data onto beginning of state[].
		 * We try to be efficient - operate one word at a time, not byte.
		 * Careful wrt unaligned access: can't just use "*(long*)data"!
		 */
		unsigned count = SHA3_IBLK_BYTES / sizeof(long);
		long *buf = (long*)ctx->state;
		do {
			long v;
			move_from_unaligned_long(v, (long*)data);
			*buf++ ^= v;
			data += sizeof(long);
		} while (--count);
		len -= SHA3_IBLK_BYTES;
 do_block:
		sha3_process_block72(ctx->state);
	}

	/* Queue remaining data bytes */
	while (len != 0) {
		uint8_t *buf = (uint8_t*)ctx->state;
		buf[bufpos] ^= *data++;
		bufpos++;
		len--;
	}
	ctx->bytes_queued = bufpos;
#endif
}

void FAST_FUNC sha3_end(sha3_ctx_t *ctx, void *resbuf)
{
	/* Padding */
	uint8_t *buf = (uint8_t*)ctx->state;
	buf[ctx->bytes_queued] ^= 1;
	buf[SHA3_IBLK_BYTES - 1] ^= 0x80;
	sha3_process_block72(ctx->state);

	/* Output */
	memcpy(resbuf, ctx->state, 64);
}
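
/*
 * A minimal usage sketch for the SHA3 entry points above (illustration only,
 * not part of libbb):
 *
 *	sha3_ctx_t ctx;
 *	uint8_t digest[64];
 *	sha3_begin(&ctx);
 *	sha3_hash(&ctx, "abc", 3);
 *	sha3_end(&ctx, digest);
 *
 * Note that the 0x01 padding byte applied in sha3_end() is the padding of
 * the original Keccak submission; the final FIPS 202 SHA3-512 uses 0x06
 * instead, so this version's output corresponds to Keccak-512 rather than
 * standardized SHA3-512.
 */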