  1. /*
  2. * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. */
  9. #include <openssl/e_os2.h>
  10. #include <string.h>
  11. #include <assert.h>
/*
 * Absorb up to |len| bytes from |inp| into the 1600-bit Keccak state |A|,
 * processing |r| bytes (the rate) per permutation call.
 * NOTE(review): presumably returns the number of trailing bytes (< r) that
 * did not fill a complete block and were left unabsorbed — confirm against
 * the implementation, which is not visible in this chunk.
 */
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
                   size_t r);
/*
 * Squeeze |len| output bytes from state |A| at rate |r| into |out|.
 */
void SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len, size_t r);
#if !defined(KECCAK1600_ASM) || !defined(SELFTEST)

/*
 * Choose some sensible defaults
 */
/* Exactly one of the implementation variants below gets compiled. */
#if !defined(KECCAK_REF) && !defined(KECCAK_1X) && !defined(KECCAK_1X_ALT) && \
    !defined(KECCAK_2X) && !defined(KECCAK_INPLACE)
# define KECCAK_2X      /* default to KECCAK_2X variant */
#endif

/*
 * On register-starved x86 the complemented-lanes representation is used by
 * the KECCAK_2X round (see the KECCAK_COMPLEMENTING_TRANSFORM branches
 * below), which replaces some of Chi's NOT+AND pairs with OR/AND forms.
 */
#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
# define KECCAK_COMPLEMENTING_TRANSFORM
#endif

#if defined(__x86_64__) || defined(__aarch64__) || \
    defined(__mips64) || defined(__ia64) || \
    (defined(__VMS) && !defined(__vax))
/*
 * These are available even in ILP32 flavours, but even then they are
 * capable of performing 64-bit operations as efficiently as in *P64.
 * Since it's not given that we can use sizeof(void *), just shunt it.
 */
# define BIT_INTERLEAVE (0)
#else
/* On genuinely 32-bit targets, store each 64-bit lane bit-interleaved so
 * that 64-bit rotations decompose into 32-bit ones (see ROL64 below). */
# define BIT_INTERLEAVE (sizeof(void *) < 8)
#endif
/* Rotate a 32-bit word left; the "& 31" makes a rotate by 0 well-defined. */
#define ROL32(a, offset) (((a) << (offset)) | ((a) >> ((32 - (offset)) & 31)))

/*
 * Rotate a 64-bit lane left by |offset| (0 <= offset < 64).
 *
 * In the BIT_INTERLEAVE representation the low 32-bit half of |val| holds
 * the even-numbered bits of the logical lane and the high half holds the
 * odd-numbered bits, so a 64-bit rotation becomes two 32-bit rotations:
 * an odd |offset| swaps the halves (even bits move to odd positions and
 * vice versa), rotating one half by offset/2 and the other by offset/2+1.
 */
static uint64_t ROL64(uint64_t val, int offset)
{
    if (offset == 0) {
        return val;
    } else if (!BIT_INTERLEAVE) {
        /* offset is 1..63 here, so the right shift by 64-offset is valid */
        return (val << offset) | (val >> (64-offset));
    } else {
        uint32_t hi = (uint32_t)(val >> 32), lo = (uint32_t)val;

        if (offset & 1) {
            /* odd rotate: halves swap roles */
            uint32_t tmp = hi;

            offset >>= 1;
            hi = ROL32(lo, offset);
            lo = ROL32(tmp, offset + 1);
        } else {
            /* even rotate: each half rotates independently by offset/2 */
            offset >>= 1;
            lo = ROL32(lo, offset);
            hi = ROL32(hi, offset);
        }
        return ((uint64_t)hi << 32) | lo;
    }
}
/*
 * Rho-step rotation amounts, indexed as rhotates[y][x] — note the [y][x]
 * transposition relative to the A[x,y] of FIPS 202 (see the KECCAK_REF
 * commentary below).
 */
static const unsigned char rhotates[5][5] = {
    {  0,  1, 62, 28, 27 },
    { 36, 44,  6, 55, 20 },
    {  3, 10, 43, 25, 39 },
    { 41, 45, 15, 21,  8 },
    { 18,  2, 61, 56, 14 }
};
/*
 * The 24 Iota round constants. Each entry carries two precomputed
 * encodings selected at compile time: the bit-interleaved form (used when
 * BIT_INTERLEAVE is non-zero, matching ROL64's lane layout) and the plain
 * form. The first constant, 1, is identical in both encodings — the
 * repeated value is intentional, not a typo.
 */
static const uint64_t iotas[] = {
    BIT_INTERLEAVE ? 0x0000000000000001U : 0x0000000000000001U,
    BIT_INTERLEAVE ? 0x0000008900000000U : 0x0000000000008082U,
    BIT_INTERLEAVE ? 0x8000008b00000000U : 0x800000000000808aU,
    BIT_INTERLEAVE ? 0x8000808000000000U : 0x8000000080008000U,
    BIT_INTERLEAVE ? 0x0000008b00000001U : 0x000000000000808bU,
    BIT_INTERLEAVE ? 0x0000800000000001U : 0x0000000080000001U,
    BIT_INTERLEAVE ? 0x8000808800000001U : 0x8000000080008081U,
    BIT_INTERLEAVE ? 0x8000008200000001U : 0x8000000000008009U,
    BIT_INTERLEAVE ? 0x0000000b00000000U : 0x000000000000008aU,
    BIT_INTERLEAVE ? 0x0000000a00000000U : 0x0000000000000088U,
    BIT_INTERLEAVE ? 0x0000808200000001U : 0x0000000080008009U,
    BIT_INTERLEAVE ? 0x0000800300000000U : 0x000000008000000aU,
    BIT_INTERLEAVE ? 0x0000808b00000001U : 0x000000008000808bU,
    BIT_INTERLEAVE ? 0x8000000b00000001U : 0x800000000000008bU,
    BIT_INTERLEAVE ? 0x8000008a00000001U : 0x8000000000008089U,
    BIT_INTERLEAVE ? 0x8000008100000001U : 0x8000000000008003U,
    BIT_INTERLEAVE ? 0x8000008100000000U : 0x8000000000008002U,
    BIT_INTERLEAVE ? 0x8000000800000000U : 0x8000000000000080U,
    BIT_INTERLEAVE ? 0x0000008300000000U : 0x000000000000800aU,
    BIT_INTERLEAVE ? 0x8000800300000000U : 0x800000008000000aU,
    BIT_INTERLEAVE ? 0x8000808800000001U : 0x8000000080008081U,
    BIT_INTERLEAVE ? 0x8000008800000000U : 0x8000000000008080U,
    BIT_INTERLEAVE ? 0x0000800000000001U : 0x0000000080000001U,
    BIT_INTERLEAVE ? 0x8000808200000000U : 0x8000000080008008U
};
  93. #if defined(KECCAK_REF)
  94. /*
  95. * This is straightforward or "maximum clarity" implementation aiming
  96. * to resemble section 3.2 of the FIPS PUB 202 "SHA-3 Standard:
  97. * Permutation-Based Hash and Extendible-Output Functions" as much as
  98. * possible. With one caveat. Because of the way C stores matrices,
  99. * references to A[x,y] in the specification are presented as A[y][x].
  100. * Implementation unrolls inner x-loops so that modulo 5 operations are
  101. * explicitly pre-computed.
  102. */
  103. static void Theta(uint64_t A[5][5])
  104. {
  105. uint64_t C[5], D[5];
  106. size_t y;
  107. C[0] = A[0][0];
  108. C[1] = A[0][1];
  109. C[2] = A[0][2];
  110. C[3] = A[0][3];
  111. C[4] = A[0][4];
  112. for (y = 1; y < 5; y++) {
  113. C[0] ^= A[y][0];
  114. C[1] ^= A[y][1];
  115. C[2] ^= A[y][2];
  116. C[3] ^= A[y][3];
  117. C[4] ^= A[y][4];
  118. }
  119. D[0] = ROL64(C[1], 1) ^ C[4];
  120. D[1] = ROL64(C[2], 1) ^ C[0];
  121. D[2] = ROL64(C[3], 1) ^ C[1];
  122. D[3] = ROL64(C[4], 1) ^ C[2];
  123. D[4] = ROL64(C[0], 1) ^ C[3];
  124. for (y = 0; y < 5; y++) {
  125. A[y][0] ^= D[0];
  126. A[y][1] ^= D[1];
  127. A[y][2] ^= D[2];
  128. A[y][3] ^= D[3];
  129. A[y][4] ^= D[4];
  130. }
  131. }
  132. static void Rho(uint64_t A[5][5])
  133. {
  134. size_t y;
  135. for (y = 0; y < 5; y++) {
  136. A[y][0] = ROL64(A[y][0], rhotates[y][0]);
  137. A[y][1] = ROL64(A[y][1], rhotates[y][1]);
  138. A[y][2] = ROL64(A[y][2], rhotates[y][2]);
  139. A[y][3] = ROL64(A[y][3], rhotates[y][3]);
  140. A[y][4] = ROL64(A[y][4], rhotates[y][4]);
  141. }
  142. }
  143. static void Pi(uint64_t A[5][5])
  144. {
  145. uint64_t T[5][5];
  146. /*
  147. * T = A
  148. * A[y][x] = T[x][(3*y+x)%5]
  149. */
  150. memcpy(T, A, sizeof(T));
  151. A[0][0] = T[0][0];
  152. A[0][1] = T[1][1];
  153. A[0][2] = T[2][2];
  154. A[0][3] = T[3][3];
  155. A[0][4] = T[4][4];
  156. A[1][0] = T[0][3];
  157. A[1][1] = T[1][4];
  158. A[1][2] = T[2][0];
  159. A[1][3] = T[3][1];
  160. A[1][4] = T[4][2];
  161. A[2][0] = T[0][1];
  162. A[2][1] = T[1][2];
  163. A[2][2] = T[2][3];
  164. A[2][3] = T[3][4];
  165. A[2][4] = T[4][0];
  166. A[3][0] = T[0][4];
  167. A[3][1] = T[1][0];
  168. A[3][2] = T[2][1];
  169. A[3][3] = T[3][2];
  170. A[3][4] = T[4][3];
  171. A[4][0] = T[0][2];
  172. A[4][1] = T[1][3];
  173. A[4][2] = T[2][4];
  174. A[4][3] = T[3][0];
  175. A[4][4] = T[4][1];
  176. }
  177. static void Chi(uint64_t A[5][5])
  178. {
  179. uint64_t C[5];
  180. size_t y;
  181. for (y = 0; y < 5; y++) {
  182. C[0] = A[y][0] ^ (~A[y][1] & A[y][2]);
  183. C[1] = A[y][1] ^ (~A[y][2] & A[y][3]);
  184. C[2] = A[y][2] ^ (~A[y][3] & A[y][4]);
  185. C[3] = A[y][3] ^ (~A[y][4] & A[y][0]);
  186. C[4] = A[y][4] ^ (~A[y][0] & A[y][1]);
  187. A[y][0] = C[0];
  188. A[y][1] = C[1];
  189. A[y][2] = C[2];
  190. A[y][3] = C[3];
  191. A[y][4] = C[4];
  192. }
  193. }
/*
 * Iota step: XOR the round-|i| constant into lane A[0][0]; the only step
 * that differs between rounds.
 */
static void Iota(uint64_t A[5][5], size_t i)
{
    assert(i < (sizeof(iotas) / sizeof(iotas[0])));
    A[0][0] ^= iotas[i];
}
/*
 * The full Keccak-f[1600] permutation: 24 rounds, each applying the five
 * step mappings in the order given by FIPS 202.
 */
static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Theta(A);
        Rho(A);
        Pi(A);
        Chi(A);
        Iota(A, i);
    }
}
  210. #elif defined(KECCAK_1X)
  211. /*
  212. * This implementation is optimization of above code featuring unroll
  213. * of even y-loops, their fusion and code motion. It also minimizes
  214. * temporary storage. Compiler would normally do all these things for
  215. * you, purpose of manual optimization is to provide "unobscured"
  216. * reference for assembly implementation [in case this approach is
  217. * chosen for implementation on some platform]. In the nutshell it's
  218. * equivalent of "plane-per-plane processing" approach discussed in
  219. * section 2.4 of "Keccak implementation overview".
  220. */
/*
 * One Keccak-f[1600] round, plane-per-plane ("1X" variant).
 *
 * Theta's D[] terms are computed once, then each output plane A[y][*] is
 * produced by gathering its five Rho/Pi-permuted inputs into C[] and
 * applying Chi (plus Iota on A[0][0]). Lanes of A that are consumed by a
 * *later* plane than the one that overwrites them are stashed in T[][]
 * (the "borrow" comments) before being clobbered. The exact statement
 * order is therefore load-bearing — do not reorder.
 */
static void Round(uint64_t A[5][5], size_t i)
{
    uint64_t C[5], E[2];        /* registers */
    uint64_t D[5], T[2][5];     /* memory    */

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    /* Theta: column parities */
    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

#if defined(__arm__)
    /* ARM scheduling: D[] terms split across C[]/E[] to ease register
     * pressure; same values as the generic branch below. */
    D[1] = E[0] = ROL64(C[2], 1) ^ C[0];
    D[4] = E[1] = ROL64(C[0], 1) ^ C[3];
    D[0] = C[0] = ROL64(C[1], 1) ^ C[4];
    D[2] = C[1] = ROL64(C[3], 1) ^ C[1];
    D[3] = C[2] = ROL64(C[4], 1) ^ C[2];

    T[0][0] = A[3][0] ^ C[0]; /* borrow T[0][0] */
    T[0][1] = A[0][1] ^ E[0]; /* D[1] */
    T[0][2] = A[0][2] ^ C[1]; /* D[2] */
    T[0][3] = A[0][3] ^ C[2]; /* D[3] */
    T[0][4] = A[0][4] ^ E[1]; /* D[4] */

    C[3] = ROL64(A[3][3] ^ C[2], rhotates[3][3]);   /* D[3] */
    C[4] = ROL64(A[4][4] ^ E[1], rhotates[4][4]);   /* D[4] */
    C[0] = A[0][0] ^ C[0]; /* rotate by 0 */        /* D[0] */
    C[2] = ROL64(A[2][2] ^ C[1], rhotates[2][2]);   /* D[2] */
    C[1] = ROL64(A[1][1] ^ E[0], rhotates[1][1]);   /* D[1] */
#else
    /* Theta: D[x] = ROL64(C[x+1], 1) ^ C[x-1] */
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    /* Stash Theta-ed lanes of row 0 (and A[3][0]) needed by later planes */
    T[0][0] = A[3][0] ^ D[0]; /* borrow T[0][0] */
    T[0][1] = A[0][1] ^ D[1];
    T[0][2] = A[0][2] ^ D[2];
    T[0][3] = A[0][3] ^ D[3];
    T[0][4] = A[0][4] ^ D[4];

    /* Rho/Pi inputs for output plane 0 */
    C[0] = A[0][0] ^ D[0]; /* rotate by 0 */
    C[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
    C[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
    C[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
    C[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);
#endif
    /* Chi + Iota on plane 0 */
    A[0][0] = C[0] ^ (~C[1] & C[2]) ^ iotas[i];
    A[0][1] = C[1] ^ (~C[2] & C[3]);
    A[0][2] = C[2] ^ (~C[3] & C[4]);
    A[0][3] = C[3] ^ (~C[4] & C[0]);
    A[0][4] = C[4] ^ (~C[0] & C[1]);

    /* Stash further Theta-ed lanes before rows 1/2 get overwritten;
     * D[] values are parked in spare registers C[]/E[] on the way. */
    T[1][0] = A[1][0] ^ (C[3] = D[0]);
    T[1][1] = A[2][1] ^ (C[4] = D[1]); /* borrow T[1][1] */
    T[1][2] = A[1][2] ^ (E[0] = D[2]);
    T[1][3] = A[1][3] ^ (E[1] = D[3]);
    T[1][4] = A[2][4] ^ (C[2] = D[4]); /* borrow T[1][4] */

    /* plane 1 */
    C[0] = ROL64(T[0][3], rhotates[0][3]);
    C[1] = ROL64(A[1][4] ^ C[2], rhotates[1][4]);   /* D[4] */
    C[2] = ROL64(A[2][0] ^ C[3], rhotates[2][0]);   /* D[0] */
    C[3] = ROL64(A[3][1] ^ C[4], rhotates[3][1]);   /* D[1] */
    C[4] = ROL64(A[4][2] ^ E[0], rhotates[4][2]);   /* D[2] */

    A[1][0] = C[0] ^ (~C[1] & C[2]);
    A[1][1] = C[1] ^ (~C[2] & C[3]);
    A[1][2] = C[2] ^ (~C[3] & C[4]);
    A[1][3] = C[3] ^ (~C[4] & C[0]);
    A[1][4] = C[4] ^ (~C[0] & C[1]);

    /* plane 2 */
    C[0] = ROL64(T[0][1], rhotates[0][1]);
    C[1] = ROL64(T[1][2], rhotates[1][2]);
    C[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    C[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
    C[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);

    A[2][0] = C[0] ^ (~C[1] & C[2]);
    A[2][1] = C[1] ^ (~C[2] & C[3]);
    A[2][2] = C[2] ^ (~C[3] & C[4]);
    A[2][3] = C[3] ^ (~C[4] & C[0]);
    A[2][4] = C[4] ^ (~C[0] & C[1]);

    /* plane 3 */
    C[0] = ROL64(T[0][4], rhotates[0][4]);
    C[1] = ROL64(T[1][0], rhotates[1][0]);
    C[2] = ROL64(T[1][1], rhotates[2][1]); /* originally A[2][1] */
    C[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    C[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);

    A[3][0] = C[0] ^ (~C[1] & C[2]);
    A[3][1] = C[1] ^ (~C[2] & C[3]);
    A[3][2] = C[2] ^ (~C[3] & C[4]);
    A[3][3] = C[3] ^ (~C[4] & C[0]);
    A[3][4] = C[4] ^ (~C[0] & C[1]);

    /* plane 4 */
    C[0] = ROL64(T[0][2], rhotates[0][2]);
    C[1] = ROL64(T[1][3], rhotates[1][3]);
    C[2] = ROL64(T[1][4], rhotates[2][4]); /* originally A[2][4] */
    C[3] = ROL64(T[0][0], rhotates[3][0]); /* originally A[3][0] */
    C[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    A[4][0] = C[0] ^ (~C[1] & C[2]);
    A[4][1] = C[1] ^ (~C[2] & C[3]);
    A[4][2] = C[2] ^ (~C[3] & C[4]);
    A[4][3] = C[3] ^ (~C[4] & C[0]);
    A[4][4] = C[4] ^ (~C[0] & C[1]);
}
/*
 * Keccak-f[1600]: 24 applications of the fused round above.
 */
static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Round(A, i);
    }
}
  322. #elif defined(KECCAK_1X_ALT)
  323. /*
  324. * This is variant of above KECCAK_1X that reduces requirement for
  325. * temporary storage even further, but at cost of more updates to A[][].
  326. * It's less suitable if A[][] is memory bound, but better if it's
  327. * register bound.
  328. */
/*
 * One Keccak-f[1600] round, low-temporary-storage ("1X_ALT") variant.
 *
 * Theta is applied in place column by column, then Rho and Pi are fused
 * into a chain of in-place lane moves whose order is chosen so each lane
 * is read before it is overwritten (only row 0's pre-move values need
 * saving in C[1..4]). Chi then runs in place two rows at a time, saving
 * each row's first two lanes before they are clobbered. Statement order
 * is load-bearing throughout — do not reorder.
 */
static void Round(uint64_t A[5][5], size_t i)
{
    uint64_t C[5], D[5];

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    /* Theta: column parities */
    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

    /* D[3..0] are built by updating C[2..4] in place (note the ^=),
     * so C[] doubles as storage for three of the five D values. */
    D[1] = C[0] ^  ROL64(C[2], 1);
    D[2] = C[1] ^  ROL64(C[3], 1);
    D[3] = C[2] ^= ROL64(C[4], 1);
    D[4] = C[3] ^= ROL64(C[0], 1);
    D[0] = C[4] ^= ROL64(C[1], 1);

    /* Theta applied column by column */
    A[0][1] ^= D[1];
    A[1][1] ^= D[1];
    A[2][1] ^= D[1];
    A[3][1] ^= D[1];
    A[4][1] ^= D[1];

    A[0][2] ^= D[2];
    A[1][2] ^= D[2];
    A[2][2] ^= D[2];
    A[3][2] ^= D[2];
    A[4][2] ^= D[2];

    A[0][3] ^= C[2];
    A[1][3] ^= C[2];
    A[2][3] ^= C[2];
    A[3][3] ^= C[2];
    A[4][3] ^= C[2];

    A[0][4] ^= C[3];
    A[1][4] ^= C[3];
    A[2][4] ^= C[3];
    A[3][4] ^= C[3];
    A[4][4] ^= C[3];

    A[0][0] ^= C[4];
    A[1][0] ^= C[4];
    A[2][0] ^= C[4];
    A[3][0] ^= C[4];
    A[4][0] ^= C[4];

    /* Rho + Pi, in place: save row 0 (its lanes are written first but
     * consumed last), then move/rotate lanes in read-before-write order. */
    C[1] = A[0][1];
    C[2] = A[0][2];
    C[3] = A[0][3];
    C[4] = A[0][4];

    A[0][1] = ROL64(A[1][1], rhotates[1][1]);
    A[0][2] = ROL64(A[2][2], rhotates[2][2]);
    A[0][3] = ROL64(A[3][3], rhotates[3][3]);
    A[0][4] = ROL64(A[4][4], rhotates[4][4]);

    A[1][1] = ROL64(A[1][4], rhotates[1][4]);
    A[2][2] = ROL64(A[2][3], rhotates[2][3]);
    A[3][3] = ROL64(A[3][2], rhotates[3][2]);
    A[4][4] = ROL64(A[4][1], rhotates[4][1]);

    A[1][4] = ROL64(A[4][2], rhotates[4][2]);
    A[2][3] = ROL64(A[3][4], rhotates[3][4]);
    A[3][2] = ROL64(A[2][1], rhotates[2][1]);
    A[4][1] = ROL64(A[1][3], rhotates[1][3]);

    A[4][2] = ROL64(A[2][4], rhotates[2][4]);
    A[3][4] = ROL64(A[4][3], rhotates[4][3]);
    A[2][1] = ROL64(A[1][2], rhotates[1][2]);
    A[1][3] = ROL64(A[3][1], rhotates[3][1]);

    A[2][4] = ROL64(A[4][0], rhotates[4][0]);
    A[4][3] = ROL64(A[3][0], rhotates[3][0]);
    A[1][2] = ROL64(A[2][0], rhotates[2][0]);
    A[3][1] = ROL64(A[1][0], rhotates[1][0]);

    /* the saved row-0 lanes land in column 0 */
    A[1][0] = ROL64(C[3], rhotates[0][3]);
    A[2][0] = ROL64(C[1], rhotates[0][1]);
    A[3][0] = ROL64(C[4], rhotates[0][4]);
    A[4][0] = ROL64(C[2], rhotates[0][2]);

    /* Chi in place, rows 0 and 1: save each row's lanes 0 and 1 first */
    C[0] = A[0][0];
    C[1] = A[1][0];
    D[0] = A[0][1];
    D[1] = A[1][1];

    A[0][0] ^= (~A[0][1] & A[0][2]);
    A[1][0] ^= (~A[1][1] & A[1][2]);
    A[0][1] ^= (~A[0][2] & A[0][3]);
    A[1][1] ^= (~A[1][2] & A[1][3]);
    A[0][2] ^= (~A[0][3] & A[0][4]);
    A[1][2] ^= (~A[1][3] & A[1][4]);
    A[0][3] ^= (~A[0][4] & C[0]);
    A[1][3] ^= (~A[1][4] & C[1]);
    A[0][4] ^= (~C[0]    & D[0]);
    A[1][4] ^= (~C[1]    & D[1]);

    /* Chi, rows 2 and 3 */
    C[2] = A[2][0];
    C[3] = A[3][0];
    D[2] = A[2][1];
    D[3] = A[3][1];

    A[2][0] ^= (~A[2][1] & A[2][2]);
    A[3][0] ^= (~A[3][1] & A[3][2]);
    A[2][1] ^= (~A[2][2] & A[2][3]);
    A[3][1] ^= (~A[3][2] & A[3][3]);
    A[2][2] ^= (~A[2][3] & A[2][4]);
    A[3][2] ^= (~A[3][3] & A[3][4]);
    A[2][3] ^= (~A[2][4] & C[2]);
    A[3][3] ^= (~A[3][4] & C[3]);
    A[2][4] ^= (~C[2]    & D[2]);
    A[3][4] ^= (~C[3]    & D[3]);

    /* Chi, row 4 */
    C[4] = A[4][0];
    D[4] = A[4][1];

    A[4][0] ^= (~A[4][1] & A[4][2]);
    A[4][1] ^= (~A[4][2] & A[4][3]);
    A[4][2] ^= (~A[4][3] & A[4][4]);
    A[4][3] ^= (~A[4][4] & C[4]);
    A[4][4] ^= (~C[4]    & D[4]);

    /* Iota */
    A[0][0] ^= iotas[i];
}
/*
 * Keccak-f[1600]: 24 applications of the in-place-leaning round above.
 */
static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Round(A, i);
    }
}
  440. #elif defined(KECCAK_2X)
  441. /*
  442. * This implementation is variant of KECCAK_1X above with outer-most
  443. * round loop unrolled twice. This allows to take temporary storage
  444. * out of round procedure and simplify references to it by alternating
  445. * it with actual data (see round loop below). Originally it was meant
  446. * rather as reference for an assembly implementation, but it seems to
  447. * play best with compilers [as well as provide best instruction per
  448. * processed byte ratio at minimal round unroll factor]...
  449. */
/*
 * One Keccak-f[1600] round, "2X" variant: reads state from |A| and writes
 * the round output to |R|, so no borrowing/stashing is needed; the caller
 * alternates the two buffers.
 *
 * When KECCAK_COMPLEMENTING_TRANSFORM is defined, the state is kept with
 * six specific lanes complemented (see KeccakF1600 below), and Chi is
 * rewritten per plane with OR/AND forms that preserve that complemented
 * representation while using fewer NOT operations.
 */
static void Round(uint64_t R[5][5], uint64_t A[5][5], size_t i)
{
    uint64_t C[5], D[5];

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    /* Theta: column parities */
    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    /* Theta + Rho/Pi gather for output plane 0 */
    C[0] =       A[0][0] ^ D[0]; /* rotate by 0 */
    C[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
    C[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
    C[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
    C[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i];
    R[0][1] = C[1] ^ (~C[2] | C[3]);
    R[0][2] = C[2] ^ ( C[3] & C[4]);
    R[0][3] = C[3] ^ ( C[4] | C[0]);
    R[0][4] = C[4] ^ ( C[0] & C[1]);
#else
    R[0][0] = C[0] ^ (~C[1] & C[2]) ^ iotas[i];
    R[0][1] = C[1] ^ (~C[2] & C[3]);
    R[0][2] = C[2] ^ (~C[3] & C[4]);
    R[0][3] = C[3] ^ (~C[4] & C[0]);
    R[0][4] = C[4] ^ (~C[0] & C[1]);
#endif

    /* plane 1 */
    C[0] = ROL64(A[0][3] ^ D[3], rhotates[0][3]);
    C[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    C[2] = ROL64(A[2][0] ^ D[0], rhotates[2][0]);
    C[3] = ROL64(A[3][1] ^ D[1], rhotates[3][1]);
    C[4] = ROL64(A[4][2] ^ D[2], rhotates[4][2]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[1][0] = C[0] ^ (C[1] |  C[2]);
    R[1][1] = C[1] ^ (C[2] &  C[3]);
    R[1][2] = C[2] ^ (C[3] | ~C[4]);
    R[1][3] = C[3] ^ (C[4] |  C[0]);
    R[1][4] = C[4] ^ (C[0] &  C[1]);
#else
    R[1][0] = C[0] ^ (~C[1] & C[2]);
    R[1][1] = C[1] ^ (~C[2] & C[3]);
    R[1][2] = C[2] ^ (~C[3] & C[4]);
    R[1][3] = C[3] ^ (~C[4] & C[0]);
    R[1][4] = C[4] ^ (~C[0] & C[1]);
#endif

    /* plane 2 */
    C[0] = ROL64(A[0][1] ^ D[1], rhotates[0][1]);
    C[1] = ROL64(A[1][2] ^ D[2], rhotates[1][2]);
    C[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    C[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
    C[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[2][0] =  C[0] ^ ( C[1] | C[2]);
    R[2][1] =  C[1] ^ ( C[2] & C[3]);
    R[2][2] =  C[2] ^ (~C[3] & C[4]);
    R[2][3] = ~C[3] ^ ( C[4] | C[0]);
    R[2][4] =  C[4] ^ ( C[0] & C[1]);
#else
    R[2][0] = C[0] ^ (~C[1] & C[2]);
    R[2][1] = C[1] ^ (~C[2] & C[3]);
    R[2][2] = C[2] ^ (~C[3] & C[4]);
    R[2][3] = C[3] ^ (~C[4] & C[0]);
    R[2][4] = C[4] ^ (~C[0] & C[1]);
#endif

    /* plane 3 */
    C[0] = ROL64(A[0][4] ^ D[4], rhotates[0][4]);
    C[1] = ROL64(A[1][0] ^ D[0], rhotates[1][0]);
    C[2] = ROL64(A[2][1] ^ D[1], rhotates[2][1]);
    C[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    C[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[3][0] =  C[0] ^ ( C[1] & C[2]);
    R[3][1] =  C[1] ^ ( C[2] | C[3]);
    R[3][2] =  C[2] ^ (~C[3] | C[4]);
    R[3][3] = ~C[3] ^ ( C[4] & C[0]);
    R[3][4] =  C[4] ^ ( C[0] | C[1]);
#else
    R[3][0] = C[0] ^ (~C[1] & C[2]);
    R[3][1] = C[1] ^ (~C[2] & C[3]);
    R[3][2] = C[2] ^ (~C[3] & C[4]);
    R[3][3] = C[3] ^ (~C[4] & C[0]);
    R[3][4] = C[4] ^ (~C[0] & C[1]);
#endif

    /* plane 4 */
    C[0] = ROL64(A[0][2] ^ D[2], rhotates[0][2]);
    C[1] = ROL64(A[1][3] ^ D[3], rhotates[1][3]);
    C[2] = ROL64(A[2][4] ^ D[4], rhotates[2][4]);
    C[3] = ROL64(A[3][0] ^ D[0], rhotates[3][0]);
    C[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[4][0] =  C[0] ^ (~C[1] & C[2]);
    R[4][1] = ~C[1] ^ ( C[2] | C[3]);
    R[4][2] =  C[2] ^ ( C[3] & C[4]);
    R[4][3] =  C[3] ^ ( C[4] | C[0]);
    R[4][4] =  C[4] ^ ( C[0] & C[1]);
#else
    R[4][0] = C[0] ^ (~C[1] & C[2]);
    R[4][1] = C[1] ^ (~C[2] & C[3]);
    R[4][2] = C[2] ^ (~C[3] & C[4]);
    R[4][3] = C[3] ^ (~C[4] & C[0]);
    R[4][4] = C[4] ^ (~C[0] & C[1]);
#endif
}
/*
 * Keccak-f[1600], double-buffered: each loop iteration performs two
 * rounds, ping-ponging the state between A and the scratch buffer T so
 * A again holds the result after the (even) 24 rounds.
 *
 * Under KECCAK_COMPLEMENTING_TRANSFORM the same six lanes are inverted
 * on entry and on exit, converting to and from the complemented
 * representation that Round's OR/AND Chi forms maintain.
 */
static void KeccakF1600(uint64_t A[5][5])
{
    uint64_t T[5][5];
    size_t i;

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    A[0][1] = ~A[0][1];
    A[0][2] = ~A[0][2];
    A[1][3] = ~A[1][3];
    A[2][2] = ~A[2][2];
    A[3][2] = ~A[3][2];
    A[4][0] = ~A[4][0];
#endif

    for (i = 0; i < 24; i += 2) {
        Round(T, A, i);
        Round(A, T, i + 1);
    }

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    A[0][1] = ~A[0][1];
    A[0][2] = ~A[0][2];
    A[1][3] = ~A[1][3];
    A[2][2] = ~A[2][2];
    A[3][2] = ~A[3][2];
    A[4][0] = ~A[4][0];
#endif
}
  580. #else /* define KECCAK_INPLACE to compile this code path */
  581. /*
  582. * This implementation is KECCAK_1X from above combined 4 times with
  583. * a twist that allows to omit temporary storage and perform in-place
  584. * processing. It's discussed in section 2.5 of "Keccak implementation
  585. * overview". It's likely to be best suited for processors with large
  586. * register bank... On the other hand processor with large register
  587. * bank can as well use KECCAK_1X_ALT, it would be as fast but much
  588. * more compact...
  589. */
  590. static void FourRounds(uint64_t A[5][5], size_t i)
  591. {
  592. uint64_t B[5], C[5], D[5];
  593. assert(i <= (sizeof(iotas) / sizeof(iotas[0]) - 4));
  594. /* Round 4*n */
  595. C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
  596. C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
  597. C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
  598. C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
  599. C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];
  600. D[0] = ROL64(C[1], 1) ^ C[4];
  601. D[1] = ROL64(C[2], 1) ^ C[0];
  602. D[2] = ROL64(C[3], 1) ^ C[1];
  603. D[3] = ROL64(C[4], 1) ^ C[2];
  604. D[4] = ROL64(C[0], 1) ^ C[3];
  605. B[0] = A[0][0] ^ D[0]; /* rotate by 0 */
  606. B[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
  607. B[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
  608. B[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
  609. B[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);
  610. C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i];
  611. C[1] = A[1][1] = B[1] ^ (~B[2] & B[3]);
  612. C[2] = A[2][2] = B[2] ^ (~B[3] & B[4]);
  613. C[3] = A[3][3] = B[3] ^ (~B[4] & B[0]);
  614. C[4] = A[4][4] = B[4] ^ (~B[0] & B[1]);
  615. B[0] = ROL64(A[0][3] ^ D[3], rhotates[0][3]);
  616. B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
  617. B[2] = ROL64(A[2][0] ^ D[0], rhotates[2][0]);
  618. B[3] = ROL64(A[3][1] ^ D[1], rhotates[3][1]);
  619. B[4] = ROL64(A[4][2] ^ D[2], rhotates[4][2]);
  620. C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
  621. C[1] ^= A[3][1] = B[1] ^ (~B[2] & B[3]);
  622. C[2] ^= A[4][2] = B[2] ^ (~B[3] & B[4]);
  623. C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
  624. C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);
  625. B[0] = ROL64(A[0][1] ^ D[1], rhotates[0][1]);
  626. B[1] = ROL64(A[1][2] ^ D[2], rhotates[1][2]);
  627. B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
  628. B[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
  629. B[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);
  630. C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
  631. C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
  632. C[2] ^= A[1][2] = B[2] ^ (~B[3] & B[4]);
  633. C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
  634. C[4] ^= A[3][4] = B[4] ^ (~B[0] & B[1]);
  635. B[0] = ROL64(A[0][4] ^ D[4], rhotates[0][4]);
  636. B[1] = ROL64(A[1][0] ^ D[0], rhotates[1][0]);
  637. B[2] = ROL64(A[2][1] ^ D[1], rhotates[2][1]);
  638. B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
  639. B[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);
  640. C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
  641. C[1] ^= A[2][1] = B[1] ^ (~B[2] & B[3]);
  642. C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
  643. C[3] ^= A[4][3] = B[3] ^ (~B[4] & B[0]);
  644. C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);
  645. B[0] = ROL64(A[0][2] ^ D[2], rhotates[0][2]);
  646. B[1] = ROL64(A[1][3] ^ D[3], rhotates[1][3]);
  647. B[2] = ROL64(A[2][4] ^ D[4], rhotates[2][4]);
  648. B[3] = ROL64(A[3][0] ^ D[0], rhotates[3][0]);
  649. B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);
  650. C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
  651. C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
  652. C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
  653. C[3] ^= A[1][3] = B[3] ^ (~B[4] & B[0]);
  654. C[4] ^= A[2][4] = B[4] ^ (~B[0] & B[1]);
  655. /* Round 4*n+1 */
  656. D[0] = ROL64(C[1], 1) ^ C[4];
  657. D[1] = ROL64(C[2], 1) ^ C[0];
  658. D[2] = ROL64(C[3], 1) ^ C[1];
  659. D[3] = ROL64(C[4], 1) ^ C[2];
  660. D[4] = ROL64(C[0], 1) ^ C[3];
  661. B[0] = A[0][0] ^ D[0]; /* rotate by 0 */
  662. B[1] = ROL64(A[3][1] ^ D[1], rhotates[1][1]);
  663. B[2] = ROL64(A[1][2] ^ D[2], rhotates[2][2]);
  664. B[3] = ROL64(A[4][3] ^ D[3], rhotates[3][3]);
  665. B[4] = ROL64(A[2][4] ^ D[4], rhotates[4][4]);
  666. C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 1];
  667. C[1] = A[3][1] = B[1] ^ (~B[2] & B[3]);
  668. C[2] = A[1][2] = B[2] ^ (~B[3] & B[4]);
  669. C[3] = A[4][3] = B[3] ^ (~B[4] & B[0]);
  670. C[4] = A[2][4] = B[4] ^ (~B[0] & B[1]);
  671. B[0] = ROL64(A[3][3] ^ D[3], rhotates[0][3]);
  672. B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
  673. B[2] = ROL64(A[4][0] ^ D[0], rhotates[2][0]);
  674. B[3] = ROL64(A[2][1] ^ D[1], rhotates[3][1]);
  675. B[4] = ROL64(A[0][2] ^ D[2], rhotates[4][2]);
  676. C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
  677. C[1] ^= A[2][1] = B[1] ^ (~B[2] & B[3]);
  678. C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
  679. C[3] ^= A[3][3] = B[3] ^ (~B[4] & B[0]);
  680. C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);
  681. B[0] = ROL64(A[1][1] ^ D[1], rhotates[0][1]);
  682. B[1] = ROL64(A[4][2] ^ D[2], rhotates[1][2]);
  683. B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
  684. B[3] = ROL64(A[0][4] ^ D[4], rhotates[3][4]);
  685. B[4] = ROL64(A[3][0] ^ D[0], rhotates[4][0]);
  686. C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
  687. C[1] ^= A[1][1] = B[1] ^ (~B[2] & B[3]);
  688. C[2] ^= A[4][2] = B[2] ^ (~B[3] & B[4]);
  689. C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
  690. C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);
  691. B[0] = ROL64(A[4][4] ^ D[4], rhotates[0][4]);
  692. B[1] = ROL64(A[2][0] ^ D[0], rhotates[1][0]);
  693. B[2] = ROL64(A[0][1] ^ D[1], rhotates[2][1]);
  694. B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
  695. B[4] = ROL64(A[1][3] ^ D[3], rhotates[4][3]);
  696. C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
  697. C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
  698. C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
  699. C[3] ^= A[1][3] = B[3] ^ (~B[4] & B[0]);
  700. C[4] ^= A[4][4] = B[4] ^ (~B[0] & B[1]);
  701. B[0] = ROL64(A[2][2] ^ D[2], rhotates[0][2]);
  702. B[1] = ROL64(A[0][3] ^ D[3], rhotates[1][3]);
  703. B[2] = ROL64(A[3][4] ^ D[4], rhotates[2][4]);
  704. B[3] = ROL64(A[1][0] ^ D[0], rhotates[3][0]);
  705. B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);
  706. C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
  707. C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
  708. C[2] ^= A[2][2] = B[2] ^ (~B[3] & B[4]);
  709. C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
  710. C[4] ^= A[3][4] = B[4] ^ (~B[0] & B[1]);
  711. /* Round 4*n+2 */
  712. D[0] = ROL64(C[1], 1) ^ C[4];
  713. D[1] = ROL64(C[2], 1) ^ C[0];
  714. D[2] = ROL64(C[3], 1) ^ C[1];
  715. D[3] = ROL64(C[4], 1) ^ C[2];
  716. D[4] = ROL64(C[0], 1) ^ C[3];
  717. B[0] = A[0][0] ^ D[0]; /* rotate by 0 */
  718. B[1] = ROL64(A[2][1] ^ D[1], rhotates[1][1]);
  719. B[2] = ROL64(A[4][2] ^ D[2], rhotates[2][2]);
  720. B[3] = ROL64(A[1][3] ^ D[3], rhotates[3][3]);
  721. B[4] = ROL64(A[3][4] ^ D[4], rhotates[4][4]);
  722. C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 2];
  723. C[1] = A[2][1] = B[1] ^ (~B[2] & B[3]);
  724. C[2] = A[4][2] = B[2] ^ (~B[3] & B[4]);
  725. C[3] = A[1][3] = B[3] ^ (~B[4] & B[0]);
  726. C[4] = A[3][4] = B[4] ^ (~B[0] & B[1]);
  727. B[0] = ROL64(A[4][3] ^ D[3], rhotates[0][3]);
  728. B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
  729. B[2] = ROL64(A[3][0] ^ D[0], rhotates[2][0]);
  730. B[3] = ROL64(A[0][1] ^ D[1], rhotates[3][1]);
  731. B[4] = ROL64(A[2][2] ^ D[2], rhotates[4][2]);
  732. C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
  733. C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
  734. C[2] ^= A[2][2] = B[2] ^ (~B[3] & B[4]);
  735. C[3] ^= A[4][3] = B[3] ^ (~B[4] & B[0]);
  736. C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);
  737. B[0] = ROL64(A[3][1] ^ D[1], rhotates[0][1]);
  738. B[1] = ROL64(A[0][2] ^ D[2], rhotates[1][2]);
  739. B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
  740. B[3] = ROL64(A[4][4] ^ D[4], rhotates[3][4]);
  741. B[4] = ROL64(A[1][0] ^ D[0], rhotates[4][0]);
  742. C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
  743. C[1] ^= A[3][1] = B[1] ^ (~B[2] & B[3]);
  744. C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
  745. C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
  746. C[4] ^= A[4][4] = B[4] ^ (~B[0] & B[1]);
  747. B[0] = ROL64(A[2][4] ^ D[4], rhotates[0][4]);
  748. B[1] = ROL64(A[4][0] ^ D[0], rhotates[1][0]);
  749. B[2] = ROL64(A[1][1] ^ D[1], rhotates[2][1]);
  750. B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
  751. B[4] = ROL64(A[0][3] ^ D[3], rhotates[4][3]);
  752. C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
  753. C[1] ^= A[1][1] = B[1] ^ (~B[2] & B[3]);
  754. C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
  755. C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
  756. C[4] ^= A[2][4] = B[4] ^ (~B[0] & B[1]);
  757. B[0] = ROL64(A[1][2] ^ D[2], rhotates[0][2]);
  758. B[1] = ROL64(A[3][3] ^ D[3], rhotates[1][3]);
  759. B[2] = ROL64(A[0][4] ^ D[4], rhotates[2][4]);
  760. B[3] = ROL64(A[2][0] ^ D[0], rhotates[3][0]);
  761. B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);
  762. C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
  763. C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
  764. C[2] ^= A[1][2] = B[2] ^ (~B[3] & B[4]);
  765. C[3] ^= A[3][3] = B[3] ^ (~B[4] & B[0]);
  766. C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);
  767. /* Round 4*n+3 */
  768. D[0] = ROL64(C[1], 1) ^ C[4];
  769. D[1] = ROL64(C[2], 1) ^ C[0];
  770. D[2] = ROL64(C[3], 1) ^ C[1];
  771. D[3] = ROL64(C[4], 1) ^ C[2];
  772. D[4] = ROL64(C[0], 1) ^ C[3];
  773. B[0] = A[0][0] ^ D[0]; /* rotate by 0 */
  774. B[1] = ROL64(A[0][1] ^ D[1], rhotates[1][1]);
  775. B[2] = ROL64(A[0][2] ^ D[2], rhotates[2][2]);
  776. B[3] = ROL64(A[0][3] ^ D[3], rhotates[3][3]);
  777. B[4] = ROL64(A[0][4] ^ D[4], rhotates[4][4]);
  778. /* C[0] = */ A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 3];
  779. /* C[1] = */ A[0][1] = B[1] ^ (~B[2] & B[3]);
  780. /* C[2] = */ A[0][2] = B[2] ^ (~B[3] & B[4]);
  781. /* C[3] = */ A[0][3] = B[3] ^ (~B[4] & B[0]);
  782. /* C[4] = */ A[0][4] = B[4] ^ (~B[0] & B[1]);
  783. B[0] = ROL64(A[1][3] ^ D[3], rhotates[0][3]);
  784. B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
  785. B[2] = ROL64(A[1][0] ^ D[0], rhotates[2][0]);
  786. B[3] = ROL64(A[1][1] ^ D[1], rhotates[3][1]);
  787. B[4] = ROL64(A[1][2] ^ D[2], rhotates[4][2]);
  788. /* C[0] ^= */ A[1][0] = B[0] ^ (~B[1] & B[2]);
  789. /* C[1] ^= */ A[1][1] = B[1] ^ (~B[2] & B[3]);
  790. /* C[2] ^= */ A[1][2] = B[2] ^ (~B[3] & B[4]);
  791. /* C[3] ^= */ A[1][3] = B[3] ^ (~B[4] & B[0]);
  792. /* C[4] ^= */ A[1][4] = B[4] ^ (~B[0] & B[1]);
  793. B[0] = ROL64(A[2][1] ^ D[1], rhotates[0][1]);
  794. B[1] = ROL64(A[2][2] ^ D[2], rhotates[1][2]);
  795. B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
  796. B[3] = ROL64(A[2][4] ^ D[4], rhotates[3][4]);
  797. B[4] = ROL64(A[2][0] ^ D[0], rhotates[4][0]);
  798. /* C[0] ^= */ A[2][0] = B[0] ^ (~B[1] & B[2]);
  799. /* C[1] ^= */ A[2][1] = B[1] ^ (~B[2] & B[3]);
  800. /* C[2] ^= */ A[2][2] = B[2] ^ (~B[3] & B[4]);
  801. /* C[3] ^= */ A[2][3] = B[3] ^ (~B[4] & B[0]);
  802. /* C[4] ^= */ A[2][4] = B[4] ^ (~B[0] & B[1]);
  803. B[0] = ROL64(A[3][4] ^ D[4], rhotates[0][4]);
  804. B[1] = ROL64(A[3][0] ^ D[0], rhotates[1][0]);
  805. B[2] = ROL64(A[3][1] ^ D[1], rhotates[2][1]);
  806. B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
  807. B[4] = ROL64(A[3][3] ^ D[3], rhotates[4][3]);
  808. /* C[0] ^= */ A[3][0] = B[0] ^ (~B[1] & B[2]);
  809. /* C[1] ^= */ A[3][1] = B[1] ^ (~B[2] & B[3]);
  810. /* C[2] ^= */ A[3][2] = B[2] ^ (~B[3] & B[4]);
  811. /* C[3] ^= */ A[3][3] = B[3] ^ (~B[4] & B[0]);
  812. /* C[4] ^= */ A[3][4] = B[4] ^ (~B[0] & B[1]);
  813. B[0] = ROL64(A[4][2] ^ D[2], rhotates[0][2]);
  814. B[1] = ROL64(A[4][3] ^ D[3], rhotates[1][3]);
  815. B[2] = ROL64(A[4][4] ^ D[4], rhotates[2][4]);
  816. B[3] = ROL64(A[4][0] ^ D[0], rhotates[3][0]);
  817. B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);
  818. /* C[0] ^= */ A[4][0] = B[0] ^ (~B[1] & B[2]);
  819. /* C[1] ^= */ A[4][1] = B[1] ^ (~B[2] & B[3]);
  820. /* C[2] ^= */ A[4][2] = B[2] ^ (~B[3] & B[4]);
  821. /* C[3] ^= */ A[4][3] = B[3] ^ (~B[4] & B[0]);
  822. /* C[4] ^= */ A[4][4] = B[4] ^ (~B[0] & B[1]);
  823. }
  824. static void KeccakF1600(uint64_t A[5][5])
  825. {
  826. size_t i;
  827. for (i = 0; i < 24; i += 4) {
  828. FourRounds(A, i);
  829. }
  830. }
  831. #endif
/*
 * Split a 64-bit lane into even/odd bit halves: the even-indexed bits
 * of |Ai| are packed into the low 32 bits of the result and the
 * odd-indexed bits into the high 32 bits.  When BIT_INTERLEAVE is 0
 * (defined earlier in this file; presumably set on 32-bit targets so
 * 64-bit rotations can be done as 32-bit ones — confirm against the
 * macro's definition) the value is returned unchanged.
 * BitDeinterleave is the exact inverse.
 */
static uint64_t BitInterleave(uint64_t Ai)
{
    if (BIT_INTERLEAVE) {
        uint32_t hi = (uint32_t)(Ai >> 32), lo = (uint32_t)Ai;
        uint32_t t0, t1;

        /* Gather the even bits of |lo| into the low 16 bits of |t0|. */
        t0 = lo & 0x55555555;
        t0 |= t0 >> 1;  t0 &= 0x33333333;
        t0 |= t0 >> 2;  t0 &= 0x0f0f0f0f;
        t0 |= t0 >> 4;  t0 &= 0x00ff00ff;
        t0 |= t0 >> 8;  t0 &= 0x0000ffff;

        /* Gather the even bits of |hi| into the high 16 bits of |t1|. */
        t1 = hi & 0x55555555;
        t1 |= t1 >> 1;  t1 &= 0x33333333;
        t1 |= t1 >> 2;  t1 &= 0x0f0f0f0f;
        t1 |= t1 >> 4;  t1 &= 0x00ff00ff;
        t1 |= t1 >> 8;  t1 <<= 16;

        /* Gather the odd bits of |lo| into the low 16 bits of |lo|. */
        lo &= 0xaaaaaaaa;
        lo |= lo << 1;  lo &= 0xcccccccc;
        lo |= lo << 2;  lo &= 0xf0f0f0f0;
        lo |= lo << 4;  lo &= 0xff00ff00;
        lo |= lo << 8;  lo >>= 16;

        /* Gather the odd bits of |hi| into the high 16 bits of |hi|. */
        hi &= 0xaaaaaaaa;
        hi |= hi << 1;  hi &= 0xcccccccc;
        hi |= hi << 2;  hi &= 0xf0f0f0f0;
        hi |= hi << 4;  hi &= 0xff00ff00;
        hi |= hi << 8;  hi &= 0xffff0000;

        /* Low word = even source bits, high word = odd source bits. */
        Ai = ((uint64_t)(hi | lo) << 32) | (t1 | t0);
    }
    return Ai;
}
/*
 * Inverse of BitInterleave: the low 32 bits of |Ai| supply the
 * even-indexed bits of the result and the high 32 bits supply the
 * odd-indexed bits.  No-op when BIT_INTERLEAVE is 0.
 */
static uint64_t BitDeinterleave(uint64_t Ai)
{
    if (BIT_INTERLEAVE) {
        uint32_t hi = (uint32_t)(Ai >> 32), lo = (uint32_t)Ai;
        uint32_t t0, t1;

        /* Spread the low 16 bits of |lo| to even bit positions of |t0|. */
        t0 = lo & 0x0000ffff;
        t0 |= t0 << 8;  t0 &= 0x00ff00ff;
        t0 |= t0 << 4;  t0 &= 0x0f0f0f0f;
        t0 |= t0 << 2;  t0 &= 0x33333333;
        t0 |= t0 << 1;  t0 &= 0x55555555;

        /* Spread the low 16 bits of |hi| to odd bit positions of |t1|. */
        t1 = hi << 16;
        t1 |= t1 >> 8;  t1 &= 0xff00ff00;
        t1 |= t1 >> 4;  t1 &= 0xf0f0f0f0;
        t1 |= t1 >> 2;  t1 &= 0xcccccccc;
        t1 |= t1 >> 1;  t1 &= 0xaaaaaaaa;

        /* Spread the high 16 bits of |lo| to even bit positions of |lo|. */
        lo >>= 16;
        lo |= lo << 8;  lo &= 0x00ff00ff;
        lo |= lo << 4;  lo &= 0x0f0f0f0f;
        lo |= lo << 2;  lo &= 0x33333333;
        lo |= lo << 1;  lo &= 0x55555555;

        /* Spread the high 16 bits of |hi| to odd bit positions of |hi|. */
        hi &= 0xffff0000;
        hi |= hi >> 8;  hi &= 0xff00ff00;
        hi |= hi >> 4;  hi &= 0xf0f0f0f0;
        hi |= hi >> 2;  hi &= 0xcccccccc;
        hi |= hi >> 1;  hi &= 0xaaaaaaaa;

        Ai = ((uint64_t)(hi | lo) << 32) | (t1 | t0);
    }
    return Ai;
}
/*
 * SHA3_absorb can be called multiple times; at each invocation the
 * largest multiple of |r| out of |len| bytes is processed, and the
 * number of remaining bytes is returned.  This spares the caller the
 * trouble of calculating the largest multiple of |r|.  |r| can be
 * viewed as the block size.  It is commonly (1600 - 256*n)/8, e.g.
 * 168, 136, 104, 72, but can also be (1600 - 448)/8 = 144.  All this
 * means that message padding and intermediate sub-block buffering,
 * byte- or bitwise, is the caller's responsibility.
 */
  900. size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
  901. size_t r)
  902. {
  903. uint64_t *A_flat = (uint64_t *)A;
  904. size_t i, w = r / 8;
  905. assert(r < (25 * sizeof(A[0][0])) && (r % 8) == 0);
  906. while (len >= r) {
  907. for (i = 0; i < w; i++) {
  908. uint64_t Ai = (uint64_t)inp[0] | (uint64_t)inp[1] << 8 |
  909. (uint64_t)inp[2] << 16 | (uint64_t)inp[3] << 24 |
  910. (uint64_t)inp[4] << 32 | (uint64_t)inp[5] << 40 |
  911. (uint64_t)inp[6] << 48 | (uint64_t)inp[7] << 56;
  912. inp += 8;
  913. A_flat[i] ^= BitInterleave(Ai);
  914. }
  915. KeccakF1600(A);
  916. len -= r;
  917. }
  918. return len;
  919. }
  920. /*
  921. * SHA3_squeeze is called once at the end to generate |out| hash value
  922. * of |len| bytes.
  923. */
  924. void SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len, size_t r)
  925. {
  926. uint64_t *A_flat = (uint64_t *)A;
  927. size_t i, w = r / 8;
  928. assert(r < (25 * sizeof(A[0][0])) && (r % 8) == 0);
  929. while (len != 0) {
  930. for (i = 0; i < w && len != 0; i++) {
  931. uint64_t Ai = BitDeinterleave(A_flat[i]);
  932. if (len < 8) {
  933. for (i = 0; i < len; i++) {
  934. *out++ = (unsigned char)Ai;
  935. Ai >>= 8;
  936. }
  937. return;
  938. }
  939. out[0] = (unsigned char)(Ai);
  940. out[1] = (unsigned char)(Ai >> 8);
  941. out[2] = (unsigned char)(Ai >> 16);
  942. out[3] = (unsigned char)(Ai >> 24);
  943. out[4] = (unsigned char)(Ai >> 32);
  944. out[5] = (unsigned char)(Ai >> 40);
  945. out[6] = (unsigned char)(Ai >> 48);
  946. out[7] = (unsigned char)(Ai >> 56);
  947. out += 8;
  948. len -= 8;
  949. }
  950. if (len)
  951. KeccakF1600(A);
  952. }
  953. }
  954. #endif
  955. #ifdef SELFTEST
  956. /*
  957. * Post-padding one-shot implementations would look as following:
  958. *
  959. * SHA3_224 SHA3_sponge(inp, len, out, 224/8, (1600-448)/8);
  960. * SHA3_256 SHA3_sponge(inp, len, out, 256/8, (1600-512)/8);
  961. * SHA3_384 SHA3_sponge(inp, len, out, 384/8, (1600-768)/8);
  962. * SHA3_512 SHA3_sponge(inp, len, out, 512/8, (1600-1024)/8);
  963. * SHAKE_128 SHA3_sponge(inp, len, out, d, (1600-256)/8);
  964. * SHAKE_256 SHA3_sponge(inp, len, out, d, (1600-512)/8);
  965. */
  966. void SHA3_sponge(const unsigned char *inp, size_t len,
  967. unsigned char *out, size_t d, size_t r)
  968. {
  969. uint64_t A[5][5];
  970. memset(A, 0, sizeof(A));
  971. SHA3_absorb(A, inp, len, r);
  972. SHA3_squeeze(A, out, d, r);
  973. }
  974. # include <stdio.h>
/*
 * Self-test driver: runs the NIST 5-bit SHAKE128 example through
 * SHA3_sponge and compares 512 bytes of output against the published
 * expected value.  Returns 0 on success, 1 on mismatch.
 */
int main()
{
    /*
     * This is 5-bit SHAKE128 test from http://csrc.nist.gov/groups/ST/toolkit/examples.html#aHashing
     */
    /*
     * One full 168-byte SHAKE128 rate block, padded by hand:
     * 0xf3,0x03 carry the 5-bit message plus domain-separation/padding
     * bits, the rest is zero except the final 0x80 set below.
     */
    unsigned char test[168] = { '\xf3', '\x3' };
    unsigned char out[512];
    size_t i;
    /* Expected output from the official NIST example. */
    static const unsigned char result[512] = {
        0x2E, 0x0A, 0xBF, 0xBA, 0x83, 0xE6, 0x72, 0x0B,
        0xFB, 0xC2, 0x25, 0xFF, 0x6B, 0x7A, 0xB9, 0xFF,
        0xCE, 0x58, 0xBA, 0x02, 0x7E, 0xE3, 0xD8, 0x98,
        0x76, 0x4F, 0xEF, 0x28, 0x7D, 0xDE, 0xCC, 0xCA,
        0x3E, 0x6E, 0x59, 0x98, 0x41, 0x1E, 0x7D, 0xDB,
        0x32, 0xF6, 0x75, 0x38, 0xF5, 0x00, 0xB1, 0x8C,
        0x8C, 0x97, 0xC4, 0x52, 0xC3, 0x70, 0xEA, 0x2C,
        0xF0, 0xAF, 0xCA, 0x3E, 0x05, 0xDE, 0x7E, 0x4D,
        0xE2, 0x7F, 0xA4, 0x41, 0xA9, 0xCB, 0x34, 0xFD,
        0x17, 0xC9, 0x78, 0xB4, 0x2D, 0x5B, 0x7E, 0x7F,
        0x9A, 0xB1, 0x8F, 0xFE, 0xFF, 0xC3, 0xC5, 0xAC,
        0x2F, 0x3A, 0x45, 0x5E, 0xEB, 0xFD, 0xC7, 0x6C,
        0xEA, 0xEB, 0x0A, 0x2C, 0xCA, 0x22, 0xEE, 0xF6,
        0xE6, 0x37, 0xF4, 0xCA, 0xBE, 0x5C, 0x51, 0xDE,
        0xD2, 0xE3, 0xFA, 0xD8, 0xB9, 0x52, 0x70, 0xA3,
        0x21, 0x84, 0x56, 0x64, 0xF1, 0x07, 0xD1, 0x64,
        0x96, 0xBB, 0x7A, 0xBF, 0xBE, 0x75, 0x04, 0xB6,
        0xED, 0xE2, 0xE8, 0x9E, 0x4B, 0x99, 0x6F, 0xB5,
        0x8E, 0xFD, 0xC4, 0x18, 0x1F, 0x91, 0x63, 0x38,
        0x1C, 0xBE, 0x7B, 0xC0, 0x06, 0xA7, 0xA2, 0x05,
        0x98, 0x9C, 0x52, 0x6C, 0xD1, 0xBD, 0x68, 0x98,
        0x36, 0x93, 0xB4, 0xBD, 0xC5, 0x37, 0x28, 0xB2,
        0x41, 0xC1, 0xCF, 0xF4, 0x2B, 0xB6, 0x11, 0x50,
        0x2C, 0x35, 0x20, 0x5C, 0xAB, 0xB2, 0x88, 0x75,
        0x56, 0x55, 0xD6, 0x20, 0xC6, 0x79, 0x94, 0xF0,
        0x64, 0x51, 0x18, 0x7F, 0x6F, 0xD1, 0x7E, 0x04,
        0x66, 0x82, 0xBA, 0x12, 0x86, 0x06, 0x3F, 0xF8,
        0x8F, 0xE2, 0x50, 0x8D, 0x1F, 0xCA, 0xF9, 0x03,
        0x5A, 0x12, 0x31, 0xAD, 0x41, 0x50, 0xA9, 0xC9,
        0xB2, 0x4C, 0x9B, 0x2D, 0x66, 0xB2, 0xAD, 0x1B,
        0xDE, 0x0B, 0xD0, 0xBB, 0xCB, 0x8B, 0xE0, 0x5B,
        0x83, 0x52, 0x29, 0xEF, 0x79, 0x19, 0x73, 0x73,
        0x23, 0x42, 0x44, 0x01, 0xE1, 0xD8, 0x37, 0xB6,
        0x6E, 0xB4, 0xE6, 0x30, 0xFF, 0x1D, 0xE7, 0x0C,
        0xB3, 0x17, 0xC2, 0xBA, 0xCB, 0x08, 0x00, 0x1D,
        0x34, 0x77, 0xB7, 0xA7, 0x0A, 0x57, 0x6D, 0x20,
        0x86, 0x90, 0x33, 0x58, 0x9D, 0x85, 0xA0, 0x1D,
        0xDB, 0x2B, 0x66, 0x46, 0xC0, 0x43, 0xB5, 0x9F,
        0xC0, 0x11, 0x31, 0x1D, 0xA6, 0x66, 0xFA, 0x5A,
        0xD1, 0xD6, 0x38, 0x7F, 0xA9, 0xBC, 0x40, 0x15,
        0xA3, 0x8A, 0x51, 0xD1, 0xDA, 0x1E, 0xA6, 0x1D,
        0x64, 0x8D, 0xC8, 0xE3, 0x9A, 0x88, 0xB9, 0xD6,
        0x22, 0xBD, 0xE2, 0x07, 0xFD, 0xAB, 0xC6, 0xF2,
        0x82, 0x7A, 0x88, 0x0C, 0x33, 0x0B, 0xBF, 0x6D,
        0xF7, 0x33, 0x77, 0x4B, 0x65, 0x3E, 0x57, 0x30,
        0x5D, 0x78, 0xDC, 0xE1, 0x12, 0xF1, 0x0A, 0x2C,
        0x71, 0xF4, 0xCD, 0xAD, 0x92, 0xED, 0x11, 0x3E,
        0x1C, 0xEA, 0x63, 0xB9, 0x19, 0x25, 0xED, 0x28,
        0x19, 0x1E, 0x6D, 0xBB, 0xB5, 0xAA, 0x5A, 0x2A,
        0xFD, 0xA5, 0x1F, 0xC0, 0x5A, 0x3A, 0xF5, 0x25,
        0x8B, 0x87, 0x66, 0x52, 0x43, 0x55, 0x0F, 0x28,
        0x94, 0x8A, 0xE2, 0xB8, 0xBE, 0xB6, 0xBC, 0x9C,
        0x77, 0x0B, 0x35, 0xF0, 0x67, 0xEA, 0xA6, 0x41,
        0xEF, 0xE6, 0x5B, 0x1A, 0x44, 0x90, 0x9D, 0x1B,
        0x14, 0x9F, 0x97, 0xEE, 0xA6, 0x01, 0x39, 0x1C,
        0x60, 0x9E, 0xC8, 0x1D, 0x19, 0x30, 0xF5, 0x7C,
        0x18, 0xA4, 0xE0, 0xFA, 0xB4, 0x91, 0xD1, 0xCA,
        0xDF, 0xD5, 0x04, 0x83, 0x44, 0x9E, 0xDC, 0x0F,
        0x07, 0xFF, 0xB2, 0x4D, 0x2C, 0x6F, 0x9A, 0x9A,
        0x3B, 0xFF, 0x39, 0xAE, 0x3D, 0x57, 0xF5, 0x60,
        0x65, 0x4D, 0x7D, 0x75, 0xC9, 0x08, 0xAB, 0xE6,
        0x25, 0x64, 0x75, 0x3E, 0xAC, 0x39, 0xD7, 0x50,
        0x3D, 0xA6, 0xD3, 0x7C, 0x2E, 0x32, 0xE1, 0xAF,
        0x3B, 0x8A, 0xEC, 0x8A, 0xE3, 0x06, 0x9C, 0xD9
    };

    /* Final byte of the pad10*1 padding. */
    test[167] = '\x80';
    /* Absorb the one pre-padded block, squeeze 512 bytes (r = 168). */
    SHA3_sponge(test, sizeof(test), out, sizeof(out), sizeof(test));

    /*
     * Rationale behind keeping output [formatted as below] is that
     * one should be able to redirect it to a file, then copy-n-paste
     * final "output val" from official example to another file, and
     * compare the two with diff(1).
     */
    for (i = 0; i < sizeof(out);) {
        printf("%02X", out[i]);
        /* 16 bytes per line, space-separated, newline at row end. */
        printf(++i % 16 && i != sizeof(out) ? " " : "\n");
    }

    if (memcmp(out,result,sizeof(out))) {
        fprintf(stderr,"failure\n");
        return 1;
    } else {
        fprintf(stderr,"success\n");
        return 0;
    }
}
  1069. #endif