/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/e_os2.h>
#include <string.h>
#include <assert.h>

size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
                   size_t r);
void SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len, size_t r,
                  int next);

#if !defined(KECCAK1600_ASM) || defined(SELFTEST)

/*
 * Choose some sensible defaults
 */
#if !defined(KECCAK_REF) && !defined(KECCAK_1X) && !defined(KECCAK_1X_ALT) && \
    !defined(KECCAK_2X) && !defined(KECCAK_INPLACE)
# define KECCAK_2X      /* default to KECCAK_2X variant */
#endif

#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
    (defined(__x86_64) && !defined(__BMI__)) || defined(_M_X64) || \
    defined(__mips) || defined(__riscv) || defined(__s390__) || \
    defined(__EMSCRIPTEN__)
/*
 * These don't have an "and with complement" instruction, so minimize the
 * number of "not" operations. Implemented only in the [default] KECCAK_2X
 * variant.
 */
# define KECCAK_COMPLEMENTING_TRANSFORM
#endif
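
/*
 * A minimal sketch of why complementing helps: the chi step computes
 * a ^ (~b & c), and by De Morgan ~b & c == ~(b | ~c), hence
 *
 *     a ^ (~b & c) == ~a ^ (b | ~c)
 *
 * So if selected lanes are kept in complemented form, the same result can
 * be produced with plain AND/OR/XOR and only occasional NOTs; this is the
 * lane-complementing transform applied by the KECCAK_2X round below (see
 * "Keccak implementation overview" for the full pattern of which lanes are
 * complemented).
 */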

#if defined(__x86_64__) || defined(__aarch64__) || \
    defined(__mips64) || defined(__ia64) || \
    (defined(__VMS) && !defined(__vax))
/*
 * These are available even in ILP32 flavours, but even then they are
 * capable of performing 64-bit operations as efficiently as in *P64.
 * Since it's not a given that we can rely on sizeof(void *) here, just
 * bypass that check.
 */
# define BIT_INTERLEAVE (0)
#else
# define BIT_INTERLEAVE (sizeof(void *) < 8)
#endif

#define ROL32(a, offset) (((a) << (offset)) | ((a) >> ((32 - (offset)) & 31)))

static uint64_t ROL64(uint64_t val, int offset)
{
    if (offset == 0) {
        return val;
    } else if (!BIT_INTERLEAVE) {
        return (val << offset) | (val >> (64 - offset));
    } else {
        uint32_t hi = (uint32_t)(val >> 32), lo = (uint32_t)val;

        if (offset & 1) {
            uint32_t tmp = hi;

            offset >>= 1;
            hi = ROL32(lo, offset);
            lo = ROL32(tmp, offset + 1);
        } else {
            offset >>= 1;
            lo = ROL32(lo, offset);
            hi = ROL32(hi, offset);
        }

        return ((uint64_t)hi << 32) | lo;
    }
}
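
/*
 * In the interleaved representation the even-numbered bits of a lane live
 * in the low 32-bit half and the odd-numbered bits in the high half (see
 * BitInterleave() further down), so a 64-bit rotation by |offset| becomes
 * two 32-bit rotations by roughly offset/2 (offset>>1 and offset>>1 + 1
 * when odd), with the halves swapping roles when |offset| is odd. A
 * self-check sketch (assuming those helpers): for any x and 0 <= n < 64,
 *
 *     BitDeinterleave(ROL64(BitInterleave(x), n))
 *
 * equals (x << n) | (x >> ((64 - n) & 63)), i.e. a plain 64-bit rotation.
 */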

static const unsigned char rhotates[5][5] = {
    {  0,  1, 62, 28, 27 },
    { 36, 44,  6, 55, 20 },
    {  3, 10, 43, 25, 39 },
    { 41, 45, 15, 21,  8 },
    { 18,  2, 61, 56, 14 }
};
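
/*
 * These are the rho rotation amounts from FIPS 202, one per lane, stored
 * with the same [y][x] index order as the state A[][], i.e. rhotates[y][x]
 * is the rotation applied to the lane the specification calls A[x,y].
 */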

static const uint64_t iotas[] = {
    BIT_INTERLEAVE ? 0x0000000000000001ULL : 0x0000000000000001ULL,
    BIT_INTERLEAVE ? 0x0000008900000000ULL : 0x0000000000008082ULL,
    BIT_INTERLEAVE ? 0x8000008b00000000ULL : 0x800000000000808aULL,
    BIT_INTERLEAVE ? 0x8000808000000000ULL : 0x8000000080008000ULL,
    BIT_INTERLEAVE ? 0x0000008b00000001ULL : 0x000000000000808bULL,
    BIT_INTERLEAVE ? 0x0000800000000001ULL : 0x0000000080000001ULL,
    BIT_INTERLEAVE ? 0x8000808800000001ULL : 0x8000000080008081ULL,
    BIT_INTERLEAVE ? 0x8000008200000001ULL : 0x8000000000008009ULL,
    BIT_INTERLEAVE ? 0x0000000b00000000ULL : 0x000000000000008aULL,
    BIT_INTERLEAVE ? 0x0000000a00000000ULL : 0x0000000000000088ULL,
    BIT_INTERLEAVE ? 0x0000808200000001ULL : 0x0000000080008009ULL,
    BIT_INTERLEAVE ? 0x0000800300000000ULL : 0x000000008000000aULL,
    BIT_INTERLEAVE ? 0x0000808b00000001ULL : 0x000000008000808bULL,
    BIT_INTERLEAVE ? 0x8000000b00000001ULL : 0x800000000000008bULL,
    BIT_INTERLEAVE ? 0x8000008a00000001ULL : 0x8000000000008089ULL,
    BIT_INTERLEAVE ? 0x8000008100000001ULL : 0x8000000000008003ULL,
    BIT_INTERLEAVE ? 0x8000008100000000ULL : 0x8000000000008002ULL,
    BIT_INTERLEAVE ? 0x8000000800000000ULL : 0x8000000000000080ULL,
    BIT_INTERLEAVE ? 0x0000008300000000ULL : 0x000000000000800aULL,
    BIT_INTERLEAVE ? 0x8000800300000000ULL : 0x800000008000000aULL,
    BIT_INTERLEAVE ? 0x8000808800000001ULL : 0x8000000080008081ULL,
    BIT_INTERLEAVE ? 0x8000008800000000ULL : 0x8000000000008080ULL,
    BIT_INTERLEAVE ? 0x0000800000000001ULL : 0x0000000080000001ULL,
    BIT_INTERLEAVE ? 0x8000808200000000ULL : 0x8000000080008008ULL
};
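
/*
 * Each line above holds the same FIPS 202 round constant in two forms: the
 * right-hand value is the canonical 64-bit constant, the left-hand value is
 * that constant as produced by BitInterleave() below (even bits in the low
 * 32-bit word, odd bits in the high word), so iotas[i] can be XORed straight
 * into A[0][0] in whichever representation the state uses.
 */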

#if defined(KECCAK_REF)
/*
 * This is a straightforward, "maximum clarity" implementation aiming to
 * resemble section 3.2 of the FIPS PUB 202 "SHA-3 Standard:
 * Permutation-Based Hash and Extendable-Output Functions" as much as
 * possible. With one caveat. Because of the way C stores matrices,
 * references to A[x,y] in the specification are presented as A[y][x].
 * The implementation unrolls the inner x-loops so that the modulo 5
 * operations are explicitly pre-computed.
 */
static void Theta(uint64_t A[5][5])
{
    uint64_t C[5], D[5];
    size_t y;

    C[0] = A[0][0];
    C[1] = A[0][1];
    C[2] = A[0][2];
    C[3] = A[0][3];
    C[4] = A[0][4];

    for (y = 1; y < 5; y++) {
        C[0] ^= A[y][0];
        C[1] ^= A[y][1];
        C[2] ^= A[y][2];
        C[3] ^= A[y][3];
        C[4] ^= A[y][4];
    }

    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    for (y = 0; y < 5; y++) {
        A[y][0] ^= D[0];
        A[y][1] ^= D[1];
        A[y][2] ^= D[2];
        A[y][3] ^= D[3];
        A[y][4] ^= D[4];
    }
}

static void Rho(uint64_t A[5][5])
{
    size_t y;

    for (y = 0; y < 5; y++) {
        A[y][0] = ROL64(A[y][0], rhotates[y][0]);
        A[y][1] = ROL64(A[y][1], rhotates[y][1]);
        A[y][2] = ROL64(A[y][2], rhotates[y][2]);
        A[y][3] = ROL64(A[y][3], rhotates[y][3]);
        A[y][4] = ROL64(A[y][4], rhotates[y][4]);
    }
}

static void Pi(uint64_t A[5][5])
{
    uint64_t T[5][5];

    /*
     * T = A
     * A[y][x] = T[x][(3*y+x)%5]
     */
    memcpy(T, A, sizeof(T));

    A[0][0] = T[0][0];
    A[0][1] = T[1][1];
    A[0][2] = T[2][2];
    A[0][3] = T[3][3];
    A[0][4] = T[4][4];

    A[1][0] = T[0][3];
    A[1][1] = T[1][4];
    A[1][2] = T[2][0];
    A[1][3] = T[3][1];
    A[1][4] = T[4][2];

    A[2][0] = T[0][1];
    A[2][1] = T[1][2];
    A[2][2] = T[2][3];
    A[2][3] = T[3][4];
    A[2][4] = T[4][0];

    A[3][0] = T[0][4];
    A[3][1] = T[1][0];
    A[3][2] = T[2][1];
    A[3][3] = T[3][2];
    A[3][4] = T[4][3];

    A[4][0] = T[0][2];
    A[4][1] = T[1][3];
    A[4][2] = T[2][4];
    A[4][3] = T[3][0];
    A[4][4] = T[4][1];
}

static void Chi(uint64_t A[5][5])
{
    uint64_t C[5];
    size_t y;

    for (y = 0; y < 5; y++) {
        C[0] = A[y][0] ^ (~A[y][1] & A[y][2]);
        C[1] = A[y][1] ^ (~A[y][2] & A[y][3]);
        C[2] = A[y][2] ^ (~A[y][3] & A[y][4]);
        C[3] = A[y][3] ^ (~A[y][4] & A[y][0]);
        C[4] = A[y][4] ^ (~A[y][0] & A[y][1]);

        A[y][0] = C[0];
        A[y][1] = C[1];
        A[y][2] = C[2];
        A[y][3] = C[3];
        A[y][4] = C[4];
    }
}

static void Iota(uint64_t A[5][5], size_t i)
{
    assert(i < (sizeof(iotas) / sizeof(iotas[0])));
    A[0][0] ^= iotas[i];
}

static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Theta(A);
        Rho(A);
        Pi(A);
        Chi(A);
        Iota(A, i);
    }
}
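
/*
 * For reference, the loop body above is the round function
 *
 *     Rnd(A, i) = Iota(Chi(Pi(Rho(Theta(A)))), i)
 *
 * of FIPS 202, applied for i = 0, ..., 23 to give KECCAK-p[1600, 24],
 * i.e. KECCAK-f[1600].
 */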
#elif defined(KECCAK_1X)
/*
 * This implementation is an optimization of the code above featuring
 * unrolling of the even y-loops, their fusion and code motion. It also
 * minimizes temporary storage. A compiler would normally do all these
 * things for you; the purpose of the manual optimization is to provide an
 * "unobscured" reference for an assembly implementation [in case this
 * approach is chosen for implementation on some platform]. In a nutshell
 * it's the equivalent of the "plane-per-plane processing" approach
 * discussed in section 2.4 of "Keccak implementation overview".
 */
static void Round(uint64_t A[5][5], size_t i)
{
    uint64_t C[5], E[2];        /* registers */
    uint64_t D[5], T[2][5];     /* memory    */

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

#if defined(__arm__)
    D[1] = E[0] = ROL64(C[2], 1) ^ C[0];
    D[4] = E[1] = ROL64(C[0], 1) ^ C[3];
    D[0] = C[0] = ROL64(C[1], 1) ^ C[4];
    D[2] = C[1] = ROL64(C[3], 1) ^ C[1];
    D[3] = C[2] = ROL64(C[4], 1) ^ C[2];

    T[0][0] = A[3][0] ^ C[0];   /* borrow T[0][0] */
    T[0][1] = A[0][1] ^ E[0];   /* D[1] */
    T[0][2] = A[0][2] ^ C[1];   /* D[2] */
    T[0][3] = A[0][3] ^ C[2];   /* D[3] */
    T[0][4] = A[0][4] ^ E[1];   /* D[4] */

    C[3] = ROL64(A[3][3] ^ C[2], rhotates[3][3]);   /* D[3] */
    C[4] = ROL64(A[4][4] ^ E[1], rhotates[4][4]);   /* D[4] */
    C[0] = A[0][0] ^ C[0];      /* rotate by 0 */   /* D[0] */
    C[2] = ROL64(A[2][2] ^ C[1], rhotates[2][2]);   /* D[2] */
    C[1] = ROL64(A[1][1] ^ E[0], rhotates[1][1]);   /* D[1] */
#else
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    T[0][0] = A[3][0] ^ D[0];   /* borrow T[0][0] */
    T[0][1] = A[0][1] ^ D[1];
    T[0][2] = A[0][2] ^ D[2];
    T[0][3] = A[0][3] ^ D[3];
    T[0][4] = A[0][4] ^ D[4];

    C[0] = A[0][0] ^ D[0];      /* rotate by 0 */
    C[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
    C[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
    C[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
    C[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);
#endif

    A[0][0] = C[0] ^ (~C[1] & C[2]) ^ iotas[i];
    A[0][1] = C[1] ^ (~C[2] & C[3]);
    A[0][2] = C[2] ^ (~C[3] & C[4]);
    A[0][3] = C[3] ^ (~C[4] & C[0]);
    A[0][4] = C[4] ^ (~C[0] & C[1]);

    T[1][0] = A[1][0] ^ (C[3] = D[0]);
    T[1][1] = A[2][1] ^ (C[4] = D[1]);  /* borrow T[1][1] */
    T[1][2] = A[1][2] ^ (E[0] = D[2]);
    T[1][3] = A[1][3] ^ (E[1] = D[3]);
    T[1][4] = A[2][4] ^ (C[2] = D[4]);  /* borrow T[1][4] */

    C[0] = ROL64(T[0][3],        rhotates[0][3]);
    C[1] = ROL64(A[1][4] ^ C[2], rhotates[1][4]);   /* D[4] */
    C[2] = ROL64(A[2][0] ^ C[3], rhotates[2][0]);   /* D[0] */
    C[3] = ROL64(A[3][1] ^ C[4], rhotates[3][1]);   /* D[1] */
    C[4] = ROL64(A[4][2] ^ E[0], rhotates[4][2]);   /* D[2] */

    A[1][0] = C[0] ^ (~C[1] & C[2]);
    A[1][1] = C[1] ^ (~C[2] & C[3]);
    A[1][2] = C[2] ^ (~C[3] & C[4]);
    A[1][3] = C[3] ^ (~C[4] & C[0]);
    A[1][4] = C[4] ^ (~C[0] & C[1]);

    C[0] = ROL64(T[0][1],        rhotates[0][1]);
    C[1] = ROL64(T[1][2],        rhotates[1][2]);
    C[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    C[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
    C[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);

    A[2][0] = C[0] ^ (~C[1] & C[2]);
    A[2][1] = C[1] ^ (~C[2] & C[3]);
    A[2][2] = C[2] ^ (~C[3] & C[4]);
    A[2][3] = C[3] ^ (~C[4] & C[0]);
    A[2][4] = C[4] ^ (~C[0] & C[1]);

    C[0] = ROL64(T[0][4],        rhotates[0][4]);
    C[1] = ROL64(T[1][0],        rhotates[1][0]);
    C[2] = ROL64(T[1][1],        rhotates[2][1]);   /* originally A[2][1] */
    C[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    C[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);

    A[3][0] = C[0] ^ (~C[1] & C[2]);
    A[3][1] = C[1] ^ (~C[2] & C[3]);
    A[3][2] = C[2] ^ (~C[3] & C[4]);
    A[3][3] = C[3] ^ (~C[4] & C[0]);
    A[3][4] = C[4] ^ (~C[0] & C[1]);

    C[0] = ROL64(T[0][2],        rhotates[0][2]);
    C[1] = ROL64(T[1][3],        rhotates[1][3]);
    C[2] = ROL64(T[1][4],        rhotates[2][4]);   /* originally A[2][4] */
    C[3] = ROL64(T[0][0],        rhotates[3][0]);   /* originally A[3][0] */
    C[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    A[4][0] = C[0] ^ (~C[1] & C[2]);
    A[4][1] = C[1] ^ (~C[2] & C[3]);
    A[4][2] = C[2] ^ (~C[3] & C[4]);
    A[4][3] = C[3] ^ (~C[4] & C[0]);
    A[4][4] = C[4] ^ (~C[0] & C[1]);
}

static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Round(A, i);
    }
}
#elif defined(KECCAK_1X_ALT)
/*
 * This is a variant of KECCAK_1X above that reduces the requirement for
 * temporary storage even further, but at the cost of more updates to
 * A[][]. It's less suitable if A[][] is memory bound, but better if it's
 * register bound.
 */
static void Round(uint64_t A[5][5], size_t i)
{
    uint64_t C[5], D[5];

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

    D[1] = C[0] ^  ROL64(C[2], 1);
    D[2] = C[1] ^  ROL64(C[3], 1);
    D[3] = C[2] ^= ROL64(C[4], 1);
    D[4] = C[3] ^= ROL64(C[0], 1);
    D[0] = C[4] ^= ROL64(C[1], 1);

    A[0][1] ^= D[1];
    A[1][1] ^= D[1];
    A[2][1] ^= D[1];
    A[3][1] ^= D[1];
    A[4][1] ^= D[1];

    A[0][2] ^= D[2];
    A[1][2] ^= D[2];
    A[2][2] ^= D[2];
    A[3][2] ^= D[2];
    A[4][2] ^= D[2];

    A[0][3] ^= C[2];
    A[1][3] ^= C[2];
    A[2][3] ^= C[2];
    A[3][3] ^= C[2];
    A[4][3] ^= C[2];

    A[0][4] ^= C[3];
    A[1][4] ^= C[3];
    A[2][4] ^= C[3];
    A[3][4] ^= C[3];
    A[4][4] ^= C[3];

    A[0][0] ^= C[4];
    A[1][0] ^= C[4];
    A[2][0] ^= C[4];
    A[3][0] ^= C[4];
    A[4][0] ^= C[4];

    C[1] = A[0][1];
    C[2] = A[0][2];
    C[3] = A[0][3];
    C[4] = A[0][4];

    A[0][1] = ROL64(A[1][1], rhotates[1][1]);
    A[0][2] = ROL64(A[2][2], rhotates[2][2]);
    A[0][3] = ROL64(A[3][3], rhotates[3][3]);
    A[0][4] = ROL64(A[4][4], rhotates[4][4]);

    A[1][1] = ROL64(A[1][4], rhotates[1][4]);
    A[2][2] = ROL64(A[2][3], rhotates[2][3]);
    A[3][3] = ROL64(A[3][2], rhotates[3][2]);
    A[4][4] = ROL64(A[4][1], rhotates[4][1]);

    A[1][4] = ROL64(A[4][2], rhotates[4][2]);
    A[2][3] = ROL64(A[3][4], rhotates[3][4]);
    A[3][2] = ROL64(A[2][1], rhotates[2][1]);
    A[4][1] = ROL64(A[1][3], rhotates[1][3]);

    A[4][2] = ROL64(A[2][4], rhotates[2][4]);
    A[3][4] = ROL64(A[4][3], rhotates[4][3]);
    A[2][1] = ROL64(A[1][2], rhotates[1][2]);
    A[1][3] = ROL64(A[3][1], rhotates[3][1]);

    A[2][4] = ROL64(A[4][0], rhotates[4][0]);
    A[4][3] = ROL64(A[3][0], rhotates[3][0]);
    A[1][2] = ROL64(A[2][0], rhotates[2][0]);
    A[3][1] = ROL64(A[1][0], rhotates[1][0]);

    A[1][0] = ROL64(C[3],    rhotates[0][3]);
    A[2][0] = ROL64(C[1],    rhotates[0][1]);
    A[3][0] = ROL64(C[4],    rhotates[0][4]);
    A[4][0] = ROL64(C[2],    rhotates[0][2]);

    C[0] = A[0][0];
    C[1] = A[1][0];
    D[0] = A[0][1];
    D[1] = A[1][1];

    A[0][0] ^= (~A[0][1] & A[0][2]);
    A[1][0] ^= (~A[1][1] & A[1][2]);
    A[0][1] ^= (~A[0][2] & A[0][3]);
    A[1][1] ^= (~A[1][2] & A[1][3]);
    A[0][2] ^= (~A[0][3] & A[0][4]);
    A[1][2] ^= (~A[1][3] & A[1][4]);
    A[0][3] ^= (~A[0][4] & C[0]);
    A[1][3] ^= (~A[1][4] & C[1]);
    A[0][4] ^= (~C[0]    & D[0]);
    A[1][4] ^= (~C[1]    & D[1]);

    C[2] = A[2][0];
    C[3] = A[3][0];
    D[2] = A[2][1];
    D[3] = A[3][1];

    A[2][0] ^= (~A[2][1] & A[2][2]);
    A[3][0] ^= (~A[3][1] & A[3][2]);
    A[2][1] ^= (~A[2][2] & A[2][3]);
    A[3][1] ^= (~A[3][2] & A[3][3]);
    A[2][2] ^= (~A[2][3] & A[2][4]);
    A[3][2] ^= (~A[3][3] & A[3][4]);
    A[2][3] ^= (~A[2][4] & C[2]);
    A[3][3] ^= (~A[3][4] & C[3]);
    A[2][4] ^= (~C[2]    & D[2]);
    A[3][4] ^= (~C[3]    & D[3]);

    C[4] = A[4][0];
    D[4] = A[4][1];

    A[4][0] ^= (~A[4][1] & A[4][2]);
    A[4][1] ^= (~A[4][2] & A[4][3]);
    A[4][2] ^= (~A[4][3] & A[4][4]);
    A[4][3] ^= (~A[4][4] & C[4]);
    A[4][4] ^= (~C[4]    & D[4]);

    A[0][0] ^= iotas[i];
}

static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i++) {
        Round(A, i);
    }
}
#elif defined(KECCAK_2X)
/*
 * This implementation is a variant of KECCAK_1X above with the outer-most
 * round loop unrolled twice. This allows the temporary storage to be taken
 * out of the round procedure and simplifies references to it by alternating
 * it with the actual data (see the round loop below). Originally it was
 * meant rather as a reference for an assembly implementation, but it seems
 * to play best with compilers [as well as providing the best instruction
 * per processed byte ratio at a minimal round unroll factor]...
 */
static void Round(uint64_t R[5][5], uint64_t A[5][5], size_t i)
{
    uint64_t C[5], D[5];

    assert(i < (sizeof(iotas) / sizeof(iotas[0])));

    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    C[0] =       A[0][0] ^ D[0];    /* rotate by 0 */
    C[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
    C[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
    C[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
    C[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i];
    R[0][1] = C[1] ^ (~C[2] | C[3]);
    R[0][2] = C[2] ^ ( C[3] & C[4]);
    R[0][3] = C[3] ^ ( C[4] | C[0]);
    R[0][4] = C[4] ^ ( C[0] & C[1]);
#else
    R[0][0] = C[0] ^ (~C[1] & C[2]) ^ iotas[i];
    R[0][1] = C[1] ^ (~C[2] & C[3]);
    R[0][2] = C[2] ^ (~C[3] & C[4]);
    R[0][3] = C[3] ^ (~C[4] & C[0]);
    R[0][4] = C[4] ^ (~C[0] & C[1]);
#endif

    C[0] = ROL64(A[0][3] ^ D[3], rhotates[0][3]);
    C[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    C[2] = ROL64(A[2][0] ^ D[0], rhotates[2][0]);
    C[3] = ROL64(A[3][1] ^ D[1], rhotates[3][1]);
    C[4] = ROL64(A[4][2] ^ D[2], rhotates[4][2]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[1][0] = C[0] ^ (C[1] |  C[2]);
    R[1][1] = C[1] ^ (C[2] &  C[3]);
    R[1][2] = C[2] ^ (C[3] | ~C[4]);
    R[1][3] = C[3] ^ (C[4] |  C[0]);
    R[1][4] = C[4] ^ (C[0] &  C[1]);
#else
    R[1][0] = C[0] ^ (~C[1] & C[2]);
    R[1][1] = C[1] ^ (~C[2] & C[3]);
    R[1][2] = C[2] ^ (~C[3] & C[4]);
    R[1][3] = C[3] ^ (~C[4] & C[0]);
    R[1][4] = C[4] ^ (~C[0] & C[1]);
#endif

    C[0] = ROL64(A[0][1] ^ D[1], rhotates[0][1]);
    C[1] = ROL64(A[1][2] ^ D[2], rhotates[1][2]);
    C[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    C[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
    C[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[2][0] =  C[0] ^ ( C[1] | C[2]);
    R[2][1] =  C[1] ^ ( C[2] & C[3]);
    R[2][2] =  C[2] ^ (~C[3] & C[4]);
    R[2][3] = ~C[3] ^ ( C[4] | C[0]);
    R[2][4] =  C[4] ^ ( C[0] & C[1]);
#else
    R[2][0] = C[0] ^ (~C[1] & C[2]);
    R[2][1] = C[1] ^ (~C[2] & C[3]);
    R[2][2] = C[2] ^ (~C[3] & C[4]);
    R[2][3] = C[3] ^ (~C[4] & C[0]);
    R[2][4] = C[4] ^ (~C[0] & C[1]);
#endif

    C[0] = ROL64(A[0][4] ^ D[4], rhotates[0][4]);
    C[1] = ROL64(A[1][0] ^ D[0], rhotates[1][0]);
    C[2] = ROL64(A[2][1] ^ D[1], rhotates[2][1]);
    C[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    C[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[3][0] =  C[0] ^ ( C[1] & C[2]);
    R[3][1] =  C[1] ^ ( C[2] | C[3]);
    R[3][2] =  C[2] ^ (~C[3] | C[4]);
    R[3][3] = ~C[3] ^ ( C[4] & C[0]);
    R[3][4] =  C[4] ^ ( C[0] | C[1]);
#else
    R[3][0] = C[0] ^ (~C[1] & C[2]);
    R[3][1] = C[1] ^ (~C[2] & C[3]);
    R[3][2] = C[2] ^ (~C[3] & C[4]);
    R[3][3] = C[3] ^ (~C[4] & C[0]);
    R[3][4] = C[4] ^ (~C[0] & C[1]);
#endif

    C[0] = ROL64(A[0][2] ^ D[2], rhotates[0][2]);
    C[1] = ROL64(A[1][3] ^ D[3], rhotates[1][3]);
    C[2] = ROL64(A[2][4] ^ D[4], rhotates[2][4]);
    C[3] = ROL64(A[3][0] ^ D[0], rhotates[3][0]);
    C[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    R[4][0] =  C[0] ^ (~C[1] & C[2]);
    R[4][1] = ~C[1] ^ ( C[2] | C[3]);
    R[4][2] =  C[2] ^ ( C[3] & C[4]);
    R[4][3] =  C[3] ^ ( C[4] | C[0]);
    R[4][4] =  C[4] ^ ( C[0] & C[1]);
#else
    R[4][0] = C[0] ^ (~C[1] & C[2]);
    R[4][1] = C[1] ^ (~C[2] & C[3]);
    R[4][2] = C[2] ^ (~C[3] & C[4]);
    R[4][3] = C[3] ^ (~C[4] & C[0]);
    R[4][4] = C[4] ^ (~C[0] & C[1]);
#endif
}

static void KeccakF1600(uint64_t A[5][5])
{
    uint64_t T[5][5];
    size_t i;

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    A[0][1] = ~A[0][1];
    A[0][2] = ~A[0][2];
    A[1][3] = ~A[1][3];
    A[2][2] = ~A[2][2];
    A[3][2] = ~A[3][2];
    A[4][0] = ~A[4][0];
#endif

    for (i = 0; i < 24; i += 2) {
        Round(T, A, i);
        Round(A, T, i + 1);
    }

#ifdef KECCAK_COMPLEMENTING_TRANSFORM
    A[0][1] = ~A[0][1];
    A[0][2] = ~A[0][2];
    A[1][3] = ~A[1][3];
    A[2][2] = ~A[2][2];
    A[3][2] = ~A[3][2];
    A[4][0] = ~A[4][0];
#endif
}
#else   /* define KECCAK_INPLACE to compile this code path */
/*
 * This implementation is KECCAK_1X from above combined 4 times with a
 * twist that allows the temporary storage to be omitted and the processing
 * to be performed in place. It's discussed in section 2.5 of "Keccak
 * implementation overview". It's likely to be best suited for processors
 * with a large register bank... On the other hand, a processor with a
 * large register bank can just as well use KECCAK_1X_ALT; it would be as
 * fast but much more compact...
 */
static void FourRounds(uint64_t A[5][5], size_t i)
{
    uint64_t B[5], C[5], D[5];

    assert(i <= (sizeof(iotas) / sizeof(iotas[0]) - 4));

    /* Round 4*n */
    C[0] = A[0][0] ^ A[1][0] ^ A[2][0] ^ A[3][0] ^ A[4][0];
    C[1] = A[0][1] ^ A[1][1] ^ A[2][1] ^ A[3][1] ^ A[4][1];
    C[2] = A[0][2] ^ A[1][2] ^ A[2][2] ^ A[3][2] ^ A[4][2];
    C[3] = A[0][3] ^ A[1][3] ^ A[2][3] ^ A[3][3] ^ A[4][3];
    C[4] = A[0][4] ^ A[1][4] ^ A[2][4] ^ A[3][4] ^ A[4][4];

    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    B[0] =       A[0][0] ^ D[0];    /* rotate by 0 */
    B[1] = ROL64(A[1][1] ^ D[1], rhotates[1][1]);
    B[2] = ROL64(A[2][2] ^ D[2], rhotates[2][2]);
    B[3] = ROL64(A[3][3] ^ D[3], rhotates[3][3]);
    B[4] = ROL64(A[4][4] ^ D[4], rhotates[4][4]);

    C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i];
    C[1] = A[1][1] = B[1] ^ (~B[2] & B[3]);
    C[2] = A[2][2] = B[2] ^ (~B[3] & B[4]);
    C[3] = A[3][3] = B[3] ^ (~B[4] & B[0]);
    C[4] = A[4][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[0][3] ^ D[3], rhotates[0][3]);
    B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    B[2] = ROL64(A[2][0] ^ D[0], rhotates[2][0]);
    B[3] = ROL64(A[3][1] ^ D[1], rhotates[3][1]);
    B[4] = ROL64(A[4][2] ^ D[2], rhotates[4][2]);

    C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[3][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[4][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[0][1] ^ D[1], rhotates[0][1]);
    B[1] = ROL64(A[1][2] ^ D[2], rhotates[1][2]);
    B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    B[3] = ROL64(A[3][4] ^ D[4], rhotates[3][4]);
    B[4] = ROL64(A[4][0] ^ D[0], rhotates[4][0]);

    C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[1][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[3][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[0][4] ^ D[4], rhotates[0][4]);
    B[1] = ROL64(A[1][0] ^ D[0], rhotates[1][0]);
    B[2] = ROL64(A[2][1] ^ D[1], rhotates[2][1]);
    B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    B[4] = ROL64(A[4][3] ^ D[3], rhotates[4][3]);

    C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[2][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[4][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[0][2] ^ D[2], rhotates[0][2]);
    B[1] = ROL64(A[1][3] ^ D[3], rhotates[1][3]);
    B[2] = ROL64(A[2][4] ^ D[4], rhotates[2][4]);
    B[3] = ROL64(A[3][0] ^ D[0], rhotates[3][0]);
    B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[1][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[2][4] = B[4] ^ (~B[0] & B[1]);

    /* Round 4*n+1 */
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    B[0] =       A[0][0] ^ D[0];    /* rotate by 0 */
    B[1] = ROL64(A[3][1] ^ D[1], rhotates[1][1]);
    B[2] = ROL64(A[1][2] ^ D[2], rhotates[2][2]);
    B[3] = ROL64(A[4][3] ^ D[3], rhotates[3][3]);
    B[4] = ROL64(A[2][4] ^ D[4], rhotates[4][4]);

    C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 1];
    C[1] = A[3][1] = B[1] ^ (~B[2] & B[3]);
    C[2] = A[1][2] = B[2] ^ (~B[3] & B[4]);
    C[3] = A[4][3] = B[3] ^ (~B[4] & B[0]);
    C[4] = A[2][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[3][3] ^ D[3], rhotates[0][3]);
    B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    B[2] = ROL64(A[4][0] ^ D[0], rhotates[2][0]);
    B[3] = ROL64(A[2][1] ^ D[1], rhotates[3][1]);
    B[4] = ROL64(A[0][2] ^ D[2], rhotates[4][2]);

    C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[2][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[3][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[1][1] ^ D[1], rhotates[0][1]);
    B[1] = ROL64(A[4][2] ^ D[2], rhotates[1][2]);
    B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    B[3] = ROL64(A[0][4] ^ D[4], rhotates[3][4]);
    B[4] = ROL64(A[3][0] ^ D[0], rhotates[4][0]);

    C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[1][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[4][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[4][4] ^ D[4], rhotates[0][4]);
    B[1] = ROL64(A[2][0] ^ D[0], rhotates[1][0]);
    B[2] = ROL64(A[0][1] ^ D[1], rhotates[2][1]);
    B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    B[4] = ROL64(A[1][3] ^ D[3], rhotates[4][3]);

    C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[1][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[4][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[2][2] ^ D[2], rhotates[0][2]);
    B[1] = ROL64(A[0][3] ^ D[3], rhotates[1][3]);
    B[2] = ROL64(A[3][4] ^ D[4], rhotates[2][4]);
    B[3] = ROL64(A[1][0] ^ D[0], rhotates[3][0]);
    B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[2][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[3][4] = B[4] ^ (~B[0] & B[1]);

    /* Round 4*n+2 */
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    B[0] =       A[0][0] ^ D[0];    /* rotate by 0 */
    B[1] = ROL64(A[2][1] ^ D[1], rhotates[1][1]);
    B[2] = ROL64(A[4][2] ^ D[2], rhotates[2][2]);
    B[3] = ROL64(A[1][3] ^ D[3], rhotates[3][3]);
    B[4] = ROL64(A[3][4] ^ D[4], rhotates[4][4]);

    C[0] = A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 2];
    C[1] = A[2][1] = B[1] ^ (~B[2] & B[3]);
    C[2] = A[4][2] = B[2] ^ (~B[3] & B[4]);
    C[3] = A[1][3] = B[3] ^ (~B[4] & B[0]);
    C[4] = A[3][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[4][3] ^ D[3], rhotates[0][3]);
    B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    B[2] = ROL64(A[3][0] ^ D[0], rhotates[2][0]);
    B[3] = ROL64(A[0][1] ^ D[1], rhotates[3][1]);
    B[4] = ROL64(A[2][2] ^ D[2], rhotates[4][2]);

    C[0] ^= A[3][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[0][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[2][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[4][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[1][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[3][1] ^ D[1], rhotates[0][1]);
    B[1] = ROL64(A[0][2] ^ D[2], rhotates[1][2]);
    B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    B[3] = ROL64(A[4][4] ^ D[4], rhotates[3][4]);
    B[4] = ROL64(A[1][0] ^ D[0], rhotates[4][0]);

    C[0] ^= A[1][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[3][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[0][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[2][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[4][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[2][4] ^ D[4], rhotates[0][4]);
    B[1] = ROL64(A[4][0] ^ D[0], rhotates[1][0]);
    B[2] = ROL64(A[1][1] ^ D[1], rhotates[2][1]);
    B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    B[4] = ROL64(A[0][3] ^ D[3], rhotates[4][3]);

    C[0] ^= A[4][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[1][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[3][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[0][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[2][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[1][2] ^ D[2], rhotates[0][2]);
    B[1] = ROL64(A[3][3] ^ D[3], rhotates[1][3]);
    B[2] = ROL64(A[0][4] ^ D[4], rhotates[2][4]);
    B[3] = ROL64(A[2][0] ^ D[0], rhotates[3][0]);
    B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    C[0] ^= A[2][0] = B[0] ^ (~B[1] & B[2]);
    C[1] ^= A[4][1] = B[1] ^ (~B[2] & B[3]);
    C[2] ^= A[1][2] = B[2] ^ (~B[3] & B[4]);
    C[3] ^= A[3][3] = B[3] ^ (~B[4] & B[0]);
    C[4] ^= A[0][4] = B[4] ^ (~B[0] & B[1]);

    /* Round 4*n+3 */
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];

    B[0] =       A[0][0] ^ D[0];    /* rotate by 0 */
    B[1] = ROL64(A[0][1] ^ D[1], rhotates[1][1]);
    B[2] = ROL64(A[0][2] ^ D[2], rhotates[2][2]);
    B[3] = ROL64(A[0][3] ^ D[3], rhotates[3][3]);
    B[4] = ROL64(A[0][4] ^ D[4], rhotates[4][4]);

    /* C[0] = */ A[0][0] = B[0] ^ (~B[1] & B[2]) ^ iotas[i + 3];
    /* C[1] = */ A[0][1] = B[1] ^ (~B[2] & B[3]);
    /* C[2] = */ A[0][2] = B[2] ^ (~B[3] & B[4]);
    /* C[3] = */ A[0][3] = B[3] ^ (~B[4] & B[0]);
    /* C[4] = */ A[0][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[1][3] ^ D[3], rhotates[0][3]);
    B[1] = ROL64(A[1][4] ^ D[4], rhotates[1][4]);
    B[2] = ROL64(A[1][0] ^ D[0], rhotates[2][0]);
    B[3] = ROL64(A[1][1] ^ D[1], rhotates[3][1]);
    B[4] = ROL64(A[1][2] ^ D[2], rhotates[4][2]);

    /* C[0] ^= */ A[1][0] = B[0] ^ (~B[1] & B[2]);
    /* C[1] ^= */ A[1][1] = B[1] ^ (~B[2] & B[3]);
    /* C[2] ^= */ A[1][2] = B[2] ^ (~B[3] & B[4]);
    /* C[3] ^= */ A[1][3] = B[3] ^ (~B[4] & B[0]);
    /* C[4] ^= */ A[1][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[2][1] ^ D[1], rhotates[0][1]);
    B[1] = ROL64(A[2][2] ^ D[2], rhotates[1][2]);
    B[2] = ROL64(A[2][3] ^ D[3], rhotates[2][3]);
    B[3] = ROL64(A[2][4] ^ D[4], rhotates[3][4]);
    B[4] = ROL64(A[2][0] ^ D[0], rhotates[4][0]);

    /* C[0] ^= */ A[2][0] = B[0] ^ (~B[1] & B[2]);
    /* C[1] ^= */ A[2][1] = B[1] ^ (~B[2] & B[3]);
    /* C[2] ^= */ A[2][2] = B[2] ^ (~B[3] & B[4]);
    /* C[3] ^= */ A[2][3] = B[3] ^ (~B[4] & B[0]);
    /* C[4] ^= */ A[2][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[3][4] ^ D[4], rhotates[0][4]);
    B[1] = ROL64(A[3][0] ^ D[0], rhotates[1][0]);
    B[2] = ROL64(A[3][1] ^ D[1], rhotates[2][1]);
    B[3] = ROL64(A[3][2] ^ D[2], rhotates[3][2]);
    B[4] = ROL64(A[3][3] ^ D[3], rhotates[4][3]);

    /* C[0] ^= */ A[3][0] = B[0] ^ (~B[1] & B[2]);
    /* C[1] ^= */ A[3][1] = B[1] ^ (~B[2] & B[3]);
    /* C[2] ^= */ A[3][2] = B[2] ^ (~B[3] & B[4]);
    /* C[3] ^= */ A[3][3] = B[3] ^ (~B[4] & B[0]);
    /* C[4] ^= */ A[3][4] = B[4] ^ (~B[0] & B[1]);

    B[0] = ROL64(A[4][2] ^ D[2], rhotates[0][2]);
    B[1] = ROL64(A[4][3] ^ D[3], rhotates[1][3]);
    B[2] = ROL64(A[4][4] ^ D[4], rhotates[2][4]);
    B[3] = ROL64(A[4][0] ^ D[0], rhotates[3][0]);
    B[4] = ROL64(A[4][1] ^ D[1], rhotates[4][1]);

    /* C[0] ^= */ A[4][0] = B[0] ^ (~B[1] & B[2]);
    /* C[1] ^= */ A[4][1] = B[1] ^ (~B[2] & B[3]);
    /* C[2] ^= */ A[4][2] = B[2] ^ (~B[3] & B[4]);
    /* C[3] ^= */ A[4][3] = B[3] ^ (~B[4] & B[0]);
    /* C[4] ^= */ A[4][4] = B[4] ^ (~B[0] & B[1]);
}

static void KeccakF1600(uint64_t A[5][5])
{
    size_t i;

    for (i = 0; i < 24; i += 4) {
        FourRounds(A, i);
    }
}
#endif

static uint64_t BitInterleave(uint64_t Ai)
{
    if (BIT_INTERLEAVE) {
        uint32_t hi = (uint32_t)(Ai >> 32), lo = (uint32_t)Ai;
        uint32_t t0, t1;

        t0 = lo & 0x55555555;
        t0 |= t0 >> 1;  t0 &= 0x33333333;
        t0 |= t0 >> 2;  t0 &= 0x0f0f0f0f;
        t0 |= t0 >> 4;  t0 &= 0x00ff00ff;
        t0 |= t0 >> 8;  t0 &= 0x0000ffff;

        t1 = hi & 0x55555555;
        t1 |= t1 >> 1;  t1 &= 0x33333333;
        t1 |= t1 >> 2;  t1 &= 0x0f0f0f0f;
        t1 |= t1 >> 4;  t1 &= 0x00ff00ff;
        t1 |= t1 >> 8;  t1 <<= 16;

        lo &= 0xaaaaaaaa;
        lo |= lo << 1;  lo &= 0xcccccccc;
        lo |= lo << 2;  lo &= 0xf0f0f0f0;
        lo |= lo << 4;  lo &= 0xff00ff00;
        lo |= lo << 8;  lo >>= 16;

        hi &= 0xaaaaaaaa;
        hi |= hi << 1;  hi &= 0xcccccccc;
        hi |= hi << 2;  hi &= 0xf0f0f0f0;
        hi |= hi << 4;  hi &= 0xff00ff00;
        hi |= hi << 8;  hi &= 0xffff0000;

        Ai = ((uint64_t)(hi | lo) << 32) | (t1 | t0);
    }

    return Ai;
}

static uint64_t BitDeinterleave(uint64_t Ai)
{
    if (BIT_INTERLEAVE) {
        uint32_t hi = (uint32_t)(Ai >> 32), lo = (uint32_t)Ai;
        uint32_t t0, t1;

        t0 = lo & 0x0000ffff;
        t0 |= t0 << 8;  t0 &= 0x00ff00ff;
        t0 |= t0 << 4;  t0 &= 0x0f0f0f0f;
        t0 |= t0 << 2;  t0 &= 0x33333333;
        t0 |= t0 << 1;  t0 &= 0x55555555;

        t1 = hi << 16;
        t1 |= t1 >> 8;  t1 &= 0xff00ff00;
        t1 |= t1 >> 4;  t1 &= 0xf0f0f0f0;
        t1 |= t1 >> 2;  t1 &= 0xcccccccc;
        t1 |= t1 >> 1;  t1 &= 0xaaaaaaaa;

        lo >>= 16;
        lo |= lo << 8;  lo &= 0x00ff00ff;
        lo |= lo << 4;  lo &= 0x0f0f0f0f;
        lo |= lo << 2;  lo &= 0x33333333;
        lo |= lo << 1;  lo &= 0x55555555;

        hi &= 0xffff0000;
        hi |= hi >> 8;  hi &= 0xff00ff00;
        hi |= hi >> 4;  hi &= 0xf0f0f0f0;
        hi |= hi >> 2;  hi &= 0xcccccccc;
        hi |= hi >> 1;  hi &= 0xaaaaaaaa;

        Ai = ((uint64_t)(hi | lo) << 32) | (t1 | t0);
    }

    return Ai;
}
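
/*
 * For illustration: BitInterleave() gathers the even-numbered bits of a
 * lane into the low 32-bit word and the odd-numbered bits into the high
 * word, and BitDeinterleave() is its exact inverse, so
 * BitDeinterleave(BitInterleave(x)) == x for any x. As a concrete value,
 * BitInterleave(0x000000000000808bULL) == 0x0000008b00000001ULL, which is
 * precisely the interleaved/canonical pair stored at iotas[4] above.
 */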
/*
 * SHA3_absorb can be called multiple times; at each invocation the largest
 * multiple of |r| out of |len| bytes is processed, and the number of
 * remaining bytes is returned. This is done to spare the caller the trouble
 * of calculating the largest multiple of |r|. |r| can be viewed as the
 * blocksize. It is commonly (1600 - 256*n)/8, e.g. 168, 136, 104, 72, but
 * can also be (1600 - 448)/8 = 144. All this means that message padding and
 * intermediate sub-block buffering, byte- or bitwise, is the caller's
 * responsibility.
 */
size_t SHA3_absorb(uint64_t A[5][5], const unsigned char *inp, size_t len,
                   size_t r)
{
    uint64_t *A_flat = (uint64_t *)A;
    size_t i, w = r / 8;

    assert(r < (25 * sizeof(A[0][0])) && (r % 8) == 0);

    while (len >= r) {
        for (i = 0; i < w; i++) {
            uint64_t Ai = (uint64_t)inp[0]       | (uint64_t)inp[1] << 8  |
                          (uint64_t)inp[2] << 16 | (uint64_t)inp[3] << 24 |
                          (uint64_t)inp[4] << 32 | (uint64_t)inp[5] << 40 |
                          (uint64_t)inp[6] << 48 | (uint64_t)inp[7] << 56;
            inp += 8;

            A_flat[i] ^= BitInterleave(Ai);
        }
        KeccakF1600(A);
        len -= r;
    }

    return len;
}
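
/*
 * A one-shot usage sketch (illustrative only; sha3_256_oneshot is not part
 * of this file): the caller performs the SHA3 padding itself, i.e. appends
 * the domain-separation/padding byte (0x06 for SHA3-*, 0x1f for SHAKE) right
 * after the unprocessed message tail and sets the top bit of the last byte
 * of the final block. For SHA3-256, r = (1600 - 512)/8 = 136 bytes:
 *
 *     static void sha3_256_oneshot(const unsigned char *msg, size_t len,
 *                                  unsigned char out[32])
 *     {
 *         uint64_t A[5][5];
 *         unsigned char block[136];
 *         size_t r = sizeof(block), rem;
 *
 *         memset(A, 0, sizeof(A));
 *         rem = SHA3_absorb(A, msg, len, r);      (whole blocks consumed)
 *         memset(block, 0, r);
 *         memcpy(block, msg + (len - rem), rem);  (unprocessed tail)
 *         block[rem] = 0x06;                      (domain bits + pad start)
 *         block[r - 1] |= 0x80;                   (pad end)
 *         SHA3_absorb(A, block, r, r);
 *         SHA3_squeeze(A, out, 32, r, 0);
 *     }
 */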
/*
 * SHA3_squeeze may be called after SHA3_absorb to write the |len|-byte hash
 * value to |out|.
 * If multiple SHA3_squeeze calls are required, the output length |len| must
 * be a multiple of the blocksize, with |next| being 0 on the first call and
 * 1 on subsequent calls. It is the caller's responsibility to buffer the
 * results.
 * When only a single call to SHA3_squeeze is required, |len| can be any size
 * and |next| must be 0.
 */
void SHA3_squeeze(uint64_t A[5][5], unsigned char *out, size_t len, size_t r,
                  int next)
{
    uint64_t *A_flat = (uint64_t *)A;
    size_t i, w = r / 8;

    assert(r < (25 * sizeof(A[0][0])) && (r % 8) == 0);

    while (len != 0) {
        if (next)
            KeccakF1600(A);
        next = 1;
        for (i = 0; i < w && len != 0; i++) {
            uint64_t Ai = BitDeinterleave(A_flat[i]);

            if (len < 8) {
                for (i = 0; i < len; i++) {
                    *out++ = (unsigned char)Ai;
                    Ai >>= 8;
                }
                return;
            }

            out[0] = (unsigned char)(Ai);
            out[1] = (unsigned char)(Ai >> 8);
            out[2] = (unsigned char)(Ai >> 16);
            out[3] = (unsigned char)(Ai >> 24);
            out[4] = (unsigned char)(Ai >> 32);
            out[5] = (unsigned char)(Ai >> 40);
            out[6] = (unsigned char)(Ai >> 48);
            out[7] = (unsigned char)(Ai >> 56);
            out += 8;
            len -= 8;
        }
    }
}
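
/*
 * A streaming (XOF) usage sketch (illustrative only; shake128_stream is not
 * part of this file): once a SHAKE128 absorb phase has been completed on
 * |A| (padding byte 0x1f, as described above), fixed r-sized chunks can be
 * squeezed one block at a time, with |next| 0 on the first call and 1 on
 * every subsequent call:
 *
 *     static void shake128_stream(uint64_t A[5][5], unsigned char *out,
 *                                 size_t nblocks)
 *     {
 *         const size_t r = (1600 - 256) / 8;      (168-byte SHAKE128 blocks)
 *         size_t n;
 *
 *         for (n = 0; n < nblocks; n++)
 *             SHA3_squeeze(A, out + n * r, r, r, n != 0);
 *     }
 */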
#endif

#ifdef SELFTEST
/*
 * Post-padding one-shot implementations would look as follows:
 *
 * SHA3_224     SHA3_sponge(inp, len, out, 224/8, (1600-448)/8);
 * SHA3_256     SHA3_sponge(inp, len, out, 256/8, (1600-512)/8);
 * SHA3_384     SHA3_sponge(inp, len, out, 384/8, (1600-768)/8);
 * SHA3_512     SHA3_sponge(inp, len, out, 512/8, (1600-1024)/8);
 * SHAKE_128    SHA3_sponge(inp, len, out, d, (1600-256)/8);
 * SHAKE_256    SHA3_sponge(inp, len, out, d, (1600-512)/8);
 */
void SHA3_sponge(const unsigned char *inp, size_t len,
                 unsigned char *out, size_t d, size_t r)
{
    uint64_t A[5][5];

    memset(A, 0, sizeof(A));
    SHA3_absorb(A, inp, len, r);
    SHA3_squeeze(A, out, d, r, 0);
}

# include <stdio.h>

int main(void)
{
    /*
     * This is the 5-bit SHAKE128 test from
     * http://csrc.nist.gov/groups/ST/toolkit/examples.html#aHashing
     */
    unsigned char test[168] = { '\xf3', '\x3' };
    unsigned char out[512];
    size_t i;
    static const unsigned char result[512] = {
        0x2E, 0x0A, 0xBF, 0xBA, 0x83, 0xE6, 0x72, 0x0B,
        0xFB, 0xC2, 0x25, 0xFF, 0x6B, 0x7A, 0xB9, 0xFF,
        0xCE, 0x58, 0xBA, 0x02, 0x7E, 0xE3, 0xD8, 0x98,
        0x76, 0x4F, 0xEF, 0x28, 0x7D, 0xDE, 0xCC, 0xCA,
        0x3E, 0x6E, 0x59, 0x98, 0x41, 0x1E, 0x7D, 0xDB,
        0x32, 0xF6, 0x75, 0x38, 0xF5, 0x00, 0xB1, 0x8C,
        0x8C, 0x97, 0xC4, 0x52, 0xC3, 0x70, 0xEA, 0x2C,
        0xF0, 0xAF, 0xCA, 0x3E, 0x05, 0xDE, 0x7E, 0x4D,
        0xE2, 0x7F, 0xA4, 0x41, 0xA9, 0xCB, 0x34, 0xFD,
        0x17, 0xC9, 0x78, 0xB4, 0x2D, 0x5B, 0x7E, 0x7F,
        0x9A, 0xB1, 0x8F, 0xFE, 0xFF, 0xC3, 0xC5, 0xAC,
        0x2F, 0x3A, 0x45, 0x5E, 0xEB, 0xFD, 0xC7, 0x6C,
        0xEA, 0xEB, 0x0A, 0x2C, 0xCA, 0x22, 0xEE, 0xF6,
        0xE6, 0x37, 0xF4, 0xCA, 0xBE, 0x5C, 0x51, 0xDE,
        0xD2, 0xE3, 0xFA, 0xD8, 0xB9, 0x52, 0x70, 0xA3,
        0x21, 0x84, 0x56, 0x64, 0xF1, 0x07, 0xD1, 0x64,
        0x96, 0xBB, 0x7A, 0xBF, 0xBE, 0x75, 0x04, 0xB6,
        0xED, 0xE2, 0xE8, 0x9E, 0x4B, 0x99, 0x6F, 0xB5,
        0x8E, 0xFD, 0xC4, 0x18, 0x1F, 0x91, 0x63, 0x38,
        0x1C, 0xBE, 0x7B, 0xC0, 0x06, 0xA7, 0xA2, 0x05,
        0x98, 0x9C, 0x52, 0x6C, 0xD1, 0xBD, 0x68, 0x98,
        0x36, 0x93, 0xB4, 0xBD, 0xC5, 0x37, 0x28, 0xB2,
        0x41, 0xC1, 0xCF, 0xF4, 0x2B, 0xB6, 0x11, 0x50,
        0x2C, 0x35, 0x20, 0x5C, 0xAB, 0xB2, 0x88, 0x75,
        0x56, 0x55, 0xD6, 0x20, 0xC6, 0x79, 0x94, 0xF0,
        0x64, 0x51, 0x18, 0x7F, 0x6F, 0xD1, 0x7E, 0x04,
        0x66, 0x82, 0xBA, 0x12, 0x86, 0x06, 0x3F, 0xF8,
        0x8F, 0xE2, 0x50, 0x8D, 0x1F, 0xCA, 0xF9, 0x03,
        0x5A, 0x12, 0x31, 0xAD, 0x41, 0x50, 0xA9, 0xC9,
        0xB2, 0x4C, 0x9B, 0x2D, 0x66, 0xB2, 0xAD, 0x1B,
        0xDE, 0x0B, 0xD0, 0xBB, 0xCB, 0x8B, 0xE0, 0x5B,
        0x83, 0x52, 0x29, 0xEF, 0x79, 0x19, 0x73, 0x73,
        0x23, 0x42, 0x44, 0x01, 0xE1, 0xD8, 0x37, 0xB6,
        0x6E, 0xB4, 0xE6, 0x30, 0xFF, 0x1D, 0xE7, 0x0C,
        0xB3, 0x17, 0xC2, 0xBA, 0xCB, 0x08, 0x00, 0x1D,
        0x34, 0x77, 0xB7, 0xA7, 0x0A, 0x57, 0x6D, 0x20,
        0x86, 0x90, 0x33, 0x58, 0x9D, 0x85, 0xA0, 0x1D,
        0xDB, 0x2B, 0x66, 0x46, 0xC0, 0x43, 0xB5, 0x9F,
        0xC0, 0x11, 0x31, 0x1D, 0xA6, 0x66, 0xFA, 0x5A,
        0xD1, 0xD6, 0x38, 0x7F, 0xA9, 0xBC, 0x40, 0x15,
        0xA3, 0x8A, 0x51, 0xD1, 0xDA, 0x1E, 0xA6, 0x1D,
        0x64, 0x8D, 0xC8, 0xE3, 0x9A, 0x88, 0xB9, 0xD6,
        0x22, 0xBD, 0xE2, 0x07, 0xFD, 0xAB, 0xC6, 0xF2,
        0x82, 0x7A, 0x88, 0x0C, 0x33, 0x0B, 0xBF, 0x6D,
        0xF7, 0x33, 0x77, 0x4B, 0x65, 0x3E, 0x57, 0x30,
        0x5D, 0x78, 0xDC, 0xE1, 0x12, 0xF1, 0x0A, 0x2C,
        0x71, 0xF4, 0xCD, 0xAD, 0x92, 0xED, 0x11, 0x3E,
        0x1C, 0xEA, 0x63, 0xB9, 0x19, 0x25, 0xED, 0x28,
        0x19, 0x1E, 0x6D, 0xBB, 0xB5, 0xAA, 0x5A, 0x2A,
        0xFD, 0xA5, 0x1F, 0xC0, 0x5A, 0x3A, 0xF5, 0x25,
        0x8B, 0x87, 0x66, 0x52, 0x43, 0x55, 0x0F, 0x28,
        0x94, 0x8A, 0xE2, 0xB8, 0xBE, 0xB6, 0xBC, 0x9C,
        0x77, 0x0B, 0x35, 0xF0, 0x67, 0xEA, 0xA6, 0x41,
        0xEF, 0xE6, 0x5B, 0x1A, 0x44, 0x90, 0x9D, 0x1B,
        0x14, 0x9F, 0x97, 0xEE, 0xA6, 0x01, 0x39, 0x1C,
        0x60, 0x9E, 0xC8, 0x1D, 0x19, 0x30, 0xF5, 0x7C,
        0x18, 0xA4, 0xE0, 0xFA, 0xB4, 0x91, 0xD1, 0xCA,
        0xDF, 0xD5, 0x04, 0x83, 0x44, 0x9E, 0xDC, 0x0F,
        0x07, 0xFF, 0xB2, 0x4D, 0x2C, 0x6F, 0x9A, 0x9A,
        0x3B, 0xFF, 0x39, 0xAE, 0x3D, 0x57, 0xF5, 0x60,
        0x65, 0x4D, 0x7D, 0x75, 0xC9, 0x08, 0xAB, 0xE6,
        0x25, 0x64, 0x75, 0x3E, 0xAC, 0x39, 0xD7, 0x50,
        0x3D, 0xA6, 0xD3, 0x7C, 0x2E, 0x32, 0xE1, 0xAF,
        0x3B, 0x8A, 0xEC, 0x8A, 0xE3, 0x06, 0x9C, 0xD9
    };

    test[167] = '\x80';
    SHA3_sponge(test, sizeof(test), out, sizeof(out), sizeof(test));

    /*
     * The rationale behind keeping the output [formatted as below] is that
     * one should be able to redirect it to a file, then copy-and-paste the
     * final "output val" from the official example into another file, and
     * compare the two with diff(1).
     */
    for (i = 0; i < sizeof(out);) {
        printf("%02X", out[i]);
        printf(++i % 16 && i != sizeof(out) ? " " : "\n");
    }

    if (memcmp(out, result, sizeof(out))) {
        fprintf(stderr, "failure\n");
        return 1;
    } else {
        fprintf(stderr, "success\n");
        return 0;
    }
}
#endif