misc.c

/* misc.c
 *
 * Copyright (C) 2006-2022 wolfSSL Inc.
 *
 * This file is part of wolfSSL.
 *
 * wolfSSL is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * wolfSSL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
 */
/*
DESCRIPTION
This module implements the rotate left/right (circular shift), byte swapping,
XOR, masking and memory clearing logic.
*/

#ifdef HAVE_CONFIG_H
    #include <config.h>
#endif

#include <wolfssl/wolfcrypt/settings.h>

#ifndef WOLF_CRYPT_MISC_C
#define WOLF_CRYPT_MISC_C

#include <wolfssl/wolfcrypt/misc.h>

/* inlining these functions is a huge speed increase and a small size decrease,
   because the functions are smaller than function call setup/cleanup, e.g.,
   md5 benchmark is twice as fast with inline.  If you don't want it, then
   define NO_INLINE and compile this file into wolfssl, otherwise it's used as
   a source header
 */

#ifdef NO_INLINE
    #define WC_STATIC
#else
    #define WC_STATIC static
#endif

/* Check for if compiling misc.c when not needed. */
#if !defined(WOLFSSL_MISC_INCLUDED) && !defined(NO_INLINE)
    #ifndef WOLFSSL_IGNORE_FILE_WARN
        #warning misc.c does not need to be compiled when using inline (NO_INLINE not defined)
    #endif
#else


#if defined(__ICCARM__)
    #include <intrinsics.h>
#endif


#ifdef INTEL_INTRINSICS

    #include <stdlib.h>      /* get intrinsic definitions */

    /* for non visual studio probably need no long version, 32 bit only
     * i.e., _rotl and _rotr */
    #pragma intrinsic(_lrotl, _lrotr)

    WC_STATIC WC_INLINE word32 rotlFixed(word32 x, word32 y)
    {
        return y ? _lrotl(x, y) : x;
    }

    WC_STATIC WC_INLINE word32 rotrFixed(word32 x, word32 y)
    {
        return y ? _lrotr(x, y) : x;
    }

#elif defined(__CCRX__)

    #include <builtin.h>      /* get intrinsic definitions */

    #if !defined(NO_INLINE)

    #define rotlFixed(x, y) _builtin_rotl(x, y)
    #define rotrFixed(x, y) _builtin_rotr(x, y)

    #else /* create real function */

    WC_STATIC WC_INLINE word32 rotlFixed(word32 x, word32 y)
    {
        return _builtin_rotl(x, y);
    }

    WC_STATIC WC_INLINE word32 rotrFixed(word32 x, word32 y)
    {
        return _builtin_rotr(x, y);
    }

    #endif

#else /* generic */

    /* This routine performs a left circular shift (rotation) of <x> by <y> bits. */
    WC_STATIC WC_INLINE word32 rotlFixed(word32 x, word32 y)
    {
        return (x << y) | (x >> (sizeof(y) * 8 - y));
    }

    /* This routine performs a right circular shift (rotation) of <x> by <y> bits. */
    WC_STATIC WC_INLINE word32 rotrFixed(word32 x, word32 y)
    {
        return (x >> y) | (x << (sizeof(y) * 8 - y));
    }

#endif
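
/* Usage sketch (illustrative values): rotlFixed/rotrFixed rotate a 32-bit
 * word, so bits shifted out on one side re-enter on the other:
 *
 *     word32 r = rotlFixed(0x80000001U, 1);    r == 0x00000003
 *     word32 s = rotrFixed(0x00000003U, 1);    s == 0x80000001
 *
 * The generic C versions assume 0 < y < 32; a rotate count of 0 would shift by
 * the full word width, which is undefined in C (the intrinsic-based versions
 * above guard against y == 0 explicitly).
 */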

#ifdef WC_RC2

/* This routine performs a left circular shift (rotation) of <x> by <y> bits. */
WC_STATIC WC_INLINE word16 rotlFixed16(word16 x, word16 y)
{
    return (x << y) | (x >> (sizeof(y) * 8 - y));
}

/* This routine performs a right circular shift (rotation) of <x> by <y> bits. */
WC_STATIC WC_INLINE word16 rotrFixed16(word16 x, word16 y)
{
    return (x >> y) | (x << (sizeof(y) * 8 - y));
}

#endif /* WC_RC2 */

/* This routine performs a byte swap of a 32-bit word value. */
#if defined(__CCRX__) && !defined(NO_INLINE) /* shortest version for CC-RX */
    #define ByteReverseWord32(value) _builtin_revl(value)
#else
WC_STATIC WC_INLINE word32 ByteReverseWord32(word32 value)
{
#ifdef PPC_INTRINSICS
    /* PPC: load reverse indexed instruction */
    return (word32)__lwbrx(&value, 0);
#elif defined(__ICCARM__)
    return (word32)__REV(value);
#elif defined(KEIL_INTRINSICS)
    return (word32)__rev(value);
#elif defined(__CCRX__)
    return (word32)_builtin_revl(value);
#elif defined(WOLF_ALLOW_BUILTIN) && \
        defined(__GNUC_PREREQ) && __GNUC_PREREQ(4, 3)
    return (word32)__builtin_bswap32(value);
#elif defined(WOLFSSL_BYTESWAP32_ASM) && defined(__GNUC__) && \
      defined(__aarch64__)
    __asm__ volatile (
        "REV32 %0, %0  \n"
        : "+r" (value)
        :
    );
    return value;
#elif defined(WOLFSSL_BYTESWAP32_ASM) && defined(__GNUC__) && \
      (defined(__thumb__) || defined(__arm__))
    __asm__ volatile (
        "REV %0, %0  \n"
        : "+r" (value)
        :
    );
    return value;
#elif defined(FAST_ROTATE)
    /* 5 instructions with rotate instruction, 9 without */
    return (rotrFixed(value, 8U) & 0xff00ff00) |
           (rotlFixed(value, 8U) & 0x00ff00ff);
#else
    /* 6 instructions with rotate instruction, 8 without */
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
#endif
}
#endif /* __CCRX__ */

/* This routine byte-swaps each 32-bit word of <in> into <out>; <byteCount> is
   the buffer length in bytes. */
WC_STATIC WC_INLINE void ByteReverseWords(word32* out, const word32* in,
                                          word32 byteCount)
{
    word32 count = byteCount/(word32)sizeof(word32), i;

    for (i = 0; i < count; i++)
        out[i] = ByteReverseWord32(in[i]);
}
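
/* Usage sketch (illustrative): on a little-endian host, byte reversal turns a
 * native word into its big-endian wire representation and back:
 *
 *     word32 v = ByteReverseWord32(0x01020304UL);    v == 0x04030201
 *
 * ByteReverseWords() applies the same swap across a buffer; byteCount is in
 * bytes, so a 16-byte buffer converts four word32 values.
 */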

#if defined(WORD64_AVAILABLE) && !defined(WOLFSSL_NO_WORD64_OPS)

WC_STATIC WC_INLINE word64 rotlFixed64(word64 x, word64 y)
{
    return (x << y) | (x >> (sizeof(y) * 8 - y));
}

WC_STATIC WC_INLINE word64 rotrFixed64(word64 x, word64 y)
{
    return (x >> y) | (x << (sizeof(y) * 8 - y));
}

WC_STATIC WC_INLINE word64 ByteReverseWord64(word64 value)
{
#if defined(WOLF_ALLOW_BUILTIN) && defined(__GNUC_PREREQ) && __GNUC_PREREQ(4, 3)
    return (word64)__builtin_bswap64(value);
#elif defined(WOLFCRYPT_SLOW_WORD64)
    return (word64)((word64)ByteReverseWord32((word32) value)) << 32 |
                    (word64)ByteReverseWord32((word32)(value >> 32));
#else
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) |
            ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) |
            ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed64(value, 32U);
#endif
}

WC_STATIC WC_INLINE void ByteReverseWords64(word64* out, const word64* in,
                                            word32 byteCount)
{
    word32 count = byteCount/(word32)sizeof(word64), i;

    for (i = 0; i < count; i++)
        out[i] = ByteReverseWord64(in[i]);
}

#endif /* WORD64_AVAILABLE && !WOLFSSL_NO_WORD64_OPS */

#ifndef WOLFSSL_NO_XOR_OPS
/* This routine performs a bitwise XOR operation of <*a> and <*b> for <n> number
of wolfssl_words, placing the result in <*r>. */
WC_STATIC WC_INLINE void XorWordsOut(wolfssl_word* r, const wolfssl_word* a,
                                     const wolfssl_word* b, word32 n)
{
    word32 i;

    for (i = 0; i < n; i++)
        r[i] = a[i] ^ b[i];
}

/* This routine performs a bitwise XOR operation of <*buf> and <*mask> of
<count> bytes, placing the result in <*out>. */
WC_STATIC WC_INLINE void xorbufout(void* out, const void* buf, const void* mask,
                                   word32 count)
{
    if (((wc_ptr_t)out | (wc_ptr_t)buf | (wc_ptr_t)mask | count) %
                                                         WOLFSSL_WORD_SIZE == 0)
        XorWordsOut( (wolfssl_word*)out, (wolfssl_word*)buf,
                     (const wolfssl_word*)mask, count / WOLFSSL_WORD_SIZE);
    else {
        word32 i;
        byte* o = (byte*)out;
        byte* b = (byte*)buf;
        const byte* m = (const byte*)mask;

        for (i = 0; i < count; i++)
            o[i] = b[i] ^ m[i];
    }
}

/* This routine performs a bitwise XOR operation of <*r> and <*a> for <n> number
of wolfssl_words, placing the result in <*r>. */
WC_STATIC WC_INLINE void XorWords(wolfssl_word* r, const wolfssl_word* a, word32 n)
{
    word32 i;

    for (i = 0; i < n; i++)
        r[i] ^= a[i];
}

/* This routine performs a bitwise XOR operation of <*buf> and <*mask> of
<count> bytes, placing the result in <*buf>. */
WC_STATIC WC_INLINE void xorbuf(void* buf, const void* mask, word32 count)
{
    if (((wc_ptr_t)buf | (wc_ptr_t)mask | count) % WOLFSSL_WORD_SIZE == 0)
        XorWords( (wolfssl_word*)buf,
                  (const wolfssl_word*)mask, count / WOLFSSL_WORD_SIZE);
    else {
        word32 i;
        byte* b = (byte*)buf;
        const byte* m = (const byte*)mask;

        for (i = 0; i < count; i++)
            b[i] ^= m[i];
    }
}
#endif
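
/* Usage sketch (illustrative): xorbuf() XORs a mask into a buffer in place,
 * e.g. combining a keystream with plaintext; applying the same mask twice
 * restores the original bytes:
 *
 *     byte data[4] = { 0x00, 0xFF, 0x0F, 0xF0 };
 *     byte mask[4] = { 0xAA, 0xAA, 0xAA, 0xAA };
 *     xorbuf(data, mask, 4);     data == { 0xAA, 0x55, 0xA5, 0x5A }
 *     xorbuf(data, mask, 4);     data == { 0x00, 0xFF, 0x0F, 0xF0 }
 *
 * When the pointers and the length are all word-aligned the word-at-a-time
 * path (XorWords) is taken; otherwise it falls back to byte-wise XOR.
 */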

#ifndef WOLFSSL_NO_FORCE_ZERO
/* This routine fills the first <len> bytes of the memory area pointed to by
   <mem> with zeros, ensuring the writes are not skipped by compiler
   optimization. */
WC_STATIC WC_INLINE void ForceZero(void* mem, word32 len)
{
    volatile byte* z = (volatile byte*)mem;

#if (defined(WOLFSSL_X86_64_BUILD) || defined(WOLFSSL_AARCH64_BUILD)) \
            && defined(WORD64_AVAILABLE)
    volatile word64* w;
    #ifndef WOLFSSL_UNALIGNED_64BIT_ACCESS
        word32 l = (sizeof(word64) - ((size_t)z & (sizeof(word64)-1))) &
                                                             (sizeof(word64)-1);

        if (len < l) l = len;
        len -= l;
        while (l--) *z++ = 0;
    #endif
    for (w = (volatile word64*)z; len >= sizeof(*w); len -= sizeof(*w))
        *w++ = 0;
    z = (volatile byte*)w;
#endif

    while (len--) *z++ = 0;
}
#endif
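
/* Usage sketch (illustrative): clear key material once it is no longer needed.
 * A plain memset() of a buffer that is never read again may be optimized away;
 * ForceZero()'s volatile stores are intended to survive such optimization:
 *
 *     byte key[32];
 *     ...use key...
 *     ForceZero(key, sizeof(key));
 */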

#ifndef WOLFSSL_NO_CONST_CMP
/* check all length bytes for equality, return 0 on success */
WC_STATIC WC_INLINE int ConstantCompare(const byte* a, const byte* b, int length)
{
    int i;
    int compareSum = 0;

    for (i = 0; i < length; i++) {
        compareSum |= a[i] ^ b[i];
    }

    return compareSum;
}
#endif
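
/* Usage sketch (illustrative): compare two MAC/tag values without leaking,
 * through an early exit, the position of the first differing byte:
 *
 *     if (ConstantCompare(computedMac, receivedMac, 16) != 0) {
 *         (reject the record)
 *     }
 *
 * The loop always touches all <length> bytes, so the running time does not
 * depend on where (or whether) the inputs differ.
 */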

#ifndef WOLFSSL_HAVE_MIN
    #define WOLFSSL_HAVE_MIN
    #if defined(HAVE_FIPS) && !defined(min) /* so ifdef check passes */
        #define min min
    #endif
    /* returns the smaller of a and b */
    WC_STATIC WC_INLINE word32 min(word32 a, word32 b)
    {
        return a > b ? b : a;
    }
#endif /* !WOLFSSL_HAVE_MIN */

#ifndef WOLFSSL_HAVE_MAX
    #define WOLFSSL_HAVE_MAX
    #if defined(HAVE_FIPS) && !defined(max) /* so ifdef check passes */
        #define max max
    #endif
    /* returns the larger of a and b */
    WC_STATIC WC_INLINE word32 max(word32 a, word32 b)
    {
        return a > b ? a : b;
    }
#endif /* !WOLFSSL_HAVE_MAX */

#ifndef WOLFSSL_NO_INT_ENCODE
/* converts a 32 bit integer to 24 bit */
WC_STATIC WC_INLINE void c32to24(word32 in, word24 out)
{
    out[0] = (in >> 16) & 0xff;
    out[1] = (in >>  8) & 0xff;
    out[2] =  in & 0xff;
}

/* convert 16 bit integer to opaque */
WC_STATIC WC_INLINE void c16toa(word16 wc_u16, byte* c)
{
    c[0] = (wc_u16 >> 8) & 0xff;
    c[1] =  wc_u16 & 0xff;
}

/* convert 32 bit integer to opaque */
WC_STATIC WC_INLINE void c32toa(word32 wc_u32, byte* c)
{
    c[0] = (wc_u32 >> 24) & 0xff;
    c[1] = (wc_u32 >> 16) & 0xff;
    c[2] = (wc_u32 >>  8) & 0xff;
    c[3] =  wc_u32 & 0xff;
}
#endif

#ifndef WOLFSSL_NO_INT_DECODE
/* convert a 24 bit integer into a 32 bit one */
WC_STATIC WC_INLINE void c24to32(const word24 wc_u24, word32* wc_u32)
{
    *wc_u32 = ((word32)wc_u24[0] << 16) | (wc_u24[1] << 8) | wc_u24[2];
}

/* convert opaque to 24 bit integer */
WC_STATIC WC_INLINE void ato24(const byte* c, word32* wc_u24)
{
    *wc_u24 = ((word32)c[0] << 16) | (c[1] << 8) | c[2];
}

/* convert opaque to 16 bit integer */
WC_STATIC WC_INLINE void ato16(const byte* c, word16* wc_u16)
{
    *wc_u16 = (word16) ((c[0] << 8) | (c[1]));
}

/* convert opaque to 32 bit integer */
WC_STATIC WC_INLINE void ato32(const byte* c, word32* wc_u32)
{
    *wc_u32 = ((word32)c[0] << 24) | ((word32)c[1] << 16) | (c[2] << 8) | c[3];
}

/* convert an ASCII decimal digit to its numeric value */
WC_STATIC WC_INLINE word32 btoi(byte b)
{
    return (word32)(b - 0x30);
}
#endif
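
/* Usage sketch (illustrative): the c*toa/ato* helpers read and write integers
 * in big-endian (network) byte order, so a round trip is the identity:
 *
 *     byte out[4];
 *     word32 v;
 *     c32toa(0x11223344UL, out);    out == { 0x11, 0x22, 0x33, 0x44 }
 *     ato32(out, &v);               v   == 0x11223344
 */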

WC_STATIC WC_INLINE signed char HexCharToByte(char ch)
{
    signed char ret = (signed char)ch;
    if (ret >= '0' && ret <= '9')
        ret -= '0';
    else if (ret >= 'A' && ret <= 'F')
        ret -= 'A' - 10;
    else if (ret >= 'a' && ret <= 'f')
        ret -= 'a' - 10;
    else
        ret = -1; /* error case - return code must be signed */
    return ret;
}

WC_STATIC WC_INLINE char ByteToHex(byte in)
{
    static const char kHexChar[] = { '0', '1', '2', '3', '4', '5', '6', '7',
                                     '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
    return (char)(kHexChar[in & 0xF]);
}

WC_STATIC WC_INLINE int ByteToHexStr(byte in, char* out)
{
    if (out == NULL)
        return -1;

    out[0] = ByteToHex(in >> 4);
    out[1] = ByteToHex(in & 0xf);

    return 0;
}
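
/* Usage sketch (illustrative): converting between a byte and its two-character
 * upper-case hex form. ByteToHexStr() writes exactly two characters and does
 * not NUL-terminate, so terminate the buffer yourself when printing:
 *
 *     char hex[3];
 *     ByteToHexStr(0x2A, hex);      hex[0] == '2', hex[1] == 'A'
 *     hex[2] = '\0';
 *
 *     signed char v = HexCharToByte('a');    v == 10; -1 on non-hex input
 */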

#ifndef WOLFSSL_NO_CT_OPS
/* Constant time - mask set when a > b. */
WC_STATIC WC_INLINE byte ctMaskGT(int a, int b)
{
    return (byte)((((word32)a - b - 1) >> 31) - 1);
}

/* Constant time - mask set when a >= b. */
WC_STATIC WC_INLINE byte ctMaskGTE(int a, int b)
{
    return (byte)((((word32)a - b    ) >> 31) - 1);
}

/* Constant time - mask set when a >= b. */
WC_STATIC WC_INLINE int ctMaskIntGTE(int a, int b)
{
    return (int)((((word32)a - b    ) >> 31) - 1);
}

/* Constant time - mask set when a < b. */
WC_STATIC WC_INLINE byte ctMaskLT(int a, int b)
{
    return (byte)((((word32)b - a - 1) >> 31) - 1);
}

/* Constant time - mask set when a <= b. */
WC_STATIC WC_INLINE byte ctMaskLTE(int a, int b)
{
    return (byte)((((word32)b - a    ) >> 31) - 1);
}

/* Constant time - mask set when a == b. */
WC_STATIC WC_INLINE byte ctMaskEq(int a, int b)
{
    return (byte)(~ctMaskGT(a, b)) & (byte)(~ctMaskLT(a, b));
}

/* Constant time - sets 16 bit integer mask when a > b */
WC_STATIC WC_INLINE word16 ctMask16GT(int a, int b)
{
    return (word16)((((word32)a - b - 1) >> 31) - 1);
}

/* Constant time - sets 16 bit integer mask when a >= b */
WC_STATIC WC_INLINE word16 ctMask16GTE(int a, int b)
{
    return (word16)((((word32)a - b    ) >> 31) - 1);
}

/* Constant time - sets 16 bit integer mask when a < b. */
WC_STATIC WC_INLINE word16 ctMask16LT(int a, int b)
{
    return (word16)((((word32)b - a - 1) >> 31) - 1);
}

/* Constant time - sets 16 bit integer mask when a <= b. */
WC_STATIC WC_INLINE word16 ctMask16LTE(int a, int b)
{
    return (word16)((((word32)b - a    ) >> 31) - 1);
}

/* Constant time - sets 16 bit integer mask when a == b. */
WC_STATIC WC_INLINE word16 ctMask16Eq(int a, int b)
{
    return (word16)(~ctMask16GT(a, b)) & (word16)(~ctMask16LT(a, b));
}

/* Constant time - mask set when a != b. */
WC_STATIC WC_INLINE byte ctMaskNotEq(int a, int b)
{
    return (byte)ctMaskGT(a, b) | (byte)ctMaskLT(a, b);
}

/* Constant time - select a when mask is set and b otherwise. */
WC_STATIC WC_INLINE byte ctMaskSel(byte m, byte a, byte b)
{
    return (byte)((b & ((byte)~(word32)m)) | (a & m));
}

/* Constant time - select integer a when mask is set and integer b otherwise. */
WC_STATIC WC_INLINE int ctMaskSelInt(byte m, int a, int b)
{
    return (b & (~(signed int)(signed char)m)) |
           (a & ( (signed int)(signed char)m));
}

/* Constant time - bit set when a <= b. */
WC_STATIC WC_INLINE byte ctSetLTE(int a, int b)
{
    return (byte)(((word32)a - b - 1) >> 31);
}

/* Constant time - conditionally copy size bytes from src to dst if mask is set
 */
WC_STATIC WC_INLINE void ctMaskCopy(byte mask, byte* dst, byte* src,
                                    word16 size)
{
    int i;
    for (i = 0; i < size; ++i) {
        dst[i] ^= (dst[i] ^ src[i]) & mask;
    }
}
#endif
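
/* Usage sketch (illustrative): the ct* helpers avoid secret-dependent branches
 * by turning a comparison into an all-ones / all-zeros mask and then selecting
 * with bitwise operations:
 *
 *     byte m = ctMaskEq(code, expected);       m == 0xFF if equal, else 0x00
 *     byte r = ctMaskSel(m, okByte, errByte);  r == okByte when m is 0xFF
 *
 *     ctMaskCopy(m, dst, src, len);    copies src over dst only when m is set,
 *                                      while always touching every byte
 *
 * The comparisons rely on the sign bit of a 32-bit subtraction, so they assume
 * the inputs fit comfortably within 31 bits.
 */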

#if defined(WOLFSSL_W64_WRAPPER)
#if defined(WORD64_AVAILABLE) && !defined(WOLFSSL_W64_WRAPPER_TEST)

WC_STATIC WC_INLINE void w64Increment(w64wrapper *n) {
    n->n++;
}

WC_STATIC WC_INLINE void w64Decrement(w64wrapper *n) {
    n->n--;
}

WC_STATIC WC_INLINE byte w64Equal(w64wrapper a, w64wrapper b) {
    return (a.n == b.n);
}

WC_STATIC WC_INLINE word32 w64GetLow32(w64wrapper n) {
    return (word32)n.n;
}

WC_STATIC WC_INLINE word32 w64GetHigh32(w64wrapper n) {
    return (word32)(n.n >> 32);
}

WC_STATIC WC_INLINE void w64SetLow32(w64wrapper *n, word32 low) {
    n->n = (n->n & (~(word64)(0xffffffff))) | low;
}

WC_STATIC WC_INLINE w64wrapper w64Add32(w64wrapper a, word32 b, byte *wrap) {
    a.n = a.n + b;
    if (a.n < b && wrap != NULL)
        *wrap = 1;

    return a;
}

WC_STATIC WC_INLINE w64wrapper w64Sub32(w64wrapper a, word32 b, byte *wrap)
{
    if (a.n < b && wrap != NULL)
        *wrap = 1;

    a.n = a.n - b;
    return a;
}

WC_STATIC WC_INLINE byte w64GT(w64wrapper a, w64wrapper b)
{
    return a.n > b.n;
}

WC_STATIC WC_INLINE byte w64IsZero(w64wrapper a)
{
    return a.n == 0;
}

WC_STATIC WC_INLINE void c64toa(const w64wrapper *a, byte *out)
{
#ifdef BIG_ENDIAN_ORDER
    XMEMCPY(out, &a->n, sizeof(a->n));
#else
    word64 _out;
    _out = ByteReverseWord64(a->n);
    XMEMCPY(out, &_out, sizeof(_out));
#endif /* BIG_ENDIAN_ORDER */
}

WC_STATIC WC_INLINE void ato64(const byte *in, w64wrapper *w64)
{
#ifdef BIG_ENDIAN_ORDER
    XMEMCPY(&w64->n, in, sizeof(w64->n));
#else
    word64 _in;
    XMEMCPY(&_in, in, sizeof(_in));
    w64->n = ByteReverseWord64(_in);
#endif /* BIG_ENDIAN_ORDER */
}

WC_STATIC WC_INLINE w64wrapper w64From32(word32 hi, word32 lo)
{
    w64wrapper ret;
    ret.n = ((word64)hi << 32) | lo;
    return ret;
}

WC_STATIC WC_INLINE byte w64GTE(w64wrapper a, w64wrapper b)
{
    return a.n >= b.n;
}

WC_STATIC WC_INLINE byte w64LT(w64wrapper a, w64wrapper b)
{
    return a.n < b.n;
}

WC_STATIC WC_INLINE w64wrapper w64Sub(w64wrapper a, w64wrapper b)
{
    a.n -= b.n;
    return a;
}

WC_STATIC WC_INLINE void w64Zero(w64wrapper *a)
{
    a->n = 0;
}

#else

WC_STATIC WC_INLINE void w64Increment(w64wrapper *n)
{
    n->n[1]++;
    if (n->n[1] == 0)
        n->n[0]++;
}

WC_STATIC WC_INLINE void w64Decrement(w64wrapper *n) {
    if (n->n[1] == 0)
        n->n[0]--;
    n->n[1]--;
}

WC_STATIC WC_INLINE byte w64Equal(w64wrapper a, w64wrapper b)
{
    return (a.n[0] == b.n[0] && a.n[1] == b.n[1]);
}

WC_STATIC WC_INLINE word32 w64GetLow32(w64wrapper n) {
    return n.n[1];
}

WC_STATIC WC_INLINE word32 w64GetHigh32(w64wrapper n) {
    return n.n[0];
}

WC_STATIC WC_INLINE void w64SetLow32(w64wrapper *n, word32 low)
{
    n->n[1] = low;
}

WC_STATIC WC_INLINE w64wrapper w64Add32(w64wrapper a, word32 b, byte *wrap)
{
    a.n[1] = a.n[1] + b;
    if (a.n[1] < b) {
        a.n[0]++;
        if (wrap != NULL && a.n[0] == 0)
            *wrap = 1;
    }

    return a;
}

WC_STATIC WC_INLINE w64wrapper w64Sub32(w64wrapper a, word32 b, byte *wrap)
{
    byte _underflow = 0;
    if (a.n[1] < b)
        _underflow = 1;

    a.n[1] -= b;
    if (_underflow) {
        if (a.n[0] == 0 && wrap != NULL)
            *wrap = 1;
        a.n[0]--;
    }

    return a;
}

WC_STATIC WC_INLINE w64wrapper w64Sub(w64wrapper a, w64wrapper b)
{
    if (a.n[1] < b.n[1])
        a.n[0]--;
    a.n[1] -= b.n[1];
    a.n[0] -= b.n[0];
    return a;
}

WC_STATIC WC_INLINE void w64Zero(w64wrapper *a)
{
    a->n[0] = a->n[1] = 0;
}

WC_STATIC WC_INLINE byte w64GT(w64wrapper a, w64wrapper b)
{
    if (a.n[0] > b.n[0])
        return 1;
    if (a.n[0] == b.n[0])
        return a.n[1] > b.n[1];
    return 0;
}

WC_STATIC WC_INLINE byte w64GTE(w64wrapper a, w64wrapper b)
{
    if (a.n[0] > b.n[0])
        return 1;
    if (a.n[0] == b.n[0])
        return a.n[1] >= b.n[1];
    return 0;
}

WC_STATIC WC_INLINE byte w64IsZero(w64wrapper a)
{
    return a.n[0] == 0 && a.n[1] == 0;
}

WC_STATIC WC_INLINE void c64toa(w64wrapper *a, byte *out)
{
#ifdef BIG_ENDIAN_ORDER
    word32 *_out = (word32*)(out);
    _out[0] = a->n[0];
    _out[1] = a->n[1];
#else
    c32toa(a->n[0], out);
    c32toa(a->n[1], out + 4);
#endif /* BIG_ENDIAN_ORDER */
}

WC_STATIC WC_INLINE void ato64(const byte *in, w64wrapper *w64)
{
#ifdef BIG_ENDIAN_ORDER
    const word32 *_in = (const word32*)(in);
    w64->n[0] = *_in;
    w64->n[1] = *(_in + 1);
#else
    ato32(in, &w64->n[0]);
    ato32(in + 4, &w64->n[1]);
#endif /* BIG_ENDIAN_ORDER */
}

WC_STATIC WC_INLINE w64wrapper w64From32(word32 hi, word32 lo)
{
    w64wrapper w64;
    w64.n[0] = hi;
    w64.n[1] = lo;
    return w64;
}

WC_STATIC WC_INLINE byte w64LT(w64wrapper a, w64wrapper b)
{
    if (a.n[0] < b.n[0])
        return 1;
    if (a.n[0] == b.n[0])
        return a.n[1] < b.n[1];
    return 0;
}

#endif /* WORD64_AVAILABLE && !WOLFSSL_W64_WRAPPER_TEST */
#endif /* WOLFSSL_W64_WRAPPER */
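
/* Usage sketch (illustrative): the w64wrapper API gives 64-bit counter
 * arithmetic even on targets without a native 64-bit type; the word64 and
 * two-word32 implementations above behave the same way:
 *
 *     byte wrap = 0;
 *     w64wrapper n = w64From32(0, 0xFFFFFFFF);
 *     n = w64Add32(n, 1, &wrap);
 *     w64GetHigh32(n) == 1, w64GetLow32(n) == 0, wrap still 0
 *
 * <wrap> is set only when the full 64-bit value overflows (or underflows, for
 * the subtraction helpers).
 */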

#undef WC_STATIC

#endif /* !WOLFSSL_MISC_INCLUDED && !NO_INLINE */

#endif /* WOLF_CRYPT_MISC_C */