/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */
/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks
 * of 32-bit values and invokes a block function that performs the
 * actual hash calculation.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *      this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *      size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *      has to be at least 32 bits wide; if it's wider, then
 *      HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *      context structure that at least contains the following
 *      members:
 *              typedef struct {
 *                      ...
 *                      HASH_LONG       Nl,Nh;
 *                      HASH_LONG       data[HASH_LBLOCK];
 *                      unsigned int    num;
 *                      ...
 *              } HASH_CTX;
 * HASH_UPDATE
 *      name of the "Update" function, implemented here.
 * HASH_TRANSFORM
 *      name of the "Transform" function, implemented here.
 * HASH_FINAL
 *      name of the "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *      name of the "block" function treating *aligned* input message
 *      in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *      name of the "block" function treating *unaligned* input message
 *      in original (data) byte order, implemented externally (it is
 *      actually optional if data and host are of the same
 *      "endianness").
 * HASH_MAKE_STRING
 *      macro converting context variables to the output hash string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *      defines host byte order.
 * HASH_LONG_LOG2
 *      defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *      assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *      alternative "block" function capable of treating
 *      aligned input message in original (data) order,
 *      implemented externally.
 *
 * MD5 example:
 *
 *      #define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *      #define HASH_LONG               MD5_LONG
 *      #define HASH_LONG_LOG2          MD5_LONG_LOG2
 *      #define HASH_CTX                MD5_CTX
 *      #define HASH_CBLOCK             MD5_CBLOCK
 *      #define HASH_LBLOCK             MD5_LBLOCK
 *      #define HASH_UPDATE             MD5_Update
 *      #define HASH_TRANSFORM          MD5_Transform
 *      #define HASH_FINAL              MD5_Final
 *      #define HASH_BLOCK_HOST_ORDER   md5_block_host_order
 *      #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
 *
 * <appro@fy.chalmers.se>
 */
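/*
 * For completeness, a hedged sketch of the one obligatory macro the MD5
 * example above does not show: HASH_MAKE_STRING serializes the chaining
 * variables (assumed here to be the A-D members of an MD5_CTX) into the
 * output buffer via HOST_l2c, which is defined later in this file.
 * Illustration only, not a normative definition:
 *
 *      #define HASH_MAKE_STRING(c,s)   do {    \
 *              unsigned long ll;               \
 *              ll=(c)->A; HOST_l2c(ll,(s));    \
 *              ll=(c)->B; HOST_l2c(ll,(s));    \
 *              ll=(c)->C; HOST_l2c(ll,(s));    \
 *              ll=(c)->D; HOST_l2c(ll,(s));    \
 *              } while (0)
 */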
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK     (HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2  2
#endif
/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)   _lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)  __rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)  ( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)  __rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   * <appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
                                : "I"(n), "0"(a)        \
                                : "cc");                \
                           ret;                         \
                        })
#  elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
        defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "rlwinm %0,%1,%2,0,31"  \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
                           ret;                         \
                        })
#  endif
# endif
#endif /* PEDANTIC */
#if HASH_LONG_LOG2==2  /* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)    (                                       \
                l=*(const HASH_LONG *)(a),                              \
                ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))  \
                )
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)    (                               \
                l=*(const HASH_LONG *)(a),                      \
                l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),    \
                ROTATE(l,16)                                    \
                )
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It was rewritten as above for two reasons:
 *      - RISCs aren't good at long constants and have to explicitly
 *        compose 'em with several (well, usually 2) instructions in a
 *        register before performing the actual operation and (as you
 *        already realized:-) having the same constant should inspire
 *        the compiler to permanently allocate the only register for it;
 *      - most modern CPUs have two ALUs, but usually only one has
 *        circuitry for shifts:-( this minor tweak inspires the compiler
 *        to schedule shift instructions in a better way...
 *
 * <appro@fy.chalmers.se>
 */
#endif
#endif
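/*
 * A worked example of REVERSE_FETCH32 (illustrative, not from the
 * original source). Taking the rotate variant with l = 0xAABBCCDD:
 *
 *      ROTATE(l,8)              = 0xBBCCDDAA, & 0x00FF00FF -> 0x00CC00AA
 *      ROTATE(l&0x00FF00FF,24)  = ROTATE(0x00BB00DD,24)    =  0xDD00BB00
 *
 * and the OR of the two gives 0xDDCCBBAA, the byte-swapped word. The
 * shift-based variant produces the same result.
 */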
#ifndef ROTATE
#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
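/*
 * Illustration (not part of the original header): this fallback is a
 * plain 32-bit left rotate, e.g. ROTATE(0x80000001,1) == 0x00000003.
 * The &0xffffffff keeps the right-shifted half correct when HASH_LONG
 * is wider than 32 bits.
 */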
/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 * <appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED        HASH_BLOCK_HOST_ORDER
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED        HASH_BLOCK_HOST_ORDER
#  endif
# endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
#   if !defined(B_ENDIAN)
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)        ({ unsigned int r=*((const unsigned int *)(c)); \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   (c)+=4; (l)=r;                       })
#   define HOST_l2c(l,c)        ({ unsigned int r=(l);                  \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   *((unsigned int *)(c))=r; (c)+=4; r; })
#   endif
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))    ),          \
                         l)
#endif
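/*
 * Worked example (illustrative, not from the original source): in this
 * big-endian branch, HOST_c2l applied to the bytes {0x01,0x02,0x03,0x04}
 * yields l == 0x01020304 and advances c by 4; HOST_l2c performs the
 * inverse, storing 0x01020304 back as {0x01,0x02,0x03,0x04}.
 */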
#define HOST_p_c2l(c,l,n)       { \
                        switch (n) { \
                        case 0: l =((unsigned long)(*((c)++)))<<24; \
                        case 1: l|=((unsigned long)(*((c)++)))<<16; \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8; \
                        case 3: l|=((unsigned long)(*((c)++)));     \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) { \
                        switch (sc) { \
                        case 0: l =((unsigned long)(*((c)++)))<<24; \
                                if (--len == 0) break; \
                        case 1: l|=((unsigned long)(*((c)++)))<<16; \
                                if (--len == 0) break; \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8; \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       { \
                        l=0; (c)+=n; \
                        switch (n) { \
                        case 3: l =((unsigned long)(*(--(c))))<< 8; \
                        case 2: l|=((unsigned long)(*(--(c))))<<16; \
                        case 1: l|=((unsigned long)(*(--(c))))<<24; \
                                } }
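/*
 * Illustration (not from the original source): the switch cases above
 * fall through deliberately. For n==3 and input bytes {0x01,0x02,0x03},
 * HOST_c2l_p first advances c by 3, then walks backwards:
 *
 *      l  = 0x03 <<  8;  l |= 0x02 << 16;  l |= 0x01 << 24;
 *
 * giving l == 0x01020300, a partial big-endian word with the valid
 * bytes in the most significant positions, and leaving c where it
 * started, as the NOTE above says.
 */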
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)    )&0xff),      \
                         l)
#endif
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))    ),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<<24),          \
                         l)
#endif
#define HOST_p_c2l(c,l,n)       { \
                        switch (n) { \
                        case 0: l =((unsigned long)(*((c)++)));     \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8; \
                        case 2: l|=((unsigned long)(*((c)++)))<<16; \
                        case 3: l|=((unsigned long)(*((c)++)))<<24; \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) { \
                        switch (sc) { \
                        case 0: l =((unsigned long)(*((c)++)));     \
                                if (--len == 0) break; \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8; \
                                if (--len == 0) break; \
                        case 2: l|=((unsigned long)(*((c)++)))<<16; \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       { \
                        l=0; (c)+=n; \
                        switch (n) { \
                        case 3: l =((unsigned long)(*(--(c))))<<16; \
                        case 2: l|=((unsigned long)(*(--(c))))<< 8; \
                        case 1: l|=((unsigned long)(*(--(c))));     \
                                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)    )&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         l)
#endif

#endif
/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
        {
        const unsigned char *data=data_;
        register HASH_LONG * p;
        register HASH_LONG l;
        size_t sw,sc,ew,ec;

        if (len==0) return 1;

        l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
        /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
         * Wei Dai <weidai@eskimo.com> for pointing it out. */
        if (l < c->Nl) /* overflow */
                c->Nh++;
        c->Nh+=(len>>29);       /* might cause compiler warning on 16-bit */
        c->Nl=l;

        if (c->num != 0)
                {
                p=c->data;
                sw=c->num>>2;
                sc=c->num&0x03;

                if ((c->num+len) >= HASH_CBLOCK)
                        {
                        l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
                        for (; sw<HASH_LBLOCK; sw++)
                                {
                                HOST_c2l(data,l); p[sw]=l;
                                }
                        HASH_BLOCK_HOST_ORDER (c,p,1);
                        len-=(HASH_CBLOCK-c->num);
                        c->num=0;
                        /* drop through and do the rest */
                        }
                else
                        {
                        c->num+=(unsigned int)len;
                        if ((sc+len) < 4) /* ugly, add chars to a word */
                                {
                                l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
                                }
                        else
                                {
                                ew=(c->num>>2);
                                ec=(c->num&0x03);
                                if (sc)
                                        l=p[sw];
                                HOST_p_c2l(data,l,sc);
                                p[sw++]=l;
                                for (; sw < ew; sw++)
                                        {
                                        HOST_c2l(data,l); p[sw]=l;
                                        }
                                if (ec)
                                        {
                                        HOST_c2l_p(data,l,ec); p[sw]=l;
                                        }
                                }
                        return 1;
                        }
                }

        sw=len/HASH_CBLOCK;
        if (sw > 0)
                {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
                /*
                 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
                 * only if sizeof(HASH_LONG)==4.
                 */
                if ((((size_t)data)%4) == 0)
                        {
                        /* data is properly aligned so that we can cast it: */
                        HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
                        sw*=HASH_CBLOCK;
                        data+=sw;
                        len-=sw;
                        }
                else
#if !defined(HASH_BLOCK_DATA_ORDER)
                        while (sw--)
                                {
                                memcpy (p=c->data,data,HASH_CBLOCK);
                                HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
                                data+=HASH_CBLOCK;
                                len-=HASH_CBLOCK;
                                }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
                        {
                        HASH_BLOCK_DATA_ORDER(c,data,sw);
                        sw*=HASH_CBLOCK;
                        data+=sw;
                        len-=sw;
                        }
#endif
                }

        if (len!=0)
                {
                p = c->data;
                c->num = (unsigned int)len;
                ew=len>>2;      /* words to copy */
                ec=len&0x03;
                for (; ew; ew--,p++)
                        {
                        HOST_c2l(data,l); *p=l;
                        }
                HOST_c2l_p(data,l,ec);
                *p=l;
                }
        return 1;
        }
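/*
 * Worked detail (illustrative, not from the original source): Nh:Nl
 * maintain the message length in *bits* as a 64-bit counter. That is
 * why len is shifted left by 3 (i.e. multiplied by 8) into Nl, and why
 * the bits that overflow the 32-bit Nl arrive in Nh via len>>29, which
 * is len>>32 after accounting for the <<3.
 */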
void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
        {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
        if ((((size_t)data)%4) == 0)
                /* data is properly aligned so that we can cast it: */
                HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
        else
#if !defined(HASH_BLOCK_DATA_ORDER)
                {
                memcpy (c->data,data,HASH_CBLOCK);
                HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
                }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
        HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
        }
int HASH_FINAL (unsigned char *md, HASH_CTX *c)
        {
        register HASH_LONG *p;
        register unsigned long l;
        register int i,j;
        static const unsigned char end[4]={0x80,0x00,0x00,0x00};
        const unsigned char *cp=end;

        /* c->num should definitely have room for at least one more byte. */
        p=c->data;
        i=c->num>>2;
        j=c->num&0x03;

#if 0
        /* purify often complains about the following line as an
         * Uninitialized Memory Read. While this can be true, the
         * following p_c2l macro will reset l when that case is true.
         * This is because j&0x03 contains the number of 'valid' bytes
         * already in p[i]. If and only if j&0x03 == 0, the UMR will
         * occur but this is also the only time p_c2l will do
         * l= *(cp++) instead of l|= *(cp++)
         * Many thanks to Alex Tang <altitude@cic.net> for picking up
         * this 'potential bug' */
#ifdef PURIFY
        if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
        l=p[i];
#else
        l = (j==0) ? 0 : p[i];
#endif
        HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

        if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
                {
                if (i<HASH_LBLOCK) p[i]=0;
                HASH_BLOCK_HOST_ORDER (c,p,1);
                i=0;
                }
        for (; i<(HASH_LBLOCK-2); i++)
                p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
        p[HASH_LBLOCK-2]=c->Nh;
        p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
        p[HASH_LBLOCK-2]=c->Nl;
        p[HASH_LBLOCK-1]=c->Nh;
#endif
        HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
        HASH_MAKE_STRING(c,md);
#endif

        c->num=0;
        /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
         * but I'm not worried :-)
        OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
         */
        return 1;
        }
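/*
 * Usage sketch (illustrative, not part of the original header). With
 * the MD5 instantiation from the porting guide, the collector provides
 * the usual streaming interface; the Init function is supplied by the
 * digest implementation, not by this file:
 *
 *      MD5_CTX ctx;
 *      unsigned char md[MD5_DIGEST_LENGTH];
 *
 *      MD5_Init(&ctx);
 *      MD5_Update(&ctx, "hello ", 6);
 *      MD5_Update(&ctx, "world", 5);
 *      MD5_Final(md, &ctx);
 *
 * Partial input is buffered in ctx.data until a full HASH_CBLOCK (64
 * bytes for MD5) accumulates; MD5_Final appends the 0x80 pad and the
 * bit count, then runs the block function one last time.
 */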
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is that we don't
 * really care if the 32 MSBs of a 64-bit register get polluted
 * with eventual overflows, as we *save* only the 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping the 32 MSBs zeroed, resulting in a 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 * <appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 * <appro@fy.chalmers.se>
 */
#endif
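/*
 * Illustration (hedged, not from the original source): a block function
 * built on this header would typically declare its working variables as
 *
 *      register MD32_REG_T A,B,C,D;
 *
 * so the chaining state lives in full-width registers while only the
 * low 32 bits are ever stored back into the context.
 */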