
/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */
/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks
 * of 32-bit values and invokes a block function that performs the
 * actual hash calculation.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bits wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the final hash value.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
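
/*
 * A minimal porting sketch (illustrative only, using hypothetical
 * TOY_* names): the implementation file defines the macros above and
 * then includes this header, which expands into the Update, Transform
 * and Final routines:
 *
 *	#define DATA_ORDER_IS_BIG_ENDIAN
 *	#define HASH_LONG		unsigned int
 *	#define HASH_CTX		TOY_CTX
 *	#define HASH_CBLOCK		64
 *	#define HASH_UPDATE		TOY_Update
 *	#define HASH_TRANSFORM		TOY_Transform
 *	#define HASH_FINAL		TOY_Final
 *	#define HASH_BLOCK_HOST_ORDER	toy_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	toy_block_data_order
 *	#define HASH_MAKE_STRING(c,s)	do { ... } while (0)
 *	#include "md32_common.h"
 *
 * where TOY_CTX supplies at least the Nl, Nh, data and num members
 * shown in the porting guide.
 */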
#include <string.h>	/* memcpy() and size_t, used by HASH_UPDATE/HASH_TRANSFORM */

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif

/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *				<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(				\
		l=*(const HASH_LONG *)(a),			\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
		ROTATE(l,16)					\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire
 *	  the compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif

#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
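
/*
 * Worked example (illustrative, not from the original header):
 * ROTATE(0x12345678,8) == 0x34567812, the top byte wrapping around to
 * the bottom. The 0xffffffff mask in the portable fallback keeps the
 * result correct should HASH_LONG be wider than 32 bits. Likewise
 * REVERSE_FETCH32 reverses the byte order of the fetched word, e.g.
 * a fetched value of 0x0A0B0C0D becomes 0x0D0C0B0A.
 */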

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
#   if !defined(B_ENDIAN)
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#   endif
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),	\
			 l|=(((unsigned long)(*((c)++)))<<16),	\
			 l|=(((unsigned long)(*((c)++)))<< 8),	\
			 l|=(((unsigned long)(*((c)++)))     ),	\
			 l)
#endif
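/*
 * The HOST_p_c2l* macros below (and their little-endian counterparts
 * further down) rely on deliberate switch fall-through: the count
 * argument (n or sc) is the number of bytes of the current word that
 * have already been collected, so decoding resumes mid-word and runs
 * through the remaining cases.
 */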
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))     ),	\
			 l|=(((unsigned long)(*((c)++)))<< 8),	\
			 l|=(((unsigned long)(*((c)++)))<<16),	\
			 l|=(((unsigned long)(*((c)++)))<<24),	\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif

#endif
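
/*
 * Worked example (illustrative, not from the original header): with
 * DATA_ORDER_IS_LITTLE_ENDIAN, HOST_c2l reads the byte sequence
 * 78 56 34 12 as l==0x12345678 and advances c by 4; HOST_l2c is the
 * exact inverse. The big-endian variants read the same value from
 * the byte sequence 12 34 56 78.
 */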

/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add chars to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = (unsigned int)len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
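
/*
 * Usage sketch (illustrative, not from the original header), assuming
 * the MD5 instantiation from the porting guide above:
 *
 *	MD5_CTX ctx;
 *	unsigned char md[MD5_DIGEST_LENGTH];
 *
 *	MD5_Init(&ctx);
 *	MD5_Update(&ctx, "abc", 3);	   expands to HASH_UPDATE
 *	MD5_Final(md, &ctx);		   expands to HASH_FINAL
 *
 * HASH_UPDATE buffers partial blocks in c->data and hands complete
 * HASH_CBLOCK-byte chunks to the block function, so callers may feed
 * input in pieces of arbitrary size.
 */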

void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}

int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read. While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up
	 * this 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
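
/*
 * Illustration (not from the original header): HASH_FINAL applies the
 * usual Merkle-Damgard strengthening. For a 3-byte message "abc" with
 * a 64-byte block, the final block is laid out as
 *
 *	'a' 'b' 'c' 0x80 0x00 ... 0x00 | 64-bit bit count (here 24)
 *
 * i.e. a single 0x80 byte, zero padding, and the message length in
 * bits stored in the last two words (Nh,Nl order for big-endian data,
 * Nl,Nh for little-endian).
 */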

#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>
 */
#endif
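
/*
 * Typical use (illustrative, as in e.g. OpenSSL's md5_dgst.c): a block
 * function declares its working variables as
 *
 *	register MD32_REG_T A,B,C,D;
 *
 * letting the compiler keep them in full-width registers on LP64
 * targets without the cost of keeping the upper 32 bits zeroed.
 */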