/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32 bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size, in bytes, of a unit chunk the block functions operate on.
 * HASH_LONG
 *	has to be at least 32 bits wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			int		num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the output digest string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines the host byte order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
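
/*
 * Illustration (a hedged sketch, not part of this header): once a digest
 * module has defined the macros above and included this file, callers
 * drive the resulting API in the usual Init/Update/Final pattern (Init
 * itself is provided by the digest module, not generated here).  The MD5
 * names below assume OpenSSL's <openssl/md5.h>.
 */
#if 0
#include <openssl/md5.h>

static void digest_example(const unsigned char *msg, unsigned long len,
                           unsigned char out[MD5_DIGEST_LENGTH])
	{
	MD5_CTX ctx;

	MD5_Init(&ctx);            /* sets up Nl, Nh, num and chaining values */
	MD5_Update(&ctx,msg,len);  /* HASH_UPDATE: collect and hash blocks    */
	MD5_Final(out,&ctx);       /* HASH_FINAL: pad and emit the digest     */
	}
#endif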

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif

/*
 * Engage compiler-specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *
   *					<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif

/*
 * Engage compiler-specific "fetch in reverse byte order"
 * intrinsic function if available.
 */
# if defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
  /* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
#  if (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
#   define BE_FETCH32(a)	({ register unsigned int l=(a);	\
				asm (			\
				"bswapl %0"		\
				: "=r"(l) : "0"(l));	\
				l;			\
			})
#  elif defined(__powerpc)
#   define LE_FETCH32(a)	({ register unsigned int l;	\
				asm (			\
				"lwbrx %0,0,%1"		\
				: "=r"(l)		\
				: "r"(a));		\
				l;			\
			})

#  elif defined(__sparc) && defined(ULTRASPARC)
#   define LE_FETCH32(a)	({ register unsigned int l;		\
				asm (				\
				"lda [%1]#ASI_PRIMARY_LITTLE,%0"	\
				: "=r"(l)			\
				: "r"(a));			\
				l;				\
			})
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)== 4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),		\
		ROTATE(l,16)						\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose them with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire the
 *	  compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif
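
/*
 * Illustration (a sketch, never compiled, assuming buf is 32-bit aligned):
 * REVERSE_FETCH32 loads an aligned word and swaps its bytes, so both
 * variants above map 0xAABBCCDD to 0xDDCCBBAA.
 */
#if 0
	HASH_LONG l,r;
	static const unsigned char buf[4]={0x00,0x01,0x02,0x03};

	r=REVERSE_FETCH32(buf,l);
	/* big-endian host: r == 0x03020100; little-endian host: r == 0x00010203 */
#endif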

#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
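
/*
 * Note on the portable fallback above: it is a plain 32-bit left rotation,
 * e.g. ROTATE(0x01234567,8) == 0x23456701.  The 0xffffffff mask keeps the
 * right shift clean when HASH_LONG is wider than 32 bits, and n is expected
 * to satisfy 0 < n < 32 (a shift by the full word width is undefined in C).
 * Compilers commonly recognize this idiom and emit a single rotate
 * instruction where one exists.
 */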

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  ifndef HOST_FETCH32
#   ifdef LE_FETCH32
#    define HOST_FETCH32(p,l)	LE_FETCH32(p)
#   elif defined(REVERSE_FETCH32)
#    define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
#   endif
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#  endif
# elif defined(DATA_ORDER_IS_BIG_ENDIAN)
#  ifndef HOST_FETCH32
#   ifdef BE_FETCH32
#    define HOST_FETCH32(p,l)	BE_FETCH32(p)
#   elif defined(REVERSE_FETCH32)
#    define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
#   endif
#  endif
# endif
#endif
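
/*
 * Worked example (assuming an x86 build, i.e. L_ENDIAN, hashing a
 * big-endian data order such as SHA's): the branch above selects
 * BE_FETCH32, so HOST_FETCH32(p,l) gives block code a single bswap-based
 * load where the generic HOST_c2l below would do four shift-and-OR byte
 * fetches.
 */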

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)

#endif
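
/*
 * Illustration (a sketch, never compiled): HOST_c2l and HOST_l2c are
 * inverses, and both advance the character pointer as they go, so packing
 * four bytes into a word and unpacking it again reproduces the input.
 */
#if 0
	unsigned char in[4]={1,2,3,4},out[4],*p;
	unsigned long l;

	p=in;  HOST_c2l(p,l);	/* pack 4 bytes into l in data order */
	p=out; HOST_l2c(l,p);	/* unpack l back into 4 bytes        */
	/* here memcmp(in,out,4) == 0 */
#endif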

/*
 * Time for some action:-)
 */

void HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register unsigned long l;
	int sw,sc,ew,ec;

	if (len==0) return;

	l=(c->Nl+(len<<3))&0xffffffffL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((unsigned long)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	}
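
/*
 * Two informal notes on the routine above:
 *
 * - Nh:Nl accumulate the message length in *bits* as one 64-bit value:
 *   len<<3 is added to Nl (the l < c->Nl test catches the carry) and
 *   len>>29, the high half of len*8, is added to Nh.  E.g. after a total
 *   of 2^30 input bytes, Nh==2 and Nl==0.
 *
 * - input may arrive in arbitrary pieces; c->num carries the partially
 *   filled block between calls, so splitting the input never changes the
 *   resulting digest.
 */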


void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((unsigned long)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}


void HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read.  While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i].  If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up this
	 * 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	memset((void *)c,0,sizeof(HASH_CTX));
	 */
	}
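
/*
 * Worked example of the padding performed above (assuming a 64-byte block
 * and a big-endian data order): after hashing "abc" (24 bits), the final
 * block is laid out as
 *
 *	61 62 63 80 00 00 ... 00 | 00 00 00 00 00 00 00 18
 *
 * i.e. the message, one 0x80 byte taken from end[], zero fill, then Nh and
 * Nl holding the 64-bit bit count.
 */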