misc.h

00001 #ifndef CRYPTOPP_MISC_H
00002 #define CRYPTOPP_MISC_H
00003 
00004 #include "cryptlib.h"
00005 #include "smartptr.h"
00006 
00007 #ifdef _MSC_VER
00008     #include <stdlib.h>
00009     #if _MSC_VER >= 1400
00010         // VC2005 workaround: disable declarations that conflict with winnt.h
00011         #define _interlockedbittestandset CRYPTOPP_DISABLED_INTRINSIC_1
00012         #define _interlockedbittestandreset CRYPTOPP_DISABLED_INTRINSIC_2
00013         #include <intrin.h>
00014         #undef _interlockedbittestandset
00015         #undef _interlockedbittestandreset
00016         #define CRYPTOPP_FAST_ROTATE(x) 1
00017     #elif _MSC_VER >= 1300
00018         #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32 | (x) == 64)
00019     #else
00020         #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00021     #endif
00022 #elif (defined(__MWERKS__) && TARGET_CPU_PPC) || \
00023     (defined(__GNUC__) && (defined(_ARCH_PWR2) || defined(_ARCH_PWR) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || defined(_ARCH_COM)))
00024     #define CRYPTOPP_FAST_ROTATE(x) ((x) == 32)
00025 #elif defined(__GNUC__) && (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86) // depend on GCC's peephole optimization to generate rotate instructions
00026     #define CRYPTOPP_FAST_ROTATE(x) 1
00027 #else
00028     #define CRYPTOPP_FAST_ROTATE(x) 0
00029 #endif
00030 
00031 #ifdef __BORLANDC__
00032 #include <mem.h>
00033 #endif
00034 
00035 #if defined(__GNUC__) && defined(__linux__)
00036 #define CRYPTOPP_BYTESWAP_AVAILABLE
00037 #include <byteswap.h>
00038 #endif
00039 
00040 NAMESPACE_BEGIN(CryptoPP)
00041 
00042 // ************** compile-time assertion ***************
00043 
// Compile-time assertion helper: instantiating CompileAssert<false>
// declares a char array of size 2*0-1 == -1, which fails to compile;
// CompileAssert<true> is a valid one-element array.
template <bool b>
struct CompileAssert
{
    static char dummy[2*b-1];
};
00049 
00050 #define CRYPTOPP_COMPILE_ASSERT(assertion) CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, __LINE__)
00051 #if defined(CRYPTOPP_EXPORTS) || defined(CRYPTOPP_IMPORTS)
00052 #define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance)
00053 #else
00054 #define CRYPTOPP_COMPILE_ASSERT_INSTANCE(assertion, instance) static CompileAssert<(assertion)> CRYPTOPP_ASSERT_JOIN(cryptopp_assert_, instance)
00055 #endif
00056 #define CRYPTOPP_ASSERT_JOIN(X, Y) CRYPTOPP_DO_ASSERT_JOIN(X, Y)
00057 #define CRYPTOPP_DO_ASSERT_JOIN(X, Y) X##Y
00058 
00059 // ************** misc classes ***************
00060 
// An empty class, used as a do-nothing base class or template default.
class CRYPTOPP_DLL Empty
{
};
00064 
//! Utility class that publicly inherits from two base classes at once.
template <class BASE1, class BASE2>
class CRYPTOPP_NO_VTABLE TwoBases : public BASE1, public BASE2
{
};
00070 
//! Utility class that publicly inherits from three base classes at once.
template <class BASE1, class BASE2, class BASE3>
class CRYPTOPP_NO_VTABLE ThreeBases : public BASE1, public BASE2, public BASE3
{
};
00076 
// Holds an instance of T as a protected member; deriving from this lets a
// class carry a member that is constructed before other base classes.
template <class T>
class ObjectHolder
{
protected:
    T m_object;
};
00083 
// Inherit from this class to disallow copying (pre-C++11 idiom: the copy
// constructor and assignment operator are declared private and never defined).
class NotCopyable
{
public:
    NotCopyable() {}
private:
    NotCopyable(const NotCopyable &);
    void operator=(const NotCopyable &);
};
00092 
// Default factory functor for Singleton: allocates a T with operator new.
template <class T>
struct NewObject
{
    T* operator()() const {return new T;}
};
00098 
/*! This function safely initializes a static object in a multithreaded environment without using locks.
    It may leak memory when two threads try to initialize the static object at the same time
    but this should be acceptable since each static object is only initialized once per session.
*/
// F is a factory functor returning T* (NewObject<T> by default); the
// `instance` parameter lets callers create several independent singletons
// of the same type T.
template <class T, class F = NewObject<T>, int instance=0>
class Singleton
{
public:
    Singleton(F objectFactory = F()) : m_objectFactory(objectFactory) {}

    // prevent this function from being inlined
    CRYPTOPP_NOINLINE const T & Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const;

private:
    F m_objectFactory;
};
00115 
// Returns a reference to the lazily created singleton object.
// s_objectState is a tiny state machine: 0 = not created, 1 = creation in
// progress, 2 = created.  A caller that observes state 1 spins (goto retry)
// until the creating thread finishes.
// NOTE(review): s_objectState is a plain char with no atomics or memory
// barriers, so this is only best-effort thread safety — consistent with the
// "may leak / races are tolerated" comment on the Singleton class.
template <class T, class F, int instance>
const T & Singleton<T, F, instance>::Ref(CRYPTOPP_NOINLINE_DOTDOTDOT) const
{
    static simple_ptr<T> s_pObject;
    static char s_objectState = 0;

retry:
    switch (s_objectState)
    {
    case 0:
        s_objectState = 1;          // claim creation
        try
        {
            s_pObject.m_p = m_objectFactory();
        }
        catch(...)
        {
            s_objectState = 0;      // creation failed; allow a retry
            throw;
        }
        s_objectState = 2;          // object is ready
        break;
    case 1:
        goto retry;                 // another thread is creating; busy-wait
    default:
        break;
    }
    return *s_pObject.m_p;
}
00145 
00146 // ************** misc functions ***************
00147 
00148 #if (!__STDC_WANT_SECURE_LIB__)
00149 inline void memcpy_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00150 {
00151     if (count > sizeInBytes)
00152         throw InvalidArgument("memcpy_s: buffer overflow");
00153     memcpy(dest, src, count);
00154 }
00155 
00156 inline void memmove_s(void *dest, size_t sizeInBytes, const void *src, size_t count)
00157 {
00158     if (count > sizeInBytes)
00159         throw InvalidArgument("memmove_s: buffer overflow");
00160     memmove(dest, src, count);
00161 }
00162 #endif
00163 
00164 // can't use std::min or std::max in MSVC60 or Cygwin 1.1.0
// Returns the smaller of a and b (a wins ties), using only operator<.
template <class T> inline const T& STDMIN(const T& a, const T& b)
{
    if (b < a)
        return b;
    return a;
}
00169 
// Returns min(a, b) where a is known non-negative, avoiding signed/unsigned
// conversion surprises: the compile-time assertion requires the narrower of
// the two types to be unsigned, and the comparison is done in the wider type.
template <class T1, class T2> inline const T1 UnsignedMin(const T1& a, const T2& b)
{
    CRYPTOPP_COMPILE_ASSERT((sizeof(T1)<=sizeof(T2) && T2(-1)>0) || (sizeof(T1)>sizeof(T2) && T1(-1)>0));
    assert(a==0 || a>0);    // GCC workaround: get rid of the warning "comparison is always true due to limited range of data type"
    assert(b>=0);

    if (sizeof(T1) > sizeof(T2))
        return (T1)b < a ? (T1)b : a;
    return b < (T2)a ? (T1)b : a;
}
00181 
// Returns the larger of a and b (b wins ties), using only operator<.
template <class T> inline const T& STDMAX(const T& a, const T& b)
{
    if (a < b)
        return b;
    return a;
}
00186 
00187 #define RETURN_IF_NONZERO(x) size_t returnedValue = x; if (returnedValue) return returnedValue
00188 
00189 // this version of the macro is fastest on Pentium 3 and Pentium 4 with MSVC 6 SP5 w/ Processor Pack
00190 #define GETBYTE(x, y) (unsigned int)byte((x)>>(8*(y)))
00191 // these may be faster on other CPUs/compilers
00192 // #define GETBYTE(x, y) (unsigned int)(((x)>>(8*(y)))&255)
00193 // #define GETBYTE(x, y) (((byte *)&(x))[y])
00194 
00195 #define CRYPTOPP_GET_BYTE_AS_BYTE(x, y) byte((x)>>(8*(y)))
00196 
// Returns the XOR of all bits of value: 1 if an odd number of bits are set.
template <class T>
unsigned int Parity(T value)
{
    unsigned int shift = 8*sizeof(value);
    while ((shift >>= 1) > 0)
        value ^= value >> shift;    // fold the top half onto the bottom half
    return (unsigned int)value&1;
}
00204 
// Returns the number of bytes needed to represent value (0 when value==0).
template <class T>
unsigned int BytePrecision(const T &value)
{
    if (!value)
        return 0;

    // Scan from the most significant byte down to the least; stop at the
    // first byte position that still holds nonzero bits.
    unsigned int i = sizeof(value);
    while (--i > 0)
    {
        if (value >> (8*i))
            break;
    }
    return i + 1;
}
00224 
// Returns the number of significant bits in value (0 when value==0),
// i.e. the index of the highest set bit plus one.
template <class T>
unsigned int BitPrecision(const T &value)
{
    if (!value)
        return 0;

    unsigned int bits = 8*sizeof(value);
    while (!(value >> (bits-1)))
        --bits;                     // top bit not set; shrink the width
    return bits;
}
00244 
// Returns the low `size` bits of value; when size covers the whole type,
// value is returned unchanged.
template <class T>
inline T Crop(T value, size_t size)
{
    if (size >= 8*sizeof(value))
        return value;
    return T(value & ((T(1) << size) - 1));
}
00253 
// Converts `from` to type T2, storing the result in `to`.  Returns true only
// when the conversion preserved both the value and its sign.
template <class T1, class T2>
inline bool SafeConvert(T1 from, T2 &to)
{
    to = (T2)from;
    return from == to && (from > 0) == (to > 0);
}
00262 
// Rounds a bit count up to the number of whole bytes that contain it.
inline size_t BitsToBytes(size_t bitCount)
{
    return (bitCount+7) >> 3;
}
00267 
// Rounds a byte count up to a count of machine words (WORD_SIZE bytes each).
inline size_t BytesToWords(size_t byteCount)
{
    return ((byteCount+WORD_SIZE-1)/WORD_SIZE);
}
00272 
// Rounds a bit count up to a count of machine words (WORD_BITS bits each).
inline size_t BitsToWords(size_t bitCount)
{
    return ((bitCount+WORD_BITS-1)/(WORD_BITS));
}
00277 
// Rounds a bit count up to a count of double words (2*WORD_BITS bits each).
inline size_t BitsToDwords(size_t bitCount)
{
    return ((bitCount+2*WORD_BITS-1)/(2*WORD_BITS));
}
00282 
00283 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *buf, const byte *mask, size_t count);
00284 CRYPTOPP_DLL void CRYPTOPP_API xorbuf(byte *output, const byte *input, const byte *mask, size_t count);
00285 
// Returns true when n is a positive power of 2 (exactly one bit set).
template <class T>
inline bool IsPowerOf2(const T &n)
{
    if (n <= 0)
        return false;
    return (n & (n-1)) == 0;    // clearing the lowest set bit leaves zero
}
00291 
// Returns a mod b, where b must be a power of 2; computed with a bit mask.
template <class T1, class T2>
inline T2 ModPowerOf2(const T1 &a, const T2 &b)
{
    assert(IsPowerOf2(b));
    const T2 mask = b-1;
    return T2(a) & mask;
}
00298 
// Rounds n down to the nearest multiple of m: a mask when m is a power of 2,
// an integer remainder otherwise.
template <class T1, class T2>
inline T1 RoundDownToMultipleOf(const T1 &n, const T2 &m)
{
    if (!IsPowerOf2(m))
        return n - n%m;
    return n - ModPowerOf2(n, m);
}
00307 
// Rounds n up to the nearest multiple of m; throws InvalidArgument when the
// intermediate n+m-1 overflows (detected by it wrapping below n).
template <class T1, class T2>
inline T1 RoundUpToMultipleOf(const T1 &n, const T2 &m)
{
    if (n+m-1 < n)
        throw InvalidArgument("RoundUpToMultipleOf: integer overflow");
    return RoundDownToMultipleOf(n+m-1, m);
}
00315 
// Returns the alignment (in bytes) this library uses for type T.  On x86/x64
// small types report 1 because unaligned access is supported in hardware.
// The dummy parameter works around VC60's lack of explicit template
// argument syntax for functions.
template <class T>
inline unsigned int GetAlignmentOf(T *dummy=NULL)   // VC60 workaround
{
#if CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X86
    if (sizeof(T) < 16)
        return 1;           // alignment not needed on x86 and x64
#endif

#if (_MSC_VER >= 1300)
    return __alignof(T);
#elif defined(__GNUC__)
    return __alignof__(T);
#elif defined(CRYPTOPP_SLOW_WORD64)
    return UnsignedMin(4U, sizeof(T));
#else
    return sizeof(T);
#endif
}
00334 
// Returns true if pointer p lies on the given byte boundary.  alignment==1
// always passes; power-of-2 alignments use a mask, others a remainder.
inline bool IsAlignedOn(const void *p, unsigned int alignment)
{
    return alignment==1 || (IsPowerOf2(alignment) ? ModPowerOf2((size_t)p, alignment) == 0 : (size_t)p % alignment == 0);
}
00339 
// Returns true if p satisfies the alignment this library requires for T
// (the dummy parameter is a VC60 workaround, as in GetAlignmentOf).
template <class T>
inline bool IsAligned(const void *p, T *dummy=NULL) // VC60 workaround
{
    return IsAlignedOn(p, GetAlignmentOf<T>());
}
00345 
00346 #ifdef IS_LITTLE_ENDIAN
00347     typedef LittleEndian NativeByteOrder;
00348 #else
00349     typedef BigEndian NativeByteOrder;
00350 #endif
00351 
// Returns the ByteOrder enum value for this machine's native byte order.
inline ByteOrder GetNativeByteOrder()
{
    return NativeByteOrder::ToEnum();
}
00356 
// Returns true if `order` matches this machine's native byte order.
inline bool NativeByteOrderIs(ByteOrder order)
{
    return order == GetNativeByteOrder();
}
00361 
// Converts an integer to its string representation in the given base;
// digit values above 9 use lowercase letters starting at 'a'.
// NOTE(review): for signed T the most negative value (e.g. INT_MIN) cannot
// be negated by 0-a without overflow — callers should avoid that input.
template <class T>
std::string IntToString(T a, unsigned int base = 10)
{
    if (a == 0)
        return "0";
    bool negate = false;
    if (a < 0)
    {
        negate = true;
        a = 0-a;    // VC .NET does not like -a
    }
    std::string result;
    while (a > 0)
    {
        T digit = a % base;
        result = char((digit < 10 ? '0' : ('a' - 10)) + digit) + result;
        a /= base;
    }
    if (negate)
        result = "-" + result;
    return result;
}
00384 
// Returns a - b, clamped at zero instead of wrapping below it.
template <class T1, class T2>
inline T1 SaturatingSubtract(const T1 &a, const T2 &b)
{
    if (a > b)
        return T1(a - b);
    return T1(0);
}
00390 
// Maps an object's IsForwardTransformation() flag onto the CipherDir enum.
template <class T>
inline CipherDir GetCipherDir(const T &obj)
{
    return obj.IsForwardTransformation() ? ENCRYPTION : DECRYPTION;
}
00396 
00397 CRYPTOPP_DLL void CallNewHandler();
00398 
00399 inline void IncrementCounterByOne(byte *inout, unsigned int s)
00400 {
00401     for (int i=s-1, carry=1; i>=0 && carry; i--)
00402         carry = !++inout[i];
00403 }
00404 
// Writes input+1 (as a big-endian s-byte counter) into output.  The carry
// loop rewrites only the low-order bytes it touches; the remaining i+1
// untouched high-order bytes are then block-copied from input.
inline void IncrementCounterByOne(byte *output, const byte *input, unsigned int s)
{
    int i, carry;
    for (i=s-1, carry=1; i>=0 && carry; i--)
        carry = ((output[i] = input[i]+1) == 0);
    memcpy_s(output, s, input, i+1);
}
00412 
00413 // ************** rotate functions ***************
00414 
// Rotates x left by y bits; y must be less than the bit width of T.
// The y==0 case is returned directly: the rotate expression would otherwise
// shift right by the full word size, which is undefined behavior in C++.
template <class T> inline T rotlFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00420 
// Rotates x right by y bits; y must be less than the bit width of T.
// The y==0 case is returned directly: the rotate expression would otherwise
// shift left by the full word size, which is undefined behavior in C++.
template <class T> inline T rotrFixed(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00426 
// Rotates x left by a runtime-variable y; y must be less than the bit width
// of T.  Guard y==0 explicitly — shifting by the full width is undefined.
template <class T> inline T rotlVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00432 
// Rotates x right by a runtime-variable y; y must be less than the bit width
// of T.  Guard y==0 explicitly — shifting by the full width is undefined.
template <class T> inline T rotrVariable(T x, unsigned int y)
{
    assert(y < sizeof(T)*8);
    return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00438 
// Rotates x left by y mod the bit width of T.  After reduction y may be 0,
// in which case the unguarded expression would shift right by the full word
// size (undefined behavior); return x unchanged instead.
template <class T> inline T rotlMod(T x, unsigned int y)
{
    y %= sizeof(T)*8;
    return y ? T((x<<y) | (x>>(sizeof(T)*8-y))) : x;
}
00444 
// Rotates x right by y mod the bit width of T.  After reduction y may be 0,
// in which case the unguarded expression would shift left by the full word
// size (undefined behavior); return x unchanged instead.
template <class T> inline T rotrMod(T x, unsigned int y)
{
    y %= sizeof(T)*8;
    return y ? T((x>>y) | (x<<(sizeof(T)*8-y))) : x;
}
00450 
#ifdef _MSC_VER

// MSVC specializations that map the rotate templates onto compiler rotate
// intrinsics (_lrotl/_lrotr for word32; _rotl64/_rotr64, _rotl16/_rotr16,
// _rotl8/_rotr8 on newer compilers).  The Fixed/Variable forms keep the
// "y ? ... : x" guard used elsewhere; the Mod forms pass y straight through.
// NOTE(review): the Mod forms rely on the intrinsics reducing the rotate
// count modulo the word size — confirm against the compiler documentation.

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotl(x, y) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _lrotr(x, y) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotl(x, y);
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _lrotr(x, y);
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return _lrotl(x, y);
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return _lrotr(x, y);
}

#if _MSC_VER >= 1300

template<> inline word64 rotlFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl64(x, y) : x;
}

template<> inline word64 rotrFixed<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr64(x, y) : x;
}

template<> inline word64 rotlVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl64(x, y);
}

template<> inline word64 rotrVariable<word64>(word64 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr64(x, y);
}

template<> inline word64 rotlMod<word64>(word64 x, unsigned int y)
{
    return _rotl64(x, y);
}

template<> inline word64 rotrMod<word64>(word64 x, unsigned int y)
{
    return _rotr64(x, y);
}

#endif // #if _MSC_VER >= 1300

#if _MSC_VER >= 1400 && (!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000)

template<> inline word16 rotlFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl16(x, y) : x;
}

template<> inline word16 rotrFixed<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr16(x, y) : x;
}

template<> inline word16 rotlVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl16(x, y);
}

template<> inline word16 rotrVariable<word16>(word16 x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr16(x, y);
}

template<> inline word16 rotlMod<word16>(word16 x, unsigned int y)
{
    return _rotl16(x, y);
}

template<> inline word16 rotrMod<word16>(word16 x, unsigned int y)
{
    return _rotr16(x, y);
}

template<> inline byte rotlFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotl8(x, y) : x;
}

template<> inline byte rotrFixed<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return y ? _rotr8(x, y) : x;
}

template<> inline byte rotlVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotl8(x, y);
}

template<> inline byte rotrVariable<byte>(byte x, unsigned int y)
{
    assert(y < 8*sizeof(x));
    return _rotr8(x, y);
}

template<> inline byte rotlMod<byte>(byte x, unsigned int y)
{
    return _rotl8(x, y);
}

template<> inline byte rotrMod<byte>(byte x, unsigned int y)
{
    return _rotr8(x, y);
}

#endif // #if _MSC_VER >= 1400

#endif // #ifdef _MSC_VER
00598 
#if (defined(__MWERKS__) && TARGET_CPU_PPC)

// CodeWarrior/PowerPC specializations using the rlwinm (rotate left by
// immediate) and rlwnm (rotate left by register) intrinsics; a right rotate
// by y is expressed as a left rotate by 32-y.

template<> inline word32 rotlFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,y,0,31) : x;
}

template<> inline word32 rotrFixed<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return y ? __rlwinm(x,32-y,0,31) : x;
}

template<> inline word32 rotlVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrVariable<word32>(word32 x, unsigned int y)
{
    assert(y < 32);
    return (__rlwnm(x,32-y,0,31));
}

template<> inline word32 rotlMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,y,0,31));
}

template<> inline word32 rotrMod<word32>(word32 x, unsigned int y)
{
    return (__rlwnm(x,32-y,0,31));
}

#endif // #if (defined(__MWERKS__) && TARGET_CPU_PPC)
00636 
00637 // ************** endian reversal ***************
00638 
// Returns byte `index` of value: index 0 is the least significant byte in
// little-endian order and the most significant byte in big-endian order.
template <class T>
inline unsigned int GetByte(ByteOrder order, T value, unsigned int index)
{
    if (order == LITTLE_ENDIAN_ORDER)
        return GETBYTE(value, index);
    else
        return GETBYTE(value, sizeof(T)-index-1);
}
00647 
// Byte-order reversal of a single byte is the identity.
inline byte ByteReverse(byte value)
{
    return value;
}
00652 
// Swaps the two bytes of a 16-bit word, using the fastest primitive
// available: bswap_16, the MSVC byte-swap intrinsic, or an 8-bit rotate.
inline word16 ByteReverse(word16 value)
{
#ifdef CRYPTOPP_BYTESWAP_AVAILABLE
    return bswap_16(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ushort(value);
#else
    return rotlFixed(value, 8U);
#endif
}
00663 
// Reverses the four bytes of a 32-bit word, picking the fastest available
// implementation: x86 bswap, bswap_32, PPC lwbrx, the MSVC intrinsic, or
// portable rotate/mask arithmetic.
inline word32 ByteReverse(word32 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_32(value);
#elif defined(__MWERKS__) && TARGET_CPU_PPC
    return (word32)__lwbrx(&value,0);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_ulong(value);
#elif CRYPTOPP_FAST_ROTATE(32)
    // 5 instructions with rotate instruction, 9 without
    return (rotrFixed(value, 8U) & 0xff00ff00) | (rotlFixed(value, 8U) & 0x00ff00ff);
#else
    // 6 instructions with rotate instruction, 8 without
    value = ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8);
    return rotlFixed(value, 16U);
#endif
}
00684 
#ifdef WORD64_AVAILABLE
// Reverses the eight bytes of a 64-bit word.  On platforms where 64-bit
// operations are slow, it is done as two 32-bit reversals; otherwise swap
// bytes, then 16-bit groups, then the two 32-bit halves.
inline word64 ByteReverse(word64 value)
{
#if defined(__GNUC__) && defined(CRYPTOPP_X86_ASM_AVAILABLE) && defined(__x86_64__)
    __asm__ ("bswap %0" : "=r" (value) : "0" (value));
    return value;
#elif defined(CRYPTOPP_BYTESWAP_AVAILABLE)
    return bswap_64(value);
#elif defined(_MSC_VER) && _MSC_VER >= 1300
    return _byteswap_uint64(value);
#elif defined(CRYPTOPP_SLOW_WORD64)
    return (word64(ByteReverse(word32(value))) << 32) | ByteReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xFF00FF00FF00FF00)) >> 8) | ((value & W64LIT(0x00FF00FF00FF00FF)) << 8);
    value = ((value & W64LIT(0xFFFF0000FFFF0000)) >> 16) | ((value & W64LIT(0x0000FFFF0000FFFF)) << 16);
    return rotlFixed(value, 32U);
#endif
}
#endif
00704 
// Reverses the order of the 8 bits: swap adjacent bits, then bit pairs;
// the final 4-bit rotate exchanges the two nibbles.
inline byte BitReverse(byte value)
{
    value = ((value & 0xAA) >> 1) | ((value & 0x55) << 1);
    value = ((value & 0xCC) >> 2) | ((value & 0x33) << 2);
    return rotlFixed(value, 4U);
}
00711 
// Reverses the order of the 16 bits: swap bits, bit pairs, and nibbles,
// then let ByteReverse exchange the two bytes.
inline word16 BitReverse(word16 value)
{
    value = ((value & 0xAAAA) >> 1) | ((value & 0x5555) << 1);
    value = ((value & 0xCCCC) >> 2) | ((value & 0x3333) << 2);
    value = ((value & 0xF0F0) >> 4) | ((value & 0x0F0F) << 4);
    return ByteReverse(value);
}
00719 
// Reverses the order of the 32 bits: swap bits, bit pairs, and nibbles,
// then let ByteReverse reverse the four bytes.
inline word32 BitReverse(word32 value)
{
    value = ((value & 0xAAAAAAAA) >> 1) | ((value & 0x55555555) << 1);
    value = ((value & 0xCCCCCCCC) >> 2) | ((value & 0x33333333) << 2);
    value = ((value & 0xF0F0F0F0) >> 4) | ((value & 0x0F0F0F0F) << 4);
    return ByteReverse(value);
}
00727 
#ifdef WORD64_AVAILABLE
// Reverses the order of the 64 bits; done as two 32-bit reversals on
// platforms where 64-bit operations are slow.
inline word64 BitReverse(word64 value)
{
#ifdef CRYPTOPP_SLOW_WORD64
    return (word64(BitReverse(word32(value))) << 32) | BitReverse(word32(value>>32));
#else
    value = ((value & W64LIT(0xAAAAAAAAAAAAAAAA)) >> 1) | ((value & W64LIT(0x5555555555555555)) << 1);
    value = ((value & W64LIT(0xCCCCCCCCCCCCCCCC)) >> 2) | ((value & W64LIT(0x3333333333333333)) << 2);
    value = ((value & W64LIT(0xF0F0F0F0F0F0F0F0)) >> 4) | ((value & W64LIT(0x0F0F0F0F0F0F0F0F)) << 4);
    return ByteReverse(value);
#endif
}
#endif
00741 
// Dispatches to the fixed-width BitReverse overload matching sizeof(T).
// The sizeof comparisons are compile-time constants, so compilers can prune
// the dead branches.
template <class T>
inline T BitReverse(T value)
{
    if (sizeof(T) == 1)
        return (T)BitReverse((byte)value);
    else if (sizeof(T) == 2)
        return (T)BitReverse((word16)value);
    else if (sizeof(T) == 4)
        return (T)BitReverse((word32)value);
    else
    {
#ifdef WORD64_AVAILABLE
        assert(sizeof(T) == 8);
        return (T)BitReverse((word64)value);
#else
        assert(false);          // no 64-bit support; unreachable by contract
        return 0;
#endif
    }
}
00762 
// Returns value byte-swapped when `order` differs from the machine's native
// byte order, unchanged otherwise.
template <class T>
inline T ConditionalByteReverse(ByteOrder order, T value)
{
    return NativeByteOrderIs(order) ? value : ByteReverse(value);
}
00768 
// Reverses the byte order of each T-sized word in an array of byteCount
// bytes; byteCount must be a whole number of words.  in may equal out.
template <class T>
void ByteReverse(T *out, const T *in, size_t byteCount)
{
    assert(byteCount % sizeof(T) == 0);
    size_t count = byteCount/sizeof(T);
    for (size_t i=0; i<count; i++)
        out[i] = ByteReverse(in[i]);
}
00777 
// Byte-swaps the array into out when `order` differs from native order;
// otherwise just copies, skipping the copy when in and out already alias.
template <class T>
inline void ConditionalByteReverse(ByteOrder order, T *out, const T *in, size_t byteCount)
{
    if (!NativeByteOrderIs(order))
        ByteReverse(out, in, byteCount);
    else if (in != out)
        memcpy_s(out, byteCount, in, byteCount);
}
00786 
// Copies a user-supplied key of inlen bytes into an array of outlen words of
// type T, zero-padding the tail, then byte-swaps the words that received key
// material into the requested byte order.
template <class T>
inline void GetUserKey(ByteOrder order, T *out, size_t outlen, const byte *in, size_t inlen)
{
    const size_t U = sizeof(T);
    assert(inlen <= outlen*U);
    memcpy(out, in, inlen);
    memset((byte *)out+inlen, 0, outlen*U-inlen);
    ConditionalByteReverse(order, out, out, RoundUpToMultipleOf(inlen, U));
}
00796 
00797 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
// Byte-at-a-time readers used when word-sized loads from `block` may be
// unaligned.  The trailing unused pointer parameter exists only to select
// the overload for the desired word type.
inline byte UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const byte *)
{
    return block[0];
}

inline word16 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word16 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? block[1] | (block[0] << 8)
        : block[0] | (block[1] << 8);
}

inline word32 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word32 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ? word32(block[3]) | (word32(block[2]) << 8) | (word32(block[1]) << 16) | (word32(block[0]) << 24)
        : word32(block[0]) | (word32(block[1]) << 8) | (word32(block[2]) << 16) | (word32(block[3]) << 24);
}

#ifdef WORD64_AVAILABLE
inline word64 UnalignedGetWordNonTemplate(ByteOrder order, const byte *block, const word64 *)
{
    return (order == BIG_ENDIAN_ORDER)
        ?
        (word64(block[7]) |
        (word64(block[6]) <<  8) |
        (word64(block[5]) << 16) |
        (word64(block[4]) << 24) |
        (word64(block[3]) << 32) |
        (word64(block[2]) << 40) |
        (word64(block[1]) << 48) |
        (word64(block[0]) << 56))
        :
        (word64(block[0]) |
        (word64(block[1]) <<  8) |
        (word64(block[2]) << 16) |
        (word64(block[3]) << 24) |
        (word64(block[4]) << 32) |
        (word64(block[5]) << 40) |
        (word64(block[6]) << 48) |
        (word64(block[7]) << 56));
}
#endif
00841 
// Byte-at-a-time writers used when word-sized stores to `block` may be
// unaligned.  When xorBlock is non-NULL, its bytes are XORed into the value
// as it is written.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, byte value, const byte *xorBlock)
{
    block[0] = xorBlock ? (value ^ xorBlock[0]) : value;
}

// word16 writer: most significant byte first for big-endian order.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word16 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
        }
    }
}
00876 
// word32 byte-at-a-time writer: stores the four bytes of value in the
// requested order, optionally XORing with the corresponding xorBlock bytes.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word32 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
        }
    }
}
00914 
#ifdef WORD64_AVAILABLE
// word64 byte-at-a-time writer: stores the eight bytes of value in the
// requested order, optionally XORing with the corresponding xorBlock bytes.
inline void UnalignedPutWordNonTemplate(ByteOrder order, byte *block, word64 value, const byte *xorBlock)
{
    if (order == BIG_ENDIAN_ORDER)
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
        }
    }
    else
    {
        if (xorBlock)
        {
            block[0] = xorBlock[0] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = xorBlock[1] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = xorBlock[2] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = xorBlock[3] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = xorBlock[4] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = xorBlock[5] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = xorBlock[6] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = xorBlock[7] ^ CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
        else
        {
            block[0] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 0);
            block[1] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 1);
            block[2] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 2);
            block[3] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 3);
            block[4] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 4);
            block[5] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 5);
            block[6] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 6);
            block[7] = CRYPTOPP_GET_BYTE_AS_BYTE(value, 7);
        }
    }
}
#endif
00970 #endif  // #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00971 
// Reads one word of type T from block, converting from the given byte order
// to native order.  When unaligned access is disallowed at build time and
// the caller cannot promise alignment, the word is assembled byte by byte.
template <class T>
inline T GetWord(bool assumeAligned, ByteOrder order, const byte *block)
{
#ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
    if (!assumeAligned)
        return UnalignedGetWordNonTemplate(order, block, (T*)NULL);
    assert(IsAligned<T>(block));
#endif
    return ConditionalByteReverse(order, *reinterpret_cast<const T *>(block));
}
00982 
// Convenience overload: fetch a word from block (see GetWord above) and
// store it in result instead of returning it.
template <class T>
inline void GetWord(bool assumeAligned, ByteOrder order, T &result, const byte *block)
{
    result = GetWord<T>(assumeAligned, order, block);
}
00988 
00989 template <class T>
00990 inline void PutWord(bool assumeAligned, ByteOrder order, byte *block, T value, const byte *xorBlock = NULL)
00991 {
00992 #ifndef CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS
00993     if (!assumeAligned)
00994         return UnalignedPutWordNonTemplate(order, block, value, xorBlock);
00995     assert(IsAligned<T>(block));
00996     assert(IsAligned<T>(xorBlock));
00997 #endif
00998     *reinterpret_cast<T *>(block) = ConditionalByteReverse(order, value) ^ (xorBlock ? *reinterpret_cast<const T *>(xorBlock) : 0);
00999 }
01000 
01001 template <class T, class B, bool A=true>
01002 class GetBlock
01003 {
01004 public:
01005     GetBlock(const void *block)
01006         : m_block((const byte *)block) {}
01007 
01008     template <class U>
01009     inline GetBlock<T, B, A> & operator()(U &x)
01010     {
01011         CRYPTOPP_COMPILE_ASSERT(sizeof(U) >= sizeof(T));
01012         x = GetWord<T>(A, B::ToEnum(), m_block);
01013         m_block += sizeof(T);
01014         return *this;
01015     }
01016 
01017 private:
01018     const byte *m_block;
01019 };
01020 
01021 template <class T, class B, bool A=false>
01022 class PutBlock
01023 {
01024 public:
01025     PutBlock(const void *xorBlock, void *block)
01026         : m_xorBlock((const byte *)xorBlock), m_block((byte *)block) {}
01027 
01028     template <class U>
01029     inline PutBlock<T, B, A> & operator()(U x)
01030     {
01031         PutWord(A, B::ToEnum(), m_block, (T)x, m_xorBlock);
01032         m_block += sizeof(T);
01033         if (m_xorBlock)
01034             m_xorBlock += sizeof(T);
01035         return *this;
01036     }
01037 
01038 private:
01039     const byte *m_xorBlock;
01040     byte *m_block;
01041 };
01042 
// Convenience bundle pairing GetBlock and PutBlock for one word type T and
// byte order B; GA/PA are the alignment assumptions for gets and puts.
template <class T, class B, bool GA=true, bool PA=false>
struct BlockGetAndPut
{
    // function needed because of C++ grammatical ambiguity between expression-statements and declarations
    static inline GetBlock<T, B, GA> Get(const void *block) {return GetBlock<T, B, GA>(block);}
    typedef PutBlock<T, B, PA> Put;
};
01050 
01051 template <class T>
01052 std::string WordToString(T value, ByteOrder order = BIG_ENDIAN_ORDER)
01053 {
01054     if (!NativeByteOrderIs(order))
01055         value = ByteReverse(value);
01056 
01057     return std::string((char *)&value, sizeof(value));
01058 }
01059 
01060 template <class T>
01061 T StringToWord(const std::string &str, ByteOrder order = BIG_ENDIAN_ORDER)
01062 {
01063     T value = 0;
01064     memcpy_s(&value, sizeof(value), str.data(), UnsignedMin(str.size(), sizeof(value)));
01065     return NativeByteOrderIs(order) ? value : ByteReverse(value);
01066 }
01067 
01068 // ************** help remove warning on g++ ***************
01069 
// SafeShifter selects, at compile time, between a real shift and a constant
// 0 result, so that shifting by >= the bit width of T (which is undefined
// behavior in C++) never reaches the shift operator.
template <bool overflow> struct SafeShifter;

// Overflowing case: any shift of bits >= width(T) mathematically yields 0.
// Parameters are unnamed since they are intentionally unused.
template<> struct SafeShifter<true>
{
    template <class T>
    static inline T RightShift(T, unsigned int) {return 0;}

    template <class T>
    static inline T LeftShift(T, unsigned int) {return 0;}
};
01086 
01087 template<> struct SafeShifter<false>
01088 {
01089     template <class T>
01090     static inline T RightShift(T value, unsigned int bits)
01091     {
01092         return value >> bits;
01093     }
01094 
01095     template <class T>
01096     static inline T LeftShift(T value, unsigned int bits)
01097     {
01098         return value << bits;
01099     }
01100 };
01101 
01102 template <unsigned int bits, class T>
01103 inline T SafeRightShift(T value)
01104 {
01105     return SafeShifter<(bits>=(8*sizeof(T)))>::RightShift(value, bits);
01106 }
01107 
01108 template <unsigned int bits, class T>
01109 inline T SafeLeftShift(T value)
01110 {
01111     return SafeShifter<(bits>=(8*sizeof(T)))>::LeftShift(value, bits);
01112 }
01113 
01114 NAMESPACE_END
01115 
01116 #endif

Generated on Thu Jul 5 22:21:37 2007 for Crypto++ by  doxygen 1.5.2