1 /**
2  * Written in the D programming language.
 * This module provides functions for uniformly calculating hash values for different types.
4  *
5  * Copyright: Copyright Igor Stepanov 2013-2013.
6  * License:   $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
7  * Authors:   Igor Stepanov
8  * Source: $(DRUNTIMESRC core/internal/_hash.d)
9  */
10 module core.internal.hash;
11 
12 import core.internal.convert;
13 import core.internal.traits : allSatisfy, Unconst;
14 
// If true ensure that positive zero and negative zero have the same hash.
// Historically typeid(float).getHash did this but hashOf(float) did not.
private enum floatCoalesceZeroes = true;
// If true ensure that all NaNs of the same floating point type have the same hash.
// Historically typeid(float).getHash didn't do this but hashOf(float) did.
private enum floatCoalesceNaNs = true;

// If either of the above are true then no struct or array that contains the
// representation of a floating point number may be hashed with `bytesHash`.
// (Such values go through the float-aware hashOf overloads below instead;
// see canBitwiseHash, which returns false for floats in that case.)
24 
@nogc nothrow pure @safe unittest
{
    // Exercise the coalescing switches declared above.
    static if (floatCoalesceNaNs)
        assert(hashOf(double.nan) == hashOf(-double.nan)); // Same hash for different NaN.
    static if (floatCoalesceZeroes)
        assert(hashOf(+0.0) == hashOf(-0.0)); // Same hash for +0.0 and -0.0.
}
32 
/+
True if `T` has a `toHash` member that is actually callable through a
dereferenced `T*` (so the check respects qualifiers: e.g. a mutable
`toHash` is not callable on `shared const T`). Uses `__traits(compiles)`
so the probe lambda is never executed.
+/
private enum hasCallableToHash(T) = __traits(compiles,
    {
        size_t hash = ((T* x) => (*x).toHash())(null);
    });
37 
@nogc nothrow pure @safe unittest
{
    // A mutable struct's toHash is callable; the shared const view of the
    // same struct cannot call the mutable toHash.
    static struct HasHash { size_t toHash() { return 4; } }
    assert(hasCallableToHash!HasHash);
    assert(!hasCallableToHash!(shared const HasHash));
}
44 
/+
True for a final class whose `toHash` is still exactly `Object.toHash`
(no override anywhere in the hierarchy, and being final no subclass can
add one), so hashing may safely fall back to the object's address.
+/
private enum isFinalClassWithAddressBasedHash(T) = __traits(isFinalClass, T)
    // Use __traits(compiles, ...) in case there are multiple overloads of `toHash`.
    && __traits(compiles, {static assert(&Object.toHash is &T.toHash);});
48 
@nogc nothrow pure @safe unittest
{
    // Only a final class that does NOT override toHash qualifies.
    static class Base {}
    final static class FinalPlain : Base {}
    final static class FinalOverrides : Base { override size_t toHash() const nothrow { return 1; }}
    static assert(!isFinalClassWithAddressBasedHash!Object);
    static assert(!isFinalClassWithAddressBasedHash!Base);
    static assert(isFinalClassWithAddressBasedHash!FinalPlain);
    static assert(!isFinalClassWithAddressBasedHash!FinalOverrides);
}
59 
/+
True for an extern(C++) class or interface that is not rooted in Object
and exposes no callable toHash; such types are hashed by address.
+/
private template isCppClassWithoutHash(T)
{
    static if (is(T == class) || is(T == interface))
    {
        import core.internal.traits : Unqual;
        enum bool isCppClassWithoutHash = __traits(getLinkage, T) == "C++"
            && !is(Unqual!T : Object) && !hasCallableToHash!T;
    }
    else
        enum bool isCppClassWithoutHash = false;
}
71 
/+
Is it valid to calculate a hash code for T based on the bits of its
representation? Always false for interfaces, dynamic arrays, and
associative arrays. False for all classes except final classes that do
not override `toHash`.

Note: according to the spec as of
https://github.com/dlang/dlang.org/commit/d66eff16491b0664c0fc00ba80a7aa291703f1f2
the contents of unnamed paddings between fields is undefined. Currently
this hashing implementation assumes that the padding contents (if any)
for all instances of `T` are the same. The correctness of this
assumption is yet to be verified.
+/
private template canBitwiseHash(T)
{
    // NOTE: the order of these static-if branches matters (e.g. enum must
    // be tested before scalar, static array before the generic slice case).
    static if (is(T EType == enum))
        enum canBitwiseHash = .canBitwiseHash!EType;
    else static if (__traits(isFloating, T))
        // Coalescing +0.0/-0.0 or NaNs rules out hashing raw float bits.
        enum canBitwiseHash = !(floatCoalesceZeroes || floatCoalesceNaNs);
    else static if (__traits(isScalar, T))
        enum canBitwiseHash = true;
    else static if (is(T == class))
    {
        // Only classes hashed purely by address qualify.
        enum canBitwiseHash = isFinalClassWithAddressBasedHash!T || isCppClassWithoutHash!T;
    }
    else static if (is(T == interface))
    {
        enum canBitwiseHash = isCppClassWithoutHash!T;
    }
    else static if (is(T == struct))
    {
        // A custom toHash must be called; a nested struct's context pointer
        // is address-dependent, so neither can be hashed bytewise.
        static if (hasCallableToHash!T || __traits(isNested, T))
            enum canBitwiseHash = false;
        else
            enum canBitwiseHash = allSatisfy!(.canBitwiseHash, typeof(T.tupleof));
    }
    else static if (is(T == union))
    {
        // Right now we always bytewise hash unions that lack callable `toHash`.
        enum canBitwiseHash = !hasCallableToHash!T;
    }
    else static if (is(T E : E[]))
    {
        static if (__traits(isStaticArray, T))
            enum canBitwiseHash = (T.length == 0) || .canBitwiseHash!E;
        else
            enum canBitwiseHash = false;
    }
    else static if (__traits(isAssociativeArray, T))
    {
        enum canBitwiseHash = false;
    }
    else
    {
        static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)),
            "Internal error: unanticipated type "~T.stringof);
        enum canBitwiseHash = true;
    }
}
131 
// Overly restrictive for simplicity: has false negatives but no false positives.
// Decides whether a hashOf overload may take its argument as `scope const`
// by value instead of `auto ref` (see the enum hashOf overloads below).
private template useScopeConstPassByValue(T)
{
    static if (__traits(isScalar, T))
        enum useScopeConstPassByValue = true;
    else static if (is(T == class) || is(T == interface))
        // Overly restrictive for simplicity.
        enum useScopeConstPassByValue = isFinalClassWithAddressBasedHash!T;
    else static if (is(T == struct) || is(T == union))
    {
        // Overly restrictive for simplicity.
        enum useScopeConstPassByValue = T.sizeof <= (int[]).sizeof &&
            __traits(isPOD, T) && // "isPOD" just to check there's no dtor or postblit.
            canBitwiseHash!T; // We can't verify toHash doesn't leak.
    }
    else static if (is(T : E[], E))
    {
        static if (!__traits(isStaticArray, T))
            // Overly restrictive for simplicity.
            enum useScopeConstPassByValue = .useScopeConstPassByValue!E;
        else static if (T.length == 0)
            enum useScopeConstPassByValue = true;
        else
            enum useScopeConstPassByValue = T.sizeof <= (uint[]).sizeof
                && .useScopeConstPassByValue!(typeof(T.init[0]));
    }
    else static if (is(T : V[K], K, V))
    {
        // Overly restrictive for simplicity.
        enum useScopeConstPassByValue = .useScopeConstPassByValue!K
            && .useScopeConstPassByValue!V;
    }
    else
    {
        static assert(is(T == delegate) || is(T : void) || is(T : typeof(null)),
            "Internal error: unanticipated type "~T.stringof);
        enum useScopeConstPassByValue = true;
    }
}
171 
@safe unittest
{
    static assert(useScopeConstPassByValue!int);
    static assert(useScopeConstPassByValue!string);

    // Non-POD structs (dtor or postblit) must not be passed by value.
    static int ctr;
    static struct HasDtor { ~this() { ctr++; } }
    static struct HasPostblit { this(this) { ctr++; } }
    static assert(!useScopeConstPassByValue!HasDtor,
        "Don't default pass by value a struct with a non-vacuous destructor.");
    static assert(!useScopeConstPassByValue!HasPostblit,
        "Don't default pass by value a struct with a non-vacuous postblit.");
}
185 
//enum hash. CTFE depends on base type
size_t hashOf(T)(scope const T val)
if (is(T EType == enum) && useScopeConstPassByValue!EType)
{
    // Forward to the hash of the enum's base type.
    static if (is(T BaseType == enum))
        return hashOf(cast(const BaseType) val);
    else
        static assert(0);
}
199 
//enum hash. CTFE depends on base type
size_t hashOf(T)(scope const T val, size_t seed)
if (is(T EType == enum) && useScopeConstPassByValue!EType)
{
    // Forward to the seeded hash of the enum's base type.
    static if (is(T BaseType == enum))
        return hashOf(cast(const BaseType) val, seed);
    else
        static assert(0);
}
213 
//enum hash. CTFE depends on base type
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (is(T EType == enum) && !useScopeConstPassByValue!EType)
{
    // Base types too costly to pass by value go through an lvalue copy.
    static if (is(T BaseType == enum))
    {
        BaseType base = cast(BaseType) val;
        return hashOf(base, seed);
    }
    else
        static assert(0);
}
228 
//CTFE ready (depends on base type).
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T)
{
    // FIXME:
    // We would like to to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    return bytesHashWithExactSizeAndAlignment!T(toUbyte(val), seed);
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    static if (T.length == 0)
    {
        return bytesHashAlignedBy!size_t((ubyte[]).init, seed);
    }
    // BUGFIX: this must chain with `else` — without it, a zero-length array
    // compiled both this branch and the one above, leaving unreachable
    // statements after the first return (an error when compiling with -w).
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    {
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!T((cast(const(ubyte)*) &val)[0 .. T.sizeof], seed);
    }
}
260 
//CTFE ready (depends on base type).
// Static arrays whose elements cannot be hashed bytewise: hash the slice
// through the dynamic-array overloads below.
size_t hashOf(T)(auto ref T val, size_t seed = 0)
if (!is(T == enum) && __traits(isStaticArray, T) && !canBitwiseHash!T)
{
    // FIXME:
    // We would like to to do this:
    //
    //static if (T.length == 0)
    //    return seed;
    //else static if (T.length == 1)
    //    return hashOf(val[0], seed);
    //else
    //    /+ hash like a dynamic array +/
    //
    // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
    // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
    // hashOf(val).
    return hashOf(val[], seed);
}
280 
//dynamic array hash
size_t hashOf(T)(scope const T val, size_t seed = 0)
if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T)
    && !is(T == struct) && !is(T == class) && !is(T == union)
    && (__traits(isScalar, S) || canBitwiseHash!S))
{
    alias ElementType = typeof(val[0]);
    // Scalars that can't be hashed bytewise (floats under coalescing) fall
    // into the element-wise chained loop below.
    static if (!canBitwiseHash!ElementType)
    {
        size_t hash = seed;
        foreach (ref o; val)
        {
            hash = hashOf(hashOf(o), hash); // double hashing to match TypeInfo.getHash
        }
        return hash;
    }
    else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))
    //ubyteble array (arithmetic types and structs without toHash) CTFE ready for arithmetic types and structs without reference fields
    {
        return bytesHashAlignedBy!ElementType(toUbyte(val), seed);
    }
    else //Other types. CTFE unsupported
    {
        assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
        return bytesHashAlignedBy!ElementType((cast(const(ubyte)*) val.ptr)[0 .. ElementType.sizeof*val.length], seed);
    }
}
308 
//dynamic array hash
size_t hashOf(T)(T val, size_t seed = 0)
if (!is(T == enum) && !is(T : typeof(null)) && is(T S: S[]) && !__traits(isStaticArray, T)
    && !is(T == struct) && !is(T == class) && !is(T == union)
    && !(__traits(isScalar, S) || canBitwiseHash!S))
{
    // Chain every element's hash into the running value. Each element is
    // double-hashed because TypeInfo.getHash doesn't allow to pass seed value.
    size_t h = seed;
    foreach (ref element; val)
        h = hashOf(hashOf(element), h);
    return h;
}
322 
//arithmetic type hash
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (!is(T == enum) && __traits(isArithmetic, T)
    && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector))
{
    // Unseeded case: a size_t-or-smaller integral is its own hash (identity).
    return val;
}
330 
//arithmetic type hash
// Seeded case: mix val and seed with a single MurmurHash3 round so the
// result differs from the unseeded identity hash. Constants and operation
// order must not change: hash values are ABI-visible via TypeInfo.getHash.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && __traits(isArithmetic, T)
    && __traits(isIntegral, T) && T.sizeof <= size_t.sizeof && !is(T == __vector))
{
    static if (size_t.sizeof < ulong.sizeof)
    {
        //MurmurHash3 32-bit single round
        enum uint c1 = 0xcc9e2d51;
        enum uint c2 = 0x1b873593;
        enum uint c3 = 0xe6546b64;
        enum uint r1 = 15;
        enum uint r2 = 13;
    }
    else
    {
        //Half of MurmurHash3 64-bit single round
        //(omits second interleaved update)
        enum ulong c1 = 0x87c37b91114253d5;
        enum ulong c2 = 0x4cf5ad432745937f;
        enum ulong c3 = 0x52dce729;
        enum uint r1 = 31;
        enum uint r2 = 27;
    }
    // Multiply, rotate left by r1, multiply, fold in seed, rotate by r2.
    size_t h = c1 * val;
    h = (h << r1) | (h >>> (size_t.sizeof * 8 - r1));
    h = (h * c2) ^ seed;
    h = (h << r2) | (h >>> (size_t.sizeof * 8 - r2));
    return h * 5 + c3;
}
361 
//arithmetic type hash
// Handles floating point (including imaginary/complex) and integrals wider
// than size_t. Floats are canonicalized first (zero/NaN coalescing) so that
// bitwise-equal hashes match value equality where configured above.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && __traits(isArithmetic, T)
    && (!__traits(isIntegral, T) || T.sizeof > size_t.sizeof) && !is(T == __vector))
{
    static if (__traits(isFloating, val))
    {
        import core.internal.convert : floatSize;

        static if (floatCoalesceZeroes || floatCoalesceNaNs)
        {
            import core.internal.traits : Unqual;
            Unqual!T data = val;
            // +0.0 and -0.0 become the same.
            static if (floatCoalesceZeroes && is(typeof(data = 0)))
                if (data == 0) data = 0;
            static if (floatCoalesceZeroes && is(typeof(data = 0.0i)))
                if (data == 0.0i) data = 0.0i;
            static if (floatCoalesceZeroes && is(typeof(data = 0.0 + 0.0i)))
            {
                // Coalesce signed zeroes in each complex component separately.
                if (data.re == 0.0) data = 0.0 + (data.im * 1.0i);
                // NOTE(review): `data.im == 0.0i` compares a real against an
                // imaginary literal via complex promotion; it appears
                // equivalent to `data.im == 0.0` — confirm against upstream.
                if (data.im == 0.0i) data = data.re + 0.0i;
            }
            static if (floatCoalesceNaNs)
                if (data != data) data = T.nan; // All NaN patterns become the same.
        }
        else
        {
            alias data = val;
        }

        // float- and double-sized types reinterpret as uint/ulong directly.
        static if (T.mant_dig == float.mant_dig && T.sizeof == uint.sizeof)
            return hashOf(*cast(const uint*) &data, seed);
        else static if (T.mant_dig == double.mant_dig && T.sizeof == ulong.sizeof)
            return hashOf(*cast(const ulong*) &data, seed);
        else
        {
            static if (is(T : creal) && T.sizeof != 2 * floatSize!(typeof(T.re)))
            {
                // Components padded apart: hash re, then chain im onto it.
                auto h1 = hashOf(data.re);
                return hashOf(data.im, h1);
            }
            else static if (is(T : real) || is(T : ireal))
            {
                // Ignore trailing padding
                auto bytes = toUbyte(data)[0 .. floatSize!T];
                return bytesHashWithExactSizeAndAlignment!T(bytes, seed);
            }
            else
            {
                return bytesHashWithExactSizeAndAlignment!T(toUbyte(data), seed);
            }
        }
    }
    else
    {
        // Wide integral (e.g. cent/ucent): chain size_t-sized chunks,
        // least-significant first.
        static assert(T.sizeof > size_t.sizeof && __traits(isIntegral, T));
        static foreach (i; 0 .. T.sizeof / size_t.sizeof)
            seed = hashOf(cast(size_t) (val >>> (size_t.sizeof * 8 * i)), seed);
        return seed;
    }
}
424 
// __vector hash. Float vectors are hashed element-wise through a static
// array so zero/NaN coalescing applies; other vectors hash their raw bytes.
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0) @safe @nogc nothrow pure
if (is(T == __vector) && !is(T == enum))
{
    static if (__traits(isFloating, T) && (floatCoalesceZeroes || floatCoalesceNaNs))
    {
        if (__ctfe)
        {
            // Workaround for CTFE bug.
            alias E = Unqual!(typeof(val[0]));
            E[T.sizeof / E.sizeof] array;
            foreach (i; 0 .. T.sizeof / E.sizeof)
                array[i] = val[i];
            return hashOf(array, seed);
        }
        return hashOf(val.array, seed);
    }
    else
    {
        return bytesHashAlignedBy!T(toUbyte(val), seed);
    }
}
446 
//typeof(null) hash. CTFE supported
// The null literal always hashes to 0.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val) if (!is(T == enum) && is(T : typeof(null)))
{
    return 0;
}
453 
//typeof(null) hash. CTFE supported
// Seeded variant: mix the seed with 0, matching the seeded pointer overload
// for a null pointer.
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed) if (!is(T == enum) && is(T : typeof(null)))
{
    return hashOf(size_t(0), seed);
}
460 
//Pointers hash. CTFE unsupported if not null
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val)
if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null))
    && !is(T == struct) && !is(T == class) && !is(T == union))
{
    if (__ctfe)
    {
        // Pointer values don't exist at compile time; only null is hashable.
        if (val !is null)
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        return 0;
    }
    // Fold the upper bits into the low ones so aligned pointers don't all
    // share trailing zero bits.
    const addr = cast(size_t) val;
    return addr ^ (addr >>> 4);
}
482 
//Pointers hash. CTFE unsupported if not null
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed)
if (!is(T == enum) && is(T V : V*) && !is(T : typeof(null))
    && !is(T == struct) && !is(T == class) && !is(T == union))
{
    if (__ctfe)
    {
        // Pointer values don't exist at compile time; only null is hashable.
        if (val !is null)
            assert(0, "Unable to calculate hash of non-null pointer at compile time");
        return hashOf(cast(size_t)0, seed);
    }
    // At runtime, hash the address like any other size_t-sized integer.
    return hashOf(cast(size_t)val, seed);
}
503 
/+
Shared implementation for the struct/union hashOf overloads below, mixed in
as code. It expects `T`, `val` and (optionally) `seed` in scope: `isChained`
detects whether a seed was passed. Prefers a callable `toHash`; otherwise
hashes field-by-field (or bytewise when safe), warning when a `toHash`
exists but is uncallable with `val`'s qualifiers.
+/
private enum _hashOfStruct =
q{
    enum bool isChained = is(typeof(seed) : size_t);
    static if (!isChained) enum size_t seed = 0;
    static if (hasCallableToHash!(typeof(val))) //CTFE depends on toHash()
    {
        static if (isChained)
            return hashOf(cast(size_t) val.toHash(), seed);
        else
            return val.toHash();
    }
    else
    {
        static if (__traits(hasMember, T, "toHash") && is(typeof(T.toHash) == function))
        {
            // TODO: in the future maybe this should be changed to a static
            // assert(0), because if there's a `toHash` the programmer probably
            // expected it to be called and a compilation failure here will
            // expose a bug in his code.
            //   In the future we also might want to disallow non-const toHash
            // altogether.
            pragma(msg, "Warning: struct "~__traits(identifier, T)
                ~" has method toHash, however it cannot be called with "
                ~typeof(val).stringof~" this.");
        }

        static if (T.tupleof.length == 0)
        {
            return seed;
        }
        else static if ((is(T == struct) && !canBitwiseHash!T) || T.tupleof.length == 1)
        {
            static if (isChained) size_t h = seed;
            static foreach (i, F; typeof(val.tupleof))
            {
                static if (__traits(isStaticArray, F))
                {
                    static if (i == 0 && !isChained) size_t h = 0;
                    static if (F.sizeof > 0 && canBitwiseHash!F)
                        // May use smallBytesHash instead of bytesHash.
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    else
                        // We can avoid the "double hashing" the top-level version uses
                        // for consistency with TypeInfo.getHash.
                        foreach (ref e; val.tupleof[i])
                            h = hashOf(e, h);
                }
                else static if (is(F == struct) || is(F == union))
                {
                    static if (hasCallableToHash!F)
                    {
                        static if (i == 0 && !isChained)
                            size_t h = val.tupleof[i].toHash();
                        else
                            h = hashOf(cast(size_t) val.tupleof[i].toHash(), h);
                    }
                    else static if (F.tupleof.length == 1)
                    {
                        // Handle the single member case separately to avoid unnecessarily using bytesHash.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i].tupleof[0]);
                        else
                            h = hashOf(val.tupleof[i].tupleof[0], h);
                    }
                    else static if (canBitwiseHash!F)
                    {
                        // May use smallBytesHash instead of bytesHash.
                        static if (i == 0 && !isChained) size_t h = 0;
                        h = bytesHashWithExactSizeAndAlignment!F(toUbyte(val.tupleof[i]), h);
                    }
                    else
                    {
                        // Nothing special happening.
                        static if (i == 0 && !isChained)
                            size_t h = hashOf(val.tupleof[i]);
                        else
                            h = hashOf(val.tupleof[i], h);
                    }
                }
                else
                {
                    // Nothing special happening.
                    static if (i == 0 && !isChained)
                        size_t h = hashOf(val.tupleof[i]);
                    else
                        h = hashOf(val.tupleof[i], h);
                }
            }
            return h;
        }
        else static if (is(typeof(toUbyte(val)) == const(ubyte)[]))//CTFE ready for structs without reference fields
        {
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).hashOf(&val).
            return bytesHashAlignedBy!T(toUbyte(val), seed);
        }
        else // CTFE unsupported
        {
            assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
            const(ubyte)[] bytes = (() @trusted => (cast(const(ubyte)*)&val)[0 .. T.sizeof])();
            // Not using bytesHashWithExactSizeAndAlignment here because
            // the result may differ from typeid(T).hashOf(&val).
            return bytesHashAlignedBy!T(bytes, seed);
        }
    }
};
610 
//struct or union hash
// Unqualified (non-const/immutable) bitwise-hashable structs and unions.
size_t hashOf(T)(scope const auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !is(T == const) && !is(T == immutable)
    && canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}
619 
//struct or union hash
// Non-bitwise-hashable structs/unions, unseeded variant.
size_t hashOf(T)(auto ref T val)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}
627 
//struct or union hash
// Non-bitwise-hashable structs/unions, seeded variant.
size_t hashOf(T)(auto ref T val, size_t seed)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && !canBitwiseHash!T)
{
    mixin(_hashOfStruct);
}
635 
//struct or union hash - https://issues.dlang.org/show_bug.cgi?id=19332 (support might be removed in future)
// const/immutable-qualified types that are bitwise-hashable only because of
// the qualifier (their unqualified form isn't).
size_t hashOf(T)(scope auto ref T val, size_t seed = 0)
if (!is(T == enum) && (is(T == struct) || is(T == union))
    && (is(T == const) || is(T == immutable))
    && canBitwiseHash!T && !canBitwiseHash!(Unconst!T))
{
    mixin(_hashOfStruct);
}
644 
//delegate hash. CTFE unsupported
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate))
{
    assert(!__ctfe, "unable to compute hash of "~T.stringof~" at compile time");
    // Hash the delegate's raw representation (context and function pointer).
    auto raw = (cast(const(ubyte)*) &val)[0 .. T.sizeof];
    return bytesHashWithExactSizeAndAlignment!T(raw, seed);
}
653 
//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    // At compile time only the null reference can be hashed.
    if (__ctfe && val is null)
        return 0;
    return hashOf(cast(const void*) val);
}
663 
//address-based class hash. CTFE only if null.
@nogc nothrow pure @trusted
size_t hashOf(T)(scope const T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && canBitwiseHash!T)
{
    // At compile time only the null reference can be hashed.
    if (__ctfe && val is null)
        return hashOf(size_t(0), seed);
    return hashOf(cast(const void*) val, seed);
}
673 
//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    // Null references hash to 0; otherwise defer to the object's toHash,
    // going through Object when toHash isn't directly callable on T.
    if (val is null)
        return 0;
    static if (__traits(compiles, {size_t h = val.toHash();}))
        return val.toHash();
    else
        return (cast(Object) val).toHash();
}
684 
//class or interface hash. CTFE depends on toHash
size_t hashOf(T)(T val, size_t seed)
if (!is(T == enum) && (is(T == interface) || is(T == class))
    && !canBitwiseHash!T)
{
    // Compute the object's own hash (0 for null) and chain the seed onto it.
    static if (__traits(compiles, {size_t h = val.toHash();}))
        const size_t h = val ? cast(size_t) val.toHash() : size_t(0);
    else
        const size_t h = val ? (cast(Object) val).toHash() : 0;
    return hashOf(h, seed);
}
695 
//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    static if (is(typeof(aa) : V[K], K, V)) {} // Put K & V in scope.
    static if (__traits(compiles, (ref K k, ref V v) nothrow => .hashOf(k) + .hashOf(v)))
        scope (failure) assert(0); // Allow compiler to infer nothrow.

    if (!aa.length) return 0;
    size_t accum = 0;

    // Sum the per-entry hashes so the result is independent of the
    // foreach traversal order.
    foreach (key, ref entryVal; aa)
    {
        size_t[2] pair;
        pair[0] = key.hashOf();
        pair[1] = entryVal.hashOf();
        accum += pair.hashOf();
    }
    return accum;
}
716 
//associative array hash. CTFE depends on base types
size_t hashOf(T)(T aa, size_t seed) if (!is(T == enum) && __traits(isAssociativeArray, T))
{
    // Chain the seed onto the unseeded AA hash.
    const unseeded = hashOf(aa);
    return hashOf(unseeded, seed);
}
722 
723 // MurmurHash3 was written by Austin Appleby, and is placed in the public
724 // domain. The author hereby disclaims copyright to this source code.
725 
// This overload is for backwards compatibility.
@system pure nothrow @nogc
size_t bytesHash()(scope const(void)* buf, size_t len, size_t seed)
{
    // Reslice the raw pointer and use the unaligned byte hasher.
    auto bytes = (cast(const(ubyte)*) buf)[0 .. len];
    return bytesHashAlignedBy!ubyte(bytes, seed);
}
732 
/+
Selects the bytesHash instantiation matching AlignType's alignment: data
aligned at least like uint may use direct 32-bit reads (see bytesHash's
dataKnownToBeAligned parameter).
+/
private template bytesHashAlignedBy(AlignType)
{
    alias bytesHashAlignedBy = bytesHash!(AlignType.alignof >= uint.alignof);
}
737 
/+
Picks smallBytesHash (FNV) for small fixed-size inputs and the Murmur-based
bytesHash otherwise. The 12-vs-10 byte thresholds depend on alignment —
presumably tuned upstream for the two bytesHash code paths; TODO confirm.
+/
private template bytesHashWithExactSizeAndAlignment(SizeAndAlignType)
{
    static if (SizeAndAlignType.alignof < uint.alignof
            ? SizeAndAlignType.sizeof <= 12
            : SizeAndAlignType.sizeof <= 10)
        alias bytesHashWithExactSizeAndAlignment = smallBytesHash;
    else
        alias bytesHashWithExactSizeAndAlignment = bytesHashAlignedBy!SizeAndAlignType;
}
747 
// Fowler/Noll/Vo hash. http://www.isthe.com/chongo/tech/comp/fnv/
private size_t fnv()(scope const(ubyte)[] bytes, size_t seed) @nogc nothrow pure @safe
{
    // Pick the FNV prime matching the width of size_t.
    static if (size_t.max <= uint.max)
        enum prime = (1U << 24) + (1U << 8) + 0x93U;                          // 32-bit prime
    else static if (size_t.max <= ulong.max)
        enum prime = (1UL << 40) + (1UL << 8) + 0xb3UL;                       // 64-bit prime
    else
        enum prime = (size_t(1) << 88) + (size_t(1) << 8) + size_t(0x3b);     // 128-bit prime
    // FNV-1a: xor each byte into the state, then multiply by the prime.
    foreach (octet; bytes)
        seed = (seed ^ octet) * prime;
    return seed;
}
private alias smallBytesHash = fnv;
762 
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
private uint get32bits()(scope const(ubyte)* x) @nogc nothrow pure @system
{
    // Assemble a native-endian uint from four consecutive bytes; works for
    // unaligned pointers and inside CTFE (no reinterpret cast).
    version (BigEndian)
        return (uint(x[0]) << 24) | (uint(x[1]) << 16) | (uint(x[2]) << 8) | uint(x[3]);
    else
        return (uint(x[3]) << 24) | (uint(x[2]) << 16) | (uint(x[1]) << 8) | uint(x[0]);
}
777 
/+
MurmurHash3 x86_32 over `bytes`, seeded with the low 32 bits of `seed`.
The exact constants and operation order are load-bearing: hash values are
observable through TypeInfo.getHash and pinned by the unittest below.

Params:
    dataKnownToBeAligned = whether the data is known at compile time to be uint-aligned.
+/
@nogc nothrow pure @trusted
private size_t bytesHash(bool dataKnownToBeAligned)(scope const(ubyte)[] bytes, size_t seed)
{
    auto len = bytes.length;
    auto data = bytes.ptr;
    auto nblocks = len / 4;

    // Only the low 32 bits of the seed participate.
    uint h1 = cast(uint)seed;

    enum uint c1 = 0xcc9e2d51;
    enum uint c2 = 0x1b873593;
    enum uint c3 = 0xe6546b64;

    //----------
    // body
    auto end_data = data+nblocks*uint.sizeof;
    for (; data!=end_data; data += uint.sizeof)
    {
        static if (dataKnownToBeAligned)
            // Aligned fast path; CTFE can't reinterpret, so fall back there.
            uint k1 = __ctfe ? get32bits(data) : *(cast(const uint*) data);
        else
            uint k1 = get32bits(data);
        k1 *= c1;
        k1 = (k1 << 15) | (k1 >> (32 - 15));
        k1 *= c2;

        h1 ^= k1;
        h1 = (h1 << 13) | (h1 >> (32 - 13));
        h1 = h1*5+c3;
    }

    //----------
    // tail (0-3 remaining bytes)
    uint k1 = 0;

    switch (len & 3)
    {
        case 3: k1 ^= data[2] << 16; goto case;
        case 2: k1 ^= data[1] << 8;  goto case;
        case 1: k1 ^= data[0];
                k1 *= c1; k1 = (k1 << 15) | (k1 >> (32 - 15)); k1 *= c2; h1 ^= k1;
                goto default;
        default:
    }

    //----------
    // finalization
    h1 ^= len;
    // Force all bits of the hash block to avalanche.
    h1 = (h1 ^ (h1 >> 16)) * 0x85ebca6b;
    h1 = (h1 ^ (h1 >> 13)) * 0xc2b2ae35;
    h1 ^= h1 >> 16;
    return h1;
}
836 
//  Check that bytesHash works with CTFE
pure nothrow @system @nogc unittest
{
    size_t ctfeHash(string x)
    {
        return bytesHash(x.ptr, x.length, 0);
    }

    // Compile-time and runtime hashing must agree.
    enum sample = "Sample string";
    enum size_t compileTimeHash = ctfeHash(sample);
    assert(compileTimeHash == bytesHash(&sample[0], sample.length, 0));

    // Detect unintended changes to bytesHash on unaligned and aligned inputs.
    version (BigEndian)
    {
        const ubyte[7] a = [99, 4, 3, 2, 1, 5, 88];
        const uint[2] b = [0x04_03_02_01, 0x05_ff_ff_ff];
    }
    else
    {
        const ubyte[7] a = [99, 1, 2, 3, 4, 5, 88];
        const uint[2] b = [0x04_03_02_01, 0xff_ff_ff_05];
    }
    // It is okay to change the below values if you make a change
    // that you expect to change the result of bytesHash.
    assert(bytesHash(&a[1], a.length - 2, 0) == 2727459272);
    assert(bytesHash(&b, 5, 0) == 2727459272);
    assert(bytesHashAlignedBy!uint((cast(const ubyte*) &b)[0 .. 5], 0) == 2727459272);
}