#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 Nadia Yvette Chambers, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#include <asm/types.h>
#include <linux/compiler.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
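
/*
 * For illustration: because these primes are bit-sparse, multiplying by
 * them decomposes into a short run of shifts and adds.  For a u32 x,
 * modulo 2^32:
 *
 *	x * GOLDEN_RATIO_PRIME_32 ==
 *		(x << 31) + (x << 29) - (x << 25) +
 *		(x << 22) - (x << 19) - (x << 16) + x
 *
 * mirroring the exponents in the comment above.  hash_64() below uses
 * the same trick for the 64-bit prime on 32-bit machines.
 */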

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif

/*
 * The above primes are actively bad for hashing, since they are
 * too sparse. The 32-bit one is mostly ok, but the 64-bit one causes
 * real problems. Besides, the "prime" part is pointless for the
 * multiplicative hash.
 *
 * Although a random odd number will do, it turns out that the golden
 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
 * properties.
 *
 * These are the negative, (1 - phi) = phi^2 = (3 - sqrt(5))/2.
 * (See Knuth vol 3, section 6.4, exercise 9.)
 */
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
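
/*
 * For reference (a derivation sketch): phi * 2^32 is roughly 0x9E3779B9
 * and phi * 2^64 is roughly 0x9E3779B97F4A7C15; the two constants above
 * are their two's complement negatives, i.e. roughly phi^2 * 2^32 and
 * phi^2 * 2^64.  Both are odd, so the multiplication is a bijection
 * modulo 2^32 / 2^64.
 */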

static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val;

#if BITS_PER_LONG == 64
	hash = hash * GOLDEN_RATIO_64;
#else
	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
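	/*
	 * The shift/add sequence below computes hash * GOLDEN_RATIO_PRIME_64
	 * by hand: the shift amounts accumulate to 18, 51, 54, 57, 61 and 63,
	 * matching the exponents listed at that #define.
	 */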
	u64 n = hash;
	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;
#endif

	/* High bits are more random, so use them. */
	return hash >> (64 - bits);
}
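
/*
 * Example use (a sketch; the names below are illustrative, not from this
 * file): picking a bucket in a power-of-two sized hash table.
 *
 *	#define MY_TABLE_BITS 8
 *	static struct hlist_head my_table[1 << MY_TABLE_BITS];
 *
 *	static struct hlist_head *my_bucket(u64 key)
 *	{
 *		return &my_table[hash_64(key, MY_TABLE_BITS)];
 *	}
 *
 * hash_64(key, 8) returns a value in [0, 255] taken from the top bits
 * of the 64-bit product, which are the best-mixed bits.
 */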

static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	u32 hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}
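
/*
 * Worked example: with bits = 8, consecutive keys scatter across the 256
 * possible results instead of landing in neighbouring slots (each result
 * is the top byte of val * 0x9e370001 truncated to 32 bits):
 *
 *	hash_32(1, 8) == 0x9e == 158
 *	hash_32(2, 8) == 0x3c ==  60
 *	hash_32(3, 8) == 0xda == 218
 */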

static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr, bits);
}
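
/*
 * Example use (illustrative only): hashing object addresses to spread
 * per-object state, such as a lock, across a small array.
 *
 *	#define LOCK_BITS 4
 *	static spinlock_t obj_locks[1 << LOCK_BITS];
 *
 *	static spinlock_t *lock_for(const void *obj)
 *	{
 *		return &obj_locks[hash_ptr(obj, LOCK_BITS)];
 *	}
 *
 * The low pointer bits are usually zero because of alignment; the
 * multiplicative hash mixes the higher bits into the index.
 */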

static inline u32 hash32_ptr(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

#if BITS_PER_LONG == 64
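	/* Fold the upper 32 bits in, so both halves of the pointer count. */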
	val ^= (val >> 32);
#endif
	return (u32)val;
}

#endif /* _LINUX_HASH_H */