rhashtable: Remove shift from bucket_table
authorHerbert Xu <herbert@gondor.apana.org.au>
Wed, 18 Mar 2015 09:01:15 +0000 (20:01 +1100)
committerDavid S. Miller <davem@davemloft.net>
Wed, 18 Mar 2015 16:46:40 +0000 (12:46 -0400)
Keeping both size and shift is redundant: the bucket count is always a power of two, so size == 1 << shift and either one determines the other.  We only need one, so drop shift and express the resize limit checks in terms of size.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/rhashtable.h
lib/rhashtable.c

index 1695378b3c5bf9c889900e5e2728b2498bd3167f..f16e856929592d1fb0e6634a3806976bc327b65e 100644 (file)
@@ -51,7 +51,6 @@ struct rhash_head {
  * @size: Number of hash buckets
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
- * @shift: Current size (1 << shift)
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
@@ -63,7 +62,6 @@ struct bucket_table {
        unsigned int            size;
        unsigned int            rehash;
        u32                     hash_rnd;
-       u32                     shift;
        unsigned int            locks_mask;
        spinlock_t              *locks;
        struct list_head        walkers;
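
For reference, this is roughly how the affected part of struct bucket_table reads after the patch (only the members visible in this hunk; the full definition in include/linux/rhashtable.h carries further fields, elided here):

	struct bucket_table {
		unsigned int		size;       /* number of hash buckets */
		unsigned int		rehash;     /* current bucket being rehashed */
		u32			hash_rnd;   /* random seed folded into the hash */
		unsigned int		locks_mask; /* mask applied before indexing locks[] */
		spinlock_t		*locks;     /* array of per-bucket spinlocks */
		struct list_head	walkers;    /* list of active walkers */
		/* ... remaining members unchanged ... */
	};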
index 09a7ada89ade49f91108187f7ca8dcc1e52ede8c..09740036246751d41b898480a0b8a7211107de74 100644 (file)
@@ -162,7 +162,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                return NULL;
 
        tbl->size = nbuckets;
-       tbl->shift = ilog2(nbuckets);
 
        if (alloc_bucket_locks(ht, tbl) < 0) {
                bucket_table_free(tbl);
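
The tbl->shift assignment above is dropped rather than moved: bucket counts are always powers of two, so the shift is fully determined by the size (shift == ilog2(size)) and can be recomputed wherever it is needed.  A minimal userspace sketch of that equivalence, with ilog2_u32() as a hypothetical stand-in for the kernel's ilog2():

	#include <stdio.h>

	static unsigned int ilog2_u32(unsigned int n)
	{
		unsigned int shift = 0;

		while (n >>= 1)		/* floor(log2(n)), exact for powers of two */
			shift++;
		return shift;
	}

	int main(void)
	{
		unsigned int size = 64;			/* nbuckets, always a power of two */
		unsigned int shift = ilog2_u32(size);	/* 6: derivable, so not worth storing */

		printf("size=%u shift=%u recovered=%u\n", size, shift, 1u << shift);
		return 0;
	}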
@@ -189,7 +188,7 @@ static bool rht_grow_above_75(const struct rhashtable *ht,
 {
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
-              (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
+              (!ht->p.max_shift || tbl->size < (1 << ht->p.max_shift));
 }
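
With shift gone, the grow check compares the live table size against the configured ceiling directly: 1 << max_shift is the largest permitted bucket count, and max_shift == 0 keeps its meaning of "no upper bound".  A simplified userspace rendition of the new predicate (plain integers in place of the kernel's atomic nelems and the ht/tbl indirection):

	#include <stdio.h>

	static int grow_above_75(unsigned int nelems, unsigned int size,
				 unsigned int max_shift)
	{
		return nelems > (size / 4 * 3) &&
		       (!max_shift || size < (1u << max_shift));
	}

	int main(void)
	{
		/* size 8: the threshold is 8 / 4 * 3 == 6, so the 7th element triggers growth */
		printf("%d %d\n", grow_above_75(6, 8, 16), grow_above_75(7, 8, 16));
		return 0;
	}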
 
 /**
@@ -202,7 +201,7 @@ static bool rht_shrink_below_30(const struct rhashtable *ht,
 {
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
-              tbl->shift > ht->p.min_shift;
+              tbl->size > (1 << ht->p.min_shift);
 }
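
The shrink check is converted the same way: the table may only shrink while it stays above 1 << min_shift buckets.  Note the deliberate hysteresis between the two predicates: growing above 75% load but shrinking only below 30% leaves a gap so the table does not oscillate around a single threshold.  A companion sketch under the same simplifying assumptions as above:

	#include <stdio.h>

	static int shrink_below_30(unsigned int nelems, unsigned int size,
				   unsigned int min_shift)
	{
		return nelems < (size * 3 / 10) &&
		       size > (1u << min_shift);
	}

	int main(void)
	{
		/* size 32: the threshold is 32 * 3 / 10 == 9; min_shift 4 floors the size at 16 */
		printf("%d %d\n", shrink_below_30(9, 32, 4), shrink_below_30(8, 32, 4));
		return 0;
	}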
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)