From c88455ce50ae4224d84960ce2baa53e61580df27 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:31 +0100
Subject: rhashtable: key_hashfn() must return full hash value

The value computed by key_hashfn() is used by rhashtable_lookup_compare()
to traverse both tables during a resize. key_hashfn() must therefore
return the hash value without the buckets mask applied so it can be
masked to the size of each individual table.

Fixes: 97defe1ecf86 ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0579191..71fd0dd 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -94,13 +94,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
 
 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
 {
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	u32 hash;
-
-	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
-	hash >>= HASH_RESERVED_SPACE;
-
-	return rht_bucket_index(tbl, hash);
+	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
 }
 
 static u32 head_hashfn(const struct rhashtable *ht,
--
cgit v0.10.2
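For illustration only (this sketch is not part of the patch, and bucket_index() and full_hash are hypothetical names): during a resize both tables coexist, so the same unmasked hash must be reduced with each table's own mask, which is why key_hashfn() may no longer apply a bucket mask itself. Table sizes are powers of two as in rhashtable, so "hash & (size - 1)" models rht_bucket_index().

    #include <stdio.h>
    #include <stdint.h>

    /* Per-table masking of one full hash value (user-space sketch). */
    static uint32_t bucket_index(uint32_t full_hash, uint32_t tbl_size)
    {
            return full_hash & (tbl_size - 1);      /* per-table mask */
    }

    int main(void)
    {
            uint32_t full_hash = 0x2f6a5b3d;        /* stand-in for a key_hashfn() result */

            /* During an expansion the old and new table need different masks. */
            printf("old table (size 4): bucket %u\n", (unsigned)bucket_index(full_hash, 4));
            printf("new table (size 8): bucket %u\n", (unsigned)bucket_index(full_hash, 8));
            return 0;
    }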
From a5ec68e3b8f2c95ea1a5d23dd543abbe0c8d0624 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:32 +0100
Subject: rhashtable: Use a single bucket lock for sibling buckets

rhashtable currently allows the use of a bucket lock per bucket. This
requires multiple levels of complicated nested locking because, when
resizing, a single bucket of the smaller table will map to two buckets
in the larger table. So far rhashtable has explicitly locked both
buckets in the larger table.

By excluding the highest bit of the hash from the bucket lock map, and
thus only allowing locks to buckets in a ratio of 1:2, the locking can
be simplified a lot without losing the benefits of multiple locks.
Larger tables which benefit from multiple locks will not have a single
lock per bucket anyway.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 71fd0dd..cea4244 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,7 +1,7 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2014 Thomas Graf
+ * Copyright (c) 2014-2015 Thomas Graf
  * Copyright (c) 2008-2014 Patrick McHardy
  *
  * Based on the following paper:
@@ -34,12 +34,17 @@
 enum {
 	RHT_LOCK_NORMAL,
 	RHT_LOCK_NESTED,
-	RHT_LOCK_NESTED2,
 };
 
 /* The bucket lock is selected based on the hash and protects mutations
  * on a group of hash buckets.
  *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contains
+ * entries which link to the same bucket of the old table during resizing.
+ * This allows to simplify the locking as locking the bucket in both
+ * tables during resize always guarantee protection.
+ *
  * IMPORTANT: When holding the bucket lock of both the old and new table
  * during expansions and shrinking, the old bucket lock must always be
  * acquired first.
@@ -128,8 +133,8 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
 	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
-	/* Never allocate more than one lock per bucket */
-	size = min_t(unsigned int, size, tbl->size);
+	/* Never allocate more than 0.5 locks per bucket */
+	size = min_t(unsigned int, size, tbl->size >> 1);
 
 	if (sizeof(spinlock_t) != 0) {
 #ifdef CONFIG_NUMA
@@ -211,13 +216,36 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 }
 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
-static void hashtable_chain_unzip(const struct rhashtable *ht,
+static void lock_buckets(struct bucket_table *new_tbl,
+			 struct bucket_table *old_tbl, unsigned int hash)
+	__acquires(old_bucket_lock)
+{
+	spin_lock_bh(bucket_lock(old_tbl, hash));
+	if (new_tbl != old_tbl)
+		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
+				    RHT_LOCK_NESTED);
+}
+
+static void unlock_buckets(struct bucket_table *new_tbl,
+			   struct bucket_table *old_tbl, unsigned int hash)
+	__releases(old_bucket_lock)
+{
+	if (new_tbl != old_tbl)
+		spin_unlock_bh(bucket_lock(new_tbl, hash));
+	spin_unlock_bh(bucket_lock(old_tbl, hash));
+}
+
+/**
+ * Unlink entries on bucket which hash to different bucket.
+ *
+ * Returns true if no more work needs to be performed on the bucket.
+ */
+static bool hashtable_chain_unzip(const struct rhashtable *ht,
 				  const struct bucket_table *new_tbl,
 				  struct bucket_table *old_tbl,
 				  size_t old_hash)
 {
 	struct rhash_head *he, *p, *next;
-	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
 	unsigned int new_hash, new_hash2;
 
 	ASSERT_BUCKET_LOCK(old_tbl, old_hash);
@@ -226,10 +254,10 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
 				   old_hash);
 	if (rht_is_a_nulls(p))
-		return;
+		return false;
 
-	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
-	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	new_hash = head_hashfn(ht, new_tbl, p);
+	ASSERT_BUCKET_LOCK(new_tbl, new_hash);
 
 	/* Advance the old bucket pointer one or more times until it
 	 * reaches a node that doesn't hash to the same bucket as the
@@ -237,22 +265,14 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 	 */
 	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
 		new_hash2 = head_hashfn(ht, new_tbl, he);
+		ASSERT_BUCKET_LOCK(new_tbl, new_hash2);
+
 		if (new_hash != new_hash2)
 			break;
 
 		p = he;
 	}
 
 	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
 
-	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
-
-	/* If we have encountered an entry that maps to a different bucket in
-	 * the new table, lock down that bucket as well as we might cut off
-	 * the end of the chain.
-	 */
-	new_bucket_lock2 = bucket_lock(new_tbl, new_hash);
-	if (new_bucket_lock != new_bucket_lock2)
-		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
-
 	/* Find the subsequent node which does hash to the same
 	 * bucket as node P, or NULL if no such node exists.
 	 */
@@ -271,21 +291,16 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 	 */
 	rcu_assign_pointer(p->next, next);
 
-	if (new_bucket_lock != new_bucket_lock2)
-		spin_unlock_bh(new_bucket_lock2);
-	spin_unlock_bh(new_bucket_lock);
+	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
+				   old_hash);
+
+	return !rht_is_a_nulls(p);
 }
 
 static void link_old_to_new(struct bucket_table *new_tbl,
 			    unsigned int new_hash, struct rhash_head *entry)
 {
-	spinlock_t *new_bucket_lock;
-
-	new_bucket_lock = bucket_lock(new_tbl, new_hash);
-
-	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
 	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
-	spin_unlock_bh(new_bucket_lock);
 }
 
 /**
@@ -308,7 +323,6 @@ int rhashtable_expand(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head *he;
-	spinlock_t *old_bucket_lock;
 	unsigned int new_hash, old_hash;
 	bool complete = false;
 
@@ -338,16 +352,14 @@ int rhashtable_expand(struct rhashtable *ht)
 	 */
 	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
 		old_hash = rht_bucket_index(old_tbl, new_hash);
-		old_bucket_lock = bucket_lock(old_tbl, old_hash);
-
-		spin_lock_bh(old_bucket_lock);
+		lock_buckets(new_tbl, old_tbl, new_hash);
 		rht_for_each(he, old_tbl, old_hash) {
 			if (head_hashfn(ht, new_tbl, he) == new_hash) {
 				link_old_to_new(new_tbl, new_hash, he);
 				break;
 			}
 		}
-		spin_unlock_bh(old_bucket_lock);
+		unlock_buckets(new_tbl, old_tbl, new_hash);
 	}
 
 	/* Publish the new table pointer. Lookups may now traverse
@@ -370,18 +382,13 @@ int rhashtable_expand(struct rhashtable *ht)
 		 */
 		complete = true;
 		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
-			struct rhash_head *head;
+			lock_buckets(new_tbl, old_tbl, old_hash);
 
-			old_bucket_lock = bucket_lock(old_tbl, old_hash);
-			spin_lock_bh(old_bucket_lock);
-
-			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
-			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
-						      old_tbl, old_hash);
-			if (!rht_is_a_nulls(head))
+			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
+						  old_hash))
 				complete = false;
 
-			spin_unlock_bh(old_bucket_lock);
+			unlock_buckets(new_tbl, old_tbl, old_hash);
 		}
 	}
 
@@ -409,7 +416,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
-	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
 	unsigned int new_hash;
 
 	ASSERT_RHT_MUTEX(ht);
@@ -427,36 +433,16 @@ int rhashtable_shrink(struct rhashtable *ht)
 	 * always divide the size in half when shrinking, each bucket
 	 * in the new table maps to exactly two buckets in the old
 	 * table.
-	 *
-	 * As removals can occur concurrently on the old table, we need
-	 * to lock down both matching buckets in the old table.
 	 */
 	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
-		old_bucket_lock1 = bucket_lock(tbl, new_hash);
-		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
-		new_bucket_lock = bucket_lock(new_tbl, new_hash);
-
-		spin_lock_bh(old_bucket_lock1);
-
-		/* Depending on the lock per buckets mapping, the bucket in
-		 * the lower and upper region may map to the same lock.
-		 */
-		if (old_bucket_lock1 != old_bucket_lock2) {
-			spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
-			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
-		} else {
-			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
-		}
+		lock_buckets(new_tbl, tbl, new_hash);
 
 		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 				   tbl->buckets[new_hash]);
 		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 				   tbl->buckets[new_hash + new_tbl->size]);
 
-		spin_unlock_bh(new_bucket_lock);
-		if (old_bucket_lock1 != old_bucket_lock2)
-			spin_unlock_bh(old_bucket_lock2);
-		spin_unlock_bh(old_bucket_lock1);
+		unlock_buckets(new_tbl, tbl, new_hash);
 	}
 
 	/* Publish the new, valid hash table */
@@ -547,19 +533,18 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
  */
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
-	struct bucket_table *tbl;
-	spinlock_t *lock;
+	struct bucket_table *tbl, *old_tbl;
 	unsigned hash;
 
 	rcu_read_lock();
 
 	tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	hash = head_hashfn(ht, tbl, obj);
 
-	lock = bucket_lock(tbl, hash);
-	spin_lock_bh(lock);
+	lock_buckets(tbl, old_tbl, hash);
 	__rhashtable_insert(ht, obj, tbl, hash);
-	spin_unlock_bh(lock);
+	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
 }
@@ -582,21 +567,20 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  */
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl, *new_tbl, *old_tbl;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
-	spinlock_t *lock;
-	unsigned int hash;
+	unsigned int hash, new_hash;
 	bool ret = false;
 
 	rcu_read_lock();
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = head_hashfn(ht, tbl, obj);
-
-	lock = bucket_lock(tbl, hash);
-	spin_lock_bh(lock);
+	tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	new_hash = head_hashfn(ht, new_tbl, obj);
+	lock_buckets(new_tbl, old_tbl, new_hash);
 
 restart:
+	hash = rht_bucket_index(tbl, new_hash);
 	pprev = &tbl->buckets[hash];
 	rht_for_each(he, tbl, hash) {
 		if (he != obj) {
@@ -615,18 +599,12 @@ restart:
 	 * resizing. Thus traversing both is fine and the added cost is
 	 * very rare.
 	 */
-	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
-		spin_unlock_bh(lock);
-
-		tbl = rht_dereference_rcu(ht->future_tbl, ht);
-		hash = head_hashfn(ht, tbl, obj);
-
-		lock = bucket_lock(tbl, hash);
-		spin_lock_bh(lock);
+	if (tbl != new_tbl) {
+		tbl = new_tbl;
 		goto restart;
 	}
 
-	spin_unlock_bh(lock);
+	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
 		atomic_dec(&ht->nelems);
@@ -782,24 +760,17 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 				      void *arg)
 {
 	struct bucket_table *new_tbl, *old_tbl;
-	spinlock_t *new_bucket_lock, *old_bucket_lock;
-	u32 new_hash, old_hash;
+	u32 new_hash;
 	bool success = true;
 
 	BUG_ON(!ht->p.key_len);
 
 	rcu_read_lock();
-
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	old_hash = head_hashfn(ht, old_tbl, obj);
-	old_bucket_lock = bucket_lock(old_tbl, old_hash);
-	spin_lock_bh(old_bucket_lock);
-
 	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
 	new_hash = head_hashfn(ht, new_tbl, obj);
-	new_bucket_lock = bucket_lock(new_tbl, new_hash);
-	if (unlikely(old_tbl != new_tbl))
-		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+	lock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
 				      compare, arg)) {
@@ -810,10 +781,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 	__rhashtable_insert(ht, obj, new_tbl, new_hash);
 
 exit:
-	if (unlikely(old_tbl != new_tbl))
-		spin_unlock_bh(new_bucket_lock);
-	spin_unlock_bh(old_bucket_lock);
-
+	unlock_buckets(new_tbl, old_tbl, new_hash);
 	rcu_read_unlock();
 
 	return success;
--
cgit v0.10.2
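A small user-space sketch of the invariant the commit above relies on (illustrative only, not from the patch; nlocks and locks_mask are hypothetical local names): with the lock count capped at half the table size, and both counts powers of two, a bucket and its resize sibling (hash and hash + old_size) always select the same lock, so locking one bucket of the larger table implicitly covers its sibling.

    #include <assert.h>
    #include <stdint.h>

    /* Lock selection modeled after bucket_lock(): locks[hash & locks_mask],
     * with at most new_size / 2 locks as in alloc_bucket_locks().
     */
    int main(void)
    {
            uint32_t old_size = 8, new_size = 16;   /* an expansion doubles the table */
            uint32_t nlocks = new_size / 2;         /* cap: 0.5 locks per bucket */
            uint32_t locks_mask = nlocks - 1;

            for (uint32_t hash = 0; hash < old_size; hash++) {
                    /* The two new-table buckets fed by old bucket "hash" ... */
                    uint32_t b1 = hash, b2 = hash + old_size;
                    /* ... always share a single bucket lock. */
                    assert((b1 & locks_mask) == (b2 & locks_mask));
            }
            return 0;
    }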
From 2af4b52988fd4f7ae525fcada29d4db8680033d6 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:33 +0100
Subject: rhashtable: Wait for RCU readers after final unzip work

We need to wait for all RCU readers to complete after the last bit of
unzipping has been completed. Otherwise the old table is freed up
prematurely.

Fixes: 7e1e77636e36 ("lib: Resizable, Scalable, Concurrent Hash Table")
Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cea4244..fd1033d 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -392,6 +392,8 @@ int rhashtable_expand(struct rhashtable *ht)
 		}
 	}
 
+	synchronize_rcu();
+
 	bucket_table_free(old_tbl);
 	return 0;
 }
--
cgit v0.10.2
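The fix follows the usual writer-side RCU pattern: publish the new state, wait for pre-existing readers, and only then free the old memory. A generic kernel-style sketch for illustration (struct cfg, active_cfg, replace_cfg and cfg_mutex are hypothetical; the usual <linux/rcupdate.h>, <linux/mutex.h> and <linux/slab.h> includes are assumed):

    struct cfg {
            int val;
    };

    static DEFINE_MUTEX(cfg_mutex);         /* serializes writers */
    static struct cfg __rcu *active_cfg;

    static void replace_cfg(struct cfg *new_cfg)
    {
            struct cfg *old_cfg;

            mutex_lock(&cfg_mutex);
            old_cfg = rcu_dereference_protected(active_cfg,
                                                lockdep_is_held(&cfg_mutex));
            rcu_assign_pointer(active_cfg, new_cfg);  /* publish the new version */
            mutex_unlock(&cfg_mutex);

            synchronize_rcu();   /* wait for readers that may still see old_cfg */
            kfree(old_cfg);      /* only now is the old version unreachable */
    }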
From a03eaec0df52a0f1fd37ebf7dcb2dc505d891255 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:34 +0100
Subject: rhashtable: Dump bucket tables on locking violation under PROVE_LOCKING

This simplifies debugging of locking violations if compiled with
CONFIG_PROVE_LOCKING.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index fd1033d..c2c3949 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -54,26 +54,6 @@ static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
 	return &tbl->locks[hash & tbl->locks_mask];
 }
 
-#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
-#define ASSERT_BUCKET_LOCK(TBL, HASH) \
-	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
-
-#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(struct rhashtable *ht)
-{
-	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
-}
-EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
-
-int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
-{
-	spinlock_t *lock = bucket_lock(tbl, hash);
-
-	return (debug_locks) ? lockdep_is_held(lock) : 1;
-}
-EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
-#endif
-
 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
 {
 	return (void *) he - ht->p.head_offset;
@@ -109,6 +89,77 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
 }
 
+#ifdef CONFIG_PROVE_LOCKING
+static void debug_dump_buckets(const struct rhashtable *ht,
+			       const struct bucket_table *tbl)
+{
+	struct rhash_head *he;
+	unsigned int i, hash;
+
+	for (i = 0; i < tbl->size; i++) {
+		pr_warn(" [Bucket %d] ", i);
+		rht_for_each_rcu(he, tbl, i) {
+			hash = head_hashfn(ht, tbl, he);
+			pr_cont("[hash = %#x, lock = %p] ",
+				hash, bucket_lock(tbl, hash));
+		}
+		pr_cont("\n");
+	}
+
+}
+
+static void debug_dump_table(struct rhashtable *ht,
+			     const struct bucket_table *tbl,
+			     unsigned int hash)
+{
+	struct bucket_table *old_tbl, *future_tbl;
+
+	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
+		 hash, tbl);
+
+	rcu_read_lock();
+	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	if (future_tbl != old_tbl) {
+		pr_warn("Future table %p (size: %zd)\n",
+			future_tbl, future_tbl->size);
+		debug_dump_buckets(ht, future_tbl);
+	}
+
+	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
+	debug_dump_buckets(ht, old_tbl);
+
+	rcu_read_unlock();
+}
+
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
+	do {								\
+		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
+			debug_dump_table(HT, TBL, HASH);		\
+			BUG();						\
+		}							\
+	} while (0)
+
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+	spinlock_t *lock = bucket_lock(tbl, hash);
+
+	return (debug_locks) ? lockdep_is_held(lock) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
+#else
+#define ASSERT_RHT_MUTEX(HT)
+#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
+#endif
+
+
 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
 {
 	struct rhash_head __rcu **pprev;
@@ -240,7 +291,7 @@ static void unlock_buckets(struct bucket_table *new_tbl,
  *
  * Returns true if no more work needs to be performed on the bucket.
  */
-static bool hashtable_chain_unzip(const struct rhashtable *ht,
+static bool hashtable_chain_unzip(struct rhashtable *ht,
 				  const struct bucket_table *new_tbl,
 				  struct bucket_table *old_tbl,
 				  size_t old_hash)
@@ -248,7 +299,7 @@ static bool hashtable_chain_unzip(const struct rhashtable *ht,
 	struct rhash_head *he, *p, *next;
 	unsigned int new_hash, new_hash2;
 
-	ASSERT_BUCKET_LOCK(old_tbl, old_hash);
+	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);
 
 	/* Old bucket empty, no work needed. */
 	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
@@ -257,7 +308,7 @@ static bool hashtable_chain_unzip(const struct rhashtable *ht,
 		return false;
 
 	new_hash = head_hashfn(ht, new_tbl, p);
-	ASSERT_BUCKET_LOCK(new_tbl, new_hash);
+	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
 
 	/* Advance the old bucket pointer one or more times until it
 	 * reaches a node that doesn't hash to the same bucket as the
@@ -265,7 +316,7 @@ static bool hashtable_chain_unzip(const struct rhashtable *ht,
 	 */
 	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
 		new_hash2 = head_hashfn(ht, new_tbl, he);
-		ASSERT_BUCKET_LOCK(new_tbl, new_hash2);
+		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);
 
 		if (new_hash != new_hash2)
 			break;
--
cgit v0.10.2

From 7cd10db8de2b6a32ccabef2e0e01c7444faa49d4 Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:35 +0100
Subject: rhashtable: Add more lock verification

Catch hash miscalculations which result in hard to track down race
conditions.

Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c2c3949..ef0816b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -348,9 +348,11 @@ static bool hashtable_chain_unzip(struct rhashtable *ht,
 	return !rht_is_a_nulls(p);
 }
 
-static void link_old_to_new(struct bucket_table *new_tbl,
+static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
 			    unsigned int new_hash, struct rhash_head *entry)
 {
+	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);
+
 	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
 }
 
@@ -406,7 +408,7 @@ int rhashtable_expand(struct rhashtable *ht)
 		lock_buckets(new_tbl, old_tbl, new_hash);
 		rht_for_each(he, old_tbl, old_hash) {
 			if (head_hashfn(ht, new_tbl, he) == new_hash) {
-				link_old_to_new(new_tbl, new_hash, he);
+				link_old_to_new(ht, new_tbl, new_hash, he);
 				break;
 			}
 		}
@@ -492,6 +494,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 
 		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 				   tbl->buckets[new_hash]);
+		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
 		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 				   tbl->buckets[new_hash + new_tbl->size]);
 
@@ -557,6 +560,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
 							 tbl, hash);
 
+	ASSERT_BUCKET_LOCK(ht, tbl, hash);
+
 	if (rht_is_a_nulls(head))
 		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
 	else
@@ -641,6 +646,7 @@ restart:
 			continue;
 		}
 
+		ASSERT_BUCKET_LOCK(ht, tbl, hash);
 		rcu_assign_pointer(*pprev, obj->next);
 		ret = true;
--
cgit v0.10.2

From cf52d52f9ccb9966ac019d9f79824195583e3e6c Mon Sep 17 00:00:00 2001
From: Thomas Graf
Date: Thu, 5 Feb 2015 02:03:36 +0100
Subject: rhashtable: Avoid bucket cross reference after removal

During a resize, two buckets in the larger table map to a single bucket
in the smaller table, and the new table may already have been (partially)
linked to the old table. Removal of an element may then cause the bucket
in the larger table to point to entries which all hash to a different
value than the bucket index, so that two buckets point to the same sub
chain after unzipping. This is not illegal *during* the resize phase, but
it is once the resize has completed.

Keep the old table around until all of the unzipping is done, to allow
the removal code to search only for entries with a matching hash during
this special period.
Reported-by: Ying Xue
Fixes: 97defe1ecf86 ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Signed-off-by: Thomas Graf
Signed-off-by: David S. Miller

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ef0816b..5919d63 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -415,12 +415,6 @@ int rhashtable_expand(struct rhashtable *ht)
 		unlock_buckets(new_tbl, old_tbl, new_hash);
 	}
 
-	/* Publish the new table pointer. Lookups may now traverse
-	 * the new table, but they will not benefit from any
-	 * additional efficiency until later steps unzip the buckets.
-	 */
-	rcu_assign_pointer(ht->tbl, new_tbl);
-
 	/* Unzip interleaved hash chains */
 	while (!complete && !ht->being_destroyed) {
 		/* Wait for readers. All new readers will see the new
@@ -445,6 +439,7 @@ int rhashtable_expand(struct rhashtable *ht)
 		}
 	}
 
+	rcu_assign_pointer(ht->tbl, new_tbl);
 	synchronize_rcu();
 
 	bucket_table_free(old_tbl);
@@ -627,14 +622,14 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl, *new_tbl, *old_tbl;
 	struct rhash_head __rcu **pprev;
-	struct rhash_head *he;
+	struct rhash_head *he, *he2;
 	unsigned int hash, new_hash;
 	bool ret = false;
 
 	rcu_read_lock();
 	tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 	lock_buckets(new_tbl, old_tbl, new_hash);
 
 restart:
@@ -647,8 +642,21 @@ restart:
 		}
 
 		ASSERT_BUCKET_LOCK(ht, tbl, hash);
-		rcu_assign_pointer(*pprev, obj->next);
+		if (unlikely(new_tbl != tbl)) {
+			rht_for_each_continue(he2, he->next, tbl, hash) {
+				if (head_hashfn(ht, tbl, he2) == hash) {
+					rcu_assign_pointer(*pprev, he2);
+					goto found;
+				}
+			}
+
+			INIT_RHT_NULLS_HEAD(*pprev, ht, hash);
+		} else {
+			rcu_assign_pointer(*pprev, obj->next);
+		}
+
+found:
 		ret = true;
 		break;
 	}
--
cgit v0.10.2
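To make the removal rule from the last patch concrete, here is a hedged user-space sketch (struct node, next_same_bucket and remove_interleaved are hypothetical names, not the kernel code) of what the rht_for_each_continue() loop achieves while the chain is still interleaved: the predecessor is relinked to the next entry that hashes to the same bucket rather than simply to obj->next, so the bucket never ends up referencing entries that belong to its sibling.

    #include <stddef.h>

    struct node {
            struct node *next;
            unsigned int bucket;    /* bucket this entry hashes to in the larger table */
    };

    /* Find the next entry in the interleaved chain with a matching bucket. */
    static struct node *next_same_bucket(struct node *start, unsigned int bucket)
    {
            struct node *n;

            for (n = start; n; n = n->next)
                    if (n->bucket == bucket)
                            return n;
            return NULL;            /* models nulls-terminating the chain */
    }

    /* Remove obj from an interleaved chain without cross-referencing the sibling. */
    static void remove_interleaved(struct node **pprev, struct node *obj)
    {
            *pprev = next_same_bucket(obj->next, obj->bucket);
    }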