/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;
		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
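
/*
 * Worked example (illustrative numbers, not from this file): on an 8-CPU
 * machine with the default locks_mul of 32, size starts as
 * roundup_pow_of_two(8 * 32) = 256.  A 128-bucket table then clamps this
 * to 128 >> 1 = 64 locks (at most one lock per two buckets), and
 * locks_mask becomes 63.
 */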
static void nested_table_free(union nested_table *ntbl, unsigned int size)
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;

	ntbl = rcu_dereference_raw(ntbl->table);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

static void nested_bucket_table_free(const struct bucket_table *tbl)
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

static void bucket_table_free(const struct bucket_table *tbl)
	nested_bucket_table_free(tbl);

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);
	return ntbl;
}
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;

	if (nbuckets < (1 << (shift + 1)))

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
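
/*
 * Worked example (illustrative): with 4 KiB pages and 8-byte pointers,
 * shift is 9, so each nested page holds 512 pointers.  Asking for 2^20
 * buckets gives tbl->nest = (20 - 1) % 9 + 1 = 2: lookups consume 2 bits
 * at the top level and then 9 bits per 512-entry nested page
 * (2 + 9 + 9 = 20 bits).
 */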
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
	struct bucket_table *tbl = NULL;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
	struct bucket_table *new_tbl;

		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))

		pprev = &entry->next;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;
	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that will not
 * cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
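
/*
 * Worked example (illustrative): with 500 elements left in a 4096-bucket
 * table the target is roundup_pow_of_two(500 * 3 / 2) = 1024 buckets
 * (never below p.min_size), so the table shrinks.  With 3000 elements the
 * target would be 8192 > 4096 and the shrink is skipped.
 */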
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	if (rht_grow_above_75(ht, tbl))
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
		bucket_table_free(new_tbl);

	schedule_work(&ht->run_work);

	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))

	/* Schedule async rehash to retry allocation in process context. */
	schedule_work(&ht->run_work);
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
	struct rhashtable_compare_arg arg = {
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;

	elasticity = ht->elasticity;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

			     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
			     rhashtable_compare(&arg, rht_obj(ht, head))))

			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

	return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);

		if (tbl->rehash <= hash)

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)

		data = rhashtable_try_insert(ht, key, obj);

	} while (PTR_ERR(data) == -EAGAIN);

EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
				list = container_of(p, struct rhlist_head,

				list = rcu_dereference(list->next);

	if (!rht_is_a_nulls(p)) {

		return rht_obj(ht, rhlist ? &list->rhead : p);

	/* Ensure we see any new tables. */

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {

		return ERR_PTR(-EAGAIN);

EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;
	if (!tbl)
		goto out;
	ht = iter->ht;
	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);
	iter->p = NULL;
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
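
/*
 * Illustrative walker usage sketch (not part of this file; the object type
 * and error handling are assumptions).  It follows the
 * rhashtable_walk_enter/start/next/stop/exit contract documented above:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);	// may sleep
 *	rhashtable_walk_start(&iter);		// takes the RCU read lock
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)	// resize; iterator rewound
 *				continue;
 *			break;
 *		}
 *		// use obj under RCU protection
 *	}
 *	rhashtable_walk_stop(&iter);		// drops the RCU read lock
 *	rhashtable_walk_exit(&iter);
 */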
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on its size.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
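
/*
 * Minimal usage sketch (illustrative only: the struct, variables and error
 * handling are hypothetical).  It mirrors Configuration Example 1 above and
 * uses the fast-path helpers declared in <linux/rhashtable.h>:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	rcu_read_unlock();
 *
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 */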
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;
	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;

	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
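
/*
 * Usage sketch (illustrative; struct and variables are hypothetical): an
 * rhltable chains all objects that share a key on one rhlist, using the
 * rhltable helpers declared in <linux/rhashtable.h>:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhlist_head	list_node;
 *	};
 *
 *	err = rhltable_init(&hlt, &test_params);
 *	err = rhltable_insert(&hlt, &obj->list_node, test_params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		;	// visits every object whose key matches
 *	rcu_read_unlock();
 *
 *	err = rhltable_remove(&hlt, &obj->list_node, test_params);
 */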
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
	struct rhlist_head *list;

		free_fn(rht_obj(ht, obj), arg);

	list = container_of(obj, struct rhlist_head, rhead);

		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller must ensure that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
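
/*
 * Teardown sketch (illustrative; free_obj is a hypothetical callback): once
 * no further inserts or removals can run, every element and the bucket
 * array can be released in one call:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 *
 * Passing a NULL free_fn (as rhashtable_destroy() below does) frees only
 * the table itself and leaves the elements untouched.
 */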
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);