Git Repo - linux.git/blobdiff - lib/rhashtable.c
[linux.git] / lib / rhashtable.c
index e5c8586cf7174cfe0526dc8fb3314676601c5e57..ae4223e0f5bcb68610511b2cb7ba2e12b7f7d086 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/rhashtable.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
@@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
-                                             unsigned int shifted,
-                                             unsigned int nhash)
+                                             bool leaf)
 {
        union nested_table *ntbl;
        int i;
@@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 
        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 
-       if (ntbl && shifted) {
-               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
-                       INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
-                                           (i << shifted) | nhash);
+       if (ntbl && leaf) {
+               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+                       INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }
 
        rcu_assign_pointer(*prev, ntbl);
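
nested_table_alloc() no longer threads a per-level hash prefix (shifted/nhash) down the tree; callers only say whether the level being allocated is a leaf. That is possible because the hash-encoded nulls values are gone, so INIT_RHT_NULLS_HEAD() needs nothing but the bucket pointer, and the loop bound sizeof(ntbl[0]) equals the old sizeof(ntbl[0].bucket) since union nested_table only holds pointers. A minimal sketch of what the one-argument macro can reduce to (an approximation, not a verbatim copy of include/linux/rhashtable.h):

#include <linux/list_nulls.h>

/* Sketch only: with the per-bucket hash encoding removed, every empty
 * bucket can be terminated by the same constant nulls marker.  The exact
 * definition lives in include/linux/rhashtable.h.
 */
#define INIT_RHT_NULLS_HEAD(ptr) \
	((ptr) = (typeof(ptr))NULLS_MARKER(0))
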
@@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                return NULL;
 
        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-                               0, 0)) {
+                               false)) {
                kfree(tbl);
                return NULL;
        }
@@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
        tbl->hash_rnd = get_random_u32();
 
        for (i = 0; i < nbuckets; i++)
-               INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+               INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
 
        return tbl;
 }
@@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-       struct bucket_table *new_tbl = rhashtable_last_table(ht,
-               rht_dereference_rcu(old_tbl->future_tbl, ht));
+       struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
        struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
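
rhashtable_rehash_one() can hand old_tbl straight to rhashtable_last_table() because that helper already follows the future_tbl chain to its end, and the rehash worker holds ht->mutex so the chain is stable; starting one link earlier lands on the same final table. Roughly, the walk looks like this (a sketch under that assumption, not the verbatim kernel source):

static struct bucket_table *last_table_sketch(struct rhashtable *ht,
					      struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	/* follow future_tbl until the newest table in the chain is reached */
	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
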
@@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
 {
-       /* Protect future_tbl using the first bucket lock. */
-       spin_lock_bh(old_tbl->locks);
-
-       /* Did somebody beat us to it? */
-       if (rcu_access_pointer(old_tbl->future_tbl)) {
-               spin_unlock_bh(old_tbl->locks);
-               return -EEXIST;
-       }
-
        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
+        * As cmpxchg() provides strong barriers, we do not need
+        * rcu_assign_pointer().
         */
-       rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
-       spin_unlock_bh(old_tbl->locks);
+       if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+               return -EEXIST;
 
        return 0;
 }
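
Publishing future_tbl no longer needs the first bucket lock: a single cmpxchg() both detects that a concurrent rehash won the race and installs the new table, and on success it acts as a full memory barrier, giving the publication ordering that rcu_assign_pointer() used to provide. The pattern in isolation, using a hypothetical structure rather than the real bucket_table:

/* Hypothetical demo type; the real bucket_table has more fields. */
struct demo_table {
	struct demo_table *future_tbl;
};

static int attach_once(struct demo_table *old_tbl, struct demo_table *new_tbl)
{
	/* Exactly one caller installs @new_tbl; losers observe a non-NULL
	 * old value and back off with -EEXIST.  The successful cmpxchg()
	 * orders the initialisation of @new_tbl before the pointer becomes
	 * visible to readers.
	 */
	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}
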
@@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
        /* Do not fail the insert if someone else did a rehash. */
-       if (likely(rcu_dereference_raw(tbl->future_tbl)))
+       if (likely(rcu_access_pointer(tbl->future_tbl)))
                return 0;
 
        /* Schedule async rehash to retry allocation in process context. */
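
The failure path only asks whether somebody else already attached a future table; the pointer is never dereferenced, so rcu_access_pointer(), the primitive meant for value/NULL tests, documents the intent better than rcu_dereference_raw(). For contrast, with a hypothetical RCU-protected pointer:

struct demo_obj {
	int val;
};

static struct demo_obj __rcu *demo_ptr;	/* hypothetical, for illustration */

static void demo_check_vs_deref(void)
{
	struct demo_obj *p;

	/* value/NULL test only: no read-side critical section required */
	if (rcu_access_pointer(demo_ptr))
		pr_debug("demo_ptr is set\n");

	/* actually reading the object: rcu_read_lock() + rcu_dereference() */
	rcu_read_lock();
	p = rcu_dereference(demo_ptr);
	if (p)
		pr_debug("val=%d\n", p->val);
	rcu_read_unlock();
}
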
@@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);
 
-       new_tbl = rcu_dereference(tbl->future_tbl);
+       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (new_tbl)
                return new_tbl;
 
@@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                        break;
 
                spin_unlock_bh(lock);
-               tbl = rcu_dereference(tbl->future_tbl);
+               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }
 
        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
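
Both insert paths now use rht_dereference_rcu() instead of a bare rcu_dereference(): the rhashtable accessor widens the lockdep condition so the same code is valid either inside an RCU read-side critical section or with ht->mutex held, as in the resize path. The accessor is essentially:

/* Essentially the helper from include/linux/rhashtable.h. */
#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_is_held(&(ht)->mutex))
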
@@ -1002,7 +993,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
  *     .key_offset = offsetof(struct test_obj, key),
  *     .key_len = sizeof(int),
  *     .hashfn = jhash,
- *     .nulls_base = (1U << RHT_BASE_SHIFT),
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -1034,9 +1024,6 @@ int rhashtable_init(struct rhashtable *ht,
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
 
-       if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
-               return -EINVAL;
-
        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
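
With nulls_base removed from struct rhashtable_params there is nothing left for rhashtable_init() to validate, and callers simply drop the field. Reusing the test_obj layout from the kernel-doc example above (struct test_obj and its node member come from that example, not from this hunk), a configuration now looks roughly like:

static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct test_obj, node),
	.key_offset  = offsetof(struct test_obj, key),
	.key_len     = sizeof(int),
	.hashfn      = jhash,
	/* no .nulls_base: nulls markers are handled internally */
};

static struct rhashtable demo_ht;

static int demo_init(void)
{
	return rhashtable_init(&demo_ht, &demo_params);
}
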
@@ -1100,10 +1087,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
 {
        int err;
 
-       /* No rhlist NULLs marking for now. */
-       if (params->nulls_base)
-               return -EINVAL;
-
        err = rhashtable_init(&hlt->ht, params);
        hlt->ht.rhlist = true;
        return err;
@@ -1227,25 +1210,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        union nested_table *ntbl;
-       unsigned int shifted;
-       unsigned int nhash;
 
        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        hash >>= tbl->nest;
-       nhash = index;
-       shifted = tbl->nest;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                 size <= (1 << shift) ? shifted : 0, nhash);
+                                 size <= (1 << shift));
 
        while (ntbl && size > (1 << shift)) {
                index = hash & ((1 << shift) - 1);
                size >>= shift;
                hash >>= shift;
-               nhash |= index << shifted;
-               shifted += shift;
                ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                         size <= (1 << shift) ? shifted : 0,
-                                         nhash);
+                                         size <= (1 << shift));
        }
 
        if (!ntbl)
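
With the shifted/nhash bookkeeping gone, rht_bucket_nested_insert() only tracks how many hash bits remain: the top level consumes tbl->nest bits, each interior level consumes another shift bits, and a level is a leaf exactly when the remaining size fits into one page-sized table, which is the only case where bucket pointers need nulls markers. The index arithmetic in isolation (a standalone sketch with hypothetical names, no allocation or error handling):

static unsigned int nested_bucket_slot(unsigned int hash, unsigned int tbl_size,
				       unsigned int nest, unsigned int shift)
{
	unsigned int size = tbl_size >> nest;	/* entries below the top-level page */

	/* the top level consumes @nest bits of the hash */
	hash >>= nest;

	/* each interior level consumes another @shift bits */
	while (size > (1U << shift)) {
		size >>= shift;
		hash >>= shift;
	}

	/* size now fits in a single leaf table; the remaining hash bits are
	 * the bucket slot inside it, and only such leaves have their bucket
	 * pointers initialised to nulls markers
	 */
	return hash;
}
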