}
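+/*
+ * Map @hash to its head bucket. @map->n_buckets is always a power of
+ * two, so masking with (n_buckets - 1) is equivalent to a modulo.
+ */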
static inline
-struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
+struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
return &map->buckets[hash & (map->n_buckets - 1)];
}
* Call with at least a bucket lock held.
* @map should be the value read before acquiring the lock (or locks).
*/
-static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
+static inline bool qht_map_is_stale__locked(const struct qht *ht,
+ const struct qht_map *map)
{
return map != ht->map;
}
return b;
}
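+/*
+ * @map->n_added_buckets counts the buckets chained beyond the head
+ * bucket array; writers update it concurrently, hence the atomic_read.
+ */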
-static inline bool qht_map_needs_resize(struct qht_map *map)
+static inline bool qht_map_needs_resize(const struct qht_map *map)
{
return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}
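+/*
+ * Free the buckets chained off @head. @head itself is part of the
+ * map's bucket array and is therefore not freed here.
+ */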
-static inline void qht_chain_destroy(struct qht_bucket *head)
+static inline void qht_chain_destroy(const struct qht_bucket *head)
{
struct qht_bucket *curr = head->next;
struct qht_bucket *prev;
}
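+/*
+ * Walk the chain starting at @head, returning the first entry that
+ * matches both @hash and func(entry, @userp). Runs inside a seqlock
+ * read critical section, so entries are read atomically.
+ */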
static inline
-void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
+void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
const void *userp, uint32_t hash)
{
- struct qht_bucket *b = head;
+ const struct qht_bucket *b = head;
int i;
do {
}
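+/*
+ * Slow path: keep retrying the read critical section until the
+ * bucket's seqlock reports no concurrent write.
+ */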
static __attribute__((noinline))
-void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
+void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
const void *userp, uint32_t hash)
{
unsigned int version;
return ret;
}
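+/*
+ * Lock-free lookup: read the bucket under its seqlock, falling back
+ * to the slow path only if a concurrent update is detected.
+ */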
-void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
qht_lookup_func_t func)
{
- struct qht_bucket *b;
- struct qht_map *map;
+ const struct qht_bucket *b;
+ const struct qht_map *map;
unsigned int version;
void *ret;
return qht_lookup__slowpath(b, func, userp, hash);
}
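+/* as qht_lookup_custom(), but using the default comparison function */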
-void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
+void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
-/* call with head->lock held */
-static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
+/*
+ * Call with head->lock held.
+ * @ht is const since it is only used for ht->cmp().
+ */
+static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
struct qht_bucket *head, void *p, uint32_t hash,
bool *needs_resize)
{
return false;
}
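+/* true if the entry at @pos is the last valid entry in the chain */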
-static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
+static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
if (pos == QHT_BUCKET_ENTRIES - 1) {
if (b->next == NULL) {
}
/*
- * Find the last valid entry in @head, and swap it with @orig[pos], which has
+ * Find the last valid entry in @orig, and swap it with @orig[pos], which has
* just been invalidated.
*/
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
return ret;
}
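+/*
+ * Visit each valid entry in the chain starting at @head. QHT_ITER_RM
+ * iterators may remove the entry being visited; the removal is done
+ * under the head bucket's seqlock write section.
+ */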
-static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *head,
+static inline void qht_bucket_iter(struct qht_bucket *head,
const struct qht_iter *iter, void *userp)
{
struct qht_bucket *b = head;
}
switch (iter->type) {
case QHT_ITER_VOID:
- iter->f.retvoid(ht, b->pointers[i], b->hashes[i], userp);
+ iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
break;
case QHT_ITER_RM:
- if (iter->f.retbool(ht, b->pointers[i], b->hashes[i], userp)) {
+ if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
/* replace i with the last valid element in the bucket */
seqlock_write_begin(&head->sequence);
qht_bucket_remove_entry(b, i);
}
/* call with all of the map's locks held */
-static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
+static inline void qht_map_iter__all_locked(struct qht_map *map,
const struct qht_iter *iter,
void *userp)
{
size_t i;
for (i = 0; i < map->n_buckets; i++) {
- qht_bucket_iter(ht, &map->buckets[i], iter, userp);
+ qht_bucket_iter(&map->buckets[i], iter, userp);
}
}
map = atomic_rcu_read(&ht->map);
qht_map_lock_buckets(map);
- /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
- qht_map_iter__all_locked(ht, map, iter, userp);
+ qht_map_iter__all_locked(map, iter, userp);
qht_map_unlock_buckets(map);
}
do_qht_iter(ht, &iter, userp);
}
-static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
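+/*
+ * Iterators no longer take the hash table as an argument, so bundle
+ * the table (needed by qht_insert__locked for ht->cmp) together with
+ * the destination map to fit both through the single @userp pointer.
+ */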
+struct qht_map_copy_data {
+ struct qht *ht;
+ struct qht_map *new;
+};
+
+static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
- struct qht_map *new = userp;
+ struct qht_map_copy_data *data = userp;
+ struct qht *ht = data->ht;
+ struct qht_map *new = data->new;
struct qht_bucket *b = qht_map_to_bucket(new, hash);
/* no need to acquire b->lock because no thread has seen this map yet */
.f.retvoid = qht_map_copy,
.type = QHT_ITER_VOID,
};
+ struct qht_map_copy_data data;
old = ht->map;
qht_map_lock_buckets(old);
}
g_assert(new->n_buckets != old->n_buckets);
- qht_map_iter__all_locked(ht, old, &iter, new);
+ data.ht = ht;
+ data.new = new;
+ qht_map_iter__all_locked(old, &iter, &data);
qht_map_debug__all_locked(new);
atomic_rcu_set(&ht->map, new);
}
/* pass @stats to qht_statistics_destroy() when done */
-void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
+void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
- struct qht_map *map;
+ const struct qht_map *map;
int i;
map = atomic_rcu_read(&ht->map);
stats->head_buckets = map->n_buckets;
for (i = 0; i < map->n_buckets; i++) {
- struct qht_bucket *head = &map->buckets[i];
- struct qht_bucket *b;
+ const struct qht_bucket *head = &map->buckets[i];
+ const struct qht_bucket *b;
unsigned int version;
size_t buckets;
size_t entries;