#define QHT_BUCKET_ENTRIES 4
#endif
+/*
+ * Do _not_ use qemu_mutex_[try]lock directly! Use these wrappers, otherwise
+ * the profiler (QSP) will deadlock.
+ */
+static inline void qht_lock(struct qht *ht)
+{
+ if (ht->mode & QHT_MODE_RAW_MUTEXES) {
+ qemu_mutex_lock__raw(&ht->lock);
+ } else {
+ qemu_mutex_lock(&ht->lock);
+ }
+}
+
+static inline int qht_trylock(struct qht *ht)
+{
+ if (ht->mode & QHT_MODE_RAW_MUTEXES) {
+ return qemu_mutex_trylock__raw(&ht->lock);
+ }
+ return qemu_mutex_trylock(&ht->lock);
+}
+
+/* this inline is not really necessary, but it helps keep code consistent */
+static inline void qht_unlock(struct qht *ht)
+{
+ qemu_mutex_unlock(&ht->lock);
+}
+
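For context, a hedged sketch (not part of the patch) of why QHT_MODE_RAW_MUTEXES exists: a table used from inside the sync profiler itself must take its mutex through the raw, uninstrumented primitives, otherwise locking it would re-enter the profiler. The table name, cmp callback and size below are illustrative only.

#include "qemu/osdep.h"
#include "qemu/qht.h"

/* illustrative cmp callback; any bool (*)(const void *, const void *) works */
static bool example_profiler_cmp(const void *a, const void *b)
{
    return a == b; /* placeholder comparison for the sketch */
}

static struct qht example_profiler_ht;

static void example_profiler_table_init(void)
{
    /*
     * RAW_MUTEXES makes qht_lock()/qht_trylock() above fall through to
     * qemu_mutex_lock__raw()/qemu_mutex_trylock__raw(), bypassing the
     * profiler's instrumentation; the size (1024 elements) is arbitrary.
     */
    qht_init(&example_profiler_ht, example_profiler_cmp, 1024,
             QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);
}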
/*
* Note: reading partially-updated pointers in @pointers could lead to
* segfaults. We thus access them with atomic_read/set; this guarantees
qht_map_unlock_buckets(map);
/* we raced with a resize; acquire ht->lock to see the updated ht->map */
- qemu_mutex_lock(&ht->lock);
+ qht_lock(ht);
map = ht->map;
qht_map_lock_buckets(map);
- qemu_mutex_unlock(&ht->lock);
+ qht_unlock(ht);
*pmap = map;
return;
}
qemu_spin_unlock(&b->lock);
/* we raced with a resize; acquire ht->lock to see the updated ht->map */
- qemu_mutex_lock(&ht->lock);
+ qht_lock(ht);
map = ht->map;
b = qht_map_to_bucket(map, hash);
qemu_spin_lock(&b->lock);
- qemu_mutex_unlock(&ht->lock);
+ qht_unlock(ht);
*pmap = map;
return b;
}
return map;
}
-void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
+void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
+ unsigned int mode)
{
struct qht_map *map;
size_t n_buckets = qht_elems_to_buckets(n_elems);
+ g_assert(cmp);
+ ht->cmp = cmp;
ht->mode = mode;
qemu_mutex_init(&ht->lock);
map = qht_map_create(n_buckets);
n_buckets = qht_elems_to_buckets(n_elems);
- qemu_mutex_lock(&ht->lock);
+ qht_lock(ht);
map = ht->map;
if (n_buckets != map->n_buckets) {
new = qht_map_create(n_buckets);
}
qht_do_resize_and_reset(ht, new);
- qemu_mutex_unlock(&ht->lock);
+ qht_unlock(ht);
return !!new;
}
return ret;
}
-void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
- uint32_t hash)
+void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
+ qht_lookup_func_t func)
{
struct qht_bucket *b;
struct qht_map *map;
return qht_lookup__slowpath(b, func, userp, hash);
}
+void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
+{
+ return qht_lookup_custom(ht, userp, hash, ht->cmp);
+}
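A hedged usage sketch, not part of the patch: once a default cmp has been registered via qht_init(), callers that previously passed a comparison function to every lookup can use the shorter qht_lookup(), while qht_lookup_custom() remains available for ad-hoc predicates. The entry type and helpers below are made up for illustration.

struct example_entry {
    uint32_t key;
    void *data;
};

/* default comparison registered at qht_init() time (illustrative) */
static bool example_entry_cmp(const void *ap, const void *bp)
{
    const struct example_entry *a = ap;
    const struct example_entry *b = bp;

    return a->key == b->key;
}

static void *example_find(struct qht *ht, uint32_t key, uint32_t hash)
{
    struct example_entry desc = { .key = key };

    /* uses ht->cmp, i.e. example_entry_cmp, under the hood */
    return qht_lookup(ht, &desc, hash);
}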
+
/* call with head->lock held */
-static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
- struct qht_bucket *head, void *p, uint32_t hash,
- bool *needs_resize)
+static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
+ struct qht_bucket *head, void *p, uint32_t hash,
+ bool *needs_resize)
{
struct qht_bucket *b = head;
struct qht_bucket *prev = NULL;
do {
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
if (b->pointers[i]) {
- if (unlikely(b->pointers[i] == p)) {
- return false;
+ if (unlikely(b->hashes[i] == hash &&
+ ht->cmp(b->pointers[i], p))) {
+ return b->pointers[i];
}
} else {
goto found;
atomic_set(&b->hashes[i], hash);
atomic_set(&b->pointers[i], p);
seqlock_write_end(&head->sequence);
- return true;
+ return NULL;
}
static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
* If the lock is taken it probably means there's an ongoing resize,
* so bail out.
*/
- if (qemu_mutex_trylock(&ht->lock)) {
+ if (qht_trylock(ht)) {
return;
}
map = ht->map;
qht_do_resize(ht, new);
}
- qemu_mutex_unlock(&ht->lock);
+ qht_unlock(ht);
}
-bool qht_insert(struct qht *ht, void *p, uint32_t hash)
+bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
struct qht_bucket *b;
struct qht_map *map;
bool needs_resize = false;
- bool ret;
+ void *prev;
/* NULL pointers are not supported */
qht_debug_assert(p);
b = qht_bucket_lock__no_stale(ht, hash, &map);
- ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
+ prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
qht_bucket_debug__locked(b);
qemu_spin_unlock(&b->lock);
if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
qht_grow_maybe(ht);
}
- return ret;
+ if (likely(prev == NULL)) {
+ return true;
+ }
+ if (existing) {
+ *existing = prev;
+ }
+ return false;
}
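A hedged sketch of the new insert contract, reusing the illustrative example_entry type from the lookup sketch above: when qht_insert() finds an equivalent entry already present it now returns false and hands the winner back through *existing, so an insert-or-lookup helper needs no second traversal.

/* returns the entry that ends up in the table, freeing ours on a duplicate */
static struct example_entry *example_intern(struct qht *ht,
                                            struct example_entry *e,
                                            uint32_t hash)
{
    void *existing;

    if (qht_insert(ht, e, hash, &existing)) {
        return e;            /* inserted; the table now references e */
    }
    g_free(e);               /* an equivalent entry was already present */
    return existing;
}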
static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
size_t n_buckets = qht_elems_to_buckets(n_elems);
size_t ret = false;
- qemu_mutex_lock(&ht->lock);
+ qht_lock(ht);
if (n_buckets != ht->map->n_buckets) {
struct qht_map *new;
qht_do_resize(ht, new);
ret = true;
}
- qemu_mutex_unlock(&ht->lock);
+ qht_unlock(ht);
return ret;
}