#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
+#include "crypto/hash.h"
/* HBitmaps provides an array of bits. The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits.
 */
struct HBitmap {
+ /*
+ * Size of the bitmap, as requested in hbitmap_alloc or in hbitmap_truncate.
+ */
+ uint64_t orig_size;
+
/* Number of total bits in the bottom level. */
uint64_t size;
unsigned long cur;
do {
- cur = hbi->cur[--i];
+ i--;
pos >>= BITS_PER_LEVEL;
+ cur = hbi->cur[i] & hb->levels[i][pos];
} while (cur == 0);
/* Check for end of iteration. We always use fewer than BITS_PER_LONG
return cur;
}
+int64_t hbitmap_iter_next(HBitmapIter *hbi)
+{
+ unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
+ hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
+ int64_t item;
+
+ if (cur == 0) {
+ cur = hbitmap_iter_skip_words(hbi);
+ if (cur == 0) {
+ return -1;
+ }
+ }
+
+ /* The next call will resume work from the next bit. */
+ hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
+ item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);
+
+ return item << hbi->granularity;
+}
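+
+/*
+ * Usage sketch for the iterator API above (illustrative only; "map" and
+ * process_item() are placeholder names, not part of this file):
+ *
+ *     HBitmapIter iter;
+ *     int64_t off;
+ *
+ *     hbitmap_iter_init(&iter, map, 0);
+ *     while ((off = hbitmap_iter_next(&iter)) >= 0) {
+ *         process_item(off);   // off is the item offset of a dirty granule
+ *     }
+ */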
+
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
unsigned i, bit;
}
}
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
+{
+ size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+ unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+ unsigned long cur = last_lev[pos];
+ unsigned start_bit_offset;
+ uint64_t end_bit, sz;
+ int64_t res;
+
+ if (start >= hb->orig_size || count == 0) {
+ return -1;
+ }
+
+ end_bit = count > hb->orig_size - start ?
+ hb->size :
+ ((start + count - 1) >> hb->granularity) + 1;
+ sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;
+
+ /* There may be some zero bits in @cur before @start. We are not interested
+ * in them, let's set them.
+ */
+ start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
+ cur |= (1UL << start_bit_offset) - 1;
+ assert((start >> hb->granularity) < hb->size);
+
+ if (cur == (unsigned long)-1) {
+ do {
+ pos++;
+ } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+ if (pos >= sz) {
+ return -1;
+ }
+
+ cur = last_lev[pos];
+ }
+
+ res = (pos << BITS_PER_LEVEL) + ctzl(cur);
+ if (res >= end_bit) {
+ return -1;
+ }
+
+ res = res << hb->granularity;
+ if (res < start) {
+ assert(((start - res) >> hb->granularity) == 0);
+ return start;
+ }
+
+ return res;
+}
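+
+/*
+ * Usage sketch for hbitmap_next_zero() (illustrative; "map", "start" and
+ * "count" are placeholder names): look for the first clear bit in the item
+ * range [start, start + count):
+ *
+ *     int64_t zero = hbitmap_next_zero(map, start, count);
+ *     if (zero < 0) {
+ *         // every item in the requested range is dirty
+ *     } else {
+ *         // zero is the item offset of the first clear bit; it is never
+ *         // smaller than start
+ *     }
+ */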
+
+bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
+ uint64_t *count)
+{
+ HBitmapIter hbi;
+ int64_t first_dirty_off, area_end;
+ uint32_t granularity = 1UL << hb->granularity;
+ uint64_t end;
+
+ if (*start >= hb->orig_size || *count == 0) {
+ return false;
+ }
+
+ end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;
+
+ hbitmap_iter_init(&hbi, hb, *start);
+ first_dirty_off = hbitmap_iter_next(&hbi);
+
+ if (first_dirty_off < 0 || first_dirty_off >= end) {
+ return false;
+ }
+
+ if (first_dirty_off + granularity >= end) {
+ area_end = end;
+ } else {
+ area_end = hbitmap_next_zero(hb, first_dirty_off + granularity,
+ end - first_dirty_off - granularity);
+ if (area_end < 0) {
+ area_end = end;
+ }
+ }
+
+ if (first_dirty_off > *start) {
+ *start = first_dirty_off;
+ }
+ *count = area_end - *start;
+
+ return true;
+}
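+
+/*
+ * Usage sketch for hbitmap_next_dirty_area() (illustrative; "map" is a
+ * placeholder name).  Note that *start and *count are in/out parameters:
+ *
+ *     uint64_t off = 0;
+ *     uint64_t len = UINT64_MAX;   // clamped to the bitmap size internally
+ *
+ *     if (hbitmap_next_dirty_area(map, &off, &len)) {
+ *         // [off, off + len) is the first dirty area in the request
+ *     }
+ */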
+
bool hbitmap_empty(const HBitmap *hb)
{
return hb->count == 0;
uint64_t first, n;
uint64_t last = start + count - 1;
+ if (count == 0) {
+ return;
+ }
+
trace_hbitmap_set(hb, start, count,
start >> hb->granularity, last >> hb->granularity);
/* Compute range in the last layer. */
uint64_t first;
uint64_t last = start + count - 1;
+ uint64_t gran = 1ULL << hb->granularity;
+
+ if (count == 0) {
+ return;
+ }
+
+ assert(QEMU_IS_ALIGNED(start, gran));
+ assert(QEMU_IS_ALIGNED(count, gran) || (start + count == hb->orig_size));
trace_hbitmap_reset(hb, start, count,
start >> hb->granularity, last >> hb->granularity);
hb->count = 0;
}
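+
+/*
+ * Illustrative note on the alignment asserts in hbitmap_reset() above
+ * ("map" is a placeholder name).  With hb->granularity == 9 one bit covers
+ * 1 << 9 == 512 items, so:
+ *
+ *     hbitmap_reset(map, 0, 512);   // OK: start and count are aligned
+ *     hbitmap_reset(map, 0, 100);   // only OK if hb->orig_size == 100
+ */
+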
+bool hbitmap_is_serializable(const HBitmap *hb)
+{
+ /* Every serialized chunk must be aligned to 64 bits so that endianness
+ * requirements can be fulfilled on both 64 bit and 32 bit hosts.
+ * We have hbitmap_serialization_align() which converts this
+ * alignment requirement from bitmap bits to items covered (e.g. sectors).
+ * That value is:
+ * 64 << hb->granularity
+ * Since this value must not exceed UINT64_MAX, hb->granularity must be
+ * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
+ *
+ * In order for hbitmap_serialization_align() to always return a
+ * meaningful value, bitmaps that are to be serialized must have a
+ * granularity of less than 58. */
+
+ return hb->granularity < 58;
+}
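+
+/*
+ * For illustration: hbitmap_serialization_align() below returns
+ * 64 << hb->granularity items, e.g. 64 for granularity 0 and
+ * 64 << 16 == 4194304 for granularity 16.  With a granularity of 58 or
+ * more the shift would overflow 64 bits, hence the check above.
+ */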
+
bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
/* Compute position and bit in the last layer. */
return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}
-uint64_t hbitmap_serialization_granularity(const HBitmap *hb)
+uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
- /* Must hold true so that the shift below is defined
- * (ld(64) == 6, i.e. 1 << 6 == 64) */
- assert(hb->granularity < 64 - 6);
+ assert(hbitmap_is_serializable(hb));
/* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
* hosts. */
unsigned long **first_el, uint64_t *el_count)
{
uint64_t last = start + count - 1;
- uint64_t gran = hbitmap_serialization_granularity(hb);
+ uint64_t gran = hbitmap_serialization_align(hb);
assert((start & (gran - 1)) == 0);
assert((last >> hb->granularity) < hb->size);
}
}
+void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
+ bool finish)
+{
+ uint64_t el_count;
+ unsigned long *first;
+
+ if (!count) {
+ return;
+ }
+ serialization_chunk(hb, start, count, &first, &el_count);
+
+ memset(first, 0xff, el_count * sizeof(unsigned long));
+ if (finish) {
+ hbitmap_deserialize_finish(hb);
+ }
+}
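+
+/*
+ * Usage sketch (illustrative; "map" and nb_items are placeholder names,
+ * with nb_items assumed to span the whole bitmap): mark the deserialized
+ * range as dirty, then rebuild the upper levels:
+ *
+ *     hbitmap_deserialize_ones(map, 0, nb_items, false);
+ *     hbitmap_deserialize_finish(map);
+ *
+ * Passing finish == true to hbitmap_deserialize_ones() performs both steps
+ * in one call.
+ */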
+
void hbitmap_deserialize_finish(HBitmap *bitmap)
{
int64_t i, size, prev_size;
}
bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
+ bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}
void hbitmap_free(HBitmap *hb)
HBitmap *hb = g_new0(struct HBitmap, 1);
unsigned i;
+ hb->orig_size = size;
+
assert(granularity >= 0 && granularity < 64);
size = (size + (1ULL << granularity) - 1) >> granularity;
assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
uint64_t num_elements = size;
uint64_t old;
+ hb->orig_size = size;
+
/* Size comes in as logical elements, adjust for granularity. */
size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
}
}
+bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
+{
+ return (a->orig_size == b->orig_size);
+}
/**
- * Given HBitmaps A and B, let A := A (BITOR) B.
- * Bitmap B will not be modified.
+ * hbitmap_sparse_merge: performs dst = dst | src
+ * works with differing granularities.
+ * best used when src is sparsely populated.
+ */
+static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
+{
+ uint64_t offset = 0;
+ uint64_t count = src->orig_size;
+
+ while (hbitmap_next_dirty_area(src, &offset, &count)) {
+ hbitmap_set(dst, offset, count);
+ offset += count;
+ if (offset >= src->orig_size) {
+ break;
+ }
+ count = src->orig_size - offset;
+ }
+}
+
+/**
+ * Given HBitmaps A and B, let R := A (BITOR) B.
+ * Bitmaps A and B will not be modified,
+ * except when bitmap R is an alias of A or B.
*
* @return true if the merge was successful,
* false if it was not attempted.
*/
-bool hbitmap_merge(HBitmap *a, const HBitmap *b)
+bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
int i;
uint64_t j;
- if ((a->size != b->size) || (a->granularity != b->granularity)) {
+ if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) {
return false;
}
+ assert(hbitmap_can_merge(b, result));
- if (hbitmap_count(b) == 0) {
+ if ((!hbitmap_count(a) && result == b) ||
+ (!hbitmap_count(b) && result == a)) {
+ return true;
+ }
+
+ if (!hbitmap_count(a) && !hbitmap_count(b)) {
+ hbitmap_reset_all(result);
+ return true;
+ }
+
+ if (a->granularity != b->granularity) {
+ if ((a != result) && (b != result)) {
+ hbitmap_reset_all(result);
+ }
+ if (a != result) {
+ hbitmap_sparse_merge(result, a);
+ }
+ if (b != result) {
+ hbitmap_sparse_merge(result, b);
+ }
return true;
}
* It may be possible to improve running times for sparsely populated maps
* by using hbitmap_iter_next, but this is suboptimal for dense maps.
*/
+ assert(a->size == b->size);
for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
for (j = 0; j < a->sizes[i]; j++) {
- a->levels[i][j] |= b->levels[i][j];
+ result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
}
}
+ /* Recompute the dirty count */
+ result->count = hb_count_between(result, 0, result->size - 1);
+
return true;
}
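+
+/*
+ * Usage sketch (illustrative; "a" and "b" are placeholder bitmap pointers):
+ * OR bitmap b into a in place, which is allowed because the result may
+ * alias one of the inputs:
+ *
+ *     if (hbitmap_can_merge(a, b)) {
+ *         hbitmap_merge(a, b, a);
+ *     }
+ */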
hbitmap_free(hb->meta);
hb->meta = NULL;
}
+
+char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
+{
+ size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
+ char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
+ char *hash = NULL;
+ qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);
+
+ return hash;
+}
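+
+/*
+ * Usage sketch (illustrative; "map" is a placeholder name, error handling is
+ * sketched with error_report_err()): compute the digest and release it when
+ * done.  The returned string is allocated by the crypto layer and is
+ * expected to be freed with g_free():
+ *
+ *     Error *local_err = NULL;
+ *     char *digest = hbitmap_sha256(map, &local_err);
+ *
+ *     if (!digest) {
+ *         error_report_err(local_err);
+ *     } else {
+ *         g_free(digest);
+ *     }
+ */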