/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split in groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * When setting or clearing a range of m bits, the work to perform across
 * all levels is O(m + m/W + m/W^2 + ...), which is O(m), just like on a
 * regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
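
/* Worked example for the table above (illustrative, not part of the
 * original file): with BITS_PER_LONG == 64, BITS_PER_LEVEL is 6, so for a
 * last-level bit index:
 *
 *     word = index >> BITS_PER_LEVEL;        // which unsigned long
 *     bit  = index & (BITS_PER_LONG - 1);    // which bit inside that word
 *
 * and `word` is itself the bit index to test one level up.  Repeating the
 * right shift walks up the hierarchy; a left shift plus ctzl() of the
 * current word walks back down.
 */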

struct HBitmap {
    /*
     * Size of the bitmap, as requested in hbitmap_alloc or in
     * hbitmap_truncate.
     */
    uint64_t orig_size;

    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * will actually represent a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}

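/* Typical use of the iterator API above (an illustrative sketch, not part
 * of the original file):
 *
 *     HBitmapIter hbi;
 *     int64_t next;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((next = hbitmap_iter_next(&hbi)) >= 0) {
 *         // `next` is the first item of a set granularity group
 *     }
 *
 * hbitmap_iter_next() returns -1 once the bitmap is exhausted.
 */
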
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}

int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset;
    uint64_t end_bit, sz;
    int64_t res;

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end_bit = count > hb->orig_size - start ?
                hb->size :
                ((start + count - 1) >> hb->granularity) + 1;
    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

    /* There may be some zero bits in @cur before @start.  We are not
     * interested in them, let's set them.
     */
    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctol(cur);
    if (res >= end_bit) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}

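/* Example use of hbitmap_next_zero() (illustrative, not part of the
 * original file):
 *
 *     int64_t clean = hbitmap_next_zero(hb, start, count);
 *     if (clean < 0) {
 *         // every item in [start, start + count) is dirty
 *     }
 */
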
bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
                             uint64_t *count)
{
    HBitmapIter hbi;
    int64_t first_dirty_off, area_end;
    uint32_t granularity = 1UL << hb->granularity;
    uint64_t end;

    if (*start >= hb->orig_size || *count == 0) {
        return false;
    }

    end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;

    hbitmap_iter_init(&hbi, hb, *start);
    first_dirty_off = hbitmap_iter_next(&hbi);

    if (first_dirty_off < 0 || first_dirty_off >= end) {
        return false;
    }

    if (first_dirty_off + granularity >= end) {
        area_end = end;
    } else {
        area_end = hbitmap_next_zero(hb, first_dirty_off + granularity,
                                     end - first_dirty_off - granularity);
        if (area_end < 0) {
            area_end = end;
        }
    }

    if (first_dirty_off > *start) {
        *start = first_dirty_off;
    }
    *count = area_end - *start;

    return true;
}

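/* Illustrative sketch (not part of the original file): walking every dirty
 * area with the helper above.  Passing UINT64_MAX as the count is safe
 * because it is clamped to the bitmap size:
 *
 *     uint64_t off = 0, len = UINT64_MAX;
 *
 *     while (hbitmap_next_dirty_area(hb, &off, &len)) {
 *         // [off, off + len) is dirty
 *         off += len;
 *         len = UINT64_MAX;
 *     }
 */
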
bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and end, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}

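/* Worked example for the mask arithmetic above (illustrative, not part of
 * the original file): with start == 2 and last == 5 inside one word,
 *
 *     2UL << 5            == 0b1000000
 *     1UL << 2            == 0b0000100
 *     mask = 64 - 4 == 60 == 0b0111100
 *
 * i.e. (2UL << last) - (1UL << start) covers exactly bits start..last.
 */
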
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

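/* Example of granularity rounding in hbitmap_set() (illustrative, not part
 * of the original file):
 *
 *     HBitmap *hb = hbitmap_alloc(16, 2);
 *
 *     hbitmap_set(hb, 5, 1);
 *     // item 5 lands in group 1, which covers items 4-7, so
 *     // hbitmap_get(hb, 4) .. hbitmap_get(hb, 7) are all true
 *     // and hbitmap_count(hb) returns 4.
 */
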
/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *      64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

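/* Numeric check of the bound above (illustrative, not part of the original
 * file): with granularity 57, UINT64_C(64) << 57 == 2^63 still fits in a
 * uint64_t; with granularity 58, 64 << 58 would be 2^64 and overflow,
 * hence the requirement of < 58.
 */
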
bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}

/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}

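/* Illustrative round trip (not part of the original file): serializing a
 * whole bitmap into a buffer and loading it into another bitmap of the
 * same size and granularity:
 *
 *     uint64_t len = hbitmap_serialization_size(hb, 0, size);
 *     uint8_t *buf = g_malloc(len);
 *
 *     hbitmap_serialize_part(hb, buf, 0, size);
 *     hbitmap_deserialize_part(copy, buf, 0, size, true);
 *     // finish=true rebuilds the upper levels and the dirty count
 *     g_free(buf);
 *
 * Here `size` is the item count the bitmaps were allocated with, and
 * `copy` is a hypothetical second bitmap.
 */
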
void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    hb->orig_size = size;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}

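/* Worked example of the level sizing above (illustrative, not part of the
 * original file): for a bitmap of 2^20 bits on a 64-bit host, the loop
 * allocates 16384 longs for the last level, then 256, then 4, and then a
 * single long for every remaining level: each level is 1/64th the size of
 * the one below, clamped to at least one word.
 */
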
void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    hb->orig_size = size;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
{
    return (a->size == b->size) && (a->granularity == b->granularity);
}

/**
 * Given HBitmaps A and B, let A := A (BITOR) B.
 * Bitmap B will not be modified.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) {
        return false;
    }
    assert(hbitmap_can_merge(b, result));

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);

    return true;
}

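/* Example (illustrative, not part of the original file): OR bitmap b into
 * bitmap a in place, which works because result may alias a:
 *
 *     if (!hbitmap_merge(a, b, a)) {
 *         // a and b differ in size or granularity
 *     }
 */
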
HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}

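/* Illustrative sketch (not part of the original file): a meta bitmap with
 * chunk_size 8 tracks which groups of 8 bits of hb have changed:
 *
 *     HBitmap *meta = hbitmap_create_meta(hb, 8);
 *
 *     hbitmap_set(hb, 0, 1);    // also sets the covering bit in meta
 *     ...
 *     hbitmap_free_meta(hb);    // required before hbitmap_free(hb)
 */
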
char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}