/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include <string.h>
#include <glib.h>
#include <assert.h>
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * When setting or clearing a range of m bits, the work to perform across
 * all levels is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular
 * bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
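
/* As a worked example of the splitting above (assuming BITS_PER_LONG == 64,
 * so BITS_PER_LEVEL == 6): a last-level bit index pos == 677 selects
 *
 *     word = pos >> BITS_PER_LEVEL;       // 677 >> 6 == 10
 *     bit  = pos & (BITS_PER_LONG - 1);   // 677 & 63 == 37
 *
 * and word 10 of this level is in turn tracked, one level up, by bit
 * (10 & 63) == 10 of word (10 >> 6) == 0.
 */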

struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which groups they
     * land in, and then affects those entire groups; iteration will only
     * visit the first bit of each group.  Here is an example of operations
     * in a size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=6)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];
};
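
/* A minimal usage sketch of the scaling described above, mirroring the
 * granularity-1 example in the comment (illustrative only; all functions
 * used here are defined later in this file):
 *
 *     HBitmap *map = hbitmap_alloc(16, 1);  // 16 elements, groups of 2
 *     hbitmap_set(map, 0, 9);               // marks groups 0..4
 *     hbitmap_reset(map, 1, 3);             // clears groups 0..1
 *     assert(hbitmap_get(map, 4));          // element 4 is in group 2
 *     assert(hbitmap_count(map) == 6);      // 3 groups << granularity
 *     hbitmap_free(map);
 */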

static inline int popcountl(unsigned long l)
{
    return BITS_PER_LONG == 32 ? ctpop32(l) : ctpop64(l);
}

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        cur = hbi->cur[--i];
        pos >>= BITS_PER_LEVEL;
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        pos = (pos << BITS_PER_LEVEL) + bitops_ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
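
/* Typical iteration over set bits, as a sketch (hbitmap_iter_next() is the
 * inline helper declared in qemu/hbitmap.h; it returns the next set element,
 * already scaled left by the granularity, or -1 at the end):
 *
 *     HBitmapIter hbi;
 *     int64_t next;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((next = hbitmap_iter_next(&hbi)) >= 0) {
 *         process(next);   // hypothetical per-element callback
 *     }
 */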

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and last, inclusive, not
 * accounting for the granularity.  Also an example of how to use
 * hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += popcountl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += popcountl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes from zero to non-zero.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool changed;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    changed = (*elem == 0);
    *elem |= mask;
    return changed;
}
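
/* Concretely, with 64-bit longs and start == 2, last == 5 within one word:
 *
 *     mask  = 2UL << 5;    // 0b1000000
 *     mask -= 1UL << 2;    // 0b1000000 - 0b0000100 == 0b0111100
 *
 * i.e. bits 2..5 inclusive.  Writing "2UL << last" instead of
 * "1UL << (last + 1)" keeps the shift amount below BITS_PER_LONG, so the
 * mask also works when last is the word's most significant bit.
 */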

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...  */
static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    start >>= hb->granularity;
    last >>= hb->granularity;
    count = last - start + 1;

    hb->count += count - hb_count_between(hb, start, last);
    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}
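
/* For example, resetting bits 2..5 of a word holding 0b0111100 gives
 * mask == 0b0111100 and (*elem & ~mask) == 0, so the word is reported as
 * blanked; had any bit outside 2..5 been set, the word would stay nonzero
 * and the summary bit in the level above must survive.
 */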

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...  */
static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    start >>= hb->granularity;
    last >>= hb->granularity;

    hb->count -= hb_count_between(hb, start, last);
    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
}
359 | ||
360 | bool hbitmap_get(const HBitmap *hb, uint64_t item) | |
361 | { | |
362 | /* Compute position and bit in the last layer. */ | |
363 | uint64_t pos = item >> hb->granularity; | |
364 | unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1)); | |
365 | ||
366 | return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0; | |
367 | } | |

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_malloc0(sizeof(struct HBitmap));
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->levels[i] = g_malloc0(size * sizeof(unsigned long));
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
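
/* End-to-end usage sketch of the public entry points above (illustrative;
 * see qemu/hbitmap.h for the full API, including the iterator helpers):
 *
 *     HBitmap *hb = hbitmap_alloc(1 << 20, 0);   // 1M items, granularity 0
 *     hbitmap_set(hb, 1000, 5);                  // mark items 1000..1004
 *     assert(hbitmap_count(hb) == 5);
 *     assert(hbitmap_get(hb, 1002));
 *     hbitmap_reset(hb, 1000, 5);
 *     assert(hbitmap_empty(hb));
 *     hbitmap_free(hb);
 */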