/*
 * QEMU block throttling group infrastructure
 *
 * Copyright (C) Nodalink, EURL. 2014
 * Copyright (C) Igalia, S.L. 2015
 *
 * Authors:
 *   Benoît Canet <[email protected]>
 *   Alberto Garcia <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"

/* The ThrottleGroup structure (with its ThrottleState) is shared
 * among different BlockBackends and it's independent from
 * AioContext, so in order to use it from different threads it needs
 * its own locking.
 *
 * This locking is however handled internally in this file, so it's
 * transparent to outside users.
 *
 * The whole ThrottleGroup structure is private and invisible to
 * outside users, who only use it through its ThrottleState.
 *
 * In addition to the ThrottleGroup structure, BlockBackendPublic has
 * fields that need to be accessed by other members of the group and
 * therefore also need to be protected by this lock. Once a
 * BlockBackend is registered in a group those fields can be accessed
 * by other threads any time.
 *
 * Again, all this is handled internally and is mostly transparent to
 * the outside. The 'throttle_timers' field however has an additional
 * constraint because it may be temporarily invalid (see for example
 * bdrv_set_aio_context()). Therefore in this file a thread will
 * access some other BlockBackend's timers only after verifying that
 * that BlockBackend has throttled requests in the queue.
 */
typedef struct ThrottleGroup {
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, BlockBackendPublic) head;
    BlockBackend *tokens[2];
    bool any_timer_armed[2];

    /* These two are protected by the global throttle_groups_lock */
    unsigned refcount;
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
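
/* An illustrative aside (not in the original source): the two-element
 * arrays above are indexed by the request direction, is_write == 0
 * for reads and is_write == 1 for writes. With members A, B and C in
 * the group, tokens[1] names the BlockBackend whose pending write
 * requests get first consideration on the next scheduling round, and
 * the round robin then walks the 'head' list: A -> B -> C -> A.
 */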

static QemuMutex throttle_groups_lock;
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);

/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is
 * created.
 *
 * @name: the name of the ThrottleGroup
 * @ret: the ThrottleState member of the ThrottleGroup
 */
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;
    ThrottleGroup *iter;

    qemu_mutex_lock(&throttle_groups_lock);

    /* Look for an existing group with that name */
    QTAILQ_FOREACH(iter, &throttle_groups, list) {
        if (!strcmp(name, iter->name)) {
            tg = iter;
            break;
        }
    }

    /* Create a new one if not found */
    if (!tg) {
        tg = g_new0(ThrottleGroup, 1);
        tg->name = g_strdup(name);
        qemu_mutex_init(&tg->lock);
        throttle_init(&tg->ts);
        QLIST_INIT(&tg->head);

        QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    }

    tg->refcount++;

    qemu_mutex_unlock(&throttle_groups_lock);

    return &tg->ts;
}

/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed.
 *
 * @ts: The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    qemu_mutex_lock(&throttle_groups_lock);
    if (--tg->refcount == 0) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
        qemu_mutex_destroy(&tg->lock);
        g_free(tg->name);
        g_free(tg);
    }
    qemu_mutex_unlock(&throttle_groups_lock);
}
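
/* A minimal usage sketch (added for illustration; "group0" is a
 * placeholder name): every throttle_group_incref() must eventually
 * be paired with a throttle_group_unref() on the ThrottleState it
 * returned, or the group leaks:
 *
 *     ThrottleState *ts = throttle_group_incref("group0");
 *     ... the group stays alive while this reference is held ...
 *     throttle_group_unref(ts);
 */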

/* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
 * is guaranteed to remain constant during the lifetime of the group.
 *
 * @blk: a BlockBackend that is a member of a throttling group
 * @ret: the name of the group.
 */
const char *throttle_group_get_name(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    return tg->name;
}

/* Return the next BlockBackend in the round-robin sequence, simulating a
 * circular list.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @ret: the next BlockBackend in the sequence
 */
static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);

    if (!next) {
        next = QLIST_FIRST(&tg->head);
    }

    return blk_by_public(next);
}

/*
 * Return whether a BlockBackend has pending requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the BlockBackend
 * @is_write: the type of operation (read/write)
 * @ret: whether the BlockBackend has pending requests.
 */
static inline bool blk_has_pending_reqs(BlockBackend *blk,
                                        bool is_write)
{
    const BlockBackendPublic *blkp = blk_get_public(blk);
    return blkp->pending_reqs[is_write];
}

/* Return the next BlockBackend in the round-robin sequence with pending I/O
 * requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write: the type of operation (read/write)
 * @ret: the next BlockBackend with pending requests, or blk if there is
 *       none.
 */
static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    BlockBackend *token, *start;

    start = token = tg->tokens[is_write];

    /* get the next BlockBackend in round-robin style */
    token = throttle_group_next_blk(token);
    while (token != start && !blk_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_blk(token);
    }

    /* If no I/O is queued for scheduling on the next round-robin token
     * then make the current blk the token, because chances are that
     * the current blk will get the next request queued.
     */
    if (token == start && !blk_has_pending_reqs(token, is_write)) {
        token = blk;
    }

    /* Either we return the original BB, or one with pending requests */
    assert(token == blk || blk_has_pending_reqs(token, is_write));

    return token;
}
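
/* A worked example of the search above (illustrative, member names
 * made up): with members A, B and C, tokens[is_write] == B, and only
 * C holding pending requests, the loop starts at B's successor C,
 * sees that C has pending requests and returns it. If nobody had
 * pending requests the loop would wrap around to B again, and the
 * function would fall back to returning the caller's blk.
 */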

/* Check if the next I/O request for a BlockBackend needs to be throttled or
 * not. If there's no timer set in this group, set one and update the token
 * accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write: the type of operation (read/write)
 * @ret: whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;

    if (blkp->io_limits_disabled) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set blk as the current token */
    if (must_wait) {
        tg->tokens[is_write] = blk;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}

/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @is_write: the type of operation (read/write)
 */
static void schedule_next_request(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool must_wait;
    BlockBackend *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(blk, is_write);
    if (!blk_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
            token = blk;
        } else {
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
            int64_t now = qemu_clock_get_ns(tt->clock_type);
            timer_mod(tt->timers[is_write], now + 1);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}

/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @blk: the current BlockBackend
 * @bytes: the number of bytes for this I/O
 * @is_write: the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    BlockBackend *token;

    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(blk, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || blkp->pending_reqs[is_write]) {
        blkp->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_queue_wait(&blkp->throttled_reqs[is_write], NULL);
        qemu_mutex_lock(&tg->lock);
        blkp->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(blkp->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(blk, is_write);

    qemu_mutex_unlock(&tg->lock);
}
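
/* A sketch of the expected caller (modeled on blk_co_preadv() in
 * block-backend.c; not part of this file): the intercept runs in a
 * coroutine before the actual I/O is submitted, e.g. for a read:
 *
 *     if (blk_get_public(blk)->throttle_state) {
 *         throttle_group_co_io_limits_intercept(blk, bytes, false);
 *     }
 */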

void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    int i;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
            ;
        }
    }
}

/* Update the throttle configuration for a particular group. Similar
 * to throttle_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    /* throttle_config() cancels the timers */
    if (timer_pending(tt->timers[0])) {
        tg->any_timer_armed[0] = false;
    }
    if (timer_pending(tt->timers[1])) {
        tg->any_timer_armed[1] = false;
    }
    throttle_config(ts, tt, cfg);
    qemu_mutex_unlock(&tg->lock);

    qemu_co_enter_next(&blkp->throttled_reqs[0]);
    qemu_co_enter_next(&blkp->throttled_reqs[1]);
}
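
/* A hedged configuration sketch (assumes the throttle_config_init()
 * helper and the bucket indexes from util/throttle.c; the limits are
 * made up): capping a whole group to 10 MB/s and 1000 IOPS in
 * aggregate could look like:
 *
 *     ThrottleConfig cfg;
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 1000;
 *     throttle_group_config(blk, &cfg);
 */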

/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}

/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @blk: the BlockBackend whose request had been throttled
 * @is_write: the type of operation (read/write)
 */
static void timer_cb(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool empty_queue;

    /* The timer has just been fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    aio_context_acquire(blk_get_aio_context(blk));
    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
    aio_context_release(blk_get_aio_context(blk));

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(blk, is_write);
        qemu_mutex_unlock(&tg->lock);
    }
}

static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}
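
/* Putting the pieces together (a restated summary, not from the
 * original file), the life of a throttled request is roughly:
 *
 *   1. throttle_group_co_io_limits_intercept() decides the request
 *      must wait and parks its coroutine in throttled_reqs[].
 *   2. throttle_group_schedule_timer() has armed a timer on the
 *      token BlockBackend.
 *   3. The timer fires: timer_cb() clears any_timer_armed[] and
 *      wakes one coroutine from the queue.
 *   4. The awakened request does its accounting and calls
 *      schedule_next_request() to keep the round robin moving.
 */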

/* Register a BlockBackend in the throttling group, also initializing its
 * timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * @blk: the BlockBackend to insert
 * @groupname: the name of the group
 */
void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
{
    int i;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }

    blkp->throttle_state = ts;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new set this BlockBackend as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = blk;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);

    throttle_timers_init(&blkp->throttle_timers,
                         blk_get_aio_context(blk),
                         clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         blk);

    qemu_mutex_unlock(&tg->lock);
}
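
/* A lifecycle sketch (illustrative; "limits" is a placeholder group
 * name, and blk_drain() is one way to satisfy the drain requirement
 * documented below):
 *
 *     throttle_group_register_blk(blk, "limits");
 *     ... I/O through blk is now subject to the group's limits ...
 *     blk_drain(blk);         (flush pending throttled requests)
 *     throttle_group_unregister_blk(blk);
 */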

/* Unregister a BlockBackend from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The BlockBackend must not have pending throttled requests, so the caller has
 * to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @blk: the BlockBackend to remove
 */
void throttle_group_unregister_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    int i;

    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (tg->tokens[i] == blk) {
            BlockBackend *token = throttle_group_next_blk(blk);
            /* Take care of the case where this is the last blk in the group */
            if (token == blk) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current blk from the list */
    QLIST_REMOVE(blkp, round_robin);
    throttle_timers_destroy(&blkp->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    blkp->throttle_state = NULL;
}

static void throttle_groups_init(void)
{
    qemu_mutex_init(&throttle_groups_lock);
}

block_init(throttle_groups_init);