/*
 * QEMU throttling infrastructure
 *
 * Copyright (C) Nodalink, EURL. 2013-2014
 * Copyright (C) Igalia, S.L. 2015
 *
 * Authors:
 *   Benoît Canet <[email protected]>
 *   Alberto Garcia <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/throttle.h"
#include "qemu/timer.h"
#include "block/aio.h"

/* This function makes a bucket leak
 *
 * @bkt: the bucket to leak
 * @delta_ns: the time delta
 */
void throttle_leak_bucket(LeakyBucket *bkt, int64_t delta_ns)
{
    double leak;

    /* compute how much to leak */
    leak = (bkt->avg * (double) delta_ns) / NANOSECONDS_PER_SECOND;

    /* make the bucket leak */
    bkt->level = MAX(bkt->level - leak, 0);

    /* if we allow bursts for more than one second we also need to
     * keep track of bkt->burst_level so the bkt->max goal per second
     * is attained */
    if (bkt->burst_length > 1) {
        leak = (bkt->max * (double) delta_ns) / NANOSECONDS_PER_SECOND;
        bkt->burst_level = MAX(bkt->burst_level - leak, 0);
    }
}
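
/*
 * Illustrative numbers (not from the source, just a worked example of the
 * formula above): with bkt->avg = 1000 units/s and delta_ns = 250 ms
 * (250,000,000 ns), the bucket leaks 1000 * 0.25 = 250 units, so a level
 * of 400 drops to 150 and a level of 100 drops to 0.
 */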

/* Calculate the time delta since the last leak and make proportional leaks
 *
 * @now: the current timestamp in ns
 */
static void throttle_do_leak(ThrottleState *ts, int64_t now)
{
    /* compute the time elapsed since the last leak */
    int64_t delta_ns = now - ts->previous_leak;
    int i;

    ts->previous_leak = now;

    if (delta_ns <= 0) {
        return;
    }

    /* make each bucket leak */
    for (i = 0; i < BUCKETS_COUNT; i++) {
        throttle_leak_bucket(&ts->cfg.buckets[i], delta_ns);
    }
}

/* do the real job of computing the time to wait
 *
 * @limit: the throttling limit
 * @extra: the number of operations to delay
 * @ret: the time to wait in ns
 */
static int64_t throttle_do_compute_wait(double limit, double extra)
{
    double wait = extra * NANOSECONDS_PER_SECOND;
    wait /= limit;
    return wait;
}
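
/*
 * Worked example (illustrative only): with limit = 100 ops/s and
 * extra = 25 pending operations, the wait is 25 * 1e9 / 100 =
 * 250,000,000 ns, i.e. 250 ms.
 */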

/* This function computes the wait time in ns that a leaky bucket should trigger
 *
 * @bkt: the leaky bucket we operate on
 * @ret: the resulting wait time in ns or 0 if the operation can go through
 */
int64_t throttle_compute_wait(LeakyBucket *bkt)
{
    double extra; /* the number of extra units blocking the io */
    double bucket_size;   /* I/O before throttling to bkt->avg */
    double burst_bucket_size; /* Before throttling to bkt->max */

    if (!bkt->avg) {
        return 0;
    }

    if (!bkt->max) {
        /* If bkt->max is 0 we still want to allow short bursts of I/O
         * from the guest, otherwise every other request will be throttled
         * and performance will suffer considerably. */
        bucket_size = (double) bkt->avg / 10;
        burst_bucket_size = 0;
    } else {
        /* If we have a burst limit then we have to wait until all I/O
         * at burst rate has finished before throttling to bkt->avg */
        bucket_size = bkt->max * bkt->burst_length;
        burst_bucket_size = (double) bkt->max / 10;
    }

    /* If the main bucket is full then we have to wait */
    extra = bkt->level - bucket_size;
    if (extra > 0) {
        return throttle_do_compute_wait(bkt->avg, extra);
    }

    /* If the main bucket is not full yet we still have to check the
     * burst bucket in order to enforce the burst limit */
    if (bkt->burst_length > 1) {
        assert(bkt->max > 0); /* see throttle_is_valid() */
        extra = bkt->burst_level - burst_bucket_size;
        if (extra > 0) {
            return throttle_do_compute_wait(bkt->max, extra);
        }
    }

    return 0;
}
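
/*
 * Worked example (illustrative only): with bkt->avg = 100 and bkt->max = 0
 * the bucket size defaults to avg / 10 = 10. A level of 15 leaves
 * extra = 5, so the caller waits 5 / 100 s = 50 ms; a level of 8 or less
 * goes through immediately.
 */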

/* This function computes the time that must be waited for this I/O
 *
 * @is_write: true if the current IO is a write, false if it's a read
 * @ret: time to wait
 */
static int64_t throttle_compute_wait_for(ThrottleState *ts,
                                         bool is_write)
{
    BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
                                   THROTTLE_OPS_TOTAL,
                                   THROTTLE_BPS_READ,
                                   THROTTLE_OPS_READ},
                                  {THROTTLE_BPS_TOTAL,
                                   THROTTLE_OPS_TOTAL,
                                   THROTTLE_BPS_WRITE,
                                   THROTTLE_OPS_WRITE}, };
    int64_t wait, max_wait = 0;
    int i;

    for (i = 0; i < 4; i++) {
        BucketType index = to_check[is_write][i];
        wait = throttle_compute_wait(&ts->cfg.buckets[index]);
        if (wait > max_wait) {
            max_wait = wait;
        }
    }

    return max_wait;
}
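
/*
 * Illustrative example: for a write request the four buckets checked are
 * THROTTLE_BPS_TOTAL, THROTTLE_OPS_TOTAL, THROTTLE_BPS_WRITE and
 * THROTTLE_OPS_WRITE. If the bps-total limit demands a 10 ms wait and the
 * iops-write limit demands 30 ms, the request waits 30 ms.
 */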

/* compute the timer for this type of operation
 *
 * @is_write: the type of operation
 * @now: the current clock timestamp
 * @next_timestamp: the resulting timer
 * @ret: true if a timer must be set
 */
static bool throttle_compute_timer(ThrottleState *ts,
                                   bool is_write,
                                   int64_t now,
                                   int64_t *next_timestamp)
{
    int64_t wait;

    /* leak proportionally to the time elapsed */
    throttle_do_leak(ts, now);

    /* compute the wait time if any */
    wait = throttle_compute_wait_for(ts, is_write);

    /* if the code must wait compute when the next timer should fire */
    if (wait) {
        *next_timestamp = now + wait;
        return true;
    }

    /* else no need to wait at all */
    *next_timestamp = now;
    return false;
}

/* Add timers to event loop */
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
                                        AioContext *new_context)
{
    tt->timers[0] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
                                  tt->read_timer_cb, tt->timer_opaque);
    tt->timers[1] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
                                  tt->write_timer_cb, tt->timer_opaque);
}

/*
 * Initialize the ThrottleConfig structure to a valid state
 * @cfg: the config to initialize
 */
void throttle_config_init(ThrottleConfig *cfg)
{
    unsigned i;
    memset(cfg, 0, sizeof(*cfg));
    for (i = 0; i < BUCKETS_COUNT; i++) {
        cfg->buckets[i].burst_length = 1;
    }
}
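
/*
 * Hypothetical caller sketch (not part of this file): start from a clean,
 * valid config before enabling a single limit.
 *
 *     ThrottleConfig cfg;
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024; // 10 MB/s
 */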

/* To be called first on the ThrottleState */
void throttle_init(ThrottleState *ts)
{
    memset(ts, 0, sizeof(ThrottleState));
    throttle_config_init(&ts->cfg);
}

/* To be called first on the ThrottleTimers */
void throttle_timers_init(ThrottleTimers *tt,
                          AioContext *aio_context,
                          QEMUClockType clock_type,
                          QEMUTimerCB *read_timer_cb,
                          QEMUTimerCB *write_timer_cb,
                          void *timer_opaque)
{
    memset(tt, 0, sizeof(ThrottleTimers));

    tt->clock_type = clock_type;
    tt->read_timer_cb = read_timer_cb;
    tt->write_timer_cb = write_timer_cb;
    tt->timer_opaque = timer_opaque;
    throttle_timers_attach_aio_context(tt, aio_context);
}
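
/*
 * Hypothetical caller sketch (illustrative only; read_cb, write_cb, ctx and
 * opaque are assumed names): the state and the timers are set up separately.
 *
 *     ThrottleState ts;
 *     ThrottleTimers tt;
 *     throttle_init(&ts);
 *     throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
 *                          read_cb, write_cb, opaque);
 */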

/* destroy a timer */
static void throttle_timer_destroy(QEMUTimer **timer)
{
    assert(*timer != NULL);

    timer_del(*timer);
    timer_free(*timer);
    *timer = NULL;
}

/* Remove timers from event loop */
void throttle_timers_detach_aio_context(ThrottleTimers *tt)
{
    int i;

    for (i = 0; i < 2; i++) {
        throttle_timer_destroy(&tt->timers[i]);
    }
}

/* To be called last on the ThrottleTimers */
void throttle_timers_destroy(ThrottleTimers *tt)
{
    throttle_timers_detach_aio_context(tt);
}

/* is any throttling timer configured */
bool throttle_timers_are_initialized(ThrottleTimers *tt)
{
    if (tt->timers[0]) {
        return true;
    }

    return false;
}

/* Does any throttling need to be done?
 *
 * @cfg: the throttling configuration to inspect
 * @ret: true if throttling must be done else false
 */
bool throttle_enabled(ThrottleConfig *cfg)
{
    int i;

    for (i = 0; i < BUCKETS_COUNT; i++) {
        if (cfg->buckets[i].avg > 0) {
            return true;
        }
    }

    return false;
}

/* check if a throttling configuration is valid
 * @cfg: the throttling configuration to inspect
 * @ret: true if valid else false
 * @errp: error object
 */
bool throttle_is_valid(ThrottleConfig *cfg, Error **errp)
{
    int i;
    bool bps_flag, ops_flag;
    bool bps_max_flag, ops_max_flag;

    bps_flag = cfg->buckets[THROTTLE_BPS_TOTAL].avg &&
               (cfg->buckets[THROTTLE_BPS_READ].avg ||
                cfg->buckets[THROTTLE_BPS_WRITE].avg);

    ops_flag = cfg->buckets[THROTTLE_OPS_TOTAL].avg &&
               (cfg->buckets[THROTTLE_OPS_READ].avg ||
                cfg->buckets[THROTTLE_OPS_WRITE].avg);

    bps_max_flag = cfg->buckets[THROTTLE_BPS_TOTAL].max &&
                   (cfg->buckets[THROTTLE_BPS_READ].max ||
                    cfg->buckets[THROTTLE_BPS_WRITE].max);

    ops_max_flag = cfg->buckets[THROTTLE_OPS_TOTAL].max &&
                   (cfg->buckets[THROTTLE_OPS_READ].max ||
                    cfg->buckets[THROTTLE_OPS_WRITE].max);

    if (bps_flag || ops_flag || bps_max_flag || ops_max_flag) {
        error_setg(errp, "bps/iops/max total values and read/write values"
                   " cannot be used at the same time");
        return false;
    }

    if (cfg->op_size &&
        !cfg->buckets[THROTTLE_OPS_TOTAL].avg &&
        !cfg->buckets[THROTTLE_OPS_READ].avg &&
        !cfg->buckets[THROTTLE_OPS_WRITE].avg) {
        error_setg(errp, "iops size requires an iops value to be set");
        return false;
    }

    for (i = 0; i < BUCKETS_COUNT; i++) {
        LeakyBucket *bkt = &cfg->buckets[i];
        if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) {
            error_setg(errp, "bps/iops/max values must be within [0, %lld]",
                       THROTTLE_VALUE_MAX);
            return false;
        }

        if (!bkt->burst_length) {
            error_setg(errp, "the burst length cannot be 0");
            return false;
        }

        if (bkt->burst_length > 1 && !bkt->max) {
            error_setg(errp, "burst length set without burst rate");
            return false;
        }

        if (bkt->max && bkt->burst_length > THROTTLE_VALUE_MAX / bkt->max) {
            error_setg(errp, "burst length too high for this burst rate");
            return false;
        }

        if (bkt->max && !bkt->avg) {
            error_setg(errp, "bps_max/iops_max require corresponding"
                       " bps/iops values");
            return false;
        }

        if (bkt->max && bkt->max < bkt->avg) {
            error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops");
            return false;
        }
    }

    return true;
}
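
/*
 * Illustrative examples of rejected configurations: setting both bps-total
 * and bps-read, an iops-size without any iops limit, a burst_length of 0,
 * or a burst length greater than 1 without the corresponding max value all
 * make throttle_is_valid() return false with an error set.
 */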

/* Used to configure the throttle
 *
 * @ts: the throttle state we are working on
 * @clock_type: the group's clock_type
 * @cfg: the config to set
 */
void throttle_config(ThrottleState *ts,
                     QEMUClockType clock_type,
                     ThrottleConfig *cfg)
{
    int i;

    ts->cfg = *cfg;

    /* Zero bucket level */
    for (i = 0; i < BUCKETS_COUNT; i++) {
        ts->cfg.buckets[i].level = 0;
        ts->cfg.buckets[i].burst_level = 0;
    }

    ts->previous_leak = qemu_clock_get_ns(clock_type);
}
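
/*
 * Hypothetical caller sketch (illustrative only): validate before applying.
 *
 *     Error *err = NULL;
 *     if (throttle_is_valid(&cfg, &err)) {
 *         throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &cfg);
 *     }
 */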

/* used to get config
 *
 * @ts: the throttle state we are working on
 * @cfg: the config to write
 */
void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
{
    *cfg = ts->cfg;
}


/* Schedule the read or write timer if needed
 *
 * NOTE: this function is not unit tested due to its usage of timer_mod
 *
 * @tt: the timers structure
 * @is_write: the type of operation (read/write)
 * @ret: true if the timer has been scheduled else false
 */
bool throttle_schedule_timer(ThrottleState *ts,
                             ThrottleTimers *tt,
                             bool is_write)
{
    int64_t now = qemu_clock_get_ns(tt->clock_type);
    int64_t next_timestamp;
    bool must_wait;

    must_wait = throttle_compute_timer(ts,
                                       is_write,
                                       now,
                                       &next_timestamp);

    /* request not throttled */
    if (!must_wait) {
        return false;
    }

    /* request throttled and timer pending -> do nothing */
    if (timer_pending(tt->timers[is_write])) {
        return true;
    }

    /* request throttled and timer not pending -> arm timer */
    timer_mod(tt->timers[is_write], next_timestamp);
    return true;
}
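
/*
 * Illustrative behaviour: if throttle_compute_timer() asks for a 50 ms wait,
 * the read or write timer is armed at now + 50 ms and the function returns
 * true. A second throttled request arriving while that timer is still
 * pending also returns true but does not re-arm the timer.
 */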

/* do the accounting for this operation
 *
 * @is_write: the type of operation (read/write)
 * @size: the size of the operation
 */
void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
{
    const BucketType bucket_types_size[2][2] = {
        { THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
        { THROTTLE_BPS_TOTAL, THROTTLE_BPS_WRITE }
    };
    const BucketType bucket_types_units[2][2] = {
        { THROTTLE_OPS_TOTAL, THROTTLE_OPS_READ },
        { THROTTLE_OPS_TOTAL, THROTTLE_OPS_WRITE }
    };
    double units = 1.0;
    unsigned i;

    /* if cfg.op_size is defined and smaller than size we compute unit count */
    if (ts->cfg.op_size && size > ts->cfg.op_size) {
        units = (double) size / ts->cfg.op_size;
    }

    for (i = 0; i < 2; i++) {
        LeakyBucket *bkt;

        bkt = &ts->cfg.buckets[bucket_types_size[is_write][i]];
        bkt->level += size;
        if (bkt->burst_length > 1) {
            bkt->burst_level += size;
        }

        bkt = &ts->cfg.buckets[bucket_types_units[is_write][i]];
        bkt->level += units;
        if (bkt->burst_length > 1) {
            bkt->burst_level += units;
        }
    }
}
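
/*
 * Worked example (illustrative only): with cfg.op_size = 4096 and a 64 KiB
 * write, units = 65536 / 4096 = 16. THROTTLE_BPS_TOTAL and THROTTLE_BPS_WRITE
 * each grow by 65536 while THROTTLE_OPS_TOTAL and THROTTLE_OPS_WRITE each
 * grow by 16 (plus the matching burst levels when burst_length > 1).
 */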

/* return a ThrottleConfig based on the options in a ThrottleLimits
 *
 * @arg: the ThrottleLimits object to read from
 * @cfg: the ThrottleConfig to edit
 * @errp: error object
 */
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
                               Error **errp)
{
    if (arg->has_bps_total) {
        cfg->buckets[THROTTLE_BPS_TOTAL].avg = arg->bps_total;
    }
    if (arg->has_bps_read) {
        cfg->buckets[THROTTLE_BPS_READ].avg = arg->bps_read;
    }
    if (arg->has_bps_write) {
        cfg->buckets[THROTTLE_BPS_WRITE].avg = arg->bps_write;
    }

    if (arg->has_iops_total) {
        cfg->buckets[THROTTLE_OPS_TOTAL].avg = arg->iops_total;
    }
    if (arg->has_iops_read) {
        cfg->buckets[THROTTLE_OPS_READ].avg = arg->iops_read;
    }
    if (arg->has_iops_write) {
        cfg->buckets[THROTTLE_OPS_WRITE].avg = arg->iops_write;
    }

    if (arg->has_bps_total_max) {
        cfg->buckets[THROTTLE_BPS_TOTAL].max = arg->bps_total_max;
    }
    if (arg->has_bps_read_max) {
        cfg->buckets[THROTTLE_BPS_READ].max = arg->bps_read_max;
    }
    if (arg->has_bps_write_max) {
        cfg->buckets[THROTTLE_BPS_WRITE].max = arg->bps_write_max;
    }
    if (arg->has_iops_total_max) {
        cfg->buckets[THROTTLE_OPS_TOTAL].max = arg->iops_total_max;
    }
    if (arg->has_iops_read_max) {
        cfg->buckets[THROTTLE_OPS_READ].max = arg->iops_read_max;
    }
    if (arg->has_iops_write_max) {
        cfg->buckets[THROTTLE_OPS_WRITE].max = arg->iops_write_max;
    }

    if (arg->has_bps_total_max_length) {
        if (arg->bps_total_max_length > UINT_MAX) {
            error_setg(errp, "bps-total-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_total_max_length;
    }
    if (arg->has_bps_read_max_length) {
        if (arg->bps_read_max_length > UINT_MAX) {
            error_setg(errp, "bps-read-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_BPS_READ].burst_length = arg->bps_read_max_length;
    }
    if (arg->has_bps_write_max_length) {
        if (arg->bps_write_max_length > UINT_MAX) {
            error_setg(errp, "bps-write-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_write_max_length;
    }
    if (arg->has_iops_total_max_length) {
        if (arg->iops_total_max_length > UINT_MAX) {
            error_setg(errp, "iops-total-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_total_max_length;
    }
    if (arg->has_iops_read_max_length) {
        if (arg->iops_read_max_length > UINT_MAX) {
            error_setg(errp, "iops-read-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_OPS_READ].burst_length = arg->iops_read_max_length;
    }
    if (arg->has_iops_write_max_length) {
        if (arg->iops_write_max_length > UINT_MAX) {
            error_setg(errp, "iops-write-max-length value must be in"
                             " the range [0, %u]", UINT_MAX);
            return;
        }
        cfg->buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_write_max_length;
    }

    if (arg->has_iops_size) {
        cfg->op_size = arg->iops_size;
    }

    throttle_is_valid(cfg, errp);
}

/* write the options of a ThrottleConfig to a ThrottleLimits
 *
 * @cfg: the ThrottleConfig to read from
 * @var: the ThrottleLimits to write to
 */
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var)
{
    var->bps_total = cfg->buckets[THROTTLE_BPS_TOTAL].avg;
    var->bps_read = cfg->buckets[THROTTLE_BPS_READ].avg;
    var->bps_write = cfg->buckets[THROTTLE_BPS_WRITE].avg;
    var->iops_total = cfg->buckets[THROTTLE_OPS_TOTAL].avg;
    var->iops_read = cfg->buckets[THROTTLE_OPS_READ].avg;
    var->iops_write = cfg->buckets[THROTTLE_OPS_WRITE].avg;
    var->bps_total_max = cfg->buckets[THROTTLE_BPS_TOTAL].max;
    var->bps_read_max = cfg->buckets[THROTTLE_BPS_READ].max;
    var->bps_write_max = cfg->buckets[THROTTLE_BPS_WRITE].max;
    var->iops_total_max = cfg->buckets[THROTTLE_OPS_TOTAL].max;
    var->iops_read_max = cfg->buckets[THROTTLE_OPS_READ].max;
    var->iops_write_max = cfg->buckets[THROTTLE_OPS_WRITE].max;
    var->bps_total_max_length = cfg->buckets[THROTTLE_BPS_TOTAL].burst_length;
    var->bps_read_max_length = cfg->buckets[THROTTLE_BPS_READ].burst_length;
    var->bps_write_max_length = cfg->buckets[THROTTLE_BPS_WRITE].burst_length;
    var->iops_total_max_length = cfg->buckets[THROTTLE_OPS_TOTAL].burst_length;
    var->iops_read_max_length = cfg->buckets[THROTTLE_OPS_READ].burst_length;
    var->iops_write_max_length = cfg->buckets[THROTTLE_OPS_WRITE].burst_length;
    var->iops_size = cfg->op_size;

    var->has_bps_total = true;
    var->has_bps_read = true;
    var->has_bps_write = true;
    var->has_iops_total = true;
    var->has_iops_read = true;
    var->has_iops_write = true;
    var->has_bps_total_max = true;
    var->has_bps_read_max = true;
    var->has_bps_write_max = true;
    var->has_iops_total_max = true;
    var->has_iops_read_max = true;
    var->has_iops_write_max = true;
    var->has_bps_read_max_length = true;
    var->has_bps_total_max_length = true;
    var->has_bps_write_max_length = true;
    var->has_iops_total_max_length = true;
    var->has_iops_read_max_length = true;
    var->has_iops_write_max_length = true;
    var->has_iops_size = true;
}