/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013, 2020
 *
 * Authors:
 *  Stefan Hajnoczi <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/event-loop-base.h"
#include "sysemu/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

typedef ObjectClass IOThreadClass;

DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
                       TYPE_IOTHREAD)

#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show that max polling times
 * around 16-32 microseconds yield IOPS improvements for both iodepth=1 and
 * iodepth=32 workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    /*
     * g_main_context_push_thread_default() must be called before anything
     * in this new thread uses glib.
     */
    g_main_context_push_thread_default(iothread->worker_context);
    qemu_set_current_aio_context(iothread->ctx);
    iothread->thread_id = qemu_get_thread_id();
    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
        /*
         * Note: functionally, the g_main_loop_run() below could already
         * cover the aio_poll() events, but we can't run the main loop
         * unconditionally because an explicit aio_poll() here is faster
         * than g_main_loop_run() when the gcontext is not needed at all
         * (e.g., pure block layer iothreads).  In other words, running
         * the gcontext with the iothread trades some performance for
         * functionality.
         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was
         * changed in the previous aio_poll().
         */
        if (iothread->running && qatomic_read(&iothread->run_gcontext)) {
            g_main_loop_run(iothread->main_loop);
        }
    }

    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    iothread->thread_id = -1;
    qemu_sem_init(&iothread->init_done_sem, 0);
    /* By default, we don't run gcontext */
    qatomic_set(&iothread->run_gcontext, 0);
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Before glib2 2.33.10, there is a glib2 bug where a GSource's context
     * pointer may not be cleared even though the context has already been
     * destroyed (when it should be).  Free the AIO context earlier here to
     * bypass that glib bug.
     *
     * We can remove this comment once the minimum supported glib2 version
     * is raised to 2.33.10.  Until then, free the GSources first, before
     * destroying any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
        g_main_loop_unref(iothread->main_loop);
        iothread->main_loop = NULL;
    }
    qemu_sem_destroy(&iothread->init_done_sem);
}

static void iothread_init_gcontext(IOThread *iothread)
{
    GSource *source;

    iothread->worker_context = g_main_context_new();
    source = aio_get_g_source(iothread_get_aio_context(iothread));
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);
    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}

static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
{
    ERRP_GUARD();
    IOThread *iothread = IOTHREAD(base);

    if (!iothread->ctx) {
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                errp);
    if (*errp) {
        return;
    }

    aio_context_set_aio_params(iothread->ctx,
                               iothread->parent_obj.aio_max_batch,
                               errp);

    aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
                                       base->thread_pool_max, errp);
}

static void iothread_init(EventLoopBase *base, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(base);
    char *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->ctx = aio_context_new(errp);
    if (!iothread->ctx) {
        return;
    }

    /*
     * Init one GMainContext for the iothread unconditionally, even if
     * it's not used
     */
    iothread_init_gcontext(iothread);

    iothread_set_aio_context_params(base, &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    thread_name = g_strdup_printf("IO %s",
                        object_get_canonical_path_component(OBJECT(base)));
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);

    /* Wait for initialization to complete */
    while (iothread->thread_id == -1) {
        qemu_sem_wait(&iothread->init_done_sem);
    }
}

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} IOThreadParamInfo;

static IOThreadParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static IOThreadParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static IOThreadParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

static void iothread_get_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static bool iothread_set_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;
    int64_t value;

    if (!visit_type_int64(v, name, &value, errp)) {
        return false;
    }

    if (value < 0) {
        error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                   info->name, INT64_MAX);
        return false;
    }

    *field = value;

    return true;
}

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;

    iothread_get_param(obj, v, name, info, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    IOThreadParamInfo *info = opaque;

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    errp);
    }
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);

    bc->init = iothread_init;
    bc->update_params = iothread_set_aio_context_params;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info);
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_EVENT_LOOP_BASE,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return g_strdup(object_get_canonical_path_component(OBJECT(iothread)));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}
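
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * one way a hypothetical caller could run work in this IOThread's event
 * loop.  my_bh_cb() and run_in_iothread() are made-up names; the
 * aio_bh_schedule_oneshot() API is the same one used by iothread_stop()
 * above.
 */
#if 0
static void my_bh_cb(void *opaque)
{
    /* This runs in the IOThread's thread, inside its AioContext. */
}

static void run_in_iothread(IOThread *iothread, void *opaque)
{
    aio_bh_schedule_oneshot(iothread_get_aio_context(iothread),
                            my_bh_cb, opaque);
}
#endif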

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***tail = opaque;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;
    info->aio_max_batch = iothread->parent_obj.aio_max_batch;

    QAPI_LIST_APPEND(*tail, info);
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}
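
/*
 * Illustrative note (editor's addition, not part of the upstream file):
 * qmp_query_iothreads() backs the QMP command "query-iothreads", e.g.
 *
 *   -> { "execute": "query-iothreads" }
 *   <- { "return": [ { "id": "iothread0", "thread-id": 1234, ... } ] }
 *
 * with one entry per user-created iothread carrying the fields filled in
 * above; the id "iothread0" and the thread id are made-up example values.
 */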

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    qatomic_set(&iothread->run_gcontext, 1);
    aio_notify(iothread->ctx);
    return iothread->worker_context;
}
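
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * attaching a glib timeout source so that it fires in this IOThread's glib
 * main loop.  my_timeout_cb() and add_timeout_to_iothread() are made-up
 * names; g_timeout_source_new(), g_source_set_callback(), g_source_attach()
 * and g_source_unref() are standard glib APIs.
 */
#if 0
static gboolean my_timeout_cb(gpointer opaque)
{
    /* Runs in the IOThread once its gcontext is being dispatched. */
    return G_SOURCE_REMOVE;
}

static void add_timeout_to_iothread(IOThread *iothread, void *opaque)
{
    GSource *source = g_timeout_source_new(1000); /* interval in ms */

    g_source_set_callback(source, my_timeout_cb, opaque, NULL);
    g_source_attach(source, iothread_get_g_main_context(iothread));
    g_source_unref(source);
}
#endif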

IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Lookup IOThread by its id. Only finds user-created objects, not internal
 * iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}
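
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the lifecycle of an internal iothread.  example_internal_iothread() and
 * the id "my-internal-iothread" are made-up; because iothread_create()
 * parents the object under the internal root, it will not be found by
 * iothread_by_id(), which only resolves user-created "-object iothread"
 * instances.
 */
#if 0
static void example_internal_iothread(Error **errp)
{
    IOThread *iothread = iothread_create("my-internal-iothread", errp);

    if (!iothread) {
        return;
    }
    /* ... hand iothread_get_aio_context(iothread) to the code that needs it ... */
    iothread_destroy(iothread);
}
#endif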

bool qemu_in_iothread(void)
{
    return qemu_get_current_aio_context() != qemu_get_aio_context();
}