/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
13 | ||
14 | #include "qemu/osdep.h" | |
15 | #include "qom/object.h" | |
16 | #include "qom/object_interfaces.h" | |
17 | #include "qemu/module.h" | |
18 | #include "block/aio.h" | |
19 | #include "block/block.h" | |
20 | #include "sysemu/iothread.h" | |
21 | #include "qapi/error.h" | |
22 | #include "qapi/qapi-commands-misc.h" | |
23 | #include "qemu/error-report.h" | |
24 | #include "qemu/rcu.h" | |
25 | #include "qemu/main-loop.h" | |
26 | ||
27 | typedef ObjectClass IOThreadClass; | |
28 | ||
29 | #define IOTHREAD_GET_CLASS(obj) \ | |
30 | OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD) | |
31 | #define IOTHREAD_CLASS(klass) \ | |
32 | OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD) | |
33 | ||
34 | /* Benchmark results from 2016 on NVMe SSD drives show max polling times around | |
35 | * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32 | |
36 | * workloads. | |
37 | */ | |
38 | #define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL | |
39 | ||
40 | static __thread IOThread *my_iothread; | |
41 | ||
42 | AioContext *qemu_get_current_aio_context(void) | |
43 | { | |
44 | return my_iothread ? my_iothread->ctx : qemu_get_aio_context(); | |
45 | } | |
46 | ||
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();

    my_iothread = iothread;
    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (iothread->running) {
        aio_poll(iothread->ctx, true);

        if (atomic_read(&iothread->worker_context)) {
            GMainLoop *loop;

            g_main_context_push_thread_default(iothread->worker_context);
            iothread->main_loop =
                g_main_loop_new(iothread->worker_context, TRUE);
            loop = iothread->main_loop;

            g_main_loop_run(iothread->main_loop);
            iothread->main_loop = NULL;
            g_main_loop_unref(loop);

            g_main_context_pop_thread_default(iothread->worker_context);
        }
    }

    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
    }
    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    if (!iothread->ctx) {
        return;
    }
    aio_context_unref(iothread->ctx);
}

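/*
 * UserCreatable completion hook: create the AioContext, apply the poll
 * parameters, start the event loop thread and wait until it has reported
 * its thread ID.
 */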
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);
    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} PollParamInfo;

static PollParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static PollParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static PollParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int64(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }

    if (value < 0) {
        error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
                   info->name, INT64_MAX);
        goto out;
    }

    *field = value;

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    &local_err);
    }

out:
    error_propagate(errp, local_err);
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info, &error_abort);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info, &error_abort);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info, &error_abort);
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThreadInfoList *elem;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}

static gpointer iothread_g_main_context_init(gpointer opaque)
{
    AioContext *ctx;
    IOThread *iothread = opaque;
    GSource *source;

    iothread->worker_context = g_main_context_new();

    ctx = iothread_get_aio_context(iothread);
    source = aio_get_g_source(ctx);
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);

    aio_notify(iothread->ctx);
    return NULL;
}

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    g_once(&iothread->once, iothread_g_main_context_init, iothread);

    return iothread->worker_context;
}

IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Lookup IOThread by its id. Only finds user-created objects, not internal
 * iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}