/*
 * QEMU block layer thread pool
 *
 * Copyright IBM, Corp. 2008
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *  Paolo Bonzini     <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/osdep.h"
#include "block/coroutine.h"
#include "trace.h"
#include "block/block_int.h"
#include "qemu/event_notifier.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"

static void do_spawn_thread(ThreadPool *pool);

typedef struct ThreadPoolElement ThreadPoolElement;

enum ThreadState {
    THREAD_QUEUED,
    THREAD_ACTIVE,
    THREAD_DONE,
    THREAD_CANCELED,
};
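
/* A request normally moves THREAD_QUEUED -> THREAD_ACTIVE -> THREAD_DONE as
 * a worker picks it up and runs it.  THREAD_QUEUED -> THREAD_CANCELED is the
 * only other transition; it happens in thread_pool_cancel() when the request
 * is stolen from the queue before any worker has started on it.
 */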

struct ThreadPoolElement {
    BlockDriverAIOCB common;
    ThreadPool *pool;
    ThreadPoolFunc *func;
    void *arg;

    /* Moving state out of THREAD_QUEUED is protected by lock.  After
     * that, only the worker thread can write to it.  Reads and writes
     * of state and ret are ordered with memory barriers.
     */
    enum ThreadState state;
    int ret;

    /* Access to this list is protected by lock.  */
    QTAILQ_ENTRY(ThreadPoolElement) reqs;

    /* Access to this list is protected by the global mutex.  */
    QLIST_ENTRY(ThreadPoolElement) all;
};

struct ThreadPool {
    EventNotifier notifier;
    AioContext *ctx;
    QemuMutex lock;
    QemuCond check_cancel;
    QemuCond worker_stopped;
    QemuSemaphore sem;
    int max_threads;
    QEMUBH *new_thread_bh;

    /* The following variables are only accessed from one AioContext. */
    QLIST_HEAD(, ThreadPoolElement) head;

    /* The following variables are protected by lock. */
    QTAILQ_HEAD(, ThreadPoolElement) request_list;
    int cur_threads;
    int idle_threads;
    int new_threads;     /* backlog of threads we need to create */
    int pending_threads; /* threads created but not running yet */
    int pending_cancellations; /* counts waiters in thread_pool_cancel();
                                  nonzero means we need a cond_broadcast */
    bool stopping;
};

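/* Worker loop: wait up to 10 seconds for work, run one request, publish its
 * result, and wake the pool's AioContext through the event notifier.  A
 * worker that stays idle past the timeout, or that observes pool->stopping,
 * exits so that an unused pool shrinks back to zero threads.
 */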
static void *worker_thread(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    pool->pending_threads--;
    do_spawn_thread(pool);

    while (!pool->stopping) {
        ThreadPoolElement *req;
        int ret;

        do {
            pool->idle_threads++;
            qemu_mutex_unlock(&pool->lock);
            ret = qemu_sem_timedwait(&pool->sem, 10000);
            qemu_mutex_lock(&pool->lock);
            pool->idle_threads--;
        } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
        if (ret == -1 || pool->stopping) {
            break;
        }

        req = QTAILQ_FIRST(&pool->request_list);
        QTAILQ_REMOVE(&pool->request_list, req, reqs);
        req->state = THREAD_ACTIVE;
        qemu_mutex_unlock(&pool->lock);

        ret = req->func(req->arg);

        req->ret = ret;
        /* Write ret before state.  */
        smp_wmb();
        req->state = THREAD_DONE;

        qemu_mutex_lock(&pool->lock);
        if (pool->pending_cancellations) {
            qemu_cond_broadcast(&pool->check_cancel);
        }

        event_notifier_set(&pool->notifier);
    }

    pool->cur_threads--;
    qemu_cond_signal(&pool->worker_stopped);
    qemu_mutex_unlock(&pool->lock);
    return NULL;
}

static void do_spawn_thread(ThreadPool *pool)
{
    QemuThread t;

    /* Runs with lock taken.  */
    if (!pool->new_threads) {
        return;
    }

    pool->new_threads--;
    pool->pending_threads++;

    qemu_thread_create(&t, "worker", worker_thread, pool,
                       QEMU_THREAD_DETACHED);
}

static void spawn_thread_bh_fn(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    do_spawn_thread(pool);
    qemu_mutex_unlock(&pool->lock);
}

static void spawn_thread(ThreadPool *pool)
{
    pool->cur_threads++;
    pool->new_threads++;
    /* If there are threads being created, they will spawn new workers, so
     * we don't spend time creating many threads in a loop holding a mutex or
     * starving the current vcpu.
     *
     * If there are no idle threads, ask the main thread to create one, so we
     * inherit the correct affinity instead of the vcpu affinity.
     */
    if (!pool->pending_threads) {
        qemu_bh_schedule(pool->new_thread_bh);
    }
}

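/* Completion handler, run in the pool's AioContext when a worker sets the
 * event notifier.  It reaps every DONE or CANCELED element from pool->head
 * and invokes the caller's callback for completed requests; the restart
 * label re-walks the list because a callback may itself change it.
 */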
static void event_notifier_ready(EventNotifier *notifier)
{
    ThreadPool *pool = container_of(notifier, ThreadPool, notifier);
    ThreadPoolElement *elem, *next;

    event_notifier_test_and_clear(notifier);
restart:
    QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
        if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
            continue;
        }
        if (elem->state == THREAD_DONE) {
            trace_thread_pool_complete(pool, elem, elem->common.opaque,
                                       elem->ret);
        }
        if (elem->state == THREAD_DONE && elem->common.cb) {
            QLIST_REMOVE(elem, all);
            /* Read state before ret.  */
            smp_rmb();
            elem->common.cb(elem->common.opaque, elem->ret);
            qemu_aio_release(elem);
            goto restart;
        } else {
            /* remove the request */
            QLIST_REMOVE(elem, all);
            qemu_aio_release(elem);
        }
    }
}

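/* Cancellation: if no worker has picked the request up yet, steal it from
 * the queue by consuming one semaphore signal; otherwise wait on
 * check_cancel until a worker has finished it.  Either way the element is
 * reaped synchronously through event_notifier_ready() before returning.
 */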
static void thread_pool_cancel(BlockDriverAIOCB *acb)
{
    ThreadPoolElement *elem = (ThreadPoolElement *)acb;
    ThreadPool *pool = elem->pool;

    trace_thread_pool_cancel(elem, elem->common.opaque);

    qemu_mutex_lock(&pool->lock);
    if (elem->state == THREAD_QUEUED &&
        /* No thread has yet started working on elem.  We can try to "steal"
         * the item from the worker if we can get a signal from the
         * semaphore.  Because this is non-blocking, we can do it with
         * the lock taken and ensure that elem will remain THREAD_QUEUED.
         */
        qemu_sem_timedwait(&pool->sem, 0) == 0) {
        QTAILQ_REMOVE(&pool->request_list, elem, reqs);
        elem->state = THREAD_CANCELED;
        event_notifier_set(&pool->notifier);
    } else {
        pool->pending_cancellations++;
        while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
            qemu_cond_wait(&pool->check_cancel, &pool->lock);
        }
        pool->pending_cancellations--;
    }
    qemu_mutex_unlock(&pool->lock);
    event_notifier_ready(&pool->notifier);
}

static const AIOCBInfo thread_pool_aiocb_info = {
    .aiocb_size = sizeof(ThreadPoolElement),
    .cancel     = thread_pool_cancel,
};

BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool,
        ThreadPoolFunc *func, void *arg,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    ThreadPoolElement *req;

    req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
    req->func = func;
    req->arg = arg;
    req->state = THREAD_QUEUED;
    req->pool = pool;

    QLIST_INSERT_HEAD(&pool->head, req, all);

    trace_thread_pool_submit(pool, req, arg);

    qemu_mutex_lock(&pool->lock);
    if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
        spawn_thread(pool);
    }
    QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
    qemu_mutex_unlock(&pool->lock);
    qemu_sem_post(&pool->sem);
    return &req->common;
}
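
/* Example usage, as a minimal sketch: ReadOp, read_worker() and read_done()
 * are hypothetical and stand in for a caller's own types.  The worker runs
 * in a pool thread; the completion callback runs in the pool's AioContext.
 *
 *   static int read_worker(void *opaque)
 *   {
 *       ReadOp *op = opaque;
 *       ssize_t n = pread(op->fd, op->buf, op->len, op->offset);
 *       return n < 0 ? -errno : 0;
 *   }
 *
 *   static void read_done(void *opaque, int ret)
 *   {
 *       ReadOp *op = opaque;
 *       ...consume op; ret is the worker's return value...
 *   }
 *
 *   thread_pool_submit_aio(pool, read_worker, op, read_done, op);
 */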

typedef struct ThreadPoolCo {
    Coroutine *co;
    int ret;
} ThreadPoolCo;

static void thread_pool_co_cb(void *opaque, int ret)
{
    ThreadPoolCo *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->co, NULL);
}

int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
                                       void *arg)
{
    ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
    assert(qemu_in_coroutine());
    thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
    qemu_coroutine_yield();
    return tpc.ret;
}
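
/* Example usage, as a minimal sketch: flush_worker() is hypothetical.  From
 * coroutine context, thread_pool_submit_co() suspends the caller until the
 * worker has run, then returns the worker's result directly:
 *
 *   static int flush_worker(void *opaque)
 *   {
 *       int fd = (intptr_t)opaque;
 *       return fdatasync(fd) < 0 ? -errno : 0;
 *   }
 *
 *   ret = thread_pool_submit_co(pool, flush_worker, (void *)(intptr_t)fd);
 */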

void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
{
    thread_pool_submit_aio(pool, func, arg, NULL, NULL);
}

static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
{
    if (!ctx) {
        ctx = qemu_get_aio_context();
    }

    memset(pool, 0, sizeof(*pool));
    event_notifier_init(&pool->notifier, false);
    pool->ctx = ctx;
    qemu_mutex_init(&pool->lock);
    qemu_cond_init(&pool->check_cancel);
    qemu_cond_init(&pool->worker_stopped);
    qemu_sem_init(&pool->sem, 0);
    pool->max_threads = 64;
    pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);

    QLIST_INIT(&pool->head);
    QTAILQ_INIT(&pool->request_list);

    aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready);
}

ThreadPool *thread_pool_new(AioContext *ctx)
{
    ThreadPool *pool = g_new(ThreadPool, 1);
    thread_pool_init_one(pool, ctx);
    return pool;
}
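
/* Typical lifecycle, as a sketch (ctx is the caller's AioContext):
 *
 *   ThreadPool *pool = thread_pool_new(ctx);
 *   ...submit requests; each one completes or is cancelled...
 *   thread_pool_free(pool);   // requires pool->head to be empty
 */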

void thread_pool_free(ThreadPool *pool)
{
    if (!pool) {
        return;
    }

    assert(QLIST_EMPTY(&pool->head));

    qemu_mutex_lock(&pool->lock);

    /* Stop new threads from spawning */
    qemu_bh_delete(pool->new_thread_bh);
    pool->cur_threads -= pool->new_threads;
    pool->new_threads = 0;

    /* Wait for worker threads to terminate */
    pool->stopping = true;
    while (pool->cur_threads > 0) {
        qemu_sem_post(&pool->sem);
        qemu_cond_wait(&pool->worker_stopped, &pool->lock);
    }

    qemu_mutex_unlock(&pool->lock);

    aio_set_event_notifier(pool->ctx, &pool->notifier, NULL);
    qemu_sem_destroy(&pool->sem);
    qemu_cond_destroy(&pool->check_cancel);
    qemu_cond_destroy(&pool->worker_stopped);
    qemu_mutex_destroy(&pool->lock);
    event_notifier_cleanup(&pool->notifier);
    g_free(pool);
}