// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}

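/*
 * RCU callback used by ioc_destroy_icq() below: by the time this runs, no
 * RCU-protected lookup can still reach @icq, so it is safe to return it to
 * its slab cache.
 */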
static void icq_free_icq_rcu(struct rcu_head *head)
{
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

        kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
        struct elevator_type *et = icq->q->elevator->type;

        if (icq->flags & ICQ_EXITED)
                return;

        if (et->ops.exit_icq)
                et->ops.exit_icq(icq);

        icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);

        /*
         * Both setting lookup hint to and clearing it from @icq are done
         * under queue_lock. If it's not pointing to @icq now, it never
         * will. Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        ioc_exit_icq(icq);

        /*
         * @icq->q might have gone away by the time RCU callback runs
         * making it impossible to determine icq_cache. Record it in @icq.
         */
        icq->__rcu_icq_cache = et->icq_cache;
        icq->flags |= ICQ_DESTROYED;
        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

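/*
 * ICQ_DESTROYED, set above under ioc->lock, is how walkers that drop and
 * re-take locks (ioc_release_fn(), __ioc_clear_queue()) recognise that an
 * icq they still hold a pointer to has already been unlinked.
 */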
/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
        spin_lock_irq(&ioc->lock);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(&q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                } else {
                        /* Make sure q and icq cannot be freed. */
                        rcu_read_lock();

                        /* Re-acquire the locks in the correct order. */
                        spin_unlock(&ioc->lock);
                        spin_lock(&q->queue_lock);
                        spin_lock(&ioc->lock);

                        /*
                         * The icq may have been destroyed when the ioc lock
                         * was released.
                         */
                        if (!(icq->flags & ICQ_DESTROYED))
                                ioc_destroy_icq(icq);

                        spin_unlock(&q->queue_lock);
                        rcu_read_unlock();
                }
        }

        spin_unlock_irq(&ioc->lock);

        kmem_cache_free(iocontext_cachep, ioc);
}

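/*
 * The trylock/relock dance above is the standard pattern for taking two
 * locks whose documented order is outer-then-inner (here: queue_lock,
 * then ioc->lock) when the inner lock is already held.  A minimal sketch
 * with hypothetical locks A (outer) and B (inner):
 *
 *      spin_lock(&B);
 *      if (!spin_trylock(&A)) {
 *              spin_unlock(&B);
 *              spin_lock(&A);
 *              spin_lock(&B);
 *              revalidate state that may have changed while B was dropped;
 *      }
 *
 * The ICQ_DESTROYED check plays the "revalidate" role here.
 */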
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
        unsigned long flags;
        bool free_ioc = false;

        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

        /*
         * Releasing ioc requires reverse order double locking and we may
         * already be holding a queue_lock. Do it asynchronously from wq.
         */
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
                        queue_work(system_power_efficient_wq,
                                        &ioc->release_work);
                else
                        free_ioc = true;
                spin_unlock_irqrestore(&ioc->lock, flags);
        }

        if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

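/*
 * Illustrative usage (not part of this file): a caller that obtained a
 * reference, e.g. via get_task_io_context(), pairs it with
 * put_io_context() once done:
 *
 *      struct io_context *ioc;
 *
 *      ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *      if (ioc) {
 *              ... inspect ioc, e.g. ioc->ioprio ...
 *              put_io_context(ioc);
 *      }
 */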
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Put an active reference to an ioc. If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
        struct io_cq *icq;

        if (!atomic_dec_and_test(&ioc->active_ref)) {
                put_io_context(ioc);
                return;
        }

        spin_lock_irq(&ioc->lock);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;

                ioc_exit_icq(icq);
        }
        spin_unlock_irq(&ioc->lock);

        put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        atomic_dec(&ioc->nr_tasks);
        put_io_context_active(ioc);
}

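/*
 * Destroy every icq on @icq_list.  Runs under rcu_read_lock() so an icq
 * freed concurrently by ioc_release_fn() stays readable long enough for
 * its ICQ_DESTROYED flag to be checked.
 */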
static void __ioc_clear_queue(struct list_head *icq_list)
{
        unsigned long flags;

        rcu_read_lock();
        while (!list_empty(icq_list)) {
                struct io_cq *icq = list_entry(icq_list->next,
                                                struct io_cq, q_node);
                struct io_context *ioc = icq->ioc;

                spin_lock_irqsave(&ioc->lock, flags);
                if (icq->flags & ICQ_DESTROYED) {
                        spin_unlock_irqrestore(&ioc->lock, flags);
                        continue;
                }
                ioc_destroy_icq(icq);
                spin_unlock_irqrestore(&ioc->lock, flags);
        }
        rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
        LIST_HEAD(icq_list);

        spin_lock_irq(&q->queue_lock);
        list_splice_init(&q->icq_list, &icq_list);
        spin_unlock_irq(&q->queue_lock);

        __ioc_clear_queue(&icq_list);
}

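/*
 * Allocate and initialise a fresh io_context.  The returned context starts
 * with one reference, one active reference and one task accounted, all
 * owned by the caller.
 */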
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return NULL;

        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->nr_tasks, 1);
        atomic_set(&ioc->active_ref, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);
        return ioc;
}

static struct io_context *create_task_io_context(struct task_struct *task,
                gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        ioc = alloc_io_context(gfp_flags, node);
        if (!ioc)
                return NULL;

        /*
         * Try to install. ioc shouldn't be installed if someone else
         * already did or @task, which isn't %current, is exiting. Note
         * that we need to allow ioc creation on exiting %current as exit
         * path may issue IOs from e.g. exit_files(). The exit path is
         * responsible for not issuing IO after exit_io_context().
         */
        task_lock(task);
        if (!task->io_context &&
            (task == current || !(task->flags & PF_EXITING)))
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);

        ioc = task->io_context;
        if (ioc)
                get_io_context(ioc);
        task_unlock(task);
        return ioc;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        task_lock(task);
        ioc = task->io_context;
        if (unlikely(!ioc)) {
                task_unlock(task);
                return create_task_io_context(task, gfp_flags, node);
        }
        get_io_context(ioc);
        task_unlock(task);
        return ioc;
}

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
        struct io_context *ioc = current->io_context;

        /*
         * Share io context with parent, if CLONE_IO is set
         */
        if (clone_flags & CLONE_IO) {
                atomic_long_inc(&ioc->refcount);
                atomic_inc(&ioc->active_ref);
                atomic_inc(&ioc->nr_tasks);
                tsk->io_context = ioc;
        } else if (ioprio_valid(ioc->ioprio)) {
                tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
                if (!tsk->io_context)
                        return -ENOMEM;
                tsk->io_context->ioprio = ioc->ioprio;
        }

        return 0;
}

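/*
 * Behaviour summary (informational): with CLONE_IO the child shares the
 * parent's io_context outright (all three counters are bumped above);
 * without it, only a valid ioprio is copied into a fresh context.  A
 * child that hits neither branch starts with no io_context and gets one
 * lazily from create_task_io_context() on first use.
 */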
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the current task's io_context and @q.
 * Must be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq;

        lockdep_assert_held(&q->queue_lock);

        /*
         * icq's are indexed from @ioc using radix tree and hint pointer,
         * both of which are protected with RCU. All removals are done
         * holding both q and ioc locks, and we're holding q lock - if we
         * find an icq which points to us, it's guaranteed to be valid.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

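/*
 * The icq_hint is a best-effort last-hit cache: it is never trusted
 * blindly (icq->q is always compared against @q before use), so the racy
 * assignment above is harmless and at worst costs one extra radix tree
 * lookup.
 */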
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking the current task's io_context and @q exists.
 * If it doesn't, a new one is allocated with GFP_ATOMIC and linked.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct elevator_type *et = q->elevator->type;
        struct io_cq *icq;

        /* allocate stuff */
        icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(&q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (et->ops.init_icq)
                        et->ops.init_icq(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(&q->queue_lock);
        radix_tree_preload_end();
        return icq;
}

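/*
 * Note the lost-insert race above: if radix_tree_insert() fails (usually
 * because another context already linked an icq for this queue), the
 * local allocation is freed and the winning icq is looked up and
 * returned instead.
 */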
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq = NULL;

        if (unlikely(!ioc)) {
                ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
                if (!ioc)
                        return NULL;
        } else {
                get_io_context(ioc);

                spin_lock_irq(&q->queue_lock);
                icq = ioc_lookup_icq(q);
                spin_unlock_irq(&q->queue_lock);
        }

        if (!icq) {
                icq = ioc_create_icq(q);
                if (!icq) {
                        put_io_context(ioc);
                        return NULL;
                }
        }
        return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);

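/*
 * Illustrative sketch (not part of this file): an elevator would
 * typically call ioc_find_get_icq() while preparing a request, roughly:
 *
 *      struct io_cq *icq = ioc_find_get_icq(q);
 *
 *      if (icq)
 *              rq->elv.icq = icq;
 *
 * The returned icq carries a reference on its io_context that is dropped
 * later via put_io_context().
 */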
/* Set up the io_context slab cache at boot; SLAB_PANIC makes failure fatal. */
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);