// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "napi.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
#define IORING_TW_CAP_ENTRIES_VALUE	8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};
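
/*
 * Release sqd->lock and drop one park request. The PARK bit stays set only
 * while other parkers are still pending, so the SQPOLL thread resumes once
 * the last one has unparked.
 */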
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}
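
/*
 * Request that the SQPOLL thread parks and take sqd->lock. Until the
 * matching io_sq_thread_unpark(), the caller owns the lock and may safely
 * modify the list of attached rings.
 */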
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(data_race(sqd->thread) == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}
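
/*
 * Ask the SQPOLL thread to exit and wait for it to do so. May only be
 * called once per io_sq_data, and never from the thread itself.
 */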
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}
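
/* Drop a reference; the final put stops the SQPOLL thread and frees sqd. */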
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}
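
/* Recompute the idle timeout as the maximum over all attached rings. */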
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}
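
/*
 * Detach a ring from its SQPOLL backend: unlink it from the ctx list under
 * park, refresh the shared idle timeout and drop the ring's reference.
 */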
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}
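
/*
 * IORING_SETUP_ATTACH_WQ: look up the io_sq_data behind the ring fd in
 * p->wq_fd and grab a reference to it. Only rings owned by the same thread
 * group may share an SQPOLL thread.
 */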
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!fd_file(f))
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(fd_file(f))) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = fd_file(f)->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}
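
/*
 * Find or create the io_sq_data for a new ring: attach to an existing one
 * when IORING_SETUP_ATTACH_WQ allows it (*attached is set), otherwise
 * allocate and initialise a fresh instance with a single reference.
 */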
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}
static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}
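
/*
 * One submission pass over a single ring: reap IOPOLL completions, submit
 * pending SQEs under the ring's saved credentials, and wake anyone waiting
 * in io_sqpoll_wait_sq() for ring space. Returns the number of SQEs
 * submitted.
 */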
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}
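
/*
 * Handle park/stop requests and signals directed at the SQPOLL thread. If a
 * park or signal is pending, sqd->lock is dropped so the parking task can
 * run with it held, then reacquired. Returns true if the thread should exit.
 */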
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}
/*
 * Run task_work, processing the retry_list first. The retry_list holds
 * entries that we passed on in the previous run, if we had more task_work
 * than we were asked to process. Newly queued task_work isn't run until the
 * retry list has been fully processed.
 */
static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
{
	struct io_uring_task *tctx = current->io_uring;
	unsigned int count = 0;

	if (*retry_list) {
		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
		if (count >= max_entries)
			goto out;
		max_entries -= count;
	}
	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
out:
	if (task_work_pending(current))
		task_work_run();
	return count;
}
static bool io_sq_tw_pending(struct llist_node *retry_list)
{
	struct io_uring_task *tctx = current->io_uring;

	return retry_list || !llist_empty(&tctx->task_list);
}
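
/*
 * Add the system CPU time consumed since *start to sqd->work_time,
 * accounted in microseconds.
 */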
static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
{
	struct rusage end;

	getrusage(current, RUSAGE_SELF, &end);
	end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
	end.ru_stime.tv_usec -= start->ru_stime.tv_usec;

	sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
}
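
/*
 * Main loop of the "iou-sqp-<pid>" kernel thread. While any attached ring
 * has work, keep submitting SQEs and running task_work; once sq_thread_idle
 * has elapsed with nothing to do, set IORING_SQ_NEED_WAKEUP in each ring's
 * flags and sleep until woken, e.g. by io_uring_enter() with
 * IORING_ENTER_SQ_WAKEUP or by a park/stop request.
 */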
static int io_sq_thread(void *data)
{
	struct llist_node *retry_list = NULL;
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	struct rusage start;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	/* offload context creation failed, just exit */
	if (!current->io_uring)
		goto err_out;

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	/*
	 * Force audit context to get setup, in case we do prep side async
	 * operations that would trigger an audit call before any issue side
	 * audit has been done.
	 */
	audit_uring_entry(IORING_OP_NOP);
	audit_uring_exit(true, 0);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		getrusage(current, RUSAGE_SELF, &start);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
			sqt_spin = true;

		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			if (io_napi(ctx))
				io_napi_sqpoll_busy_poll(ctx);

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin) {
				io_sq_update_worktime(sqd, &start);
				timeout = jiffies + sqd->sq_thread_idle;
			}
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
						&ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
						&ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	if (retry_list)
		io_sq_tw(&retry_list, UINT_MAX);

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);
err_out:
	complete(&sqd->exited);
	do_exit(0);
}
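
/*
 * Called from the submission path when the SQ ring is full: sleep until the
 * SQPOLL thread has made room in the ring, or until a signal is pending.
 */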
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}
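
/*
 * Set up SQPOLL offload for a ring, if IORING_SETUP_SQPOLL was requested.
 * Applications opt in at setup time; a minimal userspace sketch (not part
 * of this file, queue depth chosen arbitrarily) would be:
 *
 *	struct io_uring_params p = { };
 *
 *	p.flags = IORING_SETUP_SQPOLL;
 *	p.sq_thread_idle = 2000;	// go idle after 2000ms without work
 *	ring_fd = io_uring_setup(64, &p);
 *
 * IORING_SETUP_ATTACH_WQ reuses the SQPOLL thread of the ring given in
 * p.wq_fd, and IORING_SETUP_SQ_AFF pins the thread to p.sq_thread_cpu.
 */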
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!fd_file(f))
			return -ENXIO;
		if (!io_is_uring_fops(fd_file(f))) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			cpumask_var_t allowed_mask;
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			ret = -ENOMEM;
			if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
				goto err_sqpoll;
			ret = -EINVAL;
			cpuset_cpus_allowed(current, allowed_mask);
			if (!cpumask_test_cpu(cpu, allowed_mask)) {
				free_cpumask_var(allowed_mask);
				goto err_sqpoll;
			}
			free_cpumask_var(allowed_mask);
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}
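
/*
 * Apply a CPU affinity mask to the io-wq workers owned by the SQPOLL thread.
 * The thread is parked around the update; -EINVAL is returned if it has
 * already exited.
 */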
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}