io_req_set_res(req, res, req->cqe.flags);
}
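+/*
+ * Classic polling: call straight into the file's iopoll handler.
+ * IORING_OP_URING_CMD requests poll through ->uring_cmd_iopoll(),
+ * regular read/write requests through ->iopoll().
+ */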
+static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct file *file = req->file;
+
+ if (req->opcode == IORING_OP_URING_CMD) {
+ struct io_uring_cmd *ioucmd;
+
+ ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
+ } else {
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+ return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
+ }
+}
+
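+/*
+ * Sleep for roughly half of the expected completion time before polling,
+ * releasing the CPU instead of busy-polling for the whole I/O duration.
+ * Returns the nanoseconds slept so the caller can subtract them from the
+ * measured completion time.
+ */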
+static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
+{
+ struct hrtimer_sleeper timer;
+ enum hrtimer_mode mode;
+ ktime_t kt;
+ u64 sleep_time;
+
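+ /*
+ * Sleep at most once per request; a hybrid_poll_time of LLONG_MAX
+ * means no completion time has been recorded yet, so there is
+ * nothing to base a sleep on.
+ */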
+ if (req->flags & REQ_F_IOPOLL_STATE)
+ return 0;
+
+ if (ctx->hybrid_poll_time == LLONG_MAX)
+ return 0;
+
+ /* Use half of the observed completion time as the sleep period */
+ sleep_time = ctx->hybrid_poll_time / 2;
+
+ kt = ktime_set(0, sleep_time);
+ req->flags |= REQ_F_IOPOLL_STATE;
+
+ mode = HRTIMER_MODE_REL;
+ hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
+ hrtimer_set_expires(&timer.timer, kt);
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_sleeper_start_expires(&timer, mode);
+
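+ /* timer.task is cleared once the timer has fired; only sleep if it hasn't */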
+ if (timer.task)
+ io_schedule();
+
+ hrtimer_cancel(&timer.timer);
+ __set_current_state(TASK_RUNNING);
+ destroy_hrtimer_on_stack(&timer.timer);
+ return sleep_time;
+}
+
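+/*
+ * Hybrid polling: sleep first, then poll. The completion time measured
+ * from req->iopoll_start (minus the time slept) feeds back into
+ * ctx->hybrid_poll_time, so the next sleep adapts to the device.
+ * Used by io_do_iopoll() when the ring was created with
+ * IORING_SETUP_HYBRID_IOPOLL.
+ */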
+static int io_uring_hybrid_poll(struct io_kiocb *req,
+ struct io_comp_batch *iob, unsigned int poll_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ u64 runtime, sleep_time;
+ int ret;
+
+ sleep_time = io_hybrid_iopoll_delay(ctx, req);
+ ret = io_uring_classic_poll(req, iob, poll_flags);
+ runtime = ktime_get_ns() - req->iopoll_start - sleep_time;
+
+ /*
+ * Track the minimum completion time. If we're polling devices with
+ * different latencies, basing the sleep on the fastest one lets us
+ * reap more completions from it.
+ */
+ if (ctx->hybrid_poll_time > runtime)
+ ctx->hybrid_poll_time = runtime;
+
+ return ret;
+}
+
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
struct io_wq_work_node *pos, *start, *prev;