// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
/* Check if task is idle, i.e. not running, not scheduled in
 * the tasklet queue and not draining. If so, move it to busy to
 * reserve a slot in do_task(), taking a qp reference to cover
 * the gap from now until the task finishes.
 * state will move out of busy if the task returns a non-zero value
 * in do_task(). If state is already busy it is raised to armed
 * to indicate to do_task that an additional pass should be made
 * through the task loop.
 * Context: caller should hold task->lock.
 * Returns: true if state transitioned from idle to busy else false.
 */
static bool __reserve_if_idle(struct rxe_task *task)
{
	WARN_ON(rxe_read(task->qp) <= 0);

	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
		return false;

	if (task->state == TASK_STATE_IDLE) {
		rxe_get(task->qp);
		task->state = TASK_STATE_BUSY;
		task->num_sched++;
		return true;
	}

	if (task->state == TASK_STATE_BUSY)
		task->state = TASK_STATE_ARMED;

	return false;
}
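
/* A summary of the state transitions implemented in this file
 * (for reference only; the code below is authoritative):
 *
 *	IDLE --__reserve_if_idle()--> BUSY --do_task(), ret != 0--> IDLE
 *	BUSY --__reserve_if_idle()--> ARMED --do_task(), one more pass--> BUSY
 *	BUSY/ARMED --rxe_cleanup_task()/rxe_disable_task()--> DRAINING
 *	DRAINING --do_task(), ret != 0--> DRAINED
 *	IDLE/DRAINED --rxe_cleanup_task()--> INVALID
 */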
/* check if task is idle or drained and not currently
 * scheduled in the tasklet queue. This routine is
 * called by rxe_cleanup_task or rxe_disable_task to
 * see if the queue is empty.
 * Context: caller should hold task->lock.
 * Returns: true if done else false.
 */
static bool __is_done(struct rxe_task *task)
{
	if (task->tasklet.state & BIT(TASKLET_STATE_SCHED))
		return false;

	if (task->state == TASK_STATE_IDLE ||
	    task->state == TASK_STATE_DRAINED) {
		return true;
	}

	return false;
}
/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&task->lock, flags);
	done = __is_done(task);
	spin_unlock_irqrestore(&task->lock, flags);

	return done;
}
/* do_task is a wrapper for the three tasks (requester,
 * completer, responder) and calls them in a loop until
 * they return a non-zero value. It is called either
 * directly by rxe_run_task or indirectly if rxe_sched_task
 * schedules the task. Both paths must call __reserve_if_idle
 * to move the task to busy before calling or scheduling it.
 * The task can also be moved to drained or invalid
 * by calls to rxe_cleanup_task or rxe_disable_task.
 * In that case tasks which get here are not executed but
 * just flushed. Each task is designed to check whether
 * there is work to do, do part of it, and return zero
 * until all the work has been consumed, at which point
 * it returns a non-zero value.
 * The number of times the task can be run is limited by
 * max iterations so one task cannot hold the cpu forever.
 */
static void do_task(struct tasklet_struct *t)
{
	struct rxe_task *task = from_tasklet(task, t, tasklet);
	unsigned int iterations;
	unsigned long flags;
	int resched = 0;
	int cont;
	int ret;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state >= TASK_STATE_DRAINED) {
		/* task is being flushed, not run; drop the reference
		 * taken in __reserve_if_idle and account the pass
		 */
		rxe_put(task->qp);
		task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	do {
		iterations = RXE_MAX_ITERATIONS;
		cont = 0;

		do {
			ret = task->func(task->qp);
		} while (ret == 0 && iterations-- > 0);

		spin_lock_irqsave(&task->lock, flags);
		switch (task->state) {
		case TASK_STATE_BUSY:
			if (ret) {
				task->state = TASK_STATE_IDLE;
			} else {
				/* This can happen if the client
				 * can add work faster than the
				 * tasklet can finish it.
				 * Reschedule the tasklet and exit
				 * the loop to give up the cpu.
				 */
				task->state = TASK_STATE_IDLE;
				resched = 1;
			}
			break;

		/* someone tried to run the task since the last time we
		 * called func, so we will call one more time regardless
		 * of the return value
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

		case TASK_STATE_DRAINING:
			if (ret)
				task->state = TASK_STATE_DRAINED;
			break;

		default:
			rxe_info_qp(task->qp, "unexpected task state = %d",
				    task->state);
		}

		if (!cont) {
			task->num_done++;
			if (WARN_ON(task->num_done != task->num_sched))
				rxe_err_qp(task->qp,
					   "%ld tasks scheduled, %ld tasks done",
					   task->num_sched, task->num_done);
		}
		spin_unlock_irqrestore(&task->lock, flags);
	} while (cont);

	task->ret = ret;
	if (resched)
		rxe_sched_task(task);

	rxe_put(task->qp);
}
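
/* An illustrative sketch of the contract a task function must follow;
 * example_task(), qp_has_work() and do_one_unit() are hypothetical
 * names, not part of the driver:
 *
 *	static int example_task(struct rxe_qp *qp)
 *	{
 *		if (!qp_has_work(qp))
 *			return -EAGAIN;	// done; ends the loop in do_task()
 *		do_one_unit(qp);	// a bounded slice of work
 *		return 0;		// more may remain; call func again
 *	}
 *
 * Returning zero keeps the inner loop in do_task() calling back, up to
 * RXE_MAX_ITERATIONS times per pass; any non-zero value ends the pass.
 */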
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
		  int (*func)(struct rxe_qp *))
{
	WARN_ON(rxe_read(qp) <= 0);

	task->qp = qp;
	task->func = func;
	tasklet_setup(&task->tasklet, do_task);
	task->state = TASK_STATE_IDLE;
	spin_lock_init(&task->lock);

	return 0;
}
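
/* Typical lifecycle, as wired up by the qp code (illustrative sketch
 * only; error handling omitted, and rxe_requester is just one of the
 * three task functions initialized this way):
 *
 *	rxe_init_task(&qp->req.task, qp, rxe_requester);
 *	...
 *	rxe_run_task(&qp->req.task);		// or rxe_sched_task()
 *	...
 *	rxe_cleanup_task(&qp->req.task);	// from rxe_do_qp_cleanup()
 */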
/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
 * process context. The qp is already completed with no
 * remaining references. Once the queue is drained the
 * task is moved to invalid and returns. The qp cleanup
 * code then calls the task functions directly without
 * using the task struct to drain any late arriving packets
 * or work requests.
 */
void rxe_cleanup_task(struct rxe_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_INVALID;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	/* now the task cannot be scheduled or run, just wait
	 * for the previously scheduled tasks to finish.
	 */
	while (!is_done(task))
		cond_resched();	/* yield the cpu */

	tasklet_kill(&task->tasklet);

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_INVALID;
	spin_unlock_irqrestore(&task->lock, flags);
}
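
/* The drain handshake above, shown as a sketch of the two sides
 * (for reference only):
 *
 *	rxe_cleanup_task()		do_task() (already scheduled)
 *	------------------		-----------------------------
 *	state = DRAINING
 *	while (!is_done(task))		calls func() until ret != 0,
 *		cond_resched();		then state = DRAINED
 *	tasklet_kill()
 *	state = INVALID
 */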
/* run the task inline if it is currently idle.
 * cannot call do_task holding the lock.
 */
void rxe_run_task(struct rxe_task *task)
{
	unsigned long flags;
	bool run;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	run = __reserve_if_idle(task);
	spin_unlock_irqrestore(&task->lock, flags);

	if (run)
		do_task(&task->tasklet);
}
/* schedule the task to run later as a tasklet.
 * the tasklet_schedule call can be called holding
 * the lock.
 */
void rxe_sched_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (__reserve_if_idle(task))
		tasklet_schedule(&task->tasklet);
	spin_unlock_irqrestore(&task->lock, flags);
}
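
/* Note the difference between the two entry points: rxe_run_task() may
 * invoke do_task(), and hence the task function, synchronously in the
 * caller's context, so the caller must not hold task->lock or any lock
 * the task functions take. rxe_sched_task() only marks the tasklet for
 * execution and so may be called while holding such locks.
 */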
/* rxe_disable/enable_task are only called from
 * rxe_modify_qp in process context. Task is moved
 * to the drained state by do_task.
 */
void rxe_disable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_DRAINED;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	while (!is_done(task))
		cond_resched();	/* yield the cpu */

	tasklet_disable(&task->tasklet);
}
void rxe_enable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state == TASK_STATE_INVALID) {
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	task->state = TASK_STATE_IDLE;
	tasklet_enable(&task->tasklet);
	spin_unlock_irqrestore(&task->lock, flags);
}
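
/* Illustrative pairing, as used from rxe_modify_qp() (sketch only;
 * the req task stands in for whichever task is being quiesced):
 *
 *	rxe_disable_task(&qp->req.task);	// quiesce, wait for drain
 *	... modify the qp ...
 *	rxe_enable_task(&qp->req.task);		// resume scheduling
 */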