// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
9 static struct workqueue_struct *rxe_wq;
11 int rxe_alloc_wq(void)
13 rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
20 void rxe_destroy_wq(void)
22 destroy_workqueue(rxe_wq);
25 /* Check if task is idle i.e. not running, not scheduled in
26 * work queue and not draining. If so move to busy to
27 * reserve a slot in do_task() by setting to busy and taking
28 * a qp reference to cover the gap from now until the task finishes.
29 * state will move out of busy if task returns a non zero value
30 * in do_task(). If state is already busy it is raised to armed
31 * to indicate to do_task that additional pass should be made
33 * Context: caller should hold task->lock.
34 * Returns: true if state transitioned from idle to busy else false.
36 static bool __reserve_if_idle(struct rxe_task *task)
38 WARN_ON(rxe_read(task->qp) <= 0);
40 if (task->state == TASK_STATE_IDLE) {
42 task->state = TASK_STATE_BUSY;
47 if (task->state == TASK_STATE_BUSY)
48 task->state = TASK_STATE_ARMED;
53 /* check if task is idle or drained and not currently
54 * scheduled in the work queue. This routine is
55 * called by rxe_cleanup_task or rxe_disable_task to
56 * see if the queue is empty.
57 * Context: caller should hold task->lock.
58 * Returns true if done else false.
60 static bool __is_done(struct rxe_task *task)
62 if (work_pending(&task->work))
65 if (task->state == TASK_STATE_IDLE ||
66 task->state == TASK_STATE_DRAINED) {
73 /* a locked version of __is_done */
74 static bool is_done(struct rxe_task *task)
79 spin_lock_irqsave(&task->lock, flags);
80 done = __is_done(task);
81 spin_unlock_irqrestore(&task->lock, flags);
86 /* do_task is a wrapper for the three tasks (requester,
87 * completer, responder) and calls them in a loop until
88 * they return a non-zero value. It is called either
89 * directly by rxe_run_task or indirectly if rxe_sched_task
90 * schedules the task. They must call __reserve_if_idle to
91 * move the task to busy before calling or scheduling.
92 * The task can also be moved to drained or invalid
93 * by calls to rxe_cleanup_task or rxe_disable_task.
94 * In that case tasks which get here are not executed but
95 * just flushed. The tasks are designed to look to see if
96 * there is work to do and then do part of it before returning
97 * here with a return value of zero until all the work
98 * has been consumed then it returns a non-zero value.
99 * The number of times the task can be run is limited by
100 * max iterations so one task cannot hold the cpu forever.
101 * If the limit is hit and work remains the task is rescheduled.
103 static void do_task(struct rxe_task *task)
105 unsigned int iterations;
111 WARN_ON(rxe_read(task->qp) <= 0);
113 spin_lock_irqsave(&task->lock, flags);
114 if (task->state >= TASK_STATE_DRAINED) {
117 spin_unlock_irqrestore(&task->lock, flags);
120 spin_unlock_irqrestore(&task->lock, flags);
123 iterations = RXE_MAX_ITERATIONS;
127 ret = task->func(task->qp);
128 } while (ret == 0 && iterations-- > 0);
130 spin_lock_irqsave(&task->lock, flags);
131 /* we're not done yet but we ran out of iterations.
132 * yield the cpu and reschedule the task
135 task->state = TASK_STATE_IDLE;
140 switch (task->state) {
141 case TASK_STATE_BUSY:
142 task->state = TASK_STATE_IDLE;
145 /* someone tried to schedule the task while we
146 * were running, keep going
148 case TASK_STATE_ARMED:
149 task->state = TASK_STATE_BUSY;
153 case TASK_STATE_DRAINING:
154 task->state = TASK_STATE_DRAINED;
159 rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
161 task->state = TASK_STATE_IDLE;
167 if (WARN_ON(task->num_done != task->num_sched))
170 "%ld tasks scheduled, %ld tasks done\n",
171 task->num_sched, task->num_done);
173 spin_unlock_irqrestore(&task->lock, flags);
179 rxe_sched_task(task);
184 /* wrapper around do_task to fix argument for work queue */
185 static void do_work(struct work_struct *work)
187 do_task(container_of(work, struct rxe_task, work));
190 int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
191 int (*func)(struct rxe_qp *))
193 WARN_ON(rxe_read(qp) <= 0);
197 task->state = TASK_STATE_IDLE;
198 spin_lock_init(&task->lock);
199 INIT_WORK(&task->work, do_work);
204 /* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
205 * process context. The qp is already completed with no
206 * remaining references. Once the queue is drained the
207 * task is moved to invalid and returns. The qp cleanup
208 * code then calls the task functions directly without
209 * using the task struct to drain any late arriving packets
212 void rxe_cleanup_task(struct rxe_task *task)
216 spin_lock_irqsave(&task->lock, flags);
217 if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
218 task->state = TASK_STATE_DRAINING;
220 task->state = TASK_STATE_INVALID;
221 spin_unlock_irqrestore(&task->lock, flags);
224 spin_unlock_irqrestore(&task->lock, flags);
226 /* now the task cannot be scheduled or run just wait
227 * for the previously scheduled tasks to finish.
229 while (!is_done(task))
232 spin_lock_irqsave(&task->lock, flags);
233 task->state = TASK_STATE_INVALID;
234 spin_unlock_irqrestore(&task->lock, flags);
237 /* run the task inline if it is currently idle
238 * cannot call do_task holding the lock
240 void rxe_run_task(struct rxe_task *task)
245 WARN_ON(rxe_read(task->qp) <= 0);
247 spin_lock_irqsave(&task->lock, flags);
248 run = __reserve_if_idle(task);
249 spin_unlock_irqrestore(&task->lock, flags);
255 /* schedule the task to run later as a work queue entry.
256 * the queue_work call can be called holding
259 void rxe_sched_task(struct rxe_task *task)
263 WARN_ON(rxe_read(task->qp) <= 0);
265 spin_lock_irqsave(&task->lock, flags);
266 if (__reserve_if_idle(task))
267 queue_work(rxe_wq, &task->work);
268 spin_unlock_irqrestore(&task->lock, flags);
271 /* rxe_disable/enable_task are only called from
272 * rxe_modify_qp in process context. Task is moved
273 * to the drained state by do_task.
275 void rxe_disable_task(struct rxe_task *task)
279 WARN_ON(rxe_read(task->qp) <= 0);
281 spin_lock_irqsave(&task->lock, flags);
282 if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
283 task->state = TASK_STATE_DRAINING;
285 task->state = TASK_STATE_DRAINED;
286 spin_unlock_irqrestore(&task->lock, flags);
289 spin_unlock_irqrestore(&task->lock, flags);
291 while (!is_done(task))
294 spin_lock_irqsave(&task->lock, flags);
295 task->state = TASK_STATE_DRAINED;
296 spin_unlock_irqrestore(&task->lock, flags);
299 void rxe_enable_task(struct rxe_task *task)
303 WARN_ON(rxe_read(task->qp) <= 0);
305 spin_lock_irqsave(&task->lock, flags);
306 if (task->state == TASK_STATE_INVALID) {
307 spin_unlock_irqrestore(&task->lock, flags);
311 task->state = TASK_STATE_IDLE;
312 spin_unlock_irqrestore(&task->lock, flags);