// SPDX-License-Identifier: GPL-2.0-only
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <[email protected]>
 */

#include <linux/dma-fence-chain.h>

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);

/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
 * chain node.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->prev);
	rcu_read_unlock();
	return prev;
}

/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		/* Garbage collect the signaled node: atomically swap our prev
		 * pointer from it to its predecessor. If somebody else raced
		 * and replaced prev in the meantime, drop the replacement
		 * reference instead.
		 */
		tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
					    RCU_INITIALIZER(replacement)));
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
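
/*
 * Illustrative usage sketch (not part of this file): chain walking is
 * normally driven through the dma_fence_chain_for_each() helper from
 * <linux/dma-fence-chain.h>, which calls dma_fence_chain_walk() internally.
 * "head" below stands for any chain node reference the caller holds:
 *
 *	struct dma_fence *iter;
 *
 *	dma_fence_chain_for_each(iter, head) {
 *		struct dma_fence *f = dma_fence_chain_contained(iter);
 *
 *		// inspect f; the iterator takes and drops the references
 *	}
 */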

/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
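
/*
 * Illustrative sketch (hypothetical caller, assumed variable names): a driver
 * resolving a timeline point into the fence to wait on might do:
 *
 *	struct dma_fence *fence = dma_fence_get(chain_head);
 *	int err;
 *
 *	err = dma_fence_chain_find_seqno(&fence, point);
 *	if (err)
 *		return err; // not a chain node, or point not yet materialized
 *	// "fence" now references the node signaling "point", or is NULL if
 *	// that part of the chain already signaled and was collected
 */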

static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}

static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	/* The callback can run in IRQ context from the contained fence's
	 * signaling path, so defer the chain walk and re-arming to irq_work.
	 */
	init_irq_work(&chain->work, dma_fence_chain_irq_work);
	irq_work_queue(&chain->work);
	dma_fence_put(f);
}

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}

static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		if (!dma_fence_is_signaled(f)) {
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}

static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		if (kref_read(&prev->refcount) > 1)
			break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

static void dma_fence_chain_set_deadline(struct dma_fence *fence,
					 ktime_t deadline)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		dma_fence_set_deadline(f, deadline);
	}
}

const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
	.set_deadline = dma_fence_chain_set_deadline,
};
EXPORT_SYMBOL(dma_fence_chain_ops);

/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);

	/*
	 * Chaining dma_fence_chain container together is only allowed through
	 * the prev fence and not through the contained fence.
	 *
	 * The correct way of handling this is to flatten out the fence
	 * structure into a dma_fence_array by the caller instead.
	 */
	WARN_ON(dma_fence_is_chain(fence));
}
EXPORT_SYMBOL(dma_fence_chain_init);
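
/*
 * Illustrative sketch (hypothetical caller, assumptions noted inline):
 * appending a new point to a timeline. dma_fence_chain_alloc() comes from
 * <linux/dma-fence-chain.h>; "prev" and "fence" stand for references the
 * caller already holds and transfers to the new node:
 *
 *	struct dma_fence_chain *node = dma_fence_chain_alloc();
 *
 *	if (!node)
 *		return -ENOMEM;
 *	dma_fence_chain_init(node, prev, fence, point);
 *	// &node->base is now the chain head for sequence number "point"
 */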