// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
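
/*
 * Example (illustrative sketch only; "bo" and "job_fence" are hypothetical
 * driver names): the typical locked update sequence using the ww_mutex.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(&bo->resv, &ctx);
 *	if (!ret) {
 *		ret = dma_resv_reserve_fences(&bo->resv, 1);
 *		if (!ret)
 *			dma_resv_add_fence(&bo->resv, job_fence,
 *					   DMA_RESV_USAGE_WRITE);
 *		dma_resv_unlock(&bo->resv);
 *	}
 *	ww_acquire_fini(&ctx);
 */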

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3
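/*
 * Fences in the table are kmalloc()ed dma_fence objects and therefore at
 * least 4-byte aligned, which leaves the two lowest pointer bits free to
 * carry the enum dma_resv_usage value alongside the fence pointer.
 */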

struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specified index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;
	size_t size;

	/* Round up to the next kmalloc bucket size. */
	size = kmalloc_size_roundup(struct_size(list, table, max_fences));

	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* Given the resulting bucket size, recalculate max_fences. */
	list->max_fences = (size - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroy a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence().  Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	/* Driver and component code should never call this function with
	 * num_fences=0. If they do, it usually points to bugs when calculating
	 * the number of needed fences dynamically.
	 */
	if (WARN_ON(!num_fences))
		return -EINVAL;

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * No need to bump the fence refcounts here: RCU readers must use
	 * kref_get_unless_zero() anyway, and the references held by the
	 * old struct are carried over to the new one.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
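
/*
 * Example (illustrative sketch only; "obj", "fences" and "num_fences" are
 * hypothetical): reserve all slots once while holding the lock, then add
 * the fences without any further allocation.
 *
 *	ret = dma_resv_reserve_fences(obj, num_fences);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_fences; ++i)
 *		dma_resv_add_fence(obj, fences[i], DMA_RESV_USAGE_READ);
 */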

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test fence slot reservation */
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage &&
		     dma_fence_is_later_or_same(fence, old)) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
		dma_fence_put(old);
	}
}
EXPORT_SYMBOL(dma_resv_replace_fences);
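
/*
 * Example (illustrative sketch only; "obj", "preempt_ctx" and
 * "pt_update_fence" are hypothetical): swapping out all preemption fences
 * of one context, as described above.
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_replace_fences(obj, preempt_ctx, pt_update_fence,
 *				DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_unlock(obj);
 */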

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}

/* Walk to the next fence that is not yet signaled and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	if (!cursor->fences)
		return;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
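
/*
 * Example (illustrative sketch only; "obj" and "total" are hypothetical):
 * accumulated state must be reset whenever the unlocked iterator restarts.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int total = 0;
 *
 *	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			total = 0;
 *		total++;
 *	}
 *	dma_resv_iter_end(&cursor);
 */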

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
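
/*
 * Example (illustrative sketch only; "obj" and "handle()" are hypothetical):
 * with the lock held, iteration never restarts and takes no extra fence
 * references.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_WRITE, fence)
 *		handle(fence);
 */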

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. The lock of @dst must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);
	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences without the update side lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			struct dma_fence **new_fences;
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;

			/* Re-allocate the array if necessary. */
			new_fences = krealloc_array(*fences, count,
						    sizeof(void *),
						    GFP_KERNEL);
			if (count && !new_fences) {
				kfree(*fences);
				*fences = NULL;
				*num_fences = 0;
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			*fences = new_fences;
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
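
/*
 * Example (illustrative sketch only; "obj" is hypothetical): the caller owns
 * one reference per returned fence plus the array itself.
 *
 *	struct dma_fence **fences;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &count, &fences);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(fences[i]);
 *	kfree(fences);
 */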

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used when the resulting fence is added back to the
 * resv object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
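
/*
 * Example (illustrative sketch only; "obj" is hypothetical): waiting on a
 * snapshot of all fences through the singleton instead of iterating.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = dma_resv_get_singleton(obj, DMA_RESV_USAGE_WRITE, &fence);
 *	if (ret)
 *		return ret;
 *	if (fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */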

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
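
/*
 * Example (illustrative sketch only; "obj" is hypothetical): an
 * interruptible wait without a deadline uses MAX_SCHEDULE_TIMEOUT.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_BOOKKEEP, true,
 *				    MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */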

/**
 * dma_resv_set_deadline - Set a deadline on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @deadline: the requested deadline (MONOTONIC)
 *
 * May be called without holding the dma_resv lock.  Sets @deadline on
 * all fences filtered by @usage.
 */
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
			   ktime_t deadline)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_set_deadline(fence, deadline);
	}
	dma_resv_iter_end(&cursor);
}
EXPORT_SYMBOL_GPL(dma_resv_set_deadline);

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
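
/*
 * Example (illustrative sketch only; "obj" is hypothetical): a non-blocking
 * idle check for readers.
 *
 *	bool idle = dma_resv_test_signaled(obj, DMA_RESV_USAGE_READ);
 */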

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif