// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 */
10 #include <linux/dma-fence.h>
11 #include <linux/dma-fence-array.h>
12 #include <linux/dma-fence-chain.h>
13 #include <linux/dma-fence-unwrap.h>
14 #include <linux/slab.h>
16 /* Internal helper to start new array iteration, don't use directly */
17 static struct dma_fence *
18 __dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
20 cursor->array = dma_fence_chain_contained(cursor->chain);
22 return dma_fence_array_first(cursor->array);
26 * dma_fence_unwrap_first - return the first fence from fence containers
27 * @head: the entrypoint into the containers
28 * @cursor: current position inside the containers
30 * Unwraps potential dma_fence_chain/dma_fence_array containers and return the
33 struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
34 struct dma_fence_unwrap *cursor)
36 cursor->chain = dma_fence_get(head);
37 return __dma_fence_unwrap_array(cursor);
39 EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);
42 * dma_fence_unwrap_next - return the next fence from a fence containers
43 * @cursor: current position inside the containers
45 * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
46 * the next fence from them.
48 struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
50 struct dma_fence *tmp;
53 tmp = dma_fence_array_next(cursor->array, cursor->index);
57 cursor->chain = dma_fence_chain_walk(cursor->chain);
58 return __dma_fence_unwrap_array(cursor);
60 EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
62 /* Implementation for the dma_fence_merge() marco, don't use directly */
63 struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
64 struct dma_fence **fences,
65 struct dma_fence_unwrap *iter)
67 struct dma_fence_array *result;
68 struct dma_fence *tmp, **array;
74 timestamp = ns_to_ktime(0);
75 for (i = 0; i < num_fences; ++i) {
76 dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
77 if (!dma_fence_is_signaled(tmp)) {
79 } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
81 if (ktime_after(tmp->timestamp, timestamp))
82 timestamp = tmp->timestamp;
85 * Use the current time if the fence is
86 * currently signaling.
88 timestamp = ktime_get();
94 * If we couldn't find a pending fence just return a private signaled
95 * fence with the timestamp of the last signaled one.
98 return dma_fence_allocate_private_stub(timestamp);
100 array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
105 * This trashes the input fence array and uses it as position for the
106 * following merge loop. This works because the dma_fence_merge()
107 * wrapper macro is creating this temporary array on the stack together
108 * with the iterators.
110 for (i = 0; i < num_fences; ++i)
111 fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
119 for (i = 0; i < num_fences; ++i) {
120 struct dma_fence *next;
122 while (fences[i] && dma_fence_is_signaled(fences[i]))
123 fences[i] = dma_fence_unwrap_next(&iter[i]);
130 * We can't guarantee that inpute fences are ordered by
131 * context, but it is still quite likely when this
132 * function is used multiple times. So attempt to order
133 * the fences by context as we pass over them and merge
134 * fences with the same context.
136 if (!tmp || tmp->context > next->context) {
140 } else if (tmp->context < next->context) {
143 } else if (dma_fence_is_later(tmp, next)) {
144 fences[i] = dma_fence_unwrap_next(&iter[i]);
147 fences[sel] = dma_fence_unwrap_next(&iter[sel]);
153 array[count++] = dma_fence_get(tmp);
154 fences[sel] = dma_fence_unwrap_next(&iter[sel]);
159 tmp = dma_fence_allocate_private_stub(ktime_get());
168 result = dma_fence_array_create(count, array,
169 dma_fence_context_alloc(1),
175 return &result->base;
181 EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);