// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_sync.h"

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/xe_drm.h>

#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_sched_job_types.h"
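
/*
 * A user fence is an (address, value) pair supplied by user space. Once the
 * dma_fence it is attached to signals, the driver writes the value to the
 * address in the submitting process' address space and wakes waiters on
 * xe->ufence_wq. The write is deferred to a worker because it needs the
 * user's mm.
 */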

struct xe_user_fence {
	struct xe_device *xe;
	struct kref refcount;
	struct dma_fence_cb cb;
	struct work_struct worker;
	struct mm_struct *mm;
	u64 __user *addr;
	u64 value;
	int signalled;
};

static void user_fence_destroy(struct kref *kref)
{
	struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
						    refcount);

	/* Pairs with mmgrab() in user_fence_create() */
	mmdrop(ufence->mm);
	kfree(ufence);
}

static void user_fence_get(struct xe_user_fence *ufence)
{
	kref_get(&ufence->refcount);
}

static void user_fence_put(struct xe_user_fence *ufence)
{
	kref_put(&ufence->refcount, user_fence_destroy);
}

static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
					       u64 value)
{
	struct xe_user_fence *ufence;

	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
	if (!ufence)
		return NULL;

	ufence->xe = xe;
	kref_init(&ufence->refcount);
	ufence->addr = u64_to_user_ptr(addr);
	ufence->value = value;
	ufence->mm = current->mm;
	mmgrab(ufence->mm);

	return ufence;
}
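
/*
 * Runs from the device's ordered workqueue: temporarily adopt the submitter's
 * mm so copy_to_user() can resolve the user pointer, then mark the fence
 * signalled and wake any waiters. If the mm is already gone (the process
 * exited), the write-back is skipped.
 */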

static void user_fence_worker(struct work_struct *w)
{
	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);

	if (mmget_not_zero(ufence->mm)) {
		kthread_use_mm(ufence->mm);
		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
			XE_WARN_ON("Copy to user failed");
		kthread_unuse_mm(ufence->mm);
		mmput(ufence->mm);
	}

	wake_up_all(&ufence->xe->ufence_wq);
	WRITE_ONCE(ufence->signalled, 1);
	user_fence_put(ufence);
}

static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
	INIT_WORK(&ufence->worker, user_fence_worker);
	queue_work(ufence->xe->ordered_wq, &ufence->worker);
	/* Drop the fence reference taken in xe_sync_entry_signal() */
	dma_fence_put(fence);
}

static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);

	kick_ufence(ufence, fence);
}
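
/*
 * Copy one struct drm_xe_sync from user space, validate it, and resolve it
 * into a driver-side xe_sync_entry: a binary or timeline syncobj, or a user
 * fence, used either as an in-fence to wait on or an out-fence to signal.
 */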

int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
			struct xe_sync_entry *sync,
			struct drm_xe_sync __user *sync_user,
			unsigned int flags)
{
	struct drm_xe_sync sync_in;
	int err;
	bool exec = flags & SYNC_PARSE_FLAG_EXEC;
	bool in_lr_mode = flags & SYNC_PARSE_FLAG_LR_MODE;
	bool disallow_user_fence = flags & SYNC_PARSE_FLAG_DISALLOW_USER_FENCE;
	bool signal;

	if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, sync_in.flags & ~DRM_XE_SYNC_FLAG_SIGNAL) ||
	    XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
		return -EINVAL;

	signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
	switch (sync_in.type) {
	case DRM_XE_SYNC_TYPE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (!signal) {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;
		}
		break;

	case DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ:
		if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
			return -EINVAL;

		if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
			return -EINVAL;

		sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
		if (XE_IOCTL_DBG(xe, !sync->syncobj))
			return -ENOENT;

		if (signal) {
			sync->chain_fence = dma_fence_chain_alloc();
			if (!sync->chain_fence)
				return -ENOMEM;
		} else {
			sync->fence = drm_syncobj_fence_get(sync->syncobj);
			if (XE_IOCTL_DBG(xe, !sync->fence))
				return -EINVAL;

			err = dma_fence_chain_find_seqno(&sync->fence,
							 sync_in.timeline_value);
			if (err)
				return err;
		}
		break;

	case DRM_XE_SYNC_TYPE_USER_FENCE:
		if (XE_IOCTL_DBG(xe, disallow_user_fence))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, !signal))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
			return -EINVAL;

		if (exec) {
			sync->addr = sync_in.addr;
		} else {
			sync->ufence = user_fence_create(xe, sync_in.addr,
							 sync_in.timeline_value);
			if (XE_IOCTL_DBG(xe, !sync->ufence))
				return -ENOMEM;
		}

		break;

	default:
		return -EINVAL;
	}

	sync->type = sync_in.type;
	sync->flags = sync_in.flags;
	sync->timeline_value = sync_in.timeline_value;

	return 0;
}

int xe_sync_entry_wait(struct xe_sync_entry *sync)
{
	if (sync->fence)
		dma_fence_wait(sync->fence, true);

	return 0;
}

int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
	int err;

	if (sync->fence) {
		err = drm_sched_job_add_dependency(&job->drm,
						   dma_fence_get(sync->fence));
		if (err) {
			dma_fence_put(sync->fence);
			return err;
		}
	}

	return 0;
}
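
/*
 * Attach @fence to whatever this entry signals: a timeline point, a binary
 * syncobj, or a user fence. For user fences an extra fence/ufence reference
 * is taken for the dma-fence callback and dropped again once the write-back
 * has been kicked or the callback could not be installed.
 */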

void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
			  struct dma_fence *fence)
{
	if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
		return;

	if (sync->chain_fence) {
		drm_syncobj_add_point(sync->syncobj, sync->chain_fence,
				      fence, sync->timeline_value);
		/*
		 * The chain's ownership is transferred to the
		 * timeline.
		 */
		sync->chain_fence = NULL;
	} else if (sync->syncobj) {
		drm_syncobj_replace_fence(sync->syncobj, fence);
	} else if (sync->ufence) {
		int err;

		dma_fence_get(fence);
		user_fence_get(sync->ufence);
		err = dma_fence_add_callback(fence, &sync->ufence->cb,
					     user_fence_cb);
		if (err == -ENOENT) {
			kick_ufence(sync->ufence, fence);
		} else if (err) {
			XE_WARN_ON("failed to add user fence");
			user_fence_put(sync->ufence);
			dma_fence_put(fence);
		}
	} else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
		job->user_fence.used = true;
		job->user_fence.addr = sync->addr;
		job->user_fence.value = sync->timeline_value;
	}
}

void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
	if (sync->syncobj)
		drm_syncobj_put(sync->syncobj);
	if (sync->fence)
		dma_fence_put(sync->fence);
	if (sync->chain_fence)
		dma_fence_put(&sync->chain_fence->base);
	if (sync->ufence)
		user_fence_put(sync->ufence);
}

/**
 * xe_sync_in_fence_get() - Get a fence from syncs, exec queue, and VM
 * @sync: input syncs
 * @num_sync: number of syncs
 * @q: exec queue
 * @vm: VM
 *
 * Get a fence from syncs, exec queue, and VM. If syncs contain in-fences create
 * and return a composite fence of all in-fences + last fence. If no in-fences
 * return last fence on input exec queue. Caller must drop reference to
 * returned fence.
 *
 * Return: fence on success, ERR_PTR(-ENOMEM) on failure
 */
struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
		     struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence **fences = NULL;
	struct dma_fence_array *cf = NULL;
	struct dma_fence *fence;
	int i, num_in_fence = 0, current_fence = 0;

	lockdep_assert_held(&vm->lock);

	/* Count in-fences */
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			++num_in_fence;
			fence = sync[i].fence;
		}
	}

	/* Easy case: no in-fences, just return the last fence on the exec queue */
	if (!num_in_fence) {
		fence = xe_exec_queue_last_fence_get(q, vm);
		return fence;
	}

	/* Create composite fence */
	fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_sync; ++i) {
		if (sync[i].fence) {
			dma_fence_get(sync[i].fence);
			fences[current_fence++] = sync[i].fence;
		}
	}
	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
	cf = dma_fence_array_create(num_in_fence, fences,
				    vm->composite_fence_ctx,
				    vm->composite_fence_seqno++,
				    false);
	if (!cf) {
		--vm->composite_fence_seqno;
		goto err_out;
	}

	return &cf->base;

err_out:
	while (current_fence)
		dma_fence_put(fences[--current_fence]);
	kfree(fences);

	return ERR_PTR(-ENOMEM);
}
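
/*
 * The helpers below let other parts of the driver (e.g. the VM bind path)
 * hold their own reference to a user fence and poll its signalled state
 * independently of the xe_sync_entry it was parsed from.
 */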

/**
 * xe_sync_ufence_get() - Get user fence from sync
 * @sync: input sync
 *
 * Get a user fence reference from sync.
 *
 * Return: xe_user_fence pointer with reference
 */
struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
{
	user_fence_get(sync->ufence);

	return sync->ufence;
}

/**
 * xe_sync_ufence_put() - Put user fence reference
 * @ufence: user fence reference
 *
 */
void xe_sync_ufence_put(struct xe_user_fence *ufence)
{
	user_fence_put(ufence);
}

/**
 * xe_sync_ufence_get_status() - Get user fence status
 * @ufence: user fence
 *
 * Return: 1 if signalled, 0 not signalled, <0 on error
 */
int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
{
	return READ_ONCE(ufence->signalled);
}