linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <[email protected]>
26  */
27
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include <drm/ttm/ttm_tt.h>
36
37 #include "amdgpu_cs.h"
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_gmc.h"
41 #include "amdgpu_gem.h"
42 #include "amdgpu_ras.h"
43
44 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
45                                  struct amdgpu_device *adev,
46                                  struct drm_file *filp,
47                                  union drm_amdgpu_cs *cs)
48 {
49         struct amdgpu_fpriv *fpriv = filp->driver_priv;
50
51         if (cs->in.num_chunks == 0)
52                 return -EINVAL;
53
54         memset(p, 0, sizeof(*p));
55         p->adev = adev;
56         p->filp = filp;
57
58         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
59         if (!p->ctx)
60                 return -EINVAL;
61
62         if (atomic_read(&p->ctx->guilty)) {
63                 amdgpu_ctx_put(p->ctx);
64                 return -ECANCELED;
65         }
66
67         amdgpu_sync_create(&p->sync);
68         return 0;
69 }
70
71 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
72                              struct drm_amdgpu_cs_chunk_ib *chunk_ib)
73 {
74         struct drm_sched_entity *entity;
75         unsigned int i;
76         int r;
77
78         r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
79                                   chunk_ib->ip_instance,
80                                   chunk_ib->ring, &entity);
81         if (r)
82                 return r;
83
84         /*
85          * Abort if there is no run queue associated with this entity.
86          * Possibly because of disabled HW IP.
87          */
88         if (entity->rq == NULL)
89                 return -EINVAL;
90
91         /* Check if we can add this IB to some existing job */
92         for (i = 0; i < p->gang_size; ++i)
93                 if (p->entities[i] == entity)
94                         return i;
95
96         /* If not, increase the gang size if possible */
97         if (i == AMDGPU_CS_GANG_SIZE)
98                 return -EINVAL;
99
100         p->entities[i] = entity;
101         p->gang_size = i + 1;
102         return i;
103 }
104
105 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
106                            struct drm_amdgpu_cs_chunk_ib *chunk_ib,
107                            unsigned int *num_ibs)
108 {
109         int r;
110
111         r = amdgpu_cs_job_idx(p, chunk_ib);
112         if (r < 0)
113                 return r;
114
115         ++(num_ibs[r]);
116         p->gang_leader_idx = r;
117         return 0;
118 }
119
120 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
121                                    struct drm_amdgpu_cs_chunk_fence *data,
122                                    uint32_t *offset)
123 {
124         struct drm_gem_object *gobj;
125         struct amdgpu_bo *bo;
126         unsigned long size;
127         int r;
128
129         gobj = drm_gem_object_lookup(p->filp, data->handle);
130         if (gobj == NULL)
131                 return -EINVAL;
132
133         bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
134         p->uf_entry.priority = 0;
135         p->uf_entry.tv.bo = &bo->tbo;
136         /* One for TTM and two for the CS job */
137         p->uf_entry.tv.num_shared = 3;
138
139         drm_gem_object_put(gobj);
140
141         size = amdgpu_bo_size(bo);
142         if (size != PAGE_SIZE || (data->offset + 8) > size) {
143                 r = -EINVAL;
144                 goto error_unref;
145         }
146
147         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
148                 r = -EINVAL;
149                 goto error_unref;
150         }
151
152         *offset = data->offset;
153
154         return 0;
155
156 error_unref:
157         amdgpu_bo_unref(&bo);
158         return r;
159 }
160
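/*
 * Illustrative sketch, not part of the driver: how userspace might fill the
 * AMDGPU_CHUNK_ID_FENCE chunk that the function above validates.  The BO
 * named by "handle" must be exactly one page and must not be a userptr BO;
 * the kernel later writes a 64-bit sequence number at "offset", which is why
 * offset + 8 has to fit inside the page.  The uf_bo_handle name is made up
 * for the example.
 *
 *     struct drm_amdgpu_cs_chunk_fence fence_data = {
 *             .handle = uf_bo_handle,  // GEM handle of a PAGE_SIZE buffer
 *             .offset = 0,             // byte offset of the u64 fence value
 *     };
 *     // chunk_id = AMDGPU_CHUNK_ID_FENCE, length_dw = sizeof(fence_data) / 4
 */
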
161 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
162                                    struct drm_amdgpu_bo_list_in *data)
163 {
164         struct drm_amdgpu_bo_list_entry *info;
165         int r;
166
167         r = amdgpu_bo_create_list_entry_array(data, &info);
168         if (r)
169                 return r;
170
171         r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
172                                   &p->bo_list);
173         if (r)
174                 goto error_free;
175
176         kvfree(info);
177         return 0;
178
179 error_free:
180         kvfree(info);
181
182         return r;
183 }
184
185 /* Copy the data from userspace and go over it the first time */
186 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
187                            union drm_amdgpu_cs *cs)
188 {
189         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
190         unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
191         struct amdgpu_vm *vm = &fpriv->vm;
192         uint64_t *chunk_array_user;
193         uint64_t *chunk_array;
194         uint32_t uf_offset = 0;
195         unsigned int size;
196         int ret;
197         int i;
198
199         chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
200                                      GFP_KERNEL);
201         if (!chunk_array)
202                 return -ENOMEM;
203
204         /* get chunks */
205         chunk_array_user = u64_to_user_ptr(cs->in.chunks);
206         if (copy_from_user(chunk_array, chunk_array_user,
207                            sizeof(uint64_t)*cs->in.num_chunks)) {
208                 ret = -EFAULT;
209                 goto free_chunk;
210         }
211
212         p->nchunks = cs->in.num_chunks;
213         p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
214                             GFP_KERNEL);
215         if (!p->chunks) {
216                 ret = -ENOMEM;
217                 goto free_chunk;
218         }
219
220         for (i = 0; i < p->nchunks; i++) {
221                 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
222                 struct drm_amdgpu_cs_chunk user_chunk;
223                 uint32_t __user *cdata;
224
225                 chunk_ptr = u64_to_user_ptr(chunk_array[i]);
226                 if (copy_from_user(&user_chunk, chunk_ptr,
227                                        sizeof(struct drm_amdgpu_cs_chunk))) {
228                         ret = -EFAULT;
229                         i--;
230                         goto free_partial_kdata;
231                 }
232                 p->chunks[i].chunk_id = user_chunk.chunk_id;
233                 p->chunks[i].length_dw = user_chunk.length_dw;
234
235                 size = p->chunks[i].length_dw;
236                 cdata = u64_to_user_ptr(user_chunk.chunk_data);
237
238                 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
239                                                     GFP_KERNEL);
240                 if (p->chunks[i].kdata == NULL) {
241                         ret = -ENOMEM;
242                         i--;
243                         goto free_partial_kdata;
244                 }
245                 size *= sizeof(uint32_t);
246                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
247                         ret = -EFAULT;
248                         goto free_partial_kdata;
249                 }
250
251                 /* Assume the worst on the following checks */
252                 ret = -EINVAL;
253                 switch (p->chunks[i].chunk_id) {
254                 case AMDGPU_CHUNK_ID_IB:
255                         if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
256                                 goto free_partial_kdata;
257
258                         ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
259                         if (ret)
260                                 goto free_partial_kdata;
261                         break;
262
263                 case AMDGPU_CHUNK_ID_FENCE:
264                         if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
265                                 goto free_partial_kdata;
266
267                         ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
268                                                       &uf_offset);
269                         if (ret)
270                                 goto free_partial_kdata;
271                         break;
272
273                 case AMDGPU_CHUNK_ID_BO_HANDLES:
274                         if (size < sizeof(struct drm_amdgpu_bo_list_in))
275                                 goto free_partial_kdata;
276
277                         ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
278                         if (ret)
279                                 goto free_partial_kdata;
280                         break;
281
282                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
283                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
284                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
285                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
286                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
287                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
288                         break;
289
290                 default:
291                         goto free_partial_kdata;
292                 }
293         }
294
295         if (!p->gang_size) {
296                 ret = -EINVAL;
297                 goto free_partial_kdata;
298         }
299
300         for (i = 0; i < p->gang_size; ++i) {
301                 ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
302                                        num_ibs[i], &p->jobs[i]);
303                 if (ret)
304                         goto free_all_kdata;
305         }
306         p->gang_leader = p->jobs[p->gang_leader_idx];
307
308         if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
309                 ret = -ECANCELED;
310                 goto free_all_kdata;
311         }
312
313         if (p->uf_entry.tv.bo)
314                 p->gang_leader->uf_addr = uf_offset;
315         kvfree(chunk_array);
316
317         /* Use this opportunity to fill in task info for the vm */
318         amdgpu_vm_set_task_info(vm);
319
320         return 0;
321
322 free_all_kdata:
323         i = p->nchunks - 1;
324 free_partial_kdata:
325         for (; i >= 0; i--)
326                 kvfree(p->chunks[i].kdata);
327         kvfree(p->chunks);
328         p->chunks = NULL;
329         p->nchunks = 0;
330 free_chunk:
331         kvfree(chunk_array);
332
333         return ret;
334 }
335
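/*
 * Illustrative sketch, not part of the driver: the two-level layout that
 * amdgpu_cs_pass1() copies in.  cs->in.chunks points to an array of
 * num_chunks user pointers, and each of those points to a
 * struct drm_amdgpu_cs_chunk whose chunk_data in turn points to the chunk
 * payload (an IB, a user fence, a BO list, ...).  Variable names here are
 * made up; drmCommandWriteRead() is the libdrm helper normally used to
 * issue the ioctl.
 *
 *     struct drm_amdgpu_cs_chunk ib_chunk = {
 *             .chunk_id   = AMDGPU_CHUNK_ID_IB,
 *             .length_dw  = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *             .chunk_data = (uintptr_t)&ib_data,
 *     };
 *     uint64_t chunk_ptrs[] = { (uintptr_t)&ib_chunk };
 *
 *     union drm_amdgpu_cs cs = {};
 *     cs.in.ctx_id     = ctx_id;
 *     cs.in.num_chunks = 1;
 *     cs.in.chunks     = (uintptr_t)chunk_ptrs;
 *     drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
 */
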
336 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
337                            struct amdgpu_cs_chunk *chunk,
338                            unsigned int *ce_preempt,
339                            unsigned int *de_preempt)
340 {
341         struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
342         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
343         struct amdgpu_vm *vm = &fpriv->vm;
344         struct amdgpu_ring *ring;
345         struct amdgpu_job *job;
346         struct amdgpu_ib *ib;
347         int r;
348
349         r = amdgpu_cs_job_idx(p, chunk_ib);
350         if (r < 0)
351                 return r;
352
353         job = p->jobs[r];
354         ring = amdgpu_job_ring(job);
355         ib = &job->ibs[job->num_ibs++];
356
357         /* MM engine doesn't support user fences */
358         if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
359                 return -EINVAL;
360
361         if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
362             chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
363                 if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
364                         (*ce_preempt)++;
365                 else
366                         (*de_preempt)++;
367
368                 /* Each GFX command submission allows at most one preemptible
369                  * IB each for CE and DE */
370                 if (*ce_preempt > 1 || *de_preempt > 1)
371                         return -EINVAL;
372         }
373
374         if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
375                 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
376
377         r =  amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
378                            chunk_ib->ib_bytes : 0,
379                            AMDGPU_IB_POOL_DELAYED, ib);
380         if (r) {
381                 DRM_ERROR("Failed to get ib !\n");
382                 return r;
383         }
384
385         ib->gpu_addr = chunk_ib->va_start;
386         ib->length_dw = chunk_ib->ib_bytes / 4;
387         ib->flags = chunk_ib->flags;
388         return 0;
389 }
390
391 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
392                                      struct amdgpu_cs_chunk *chunk)
393 {
394         struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
395         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
396         unsigned num_deps;
397         int i, r;
398
399         num_deps = chunk->length_dw * 4 /
400                 sizeof(struct drm_amdgpu_cs_chunk_dep);
401
402         for (i = 0; i < num_deps; ++i) {
403                 struct amdgpu_ctx *ctx;
404                 struct drm_sched_entity *entity;
405                 struct dma_fence *fence;
406
407                 ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
408                 if (ctx == NULL)
409                         return -EINVAL;
410
411                 r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
412                                           deps[i].ip_instance,
413                                           deps[i].ring, &entity);
414                 if (r) {
415                         amdgpu_ctx_put(ctx);
416                         return r;
417                 }
418
419                 fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
420                 amdgpu_ctx_put(ctx);
421
422                 if (IS_ERR(fence))
423                         return PTR_ERR(fence);
424                 else if (!fence)
425                         continue;
426
427                 if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
428                         struct drm_sched_fence *s_fence;
429                         struct dma_fence *old = fence;
430
431                         s_fence = to_drm_sched_fence(fence);
432                         fence = dma_fence_get(&s_fence->scheduled);
433                         dma_fence_put(old);
434                 }
435
436                 r = amdgpu_sync_fence(&p->sync, fence);
437                 dma_fence_put(fence);
438                 if (r)
439                         return r;
440         }
441         return 0;
442 }
443
444 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
445                                          uint32_t handle, u64 point,
446                                          u64 flags)
447 {
448         struct dma_fence *fence;
449         int r;
450
451         r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
452         if (r) {
453                 DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
454                           handle, point, r);
455                 return r;
456         }
457
458         r = amdgpu_sync_fence(&p->sync, fence);
459         dma_fence_put(fence);
460         return r;
461 }
462
463 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
464                                    struct amdgpu_cs_chunk *chunk)
465 {
466         struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
467         unsigned num_deps;
468         int i, r;
469
470         num_deps = chunk->length_dw * 4 /
471                 sizeof(struct drm_amdgpu_cs_chunk_sem);
472         for (i = 0; i < num_deps; ++i) {
473                 r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
474                 if (r)
475                         return r;
476         }
477
478         return 0;
479 }
480
481 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
482                                               struct amdgpu_cs_chunk *chunk)
483 {
484         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
485         unsigned num_deps;
486         int i, r;
487
488         num_deps = chunk->length_dw * 4 /
489                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
490         for (i = 0; i < num_deps; ++i) {
491                 r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
492                                                   syncobj_deps[i].point,
493                                                   syncobj_deps[i].flags);
494                 if (r)
495                         return r;
496         }
497
498         return 0;
499 }
500
501 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
502                                     struct amdgpu_cs_chunk *chunk)
503 {
504         struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
505         unsigned num_deps;
506         int i;
507
508         num_deps = chunk->length_dw * 4 /
509                 sizeof(struct drm_amdgpu_cs_chunk_sem);
510
511         if (p->post_deps)
512                 return -EINVAL;
513
514         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
515                                      GFP_KERNEL);
516         p->num_post_deps = 0;
517
518         if (!p->post_deps)
519                 return -ENOMEM;
520
521
522         for (i = 0; i < num_deps; ++i) {
523                 p->post_deps[i].syncobj =
524                         drm_syncobj_find(p->filp, deps[i].handle);
525                 if (!p->post_deps[i].syncobj)
526                         return -EINVAL;
527                 p->post_deps[i].chain = NULL;
528                 p->post_deps[i].point = 0;
529                 p->num_post_deps++;
530         }
531
532         return 0;
533 }
534
535 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
536                                                 struct amdgpu_cs_chunk *chunk)
537 {
538         struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
539         unsigned num_deps;
540         int i;
541
542         num_deps = chunk->length_dw * 4 /
543                 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
544
545         if (p->post_deps)
546                 return -EINVAL;
547
548         p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
549                                      GFP_KERNEL);
550         p->num_post_deps = 0;
551
552         if (!p->post_deps)
553                 return -ENOMEM;
554
555         for (i = 0; i < num_deps; ++i) {
556                 struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
557
558                 dep->chain = NULL;
559                 if (syncobj_deps[i].point) {
560                         dep->chain = dma_fence_chain_alloc();
561                         if (!dep->chain)
562                                 return -ENOMEM;
563                 }
564
565                 dep->syncobj = drm_syncobj_find(p->filp,
566                                                 syncobj_deps[i].handle);
567                 if (!dep->syncobj) {
568                         dma_fence_chain_free(dep->chain);
569                         return -EINVAL;
570                 }
571                 dep->point = syncobj_deps[i].point;
572                 p->num_post_deps++;
573         }
574
575         return 0;
576 }
577
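/*
 * Illustrative sketch, not part of the driver: one entry of an
 * AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL chunk as userspace would pass it.
 * A non-zero point makes the function above pre-allocate a dma_fence_chain
 * node, which amdgpu_cs_post_dependencies() later attaches to the syncobj
 * at that point; a zero point falls back to a plain binary-syncobj replace.
 * The timeline_syncobj_handle name is made up for the example.
 *
 *     struct drm_amdgpu_cs_chunk_syncobj signal = {
 *             .handle = timeline_syncobj_handle,
 *             .flags  = 0,
 *             .point  = 42,  // timeline point to signal with the CS fence
 *     };
 */
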
578 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
579 {
580         unsigned int ce_preempt = 0, de_preempt = 0;
581         int i, r;
582
583         for (i = 0; i < p->nchunks; ++i) {
584                 struct amdgpu_cs_chunk *chunk;
585
586                 chunk = &p->chunks[i];
587
588                 switch (chunk->chunk_id) {
589                 case AMDGPU_CHUNK_ID_IB:
590                         r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
591                         if (r)
592                                 return r;
593                         break;
594                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
595                 case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
596                         r = amdgpu_cs_p2_dependencies(p, chunk);
597                         if (r)
598                                 return r;
599                         break;
600                 case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
601                         r = amdgpu_cs_p2_syncobj_in(p, chunk);
602                         if (r)
603                                 return r;
604                         break;
605                 case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
606                         r = amdgpu_cs_p2_syncobj_out(p, chunk);
607                         if (r)
608                                 return r;
609                         break;
610                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
611                         r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
612                         if (r)
613                                 return r;
614                         break;
615                 case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
616                         r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
617                         if (r)
618                                 return r;
619                         break;
620                 }
621         }
622
623         return 0;
624 }
625
626 /* Convert microseconds to bytes. */
627 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
628 {
629         if (us <= 0 || !adev->mm_stats.log2_max_MBps)
630                 return 0;
631
632         /* Since accum_us is incremented by a million per second, just
633          * multiply it by the number of MB/s to get the number of bytes.
634          */
635         return us << adev->mm_stats.log2_max_MBps;
636 }
637
638 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
639 {
640         if (!adev->mm_stats.log2_max_MBps)
641                 return 0;
642
643         return bytes >> adev->mm_stats.log2_max_MBps;
644 }
645
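/*
 * Worked example, informational only: the comment above treats one MB/s as
 * one byte per microsecond, so the conversion is a plain shift by
 * log2_max_MBps.  Assuming a hypothetical log2_max_MBps of 6 (64 MB/s), the
 * 200 ms upper bound on accumulated time used below converts to
 * 200000 << 6 = 12,800,000 bytes, i.e. roughly 12.8 MB of buffer moves
 * allowed per throttling window.
 */
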
646 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
647  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
648  * which means it can go over the threshold once. If that happens, the driver
649  * will be in debt and no other buffer migrations can be done until that debt
650  * is repaid.
651  *
652  * This approach allows moving a buffer of any size (it's important to allow
653  * that).
654  *
655  * The currency is simply time in microseconds and it increases as the clock
656  * ticks. The accumulated microseconds (us) are converted to bytes and
657  * returned.
658  */
659 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
660                                               u64 *max_bytes,
661                                               u64 *max_vis_bytes)
662 {
663         s64 time_us, increment_us;
664         u64 free_vram, total_vram, used_vram;
665         /* Allow a maximum of 200 accumulated ms. This is basically per-IB
666          * throttling.
667          *
668          * It means that in order to get full max MBps, at least 5 IBs per
669          * second must be submitted and not more than 200ms apart from each
670          * other.
671          */
672         const s64 us_upper_bound = 200000;
673
674         if (!adev->mm_stats.log2_max_MBps) {
675                 *max_bytes = 0;
676                 *max_vis_bytes = 0;
677                 return;
678         }
679
680         total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
681         used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
682         free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
683
684         spin_lock(&adev->mm_stats.lock);
685
686         /* Increase the amount of accumulated us. */
687         time_us = ktime_to_us(ktime_get());
688         increment_us = time_us - adev->mm_stats.last_update_us;
689         adev->mm_stats.last_update_us = time_us;
690         adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
691                                       us_upper_bound);
692
693         /* This prevents the short period of low performance when the VRAM
694          * usage is low and the driver is in debt or doesn't have enough
695          * accumulated us to fill VRAM quickly.
696          *
697          * The situation can occur in these cases:
698          * - a lot of VRAM is freed by userspace
699          * - the presence of a big buffer causes a lot of evictions
700          *   (solution: split buffers into smaller ones)
701          *
702          * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
703          * accum_us to a positive number.
704          */
705         if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
706                 s64 min_us;
707
708                 /* Be more aggressive on dGPUs. Try to fill a portion of free
709                  * VRAM now.
710                  */
711                 if (!(adev->flags & AMD_IS_APU))
712                         min_us = bytes_to_us(adev, free_vram / 4);
713                 else
714                         min_us = 0; /* Reset accum_us on APUs. */
715
716                 adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
717         }
718
719         /* This is set to 0 if the driver is in debt to disallow (optional)
720          * buffer moves.
721          */
722         *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
723
724         /* Do the same for visible VRAM if half of it is free */
725         if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
726                 u64 total_vis_vram = adev->gmc.visible_vram_size;
727                 u64 used_vis_vram =
728                   amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
729
730                 if (used_vis_vram < total_vis_vram) {
731                         u64 free_vis_vram = total_vis_vram - used_vis_vram;
732                         adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
733                                                           increment_us, us_upper_bound);
734
735                         if (free_vis_vram >= total_vis_vram / 2)
736                                 adev->mm_stats.accum_us_vis =
737                                         max(bytes_to_us(adev, free_vis_vram / 2),
738                                             adev->mm_stats.accum_us_vis);
739                 }
740
741                 *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
742         } else {
743                 *max_vis_bytes = 0;
744         }
745
746         spin_unlock(&adev->mm_stats.lock);
747 }
748
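/*
 * Usage sketch, informational only: the CS path below pairs this helper with
 * amdgpu_cs_report_moved_bytes().  amdgpu_cs_parser_bos() first asks for the
 * current budgets, lets amdgpu_cs_bo_validate() accumulate the bytes TTM
 * actually moved, and then charges that amount back, which is how a single
 * oversized move can put the driver into "debt":
 *
 *     amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 *                                       &p->bytes_moved_vis_threshold);
 *     // ... ttm_bo_validate() calls accumulate p->bytes_moved ...
 *     amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 *                                  p->bytes_moved_vis);
 */
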
749 /* Report how many bytes have really been moved for the last command
750  * submission. This can result in a debt that can stop buffer migrations
751  * temporarily.
752  */
753 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
754                                   u64 num_vis_bytes)
755 {
756         spin_lock(&adev->mm_stats.lock);
757         adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
758         adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
759         spin_unlock(&adev->mm_stats.lock);
760 }
761
762 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
763 {
764         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
765         struct amdgpu_cs_parser *p = param;
766         struct ttm_operation_ctx ctx = {
767                 .interruptible = true,
768                 .no_wait_gpu = false,
769                 .resv = bo->tbo.base.resv
770         };
771         uint32_t domain;
772         int r;
773
774         if (bo->tbo.pin_count)
775                 return 0;
776
777         /* Don't move this buffer if we have depleted our allowance
778          * to move it. Don't move anything if the threshold is zero.
779          */
780         if (p->bytes_moved < p->bytes_moved_threshold &&
781             (!bo->tbo.base.dma_buf ||
782             list_empty(&bo->tbo.base.dma_buf->attachments))) {
783                 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
784                     (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
785                         /* And don't move a CPU_ACCESS_REQUIRED BO to limited
786                          * visible VRAM if we've depleted our allowance to do
787                          * that.
788                          */
789                         if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
790                                 domain = bo->preferred_domains;
791                         else
792                                 domain = bo->allowed_domains;
793                 } else {
794                         domain = bo->preferred_domains;
795                 }
796         } else {
797                 domain = bo->allowed_domains;
798         }
799
800 retry:
801         amdgpu_bo_placement_from_domain(bo, domain);
802         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
803
804         p->bytes_moved += ctx.bytes_moved;
805         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
806             amdgpu_bo_in_cpu_visible_vram(bo))
807                 p->bytes_moved_vis += ctx.bytes_moved;
808
809         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
810                 domain = bo->allowed_domains;
811                 goto retry;
812         }
813
814         return r;
815 }
816
817 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
818                             struct list_head *validated)
819 {
820         struct ttm_operation_ctx ctx = { true, false };
821         struct amdgpu_bo_list_entry *lobj;
822         int r;
823
824         list_for_each_entry(lobj, validated, tv.head) {
825                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
826                 struct mm_struct *usermm;
827
828                 usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
829                 if (usermm && usermm != current->mm)
830                         return -EPERM;
831
832                 if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
833                     lobj->user_invalidated && lobj->user_pages) {
834                         amdgpu_bo_placement_from_domain(bo,
835                                                         AMDGPU_GEM_DOMAIN_CPU);
836                         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
837                         if (r)
838                                 return r;
839
840                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
841                                                      lobj->user_pages);
842                 }
843
844                 r = amdgpu_cs_bo_validate(p, bo);
845                 if (r)
846                         return r;
847
848                 kvfree(lobj->user_pages);
849                 lobj->user_pages = NULL;
850         }
851         return 0;
852 }
853
854 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
855                                 union drm_amdgpu_cs *cs)
856 {
857         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
858         struct amdgpu_vm *vm = &fpriv->vm;
859         struct amdgpu_bo_list_entry *e;
860         struct list_head duplicates;
861         unsigned int i;
862         int r;
863
864         INIT_LIST_HEAD(&p->validated);
865
866         /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
867         if (cs->in.bo_list_handle) {
868                 if (p->bo_list)
869                         return -EINVAL;
870
871                 r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
872                                        &p->bo_list);
873                 if (r)
874                         return r;
875         } else if (!p->bo_list) {
876                 /* Create an empty bo_list when no handle is provided */
877                 r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
878                                           &p->bo_list);
879                 if (r)
880                         return r;
881         }
882
883         mutex_lock(&p->bo_list->bo_list_mutex);
884
885         /* One for TTM and one for the CS job */
886         amdgpu_bo_list_for_each_entry(e, p->bo_list)
887                 e->tv.num_shared = 2;
888
889         amdgpu_bo_list_get_list(p->bo_list, &p->validated);
890
891         INIT_LIST_HEAD(&duplicates);
892         amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
893
894         if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
895                 list_add(&p->uf_entry.tv.head, &p->validated);
896
897         /* Get the userptr backing pages. If the pages were updated after being
898          * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
899          * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages
900          */
901         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
902                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
903                 bool userpage_invalidated = false;
904                 int i;
905
906                 e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
907                                         sizeof(struct page *),
908                                         GFP_KERNEL | __GFP_ZERO);
909                 if (!e->user_pages) {
910                         DRM_ERROR("kvmalloc_array failure\n");
911                         r = -ENOMEM;
912                         goto out_free_user_pages;
913                 }
914
915                 r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
916                 if (r) {
917                         kvfree(e->user_pages);
918                         e->user_pages = NULL;
919                         goto out_free_user_pages;
920                 }
921
922                 for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
923                         if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
924                                 userpage_invalidated = true;
925                                 break;
926                         }
927                 }
928                 e->user_invalidated = userpage_invalidated;
929         }
930
931         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
932                                    &duplicates);
933         if (unlikely(r != 0)) {
934                 if (r != -ERESTARTSYS)
935                         DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
936                 goto out_free_user_pages;
937         }
938
939         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
940                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
941
942                 e->bo_va = amdgpu_vm_bo_find(vm, bo);
943         }
944
945         amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
946                                           &p->bytes_moved_vis_threshold);
947         p->bytes_moved = 0;
948         p->bytes_moved_vis = 0;
949
950         r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
951                                       amdgpu_cs_bo_validate, p);
952         if (r) {
953                 DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
954                 goto error_validate;
955         }
956
957         r = amdgpu_cs_list_validate(p, &duplicates);
958         if (r)
959                 goto error_validate;
960
961         r = amdgpu_cs_list_validate(p, &p->validated);
962         if (r)
963                 goto error_validate;
964
965         if (p->uf_entry.tv.bo) {
966                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
967
968                 r = amdgpu_ttm_alloc_gart(&uf->tbo);
969                 if (r)
970                         goto error_validate;
971
972                 p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
973         }
974
975         amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
976                                      p->bytes_moved_vis);
977
978         for (i = 0; i < p->gang_size; ++i)
979                 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
980                                          p->bo_list->gws_obj,
981                                          p->bo_list->oa_obj);
982         return 0;
983
984 error_validate:
985         ttm_eu_backoff_reservation(&p->ticket, &p->validated);
986
987 out_free_user_pages:
988         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
989                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
990
991                 if (!e->user_pages)
992                         continue;
993                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
994                 kvfree(e->user_pages);
995                 e->user_pages = NULL;
996                 e->range = NULL;
997         }
998         mutex_unlock(&p->bo_list->bo_list_mutex);
999         return r;
1000 }
1001
1002 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
1003 {
1004         int i, j;
1005
1006         if (!trace_amdgpu_cs_enabled())
1007                 return;
1008
1009         for (i = 0; i < p->gang_size; ++i) {
1010                 struct amdgpu_job *job = p->jobs[i];
1011
1012                 for (j = 0; j < job->num_ibs; ++j)
1013                         trace_amdgpu_cs(p, job, &job->ibs[j]);
1014         }
1015 }
1016
1017 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1018                                struct amdgpu_job *job)
1019 {
1020         struct amdgpu_ring *ring = amdgpu_job_ring(job);
1021         unsigned int i;
1022         int r;
1023
1024         /* Only for UVD/VCE VM emulation */
1025         if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1026                 return 0;
1027
1028         for (i = 0; i < job->num_ibs; ++i) {
1029                 struct amdgpu_ib *ib = &job->ibs[i];
1030                 struct amdgpu_bo_va_mapping *m;
1031                 struct amdgpu_bo *aobj;
1032                 uint64_t va_start;
1033                 uint8_t *kptr;
1034
1035                 va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1036                 r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1037                 if (r) {
1038                         DRM_ERROR("IB va_start is invalid\n");
1039                         return r;
1040                 }
1041
1042                 if ((va_start + ib->length_dw * 4) >
1043                     (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1044                         DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1045                         return -EINVAL;
1046                 }
1047
1048                 /* the IB should be reserved at this point */
1049                 r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1050                 if (r) {
1051                         return r;
1052                 }
1053
1054                 kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1055
1056                 if (ring->funcs->parse_cs) {
1057                         memcpy(ib->ptr, kptr, ib->length_dw * 4);
1058                         amdgpu_bo_kunmap(aobj);
1059
1060                         r = amdgpu_ring_parse_cs(ring, p, job, ib);
1061                         if (r)
1062                                 return r;
1063                 } else {
1064                         ib->ptr = (uint32_t *)kptr;
1065                         r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1066                         amdgpu_bo_kunmap(aobj);
1067                         if (r)
1068                                 return r;
1069                 }
1070         }
1071
1072         return 0;
1073 }
1074
1075 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1076 {
1077         unsigned int i;
1078         int r;
1079
1080         for (i = 0; i < p->gang_size; ++i) {
1081                 r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1082                 if (r)
1083                         return r;
1084         }
1085         return 0;
1086 }
1087
1088 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1089 {
1090         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1091         struct amdgpu_job *job = p->gang_leader;
1092         struct amdgpu_device *adev = p->adev;
1093         struct amdgpu_vm *vm = &fpriv->vm;
1094         struct amdgpu_bo_list_entry *e;
1095         struct amdgpu_bo_va *bo_va;
1096         struct amdgpu_bo *bo;
1097         unsigned int i;
1098         int r;
1099
1100         r = amdgpu_vm_clear_freed(adev, vm, NULL);
1101         if (r)
1102                 return r;
1103
1104         r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1105         if (r)
1106                 return r;
1107
1108         r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
1109         if (r)
1110                 return r;
1111
1112         if (fpriv->csa_va) {
1113                 bo_va = fpriv->csa_va;
1114                 BUG_ON(!bo_va);
1115                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1116                 if (r)
1117                         return r;
1118
1119                 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1120                 if (r)
1121                         return r;
1122         }
1123
1124         amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1125                 /* ignore duplicates */
1126                 bo = ttm_to_amdgpu_bo(e->tv.bo);
1127                 if (!bo)
1128                         continue;
1129
1130                 bo_va = e->bo_va;
1131                 if (bo_va == NULL)
1132                         continue;
1133
1134                 r = amdgpu_vm_bo_update(adev, bo_va, false);
1135                 if (r)
1136                         return r;
1137
1138                 r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
1139                 if (r)
1140                         return r;
1141         }
1142
1143         r = amdgpu_vm_handle_moved(adev, vm);
1144         if (r)
1145                 return r;
1146
1147         r = amdgpu_vm_update_pdes(adev, vm, false);
1148         if (r)
1149                 return r;
1150
1151         r = amdgpu_sync_fence(&p->sync, vm->last_update);
1152         if (r)
1153                 return r;
1154
1155         for (i = 0; i < p->gang_size; ++i) {
1156                 job = p->jobs[i];
1157
1158                 if (!job->vm)
1159                         continue;
1160
1161                 job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1162         }
1163
1164         if (amdgpu_vm_debug) {
1165                 /* Invalidate all BOs to test for userspace bugs */
1166                 amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1167                         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1168
1169                         /* ignore duplicates */
1170                         if (!bo)
1171                                 continue;
1172
1173                         amdgpu_vm_bo_invalidate(adev, bo, false);
1174                 }
1175         }
1176
1177         return 0;
1178 }
1179
1180 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1181 {
1182         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1183         struct drm_gpu_scheduler *sched;
1184         struct amdgpu_bo_list_entry *e;
1185         struct dma_fence *fence;
1186         unsigned int i;
1187         int r;
1188
1189         r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1190         if (r) {
1191                 if (r != -ERESTARTSYS)
1192                         DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1193                 return r;
1194         }
1195
1196         list_for_each_entry(e, &p->validated, tv.head) {
1197                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1198                 struct dma_resv *resv = bo->tbo.base.resv;
1199                 enum amdgpu_sync_mode sync_mode;
1200
1201                 sync_mode = amdgpu_bo_explicit_sync(bo) ?
1202                         AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1203                 r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1204                                      &fpriv->vm);
1205                 if (r)
1206                         return r;
1207         }
1208
1209         for (i = 0; i < p->gang_size; ++i) {
1210                 r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
1211                 if (r)
1212                         return r;
1213         }
1214
1215         sched = p->gang_leader->base.entity->rq->sched;
1216         while ((fence = amdgpu_sync_get_fence(&p->sync))) {
1217                 struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
1218
1219                 /*
1220                  * When we have a dependency it might be necessary to insert a
1221                  * pipeline sync to make sure that all caches etc. are flushed and the
1222                  * next job actually sees the results from the previous one
1223                  * before we start executing on the same scheduler ring.
1224                  */
1225                 if (!s_fence || s_fence->sched != sched) {
1226                         dma_fence_put(fence);
1227                         continue;
1228                 }
1229
1230                 r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
1231                 dma_fence_put(fence);
1232                 if (r)
1233                         return r;
1234         }
1235         return 0;
1236 }
1237
1238 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1239 {
1240         int i;
1241
1242         for (i = 0; i < p->num_post_deps; ++i) {
1243                 if (p->post_deps[i].chain && p->post_deps[i].point) {
1244                         drm_syncobj_add_point(p->post_deps[i].syncobj,
1245                                               p->post_deps[i].chain,
1246                                               p->fence, p->post_deps[i].point);
1247                         p->post_deps[i].chain = NULL;
1248                 } else {
1249                         drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1250                                                   p->fence);
1251                 }
1252         }
1253 }
1254
1255 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1256                             union drm_amdgpu_cs *cs)
1257 {
1258         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1259         struct amdgpu_job *leader = p->gang_leader;
1260         struct amdgpu_bo_list_entry *e;
1261         unsigned int i;
1262         uint64_t seq;
1263         int r;
1264
1265         for (i = 0; i < p->gang_size; ++i)
1266                 drm_sched_job_arm(&p->jobs[i]->base);
1267
1268         for (i = 0; i < p->gang_size; ++i) {
1269                 struct dma_fence *fence;
1270
1271                 if (p->jobs[i] == leader)
1272                         continue;
1273
1274                 fence = &p->jobs[i]->base.s_fence->scheduled;
1275                 dma_fence_get(fence);
1276                 r = drm_sched_job_add_dependency(&leader->base, fence);
1277                 if (r) {
1278                         dma_fence_put(fence);
1279                         return r;
1280                 }
1281         }
1282
1283         if (p->gang_size > 1) {
1284                 for (i = 0; i < p->gang_size; ++i)
1285                         amdgpu_job_set_gang_leader(p->jobs[i], leader);
1286         }
1287
1288         /* No memory allocation is allowed while holding the notifier lock.
1289          * The lock is held until amdgpu_cs_submit is finished and fence is
1290          * added to BOs.
1291          */
1292         mutex_lock(&p->adev->notifier_lock);
1293
1294         /* If userptrs were invalidated after amdgpu_cs_parser_bos(), return
1295          * -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
1296          */
1297         r = 0;
1298         amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1299                 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1300
1301                 r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
1302                 e->range = NULL;
1303         }
1304         if (r) {
1305                 r = -EAGAIN;
1306                 mutex_unlock(&p->adev->notifier_lock);
1307                 return r;
1308         }
1309
1310         p->fence = dma_fence_get(&leader->base.s_fence->finished);
1311         list_for_each_entry(e, &p->validated, tv.head) {
1312
1313                 /* Everybody except for the gang leader uses READ */
1314                 for (i = 0; i < p->gang_size; ++i) {
1315                         if (p->jobs[i] == leader)
1316                                 continue;
1317
1318                         dma_resv_add_fence(e->tv.bo->base.resv,
1319                                            &p->jobs[i]->base.s_fence->finished,
1320                                            DMA_RESV_USAGE_READ);
1321                 }
1322
1323                 /* The gang leader is remembered as the writer */
1324                 e->tv.num_shared = 0;
1325         }
1326
1327         seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1328                                    p->fence);
1329         amdgpu_cs_post_dependencies(p);
1330
1331         if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1332             !p->ctx->preamble_presented) {
1333                 leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1334                 p->ctx->preamble_presented = true;
1335         }
1336
1337         cs->out.handle = seq;
1338         leader->uf_sequence = seq;
1339
1340         amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1341         for (i = 0; i < p->gang_size; ++i) {
1342                 amdgpu_job_free_resources(p->jobs[i]);
1343                 trace_amdgpu_cs_ioctl(p->jobs[i]);
1344                 drm_sched_entity_push_job(&p->jobs[i]->base);
1345                 p->jobs[i] = NULL;
1346         }
1347
1348         amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1349         ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1350
1351         mutex_unlock(&p->adev->notifier_lock);
1352         mutex_unlock(&p->bo_list->bo_list_mutex);
1353         return 0;
1354 }
1355
1356 /* Clean up the parser structure */
1357 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1358 {
1359         unsigned i;
1360
1361         amdgpu_sync_free(&parser->sync);
1362         for (i = 0; i < parser->num_post_deps; i++) {
1363                 drm_syncobj_put(parser->post_deps[i].syncobj);
1364                 kfree(parser->post_deps[i].chain);
1365         }
1366         kfree(parser->post_deps);
1367
1368         dma_fence_put(parser->fence);
1369
1370         if (parser->ctx)
1371                 amdgpu_ctx_put(parser->ctx);
1372         if (parser->bo_list)
1373                 amdgpu_bo_list_put(parser->bo_list);
1374
1375         for (i = 0; i < parser->nchunks; i++)
1376                 kvfree(parser->chunks[i].kdata);
1377         kvfree(parser->chunks);
1378         for (i = 0; i < parser->gang_size; ++i) {
1379                 if (parser->jobs[i])
1380                         amdgpu_job_free(parser->jobs[i]);
1381         }
1382         if (parser->uf_entry.tv.bo) {
1383                 struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
1384
1385                 amdgpu_bo_unref(&uf);
1386         }
1387 }
1388
1389 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1390 {
1391         struct amdgpu_device *adev = drm_to_adev(dev);
1392         struct amdgpu_cs_parser parser;
1393         int r;
1394
1395         if (amdgpu_ras_intr_triggered())
1396                 return -EHWPOISON;
1397
1398         if (!adev->accel_working)
1399                 return -EBUSY;
1400
1401         r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1402         if (r) {
1403                 if (printk_ratelimit())
1404                         DRM_ERROR("Failed to initialize parser %d!\n", r);
1405                 return r;
1406         }
1407
1408         r = amdgpu_cs_pass1(&parser, data);
1409         if (r)
1410                 goto error_fini;
1411
1412         r = amdgpu_cs_pass2(&parser);
1413         if (r)
1414                 goto error_fini;
1415
1416         r = amdgpu_cs_parser_bos(&parser, data);
1417         if (r) {
1418                 if (r == -ENOMEM)
1419                         DRM_ERROR("Not enough memory for command submission!\n");
1420                 else if (r != -ERESTARTSYS && r != -EAGAIN)
1421                         DRM_ERROR("Failed to process the buffer list %d!\n", r);
1422                 goto error_fini;
1423         }
1424
1425         r = amdgpu_cs_patch_jobs(&parser);
1426         if (r)
1427                 goto error_backoff;
1428
1429         r = amdgpu_cs_vm_handling(&parser);
1430         if (r)
1431                 goto error_backoff;
1432
1433         r = amdgpu_cs_sync_rings(&parser);
1434         if (r)
1435                 goto error_backoff;
1436
1437         trace_amdgpu_cs_ibs(&parser);
1438
1439         r = amdgpu_cs_submit(&parser, data);
1440         if (r)
1441                 goto error_backoff;
1442
1443         amdgpu_cs_parser_fini(&parser);
1444         return 0;
1445
1446 error_backoff:
1447         ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
1448         mutex_unlock(&parser.bo_list->bo_list_mutex);
1449
1450 error_fini:
1451         amdgpu_cs_parser_fini(&parser);
1452         return r;
1453 }
1454
1455 /**
1456  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1457  *
1458  * @dev: drm device
1459  * @data: data from userspace
1460  * @filp: file private
1461  *
1462  * Wait for the command submission identified by handle to finish.
1463  */
1464 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1465                          struct drm_file *filp)
1466 {
1467         union drm_amdgpu_wait_cs *wait = data;
1468         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1469         struct drm_sched_entity *entity;
1470         struct amdgpu_ctx *ctx;
1471         struct dma_fence *fence;
1472         long r;
1473
1474         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1475         if (ctx == NULL)
1476                 return -EINVAL;
1477
1478         r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1479                                   wait->in.ring, &entity);
1480         if (r) {
1481                 amdgpu_ctx_put(ctx);
1482                 return r;
1483         }
1484
1485         fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1486         if (IS_ERR(fence))
1487                 r = PTR_ERR(fence);
1488         else if (fence) {
1489                 r = dma_fence_wait_timeout(fence, true, timeout);
1490                 if (r > 0 && fence->error)
1491                         r = fence->error;
1492                 dma_fence_put(fence);
1493         } else
1494                 r = 1;
1495
1496         amdgpu_ctx_put(ctx);
1497         if (r < 0)
1498                 return r;
1499
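        /* out.status is 1 if the wait timed out, 0 once the submission has finished */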
1500         memset(wait, 0, sizeof(*wait));
1501         wait->out.status = (r == 0);
1502
1503         return 0;
1504 }
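
/*
 * Illustrative only, not part of this file: userspace typically reaches
 * amdgpu_cs_wait_ioctl() through libdrm; a raw call looks roughly like the
 * sketch below, where drm_fd, ctx_id, seq_no and timeout_ns are assumed to
 * come from the caller.
 *
 *     union drm_amdgpu_wait_cs wait = {};
 *
 *     wait.in.handle = seq_no;           (sequence number from the CS ioctl)
 *     wait.in.ctx_id = ctx_id;
 *     wait.in.ip_type = AMDGPU_HW_IP_GFX;
 *     wait.in.ip_instance = 0;
 *     wait.in.ring = 0;
 *     wait.in.timeout = timeout_ns;
 *     ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
 *
 * A non-zero wait.out.status afterwards means the wait timed out.
 */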
1505
1506 /**
1507  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1508  *
1509  * @adev: amdgpu device
1510  * @filp: file private
1511  * @user: drm_amdgpu_fence copied from user space
1512  */
1513 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1514                                              struct drm_file *filp,
1515                                              struct drm_amdgpu_fence *user)
1516 {
1517         struct drm_sched_entity *entity;
1518         struct amdgpu_ctx *ctx;
1519         struct dma_fence *fence;
1520         int r;
1521
1522         ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1523         if (ctx == NULL)
1524                 return ERR_PTR(-EINVAL);
1525
1526         r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1527                                   user->ring, &entity);
1528         if (r) {
1529                 amdgpu_ctx_put(ctx);
1530                 return ERR_PTR(r);
1531         }
1532
1533         fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1534         amdgpu_ctx_put(ctx);
1535
1536         return fence;
1537 }
1538
1539 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1540                                     struct drm_file *filp)
1541 {
1542         struct amdgpu_device *adev = drm_to_adev(dev);
1543         union drm_amdgpu_fence_to_handle *info = data;
1544         struct dma_fence *fence;
1545         struct drm_syncobj *syncobj;
1546         struct sync_file *sync_file;
1547         int fd, r;
1548
1549         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1550         if (IS_ERR(fence))
1551                 return PTR_ERR(fence);
1552
1553         if (!fence)
1554                 fence = dma_fence_get_stub();
1555
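        /*
         * Convert the fence into the requested handle type; every case below
         * drops the local fence reference taken above.
         */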
1556         switch (info->in.what) {
1557         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1558                 r = drm_syncobj_create(&syncobj, 0, fence);
1559                 dma_fence_put(fence);
1560                 if (r)
1561                         return r;
1562                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1563                 drm_syncobj_put(syncobj);
1564                 return r;
1565
1566         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1567                 r = drm_syncobj_create(&syncobj, 0, fence);
1568                 dma_fence_put(fence);
1569                 if (r)
1570                         return r;
1571                 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1572                 drm_syncobj_put(syncobj);
1573                 return r;
1574
1575         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1576                 fd = get_unused_fd_flags(O_CLOEXEC);
1577                 if (fd < 0) {
1578                         dma_fence_put(fence);
1579                         return fd;
1580                 }
1581
1582                 sync_file = sync_file_create(fence);
1583                 dma_fence_put(fence);
1584                 if (!sync_file) {
1585                         put_unused_fd(fd);
1586                         return -ENOMEM;
1587                 }
1588
1589                 fd_install(fd, sync_file->file);
1590                 info->out.handle = fd;
1591                 return 0;
1592
1593         default:
1594                 dma_fence_put(fence);
1595                 return -EINVAL;
1596         }
1597 }
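
/*
 * Illustrative only, not part of this file: exporting a submission fence as
 * a sync_file fd from userspace could look roughly like the sketch below,
 * with drm_fd, ctx_id and seq_no assumed to come from the caller.
 *
 *     union drm_amdgpu_fence_to_handle fth = {};
 *
 *     fth.in.fence.ctx_id = ctx_id;
 *     fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *     fth.in.fence.seq_no = seq_no;
 *     fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *     ioctl(drm_fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth);
 *
 * On success fth.out.handle holds the new sync_file fd.
 */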
1598
1599 /**
1600  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1601  *
1602  * @adev: amdgpu device
1603  * @filp: file private
1604  * @wait: wait parameters
1605  * @fences: array of drm_amdgpu_fence
1606  */
1607 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1608                                      struct drm_file *filp,
1609                                      union drm_amdgpu_wait_fences *wait,
1610                                      struct drm_amdgpu_fence *fences)
1611 {
1612         uint32_t fence_count = wait->in.fence_count;
1613         unsigned int i;
1614         long r = 1;
1615
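        /*
         * Wait for each fence in turn; bail out on the first error and stop
         * early once a wait times out.
         */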
1616         for (i = 0; i < fence_count; i++) {
1617                 struct dma_fence *fence;
1618                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1619
1620                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1621                 if (IS_ERR(fence))
1622                         return PTR_ERR(fence);
1623                 else if (!fence)
1624                         continue;
1625
1626                 r = dma_fence_wait_timeout(fence, true, timeout);
1627                 /* Pick up any fence error before dropping our reference */
1628                 if (r > 0 && fence->error)
1629                         r = fence->error;
1630                 dma_fence_put(fence);
1631                 if (r < 0)
1632                         return r;
1633
1634                 if (r == 0)
1635                         break;
1636         }
1637
1638         memset(wait, 0, sizeof(*wait));
1639         wait->out.status = (r > 0);
1640
1641         return 0;
1642 }
1643
1644 /**
1645  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1646  *
1647  * @adev: amdgpu device
1648  * @filp: file private
1649  * @wait: wait parameters
1650  * @fences: array of drm_amdgpu_fence
1651  */
1652 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1653                                     struct drm_file *filp,
1654                                     union drm_amdgpu_wait_fences *wait,
1655                                     struct drm_amdgpu_fence *fences)
1656 {
1657         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1658         uint32_t fence_count = wait->in.fence_count;
1659         uint32_t first = ~0;
1660         struct dma_fence **array;
1661         unsigned int i;
1662         long r;
1663
1664         /* Prepare the fence array */
1665         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1666
1667         if (array == NULL)
1668                 return -ENOMEM;
1669
1670         for (i = 0; i < fence_count; i++) {
1671                 struct dma_fence *fence;
1672
1673                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1674                 if (IS_ERR(fence)) {
1675                         r = PTR_ERR(fence);
1676                         goto err_free_fence_array;
1677                 } else if (fence) {
1678                         array[i] = fence;
1679                 } else { /* NULL, the fence has been already signaled */
1680                         r = 1;
1681                         first = i;
1682                         goto out;
1683                 }
1684         }
1685
1686         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1687                                        &first);
1688         if (r < 0)
1689                 goto err_free_fence_array;
1690
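/*
 * Report whether any fence signaled before the timeout and which one was
 * first; if that fence carries an error it is propagated to the caller.
 */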
1691 out:
1692         memset(wait, 0, sizeof(*wait));
1693         wait->out.status = (r > 0);
1694         wait->out.first_signaled = first;
1695
1696         if (first < fence_count && array[first])
1697                 r = array[first]->error;
1698         else
1699                 r = 0;
1700
1701 err_free_fence_array:
1702         for (i = 0; i < fence_count; i++)
1703                 dma_fence_put(array[i]);
1704         kfree(array);
1705
1706         return r;
1707 }
1708
1709 /**
1710  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1711  *
1712  * @dev: drm device
1713  * @data: data from userspace
1714  * @filp: file private
1715  */
1716 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1717                                 struct drm_file *filp)
1718 {
1719         struct amdgpu_device *adev = drm_to_adev(dev);
1720         union drm_amdgpu_wait_fences *wait = data;
1721         uint32_t fence_count = wait->in.fence_count;
1722         struct drm_amdgpu_fence *fences_user;
1723         struct drm_amdgpu_fence *fences;
1724         int r;
1725
1726         /* Get the fences from userspace */
1727         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1728                         GFP_KERNEL);
1729         if (fences == NULL)
1730                 return -ENOMEM;
1731
1732         fences_user = u64_to_user_ptr(wait->in.fences);
1733         if (copy_from_user(fences, fences_user,
1734                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1735                 r = -EFAULT;
1736                 goto err_free_fences;
1737         }
1738
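        /* wait_all requires every fence to signal, otherwise any one is enough */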
1739         if (wait->in.wait_all)
1740                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1741         else
1742                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1743
1744 err_free_fences:
1745         kfree(fences);
1746
1747         return r;
1748 }
1749
1750 /**
1751  * amdgpu_cs_find_mapping - find bo_va for VM address
1752  *
1753  * @parser: command submission parser context
1754  * @addr: VM address
1755  * @bo: resulting BO of the mapping found
1756  * @map: Placeholder to return found BO mapping
1757  *
1758  * Search the buffer objects in the command submission context for a certain
1759  * virtual memory address. Returns 0 on success and fills in @bo and @map,
1760  * or a negative error code otherwise.
1761  */
1762 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1763                            uint64_t addr, struct amdgpu_bo **bo,
1764                            struct amdgpu_bo_va_mapping **map)
1765 {
1766         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1767         struct ttm_operation_ctx ctx = { false, false };
1768         struct amdgpu_vm *vm = &fpriv->vm;
1769         struct amdgpu_bo_va_mapping *mapping;
1770         int r;
1771
1772         addr /= AMDGPU_GPU_PAGE_SIZE;
1773
1774         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1775         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1776                 return -EINVAL;
1777
1778         *bo = mapping->bo_va->base.bo;
1779         *map = mapping;
1780
1781         /* Double check that the BO is reserved by this CS */
1782         if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1783                 return -EINVAL;
1784
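        /*
         * Make sure the BO is placed as one contiguous allocation and has a
         * GART mapping, re-validating it if the contiguous flag was not set.
         */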
1785         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1786                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1787                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1788                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1789                 if (r)
1790                         return r;
1791         }
1792
1793         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1794 }