/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <[email protected]>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

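/*
 * amdgpu_cs_user_fence_chunk - grab the user fence BO referenced by a
 * FENCE chunk and remember at which offset inside it the fence value
 * should be written. The BO must be exactly one page, the offset must
 * leave room for the 8-byte fence value, and userptr BOs are rejected.
 */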
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
                                      uint32_t *offset)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        unsigned long size;
        int r;

        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
                return -EINVAL;

        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and one for the CS job */
        p->uf_entry.tv.num_shared = 2;
        p->uf_entry.user_pages = NULL;

        drm_gem_object_put_unlocked(gobj);

        size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }

        *offset = data->offset;

        return 0;

error_unref:
        amdgpu_bo_unref(&bo);
        return r;
}

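/*
 * amdgpu_cs_bo_handles_chunk - build p->bo_list from the BO handles
 * passed inline in an AMDGPU_CHUNK_ID_BO_HANDLES chunk, instead of a
 * previously created bo_list handle.
 */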
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_bo_list_in *data)
{
        int r;
        struct drm_amdgpu_bo_list_entry *info = NULL;

        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
                return r;

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
                                  &p->bo_list);
        if (r)
                goto error_free;

        kvfree(info);
        return 0;

error_free:
        /* kvfree() tolerates NULL, no separate check needed */
        kvfree(info);

        return r;
}

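/*
 * amdgpu_cs_parser_init - copy the chunk array in from userspace and
 * validate each chunk. IB chunks are counted so the job can be allocated
 * with the right number of IBs; FENCE and BO_HANDLES chunks are handled
 * right here, the remaining chunk types are processed later.
 */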
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0;
        uint32_t uf_offset = 0;
        int i;
        int ret;

        if (cs->in.num_chunks == 0)
                return 0;

        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx) {
                ret = -EINVAL;
                goto free_chunk;
        }

        mutex_lock(&p->ctx->lock);

        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                            GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        ++num_ibs;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
                                                         &uf_offset);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        size = sizeof(struct drm_amdgpu_bo_list_in);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        break;

                default:
                        ret = -EINVAL;
                        goto free_partial_kdata;
                }
        }

        ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
        if (ret)
                goto free_all_kdata;

        if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.tv.bo)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kfree(chunk_array);

        return ret;
}

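/*
 * 1 MB/s is 10^6 bytes per 10^6 microseconds, i.e. exactly one byte per
 * microsecond, so converting between accumulated us and bytes is just a
 * shift by log2(max MB/s) in the appropriate direction.
 */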
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
                return 0;

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)
                return 0;

        return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;

        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         * throttling.
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {
                *max_bytes = 0;
                *max_vis_bytes = 0;
                return;
        }

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
                s64 min_us;

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
        }

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;
                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);
                }

                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
        } else {
                *max_vis_bytes = 0;
        }

        spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
}

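/*
 * amdgpu_cs_bo_validate - place one BO for this submission. Pinned BOs
 * are left alone. While the move budget lasts the preferred domains are
 * used (with the extra visible-VRAM budget check for CPU_ACCESS_REQUIRED
 * BOs); once the budget is exhausted, or VRAM is full, fall back to the
 * allowed domains.
 */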
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
                                 struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.resv,
                .flags = 0
        };
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * that.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                                struct amdgpu_bo *validated)
{
        uint32_t domain = validated->allowed_domains;
        struct ttm_operation_ctx ctx = { true, false };
        int r;

        if (!p->evictable)
                return false;

        for (; &p->evictable->tv.head != &p->validated;
             p->evictable = list_prev_entry(p->evictable, tv.head)) {

                struct amdgpu_bo_list_entry *candidate = p->evictable;
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
                struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
                bool update_bytes_moved_vis;
                uint32_t other;

                /* If we reached our current BO we can forget it */
                if (bo == validated)
                        break;

                /* We can't move pinned BOs here */
                if (bo->pin_count)
                        continue;

                other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

                /* Check if this BO is in one of the domains we need space for */
                if (!(other & domain))
                        continue;

                /* Check if we can move this BO somewhere else */
                other = bo->allowed_domains & ~domain;
                if (!other)
                        continue;

                /* Good we can try to move this BO somewhere else */
                update_bytes_moved_vis =
                                !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                                amdgpu_bo_in_cpu_visible_vram(bo);
                amdgpu_bo_placement_from_domain(bo, other);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                p->bytes_moved += ctx.bytes_moved;
                if (update_bytes_moved_vis)
                        p->bytes_moved_vis += ctx.bytes_moved;

                if (unlikely(r))
                        break;

                p->evictable = list_prev_entry(p->evictable, tv.head);
                list_move(&candidate->tv.head, &p->validated);

                return true;
        }

        return false;
}

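/*
 * amdgpu_cs_validate - validation callback, also used for the page table
 * BOs: validate the BO itself, evicting from the current working set as
 * long as the move budget keeps running out, then validate the shadow BO
 * if one exists.
 */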
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_cs_parser *p = param;
        int r;

        do {
                r = amdgpu_cs_bo_validate(p, bo);
        } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
        if (r)
                return r;

        if (bo->shadow)
                r = amdgpu_cs_bo_validate(p, bo->shadow);

        return r;
}

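/*
 * amdgpu_cs_list_validate - validate all BOs on the given list. Rejects
 * userptr BOs that belong to a foreign mm, and binds freshly acquired
 * user pages to a userptr BO before validating it.
 */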
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                bool binding_userptr = false;
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
                        return -EPERM;

                /* Check if we have user pages and nobody bound the BO already */
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                    lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                        binding_userptr = true;
                }

                if (p->evictable == lobj)
                        p->evictable = NULL;

                r = amdgpu_cs_validate(p, bo);
                if (r)
                        return r;

                if (binding_userptr) {
                        kvfree(lobj->user_pages);
                        lobj->user_pages = NULL;
                }
        }
        return 0;
}

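/*
 * amdgpu_cs_parser_bos - collect the BOs used by this submission (from an
 * explicit bo_list handle, a BO_HANDLES chunk or an empty list), pin down
 * user pages for userptr BOs, reserve everything and validate the
 * placements within the per-IB move budget.
 */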
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
        struct amdgpu_bo *gds;
        struct amdgpu_bo *gws;
        struct amdgpu_bo *oa;
        unsigned tries = 10;
        int r;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
                        return -EINVAL;

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
                                       &p->bo_list);
                if (r)
                        return r;
        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
                                          &p->bo_list);
                if (r)
                        return r;
        }

        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);
        if (p->bo_list->first_userptr != p->bo_list->num_entries)
                p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);

        while (1) {
                struct list_head need_pages;

                r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                           &duplicates);
                if (unlikely(r != 0)) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                        goto error_free_pages;
                }

                INIT_LIST_HEAD(&need_pages);
                amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                                 &e->user_invalidated) && e->user_pages) {

                                /* We acquired a page array, but somebody
                                 * invalidated it. Free it and try again
                                 */
                                release_pages(e->user_pages,
                                              bo->tbo.ttm->num_pages);
                                kvfree(e->user_pages);
                                e->user_pages = NULL;
                        }

                        if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                            !e->user_pages) {
                                list_del(&e->tv.head);
                                list_add(&e->tv.head, &need_pages);

                                amdgpu_bo_unreserve(bo);
                        }
                }

                if (list_empty(&need_pages))
                        break;

                /* Unreserve everything again. */
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);

                /* We tried too many times, just abort */
                if (!--tries) {
                        r = -EDEADLK;
                        DRM_ERROR("deadlock in %s\n", __func__);
                        goto error_free_pages;
                }

                /* Fill the page arrays for all userptrs. */
                list_for_each_entry(e, &need_pages, tv.head) {
                        struct ttm_tt *ttm = e->tv.bo->ttm;

                        e->user_pages = kvmalloc_array(ttm->num_pages,
                                                         sizeof(struct page *),
                                                         GFP_KERNEL | __GFP_ZERO);
                        if (!e->user_pages) {
                                r = -ENOMEM;
                                DRM_ERROR("calloc failure in %s\n", __func__);
                                goto error_free_pages;
                        }

                        r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
                        if (r) {
                                DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
                                kvfree(e->user_pages);
                                e->user_pages = NULL;
                                goto error_free_pages;
                        }
                }

                /* And try again. */
                list_splice(&need_pages, &p->validated);
        }

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;
        p->evictable = list_last_entry(&p->validated,
                                       struct amdgpu_bo_list_entry,
                                       tv.head);

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r) {
                DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &p->validated);
        if (r) {
                DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
                goto error_validate;
        }

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);

        gds = p->bo_list->gds_obj;
        gws = p->bo_list->gws_obj;
        oa = p->bo_list->oa_obj;

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                /* Make sure we use the exclusive slot for shared BOs */
                if (bo->prime_shared_count)
                        e->tv.num_shared = 0;
                e->bo_va = amdgpu_vm_bo_find(vm, bo);
        }

        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
                p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
        }
        if (gws) {
                p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
                p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
        }
        if (oa) {
                p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
                p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
        }

        if (!r && p->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
        }

error_validate:
        if (r)
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                if (!e->user_pages)
                        continue;

                release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
                kvfree(e->user_pages);
        }

        return r;
}

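/*
 * amdgpu_cs_sync_rings - make the job wait for the fences already stored
 * in the reservation objects of all validated BOs, honoring per-BO
 * explicit synchronization.
 */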
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_bo_list_entry *e;
        int r;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct reservation_object *resv = bo->tbo.resv;

                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
                                     amdgpu_bo_explicit_sync(bo));

                if (r)
                        return r;
        }
        return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 * @backoff:    indicator to back off the buffer reservations
 *
 * If error is set, then unreserve the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
                                  bool backoff)
{
        unsigned i;

        if (error && backoff)
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);

        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
        }
        kfree(parser->post_deps);

        dma_fence_put(parser->fence);

        if (parser->ctx) {
                mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kfree(parser->chunks);
        if (parser->job)
                amdgpu_job_free(parser->job);
        if (parser->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

                amdgpu_bo_unref(&uf);
        }
}

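/*
 * amdgpu_cs_vm_handling - for UVD/VCE VM emulation, map each IB and let
 * the ring parse it or patch it in place; then update the page tables
 * and bo_va mappings, record the page directory address, and sync the
 * job to the fences of all validated BOs.
 */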
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        int r;

        /* Only for UVD/VCE VM emulation */
        if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
                unsigned i, j;

                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
                        struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                        struct amdgpu_bo_va_mapping *m;
                        struct amdgpu_bo *aobj = NULL;
                        struct amdgpu_cs_chunk *chunk;
                        uint64_t offset, va_start;
                        struct amdgpu_ib *ib;
                        uint8_t *kptr;

                        chunk = &p->chunks[i];
                        ib = &p->job->ibs[j];
                        chunk_ib = chunk->kdata;

                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                                continue;

                        va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                        if (r) {
                                DRM_ERROR("IB va_start is invalid\n");
                                return r;
                        }

                        if ((va_start + chunk_ib->ib_bytes) >
                            (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                                DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                                return -EINVAL;
                        }

                        /* the IB should be reserved at this point */
                        r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                        if (r)
                                return r;

                        offset = m->start * AMDGPU_GPU_PAGE_SIZE;
                        kptr += va_start - offset;

                        if (ring->funcs->parse_cs) {
                                memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
                                amdgpu_bo_kunmap(aobj);

                                r = amdgpu_ring_parse_cs(ring, p, j);
                                if (r)
                                        return r;
                        } else {
                                ib->ptr = (uint32_t *)kptr;
                                r = amdgpu_ring_patch_cs_in_place(ring, p, j);
                                amdgpu_bo_kunmap(aobj);
                                if (r)
                                        return r;
                        }

                        j++;
                }
        }

        if (!p->job->vm)
                return amdgpu_cs_sync_rings(p);

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(adev, &p->job->sync,
                              fpriv->prt_va->last_pt_update, false);
        if (r)
                return r;

        if (amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;

                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                f = bo_va->last_pt_update;
                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                if (r)
                        return r;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct dma_fence *f;

                /* ignore duplicates */
                bo = ttm_to_amdgpu_bo(e->tv.bo);
                if (!bo)
                        continue;

                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;

                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                f = bo_va->last_pt_update;
                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                if (r)
                        return r;
        }

        r = amdgpu_vm_handle_moved(adev, vm);
        if (r)
                return r;

        r = amdgpu_vm_update_directories(adev, vm);
        if (r)
                return r;

        r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
        if (r)
                return r;

        p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        /* ignore duplicates */
                        if (!bo)
                                continue;

                        amdgpu_vm_bo_invalidate(adev, bo, false);
                }
        }

        return amdgpu_cs_sync_rings(p);
}

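/*
 * amdgpu_cs_ib_fill - walk the IB chunks, pick the scheduler entity the
 * job will run on (all IBs must target the same entity) and initialize
 * each amdgpu_ib from its chunk.
 */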
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                             struct amdgpu_cs_parser *parser)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        int r, ce_preempt = 0, de_preempt = 0;
        struct amdgpu_ring *ring;
        int i, j;

        for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
                struct amdgpu_cs_chunk *chunk;
                struct amdgpu_ib *ib;
                struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                struct drm_sched_entity *entity;

                chunk = &parser->chunks[i];
                ib = &parser->job->ibs[j];
                chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                        continue;

                if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                                        ce_preempt++;
                                else
                                        de_preempt++;
                        }

                        /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
                        if (ce_preempt > 1 || de_preempt > 1)
                                return -EINVAL;
                }

                r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
                                          chunk_ib->ip_instance, chunk_ib->ring,
                                          &entity);
                if (r)
                        return r;

                if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                        parser->job->preamble_status |=
                                AMDGPU_PREAMBLE_IB_PRESENT;

                if (parser->entity && parser->entity != entity)
                        return -EINVAL;

                parser->entity = entity;

                ring = to_amdgpu_ring(entity->rq->sched);
                r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
                                  chunk_ib->ib_bytes : 0, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
                }

                ib->gpu_addr = chunk_ib->va_start;
                ib->length_dw = chunk_ib->ib_bytes / 4;
                ib->flags = chunk_ib->flags;

                j++;
        }

        /* UVD & VCE fw doesn't support user fences */
        ring = to_amdgpu_ring(parser->entity->rq->sched);
        if (parser->job->uf_addr && (
            ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
            ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;

        return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

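/*
 * amdgpu_cs_process_fence_dep - add the fences referenced by a
 * DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk to the job's sync object;
 * for scheduled dependencies we only wait until the fence is scheduled,
 * not until it finishes.
 */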
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                                       struct amdgpu_cs_chunk *chunk)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned num_deps;
        int i, r;
        struct drm_amdgpu_cs_chunk_dep *deps;

        deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ctx *ctx;
                struct drm_sched_entity *entity;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;

                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
                                          deps[i].ip_instance,
                                          deps[i].ring, &entity);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }

                fence = amdgpu_ctx_get_fence(ctx, entity,
                                             deps[i].handle);

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
                        struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
                        struct dma_fence *old = fence;

                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }

                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        amdgpu_ctx_put(ctx);
                        return r;
                } else if (fence) {
                        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
                                        true);
                        dma_fence_put(fence);
                        amdgpu_ctx_put(ctx);
                        if (r)
                                return r;
                }
        }
        return 0;
}

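/*
 * amdgpu_syncobj_lookup_and_add_to_sync - resolve a syncobj handle
 * (optionally at a timeline point) to a fence and add that fence to the
 * job's sync object.
 */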
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
                                                 uint32_t handle, u64 point,
                                                 u64 flags)
{
        struct dma_fence *fence;
        int r;

        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
        if (r) {
                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
                          handle, point, r);
                return r;
        }

        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
        dma_fence_put(fence);

        return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
                                            struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps;
        unsigned num_deps;
        int i, r;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
                                                          0, 0);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
                                                     struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
        unsigned num_deps;
        int i, r;

        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add_to_sync(p,
                                                          syncobj_deps[i].handle,
                                                          syncobj_deps[i].point,
                                                          syncobj_deps[i].flags);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
                                             struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps;
        unsigned num_deps;
        int i;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                p->post_deps[i].syncobj =
                        drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_deps[i].syncobj)
                        return -EINVAL;
                p->post_deps[i].chain = NULL;
                p->post_deps[i].point = 0;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
                                                      struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
        unsigned num_deps;
        int i;

        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

                dep->chain = NULL;
                if (syncobj_deps[i].point) {
                        dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
                        if (!dep->chain)
                                return -ENOMEM;
                }

                dep->syncobj = drm_syncobj_find(p->filp,
                                                syncobj_deps[i].handle);
                if (!dep->syncobj) {
                        kfree(dep->chain);
                        return -EINVAL;
                }
                dep->point = syncobj_deps[i].point;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
{
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_process_fence_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                }
        }

        return 0;
}

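/*
 * amdgpu_cs_post_dependencies - signal the syncobjs collected from the
 * *_OUT chunks with the submission's finished fence, adding a chain node
 * at the requested point for timeline syncobjs.
 */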
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        int i;

        for (i = 0; i < p->num_post_deps; ++i) {
                if (p->post_deps[i].chain && p->post_deps[i].point) {
                        drm_syncobj_add_point(p->post_deps[i].syncobj,
                                              p->post_deps[i].chain,
                                              p->fence, p->post_deps[i].point);
                        p->post_deps[i].chain = NULL;
                } else {
                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
                                                  p->fence);
                }
        }
}

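/*
 * amdgpu_cs_submit - hand the finished job over to the GPU scheduler.
 * Takes the MMU notifier lock to make sure no userptr BO was invalidated
 * after validation, signals the post dependencies, attaches the fence to
 * all reserved BOs and returns the new sequence number to userspace.
 */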
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_sched_entity *entity = p->entity;
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
        int r;

        job = p->job;
        p->job = NULL;

        r = drm_sched_job_init(&job->base, entity, p->filp);
        if (r)
                goto error_unlock;

        /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
                        r = -ERESTARTSYS;
                        goto error_abort;
                }
        }

        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);

        amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
        amdgpu_cs_post_dependencies(p);

        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        job->uf_sequence = seq;

        amdgpu_job_free_resources(job);

        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        amdgpu_mn_unlock(p->mn);

        return 0;

error_abort:
        drm_sched_job_cleanup(&job->base);
        amdgpu_mn_unlock(p->mn);

error_unlock:
        amdgpu_job_free(job);
        return r;
}

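/*
 * amdgpu_cs_ioctl - main entry point for command submission from
 * userspace: parse the chunks, fill the IBs, resolve dependencies,
 * reserve and validate the BOs, run the VM handling and finally submit
 * the job.
 */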
1393 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1394 {
1395         struct amdgpu_device *adev = dev->dev_private;
1396         union drm_amdgpu_cs *cs = data;
1397         struct amdgpu_cs_parser parser = {};
1398         bool reserved_buffers = false;
1399         int i, r;
1400
1401         if (!adev->accel_working)
1402                 return -EBUSY;
1403
1404         parser.adev = adev;
1405         parser.filp = filp;
1406
1407         r = amdgpu_cs_parser_init(&parser, data);
1408         if (r) {
1409                 DRM_ERROR("Failed to initialize parser %d!\n", r);
1410                 goto out;
1411         }
1412
1413         r = amdgpu_cs_ib_fill(adev, &parser);
1414         if (r)
1415                 goto out;
1416
1417         r = amdgpu_cs_dependencies(adev, &parser);
1418         if (r) {
1419                 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1420                 goto out;
1421         }
1422
1423         r = amdgpu_cs_parser_bos(&parser, data);
1424         if (r) {
1425                 if (r == -ENOMEM)
1426                         DRM_ERROR("Not enough memory for command submission!\n");
1427                 else if (r != -ERESTARTSYS)
1428                         DRM_ERROR("Failed to process the buffer list %d!\n", r);
1429                 goto out;
1430         }
1431
1432         reserved_buffers = true;
1433
1434         for (i = 0; i < parser.job->num_ibs; i++)
1435                 trace_amdgpu_cs(&parser, i);
1436
1437         r = amdgpu_cs_vm_handling(&parser);
1438         if (r)
1439                 goto out;
1440
1441         r = amdgpu_cs_submit(&parser, cs);
1442
1443 out:
1444         amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1445         return r;
1446 }
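/*
 * Illustrative userspace sketch (not part of the driver): one way a client
 * could reach amdgpu_cs_ioctl() above.  submit_cs() is a hypothetical helper;
 * it assumes an already-open render-node fd, a context created through the
 * DRM_AMDGPU_CTX ioctl, and chunk payloads (IB, bo-list and fence chunks)
 * already encoded per the uapi in amdgpu_drm.h.  drmCommandWriteRead() from
 * libdrm retries the ioctl on EINTR, which matters because the submit path
 * above can fail with -ERESTARTSYS.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>            /* drmCommandWriteRead() */
#include <drm/amdgpu_drm.h>     /* DRM_AMDGPU_CS, union drm_amdgpu_cs */

static int submit_cs(int fd, uint32_t ctx_id,
                     struct drm_amdgpu_cs_chunk *chunks, uint32_t num_chunks,
                     uint64_t *out_seq)
{
        uint64_t chunk_ptrs[num_chunks];        /* VLA, fine for a sketch */
        union drm_amdgpu_cs cs;
        uint32_t i;
        int r;

        /* The kernel expects an array of user pointers to the chunks. */
        for (i = 0; i < num_chunks; i++)
                chunk_ptrs[i] = (uint64_t)(uintptr_t)&chunks[i];

        memset(&cs, 0, sizeof(cs));
        cs.in.ctx_id = ctx_id;
        cs.in.num_chunks = num_chunks;
        cs.in.chunks = (uint64_t)(uintptr_t)chunk_ptrs;

        r = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
        if (r)
                return r;

        *out_seq = cs.out.handle;       /* sequence number, cs->out.handle above */
        return 0;
}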
1447
1448 /**
1449  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1450  *
1451  * @dev: drm device
1452  * @data: data from userspace
1453  * @filp: file private
1454  *
1455  * Wait for the command submission identified by handle to finish.
1456  */
1457 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1458                          struct drm_file *filp)
1459 {
1460         union drm_amdgpu_wait_cs *wait = data;
1461         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1462         struct drm_sched_entity *entity;
1463         struct amdgpu_ctx *ctx;
1464         struct dma_fence *fence;
1465         long r;
1466
1467         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1468         if (ctx == NULL)
1469                 return -EINVAL;
1470
1471         r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1472                                   wait->in.ring, &entity);
1473         if (r) {
1474                 amdgpu_ctx_put(ctx);
1475                 return r;
1476         }
1477
1478         fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1479         if (IS_ERR(fence)) {
1480                 r = PTR_ERR(fence);
1481         } else if (fence) {
1482                 r = dma_fence_wait_timeout(fence, true, timeout);
1483                 if (r > 0 && fence->error)
1484                         r = fence->error;
1485                 dma_fence_put(fence);
1486         } else {
1487                 r = 1;
1488         }
1489         amdgpu_ctx_put(ctx);
1490         if (r < 0)
1491                 return r;
1492
1493         memset(wait, 0, sizeof(*wait));
1494         wait->out.status = (r == 0);
1495
1496         return 0;
1497 }
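/*
 * Illustrative userspace sketch (not part of the driver; assumes the same
 * includes as the submit_cs() sketch above): polling the sequence number
 * returned by the CS ioctl.  wait_cs() is a hypothetical helper.  Note the
 * inverted status convention: out.status is non-zero when the submission
 * has NOT finished within the timeout.
 */
static int wait_cs(int fd, uint32_t ctx_id, uint32_t ip_type, uint32_t ring,
                   uint64_t seq, uint64_t timeout_ns, bool *busy)
{
        union drm_amdgpu_wait_cs args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = seq;
        args.in.ip_type = ip_type;      /* e.g. AMDGPU_HW_IP_GFX */
        args.in.ring = ring;
        args.in.ctx_id = ctx_id;
        /* absolute deadline in ns; a negative value means wait forever,
         * matching amdgpu_gem_timeout() used above */
        args.in.timeout = timeout_ns;

        r = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &args, sizeof(args));
        if (r)
                return r;

        *busy = args.out.status != 0;
        return 0;
}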
1498
1499 /**
1500  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1501  *
1502  * @adev: amdgpu device
1503  * @filp: file private
1504  * @user: drm_amdgpu_fence copied from user space
1505  */
1506 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1507                                              struct drm_file *filp,
1508                                              struct drm_amdgpu_fence *user)
1509 {
1510         struct drm_sched_entity *entity;
1511         struct amdgpu_ctx *ctx;
1512         struct dma_fence *fence;
1513         int r;
1514
1515         ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1516         if (ctx == NULL)
1517                 return ERR_PTR(-EINVAL);
1518
1519         r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1520                                   user->ring, &entity);
1521         if (r) {
1522                 amdgpu_ctx_put(ctx);
1523                 return ERR_PTR(r);
1524         }
1525
1526         fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1527         amdgpu_ctx_put(ctx);
1528
1529         return fence;
1530 }
1531
1532 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1533                                     struct drm_file *filp)
1534 {
1535         struct amdgpu_device *adev = dev->dev_private;
1536         union drm_amdgpu_fence_to_handle *info = data;
1537         struct dma_fence *fence;
1538         struct drm_syncobj *syncobj;
1539         struct sync_file *sync_file;
1540         int fd, r;
1541
1542         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1543         if (IS_ERR(fence))
1544                 return PTR_ERR(fence);
1545
1546         if (!fence)
1547                 fence = dma_fence_get_stub();
1548
1549         switch (info->in.what) {
1550         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1551                 r = drm_syncobj_create(&syncobj, 0, fence);
1552                 dma_fence_put(fence);
1553                 if (r)
1554                         return r;
1555                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1556                 drm_syncobj_put(syncobj);
1557                 return r;
1558
1559         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1560                 r = drm_syncobj_create(&syncobj, 0, fence);
1561                 dma_fence_put(fence);
1562                 if (r)
1563                         return r;
1564                 r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1565                 drm_syncobj_put(syncobj);
1566                 return r;
1567
1568         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1569                 fd = get_unused_fd_flags(O_CLOEXEC);
1570                 if (fd < 0) {
1571                         dma_fence_put(fence);
1572                         return fd;
1573                 }
1574
1575                 sync_file = sync_file_create(fence);
1576                 dma_fence_put(fence);
1577                 if (!sync_file) {
1578                         put_unused_fd(fd);
1579                         return -ENOMEM;
1580                 }
1581
1582                 fd_install(fd, sync_file->file);
1583                 info->out.handle = fd;
1584                 return 0;
1585
1586         default:
1587                 return -EINVAL;
1588         }
1589 }
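/*
 * Illustrative userspace sketch (not part of the driver; assumes the same
 * includes as the submit_cs() sketch above): exporting a submission's fence
 * as a pollable sync_file fd via the GET_SYNC_FILE_FD case handled above.
 * fence_to_sync_file() is a hypothetical helper.
 */
static int fence_to_sync_file(int fd, const struct drm_amdgpu_fence *fence)
{
        union drm_amdgpu_fence_to_handle fth;
        int r;

        memset(&fth, 0, sizeof(fth));
        fth.in.fence = *fence;
        fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

        r = drmCommandWriteRead(fd, DRM_AMDGPU_FENCE_TO_HANDLE, &fth,
                                sizeof(fth));
        if (r)
                return r;

        /* fd_install() above published the fd: poll()/epoll() it, or hand it
         * to another process or driver that consumes sync_files. */
        return (int)fth.out.handle;
}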
1590
1591 /**
1592  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1593  *
1594  * @adev: amdgpu device
1595  * @filp: file private
1596  * @wait: wait parameters
1597  * @fences: array of drm_amdgpu_fence
1598  */
1599 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1600                                      struct drm_file *filp,
1601                                      union drm_amdgpu_wait_fences *wait,
1602                                      struct drm_amdgpu_fence *fences)
1603 {
1604         uint32_t fence_count = wait->in.fence_count;
1605         unsigned int i;
1606         long r = 1;
1607
1608         for (i = 0; i < fence_count; i++) {
1609                 struct dma_fence *fence;
1610                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1611
1612                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1613                 if (IS_ERR(fence))
1614                         return PTR_ERR(fence);
1615                 else if (!fence)
1616                         continue;
1617
1618                 r = dma_fence_wait_timeout(fence, true, timeout);
1619                 if (r > 0 && fence->error)
1620                         r = fence->error;
1621                 dma_fence_put(fence);
1622
1623                 if (r < 0)
1624                         return r;
1625
1626                 if (r == 0)
1627                         break;
1628         }
1629
1630         memset(wait, 0, sizeof(*wait));
1631         wait->out.status = (r > 0);
1632
1633         return 0;
1634 }
1635
1636 /**
1637  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1638  *
1639  * @adev: amdgpu device
1640  * @filp: file private
1641  * @wait: wait parameters
1642  * @fences: array of drm_amdgpu_fence
1643  */
1644 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1645                                     struct drm_file *filp,
1646                                     union drm_amdgpu_wait_fences *wait,
1647                                     struct drm_amdgpu_fence *fences)
1648 {
1649         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1650         uint32_t fence_count = wait->in.fence_count;
1651         uint32_t first = ~0;
1652         struct dma_fence **array;
1653         unsigned int i;
1654         long r;
1655
1656         /* Prepare the fence array */
1657         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1658
1659         if (array == NULL)
1660                 return -ENOMEM;
1661
1662         for (i = 0; i < fence_count; i++) {
1663                 struct dma_fence *fence;
1664
1665                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1666                 if (IS_ERR(fence)) {
1667                         r = PTR_ERR(fence);
1668                         goto err_free_fence_array;
1669                 } else if (fence) {
1670                         array[i] = fence;
1671                 } else { /* NULL, the fence has been already signaled */
1672                         r = 1;
1673                         first = i;
1674                         goto out;
1675                 }
1676         }
1677
1678         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1679                                        &first);
1680         if (r < 0)
1681                 goto err_free_fence_array;
1682
1683 out:
1684         memset(wait, 0, sizeof(*wait));
1685         wait->out.status = (r > 0);
1686         wait->out.first_signaled = first;
1687
1688         if (first < fence_count && array[first])
1689                 r = array[first]->error;
1690         else
1691                 r = 0;
1692
1693 err_free_fence_array:
1694         for (i = 0; i < fence_count; i++)
1695                 dma_fence_put(array[i]);
1696         kfree(array);
1697
1698         return r;
1699 }
1700
1701 /**
1702  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1703  *
1704  * @dev: drm device
1705  * @data: data from userspace
1706  * @filp: file private
1707  */
1708 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1709                                 struct drm_file *filp)
1710 {
1711         struct amdgpu_device *adev = dev->dev_private;
1712         union drm_amdgpu_wait_fences *wait = data;
1713         uint32_t fence_count = wait->in.fence_count;
1714         struct drm_amdgpu_fence *fences_user;
1715         struct drm_amdgpu_fence *fences;
1716         int r;
1717
1718         /* Get the fences from userspace */
1719         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1720                         GFP_KERNEL);
1721         if (fences == NULL)
1722                 return -ENOMEM;
1723
1724         fences_user = u64_to_user_ptr(wait->in.fences);
1725         if (copy_from_user(fences, fences_user,
1726                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1727                 r = -EFAULT;
1728                 goto err_free_fences;
1729         }
1730
1731         if (wait->in.wait_all)
1732                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1733         else
1734                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1735
1736 err_free_fences:
1737         kfree(fences);
1738
1739         return r;
1740 }
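/*
 * Illustrative userspace sketch (not part of the driver; assumes the same
 * includes as the submit_cs() sketch above): waiting on a batch of fences,
 * any-of or all-of, through the ioctl above.  wait_fences() is a
 * hypothetical helper.  Unlike WAIT_CS, out.status here is non-zero on
 * success, i.e. when the fences signaled in time.
 */
static int wait_fences(int fd, struct drm_amdgpu_fence *fences,
                       uint32_t count, bool wait_all, uint64_t timeout_ns,
                       uint32_t *first_signaled, bool *signaled)
{
        union drm_amdgpu_wait_fences args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.fences = (uint64_t)(uintptr_t)fences;
        args.in.fence_count = count;
        args.in.wait_all = wait_all;
        args.in.timeout_ns = timeout_ns;

        r = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_FENCES, &args,
                                sizeof(args));
        if (r)
                return r;

        if (!wait_all && first_signaled)
                *first_signaled = args.out.first_signaled;
        *signaled = args.out.status != 0;
        return 0;
}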
1741
1742 /**
1743  * amdgpu_cs_find_mapping - find the BO and VA mapping for a GPU address
1744  *
1745  * @parser: command submission parser context
1746  * @addr: VM address
1747  * @bo: resulting BO of the mapping found
1748  * @map: resulting VA mapping found
1749  *
1750  * Search the buffer objects in the command submission context for a certain
1751  * virtual memory address; returns 0 and fills in @bo and @map, or an errno.
1752  */
1753 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1754                            uint64_t addr, struct amdgpu_bo **bo,
1755                            struct amdgpu_bo_va_mapping **map)
1756 {
1757         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1758         struct ttm_operation_ctx ctx = { false, false };
1759         struct amdgpu_vm *vm = &fpriv->vm;
1760         struct amdgpu_bo_va_mapping *mapping;
1761         int r;
1762
1763         addr /= AMDGPU_GPU_PAGE_SIZE;
1764
1765         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1766         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1767                 return -EINVAL;
1768
1769         *bo = mapping->bo_va->base.bo;
1770         *map = mapping;
1771
1772         /* Double check that the BO is reserved by this CS */
1773         if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
1774                 return -EINVAL;
1775
1776         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1777                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1778                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1779                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1780                 if (r)
1781                         return r;
1782         }
1783
1784         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1785 }
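/*
 * Illustrative in-kernel sketch (hypothetical, condensed from the pattern
 * used by the UVD/VCE IB parsers): resolving a GPU virtual address found
 * inside a command buffer back to its backing BO via the helper above.
 */
static int example_resolve_addr(struct amdgpu_cs_parser *parser,
                                uint64_t addr, uint64_t *offset_in_bo)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        int r;

        r = amdgpu_cs_find_mapping(parser, addr, &bo, &mapping);
        if (r) {
                DRM_ERROR("Can't find BO for addr 0x%010llx\n",
                          (unsigned long long)addr);
                return r;
        }

        /* mapping->start is in GPU pages, so convert back to bytes */
        *offset_in_bo = addr - mapping->start * AMDGPU_GPU_PAGE_SIZE;
        return 0;
}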