1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include <drm/ttm/ttm_placement.h>
29
30 #include "vmwgfx_resource_priv.h"
31 #include "vmwgfx_binding.h"
32 #include "vmwgfx_drv.h"
33
34 #define VMW_RES_EVICT_ERR_COUNT 10
35
36 /**
37  * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
38  * @res: The resource
39  */
40 void vmw_resource_mob_attach(struct vmw_resource *res)
41 {
42         struct vmw_buffer_object *backup = res->backup;
43         struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
44
45         dma_resv_assert_held(res->backup->base.base.resv);
46         res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
47                 res->func->prio;
48
49         while (*new) {
50                 struct vmw_resource *this =
51                         container_of(*new, struct vmw_resource, mob_node);
52
53                 parent = *new;
54                 new = (res->backup_offset < this->backup_offset) ?
55                         &((*new)->rb_left) : &((*new)->rb_right);
56         }
57
58         rb_link_node(&res->mob_node, parent, new);
59         rb_insert_color(&res->mob_node, &backup->res_tree);
60
61         vmw_bo_prio_add(backup, res->used_prio);
62 }
63
64 /**
65  * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
66  * @res: The resource
67  */
68 void vmw_resource_mob_detach(struct vmw_resource *res)
69 {
70         struct vmw_buffer_object *backup = res->backup;
71
72         dma_resv_assert_held(backup->base.base.resv);
73         if (vmw_resource_mob_attached(res)) {
74                 rb_erase(&res->mob_node, &backup->res_tree);
75                 RB_CLEAR_NODE(&res->mob_node);
76                 vmw_bo_prio_del(backup, res->used_prio);
77         }
78 }
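
/*
 * Editorial sketch (not part of the driver): both helpers above assert that
 * the backing mob's reservation (dma_resv) is held, so a caller must reserve
 * the buffer object first. The function below is hypothetical and only
 * illustrates that locking rule; it assumes res->backup is already set.
 */
#if 0
static int example_attach_locked(struct vmw_resource *res)
{
        struct ttm_buffer_object *bo = &res->backup->base;
        int ret;

        ret = ttm_bo_reserve(bo, true, false, NULL); /* takes bo dma_resv */
        if (ret)
                return ret;

        vmw_resource_mob_attach(res);   /* safe: reservation held */
        ttm_bo_unreserve(bo);
        return 0;
}
#endif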
79
80 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
81 {
82         kref_get(&res->kref);
83         return res;
84 }
85
86 struct vmw_resource *
87 vmw_resource_reference_unless_doomed(struct vmw_resource *res)
88 {
89         return kref_get_unless_zero(&res->kref) ? res : NULL;
90 }
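
/*
 * Editorial sketch: the "unless doomed" variant uses the standard
 * kref_get_unless_zero() idiom - it refuses to hand out a reference once the
 * refcount has hit zero, instead of resurrecting an object whose release is
 * already in flight. A hypothetical lookup path might use it like this:
 */
#if 0
static struct vmw_resource *example_try_get(struct vmw_resource *res)
{
        /* res was e.g. found in a table that release has not pruned yet */
        return vmw_resource_reference_unless_doomed(res);
        /* NULL means release is in progress; do not touch res */
}
#endif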
91
92 /**
93  * vmw_resource_release_id - release a resource id to the id manager.
94  *
95  * @res: Pointer to the resource.
96  *
97  * Release the resource id to the resource id manager and set it to -1
98  */
99 void vmw_resource_release_id(struct vmw_resource *res)
100 {
101         struct vmw_private *dev_priv = res->dev_priv;
102         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
103
104         spin_lock(&dev_priv->resource_lock);
105         if (res->id != -1)
106                 idr_remove(idr, res->id);
107         res->id = -1;
108         spin_unlock(&dev_priv->resource_lock);
109 }
110
111 static void vmw_resource_release(struct kref *kref)
112 {
113         struct vmw_resource *res =
114             container_of(kref, struct vmw_resource, kref);
115         struct vmw_private *dev_priv = res->dev_priv;
116         int id;
117         int ret;
118         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
119
120         spin_lock(&dev_priv->resource_lock);
121         list_del_init(&res->lru_head);
122         spin_unlock(&dev_priv->resource_lock);
123         if (res->backup) {
124                 struct ttm_buffer_object *bo = &res->backup->base;
125
126                 ret = ttm_bo_reserve(bo, false, false, NULL);
127                 BUG_ON(ret);
128                 if (vmw_resource_mob_attached(res) &&
129                     res->func->unbind != NULL) {
130                         struct ttm_validate_buffer val_buf;
131
132                         val_buf.bo = bo;
133                         val_buf.num_shared = 0;
134                         res->func->unbind(res, false, &val_buf);
135                 }
136                 res->backup_dirty = false;
137                 vmw_resource_mob_detach(res);
138                 if (res->dirty)
139                         res->func->dirty_free(res);
140                 if (res->coherent)
141                         vmw_bo_dirty_release(res->backup);
142                 ttm_bo_unreserve(bo);
143                 vmw_bo_unreference(&res->backup);
144         }
145
146         if (likely(res->hw_destroy != NULL)) {
147                 mutex_lock(&dev_priv->binding_mutex);
148                 vmw_binding_res_list_kill(&res->binding_head);
149                 mutex_unlock(&dev_priv->binding_mutex);
150                 res->hw_destroy(res);
151         }
152
153         id = res->id;
154         if (res->res_free != NULL)
155                 res->res_free(res);
156         else
157                 kfree(res);
158
159         spin_lock(&dev_priv->resource_lock);
160         if (id != -1)
161                 idr_remove(idr, id);
162         spin_unlock(&dev_priv->resource_lock);
163 }
164
165 void vmw_resource_unreference(struct vmw_resource **p_res)
166 {
167         struct vmw_resource *res = *p_res;
168
169         *p_res = NULL;
170         kref_put(&res->kref, vmw_resource_release);
171 }
172
173
174 /**
175  * vmw_resource_alloc_id - allocate a resource id from the id manager.
176  *
177  * @res: Pointer to the resource.
178  *
179  * Allocate the lowest free resource id from the resource id manager, and
180  * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
181  */
182 int vmw_resource_alloc_id(struct vmw_resource *res)
183 {
184         struct vmw_private *dev_priv = res->dev_priv;
185         int ret;
186         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
187
188         BUG_ON(res->id != -1);
189
190         idr_preload(GFP_KERNEL);
191         spin_lock(&dev_priv->resource_lock);
192
193         ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
194         if (ret >= 0)
195                 res->id = ret;
196
197         spin_unlock(&dev_priv->resource_lock);
198         idr_preload_end();
199         return ret < 0 ? ret : 0;
200 }
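
/*
 * Editorial sketch: the idr_preload()/idr_alloc(GFP_NOWAIT) pairing above is
 * the standard pattern for allocating an id under a spinlock: preallocate
 * while sleeping is still allowed, then allocate atomically. A minimal,
 * generic form (idr, lock and ptr are hypothetical parameters):
 */
#if 0
static int example_idr_alloc(struct idr *idr, spinlock_t *lock, void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);        /* may sleep; fills per-cpu cache */
        spin_lock(lock);
        id = idr_alloc(idr, ptr, 1, 0, GFP_NOWAIT);     /* atomic context */
        spin_unlock(lock);
        idr_preload_end();

        return id;      /* >= 1 on success, negative errno on failure */
}
#endif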
201
202 /**
203  * vmw_resource_init - initialize a struct vmw_resource
204  *
205  * @dev_priv:       Pointer to a device private struct.
206  * @res:            The struct vmw_resource to initialize.
207  * @delay_id:       Boolean whether to defer device id allocation until
208  *                  the first validation.
209  * @res_free:       Resource destructor.
210  * @func:           Resource function table.
211  */
212 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
213                       bool delay_id,
214                       void (*res_free) (struct vmw_resource *res),
215                       const struct vmw_res_func *func)
216 {
217         kref_init(&res->kref);
218         res->hw_destroy = NULL;
219         res->res_free = res_free;
220         res->dev_priv = dev_priv;
221         res->func = func;
222         RB_CLEAR_NODE(&res->mob_node);
223         INIT_LIST_HEAD(&res->lru_head);
224         INIT_LIST_HEAD(&res->binding_head);
225         res->id = -1;
226         res->backup = NULL;
227         res->backup_offset = 0;
228         res->backup_dirty = false;
229         res->res_dirty = false;
230         res->coherent = false;
231         res->used_prio = 3;
232         res->dirty = NULL;
233         if (delay_id)
234                 return 0;
235         else
236                 return vmw_resource_alloc_id(res);
237 }
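
/*
 * Editorial sketch: a resource type would typically embed struct vmw_resource
 * in a larger struct and pass its own destructor and function table to
 * vmw_resource_init(). All names below (example_res, example_func, ...) are
 * hypothetical.
 */
#if 0
struct example_res {
        struct vmw_resource res;
        /* ... type-specific state ... */
};

static void example_res_free(struct vmw_resource *res)
{
        kfree(container_of(res, struct example_res, res));
}

static int example_res_create(struct vmw_private *dev_priv,
                              const struct vmw_res_func *example_func,
                              struct vmw_resource **p_res)
{
        struct example_res *eres = kzalloc(sizeof(*eres), GFP_KERNEL);
        int ret;

        if (!eres)
                return -ENOMEM;

        /* delay_id == true: defer device id allocation to first validation */
        ret = vmw_resource_init(dev_priv, &eres->res, true,
                                example_res_free, example_func);
        if (ret) {
                kfree(eres);
                return ret;
        }

        *p_res = &eres->res;
        return 0;
}
#endif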
238
239
240 /**
241  * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
242  * TTM user-space handle and perform basic type checks
243  *
244  * @dev_priv:     Pointer to a device private struct
245  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
246  * @handle:       The TTM user-space handle
247  * @converter:    Pointer to an object describing the resource type
248  * @p_res:        On successful return the location pointed to will contain
249  *                a pointer to a refcounted struct vmw_resource.
250  *
251  * If the handle can't be found or is associated with an incorrect resource
252  * type, -EINVAL will be returned.
253  */
254 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
255                                     struct ttm_object_file *tfile,
256                                     uint32_t handle,
257                                     const struct vmw_user_resource_conv
258                                     *converter,
259                                     struct vmw_resource **p_res)
260 {
261         struct ttm_base_object *base;
262         struct vmw_resource *res;
263         int ret = -EINVAL;
264
265         base = ttm_base_object_lookup(tfile, handle);
266         if (unlikely(base == NULL))
267                 return -EINVAL;
268
269         if (unlikely(ttm_base_object_type(base) != converter->object_type))
270                 goto out_bad_resource;
271
272         res = converter->base_obj_to_res(base);
273         kref_get(&res->kref);
274
275         *p_res = res;
276         ret = 0;
277
278 out_bad_resource:
279         ttm_base_object_unref(&base);
280
281         return ret;
282 }
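
/*
 * Editorial sketch: typical ioctl-side use of the lookup above. The converter
 * (user_surface_converter) is real; the surrounding function is hypothetical.
 */
#if 0
static int example_use_surface(struct vmw_private *dev_priv,
                               struct ttm_object_file *tfile, uint32_t handle)
{
        struct vmw_resource *res;
        int ret;

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter, &res);
        if (ret)
                return ret;     /* -EINVAL: unknown handle or wrong type */

        /* ... use the refcounted resource ... */

        vmw_resource_unreference(&res); /* drop the lookup reference */
        return 0;
}
#endif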
283
284 /**
285  * vmw_user_resource_noref_lookup_handle - look up a struct vmw_resource from a
286  * TTM user-space handle and perform basic type checks
287  *
288  * @dev_priv:     Pointer to a device private struct
289  * @tfile:        Pointer to a struct ttm_object_file identifying the caller
290  * @handle:       The TTM user-space handle
291  * @converter:    Pointer to an object describing the resource type
292  *
293  * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
294  * associated with an incorrect resource type, ERR_PTR(-EINVAL).
295  */
296 struct vmw_resource *
297 vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
298                                       struct ttm_object_file *tfile,
299                                       uint32_t handle,
300                                       const struct vmw_user_resource_conv
301                                       *converter)
302 {
303         struct ttm_base_object *base;
304
305         base = ttm_base_object_noref_lookup(tfile, handle);
306         if (!base)
307                 return ERR_PTR(-ESRCH);
308
309         if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
310                 ttm_base_object_noref_release();
311                 return ERR_PTR(-EINVAL);
312         }
313
314         return converter->base_obj_to_res(base);
315 }
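
/*
 * Editorial sketch: the noref variant is meant for hot paths (e.g. command
 * submission) that want to avoid refcount atomics. The release call below
 * assumes the vmw_user_resource_noref_release() helper from vmwgfx_drv.h;
 * the surrounding function is hypothetical.
 */
#if 0
static int example_noref_use(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile, uint32_t handle,
                             const struct vmw_user_resource_conv *conv)
{
        struct vmw_resource *res;

        res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile, handle,
                                                    conv);
        if (IS_ERR(res))
                return PTR_ERR(res);    /* -ESRCH or -EINVAL, see above */

        /* ... use res without taking a reference ... */

        vmw_user_resource_noref_release();      /* assumed helper */
        return 0;
}
#endif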
316
317 /*
318  * Helper function that looks up either a surface or a buffer object.
319  *
320  * The pointers pointed to by out_surf and out_buf need to be NULL.
321  */
322 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
323                            struct ttm_object_file *tfile,
324                            uint32_t handle,
325                            struct vmw_surface **out_surf,
326                            struct vmw_buffer_object **out_buf)
327 {
328         struct vmw_resource *res;
329         int ret;
330
331         BUG_ON(*out_surf || *out_buf);
332
333         ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
334                                               user_surface_converter,
335                                               &res);
336         if (!ret) {
337                 *out_surf = vmw_res_to_srf(res);
338                 return 0;
339         }
340
341         *out_surf = NULL;
342         ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
343         return ret;
344 }
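
/*
 * Editorial sketch: on success exactly one of the two out pointers is set,
 * and the caller owns a reference on whichever object was found. The function
 * below is hypothetical.
 */
#if 0
static int example_surf_or_bo(struct vmw_private *dev_priv,
                              struct ttm_object_file *tfile, uint32_t handle)
{
        struct vmw_surface *surf = NULL;        /* must start out NULL */
        struct vmw_buffer_object *buf = NULL;
        int ret;

        ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
        if (ret)
                return ret;

        if (surf) {
                /* ... surface path ... */
                vmw_surface_unreference(&surf);
        } else {
                /* ... buffer-object path ... */
                vmw_bo_unreference(&buf);
        }
        return 0;
}
#endif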
345
346 /**
347  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
348  *
349  * @res:            The resource for which to allocate a backup buffer.
350  * @interruptible:  Whether any sleeps during allocation should be
351  *                  performed while interruptible.
352  */
353 static int vmw_resource_buf_alloc(struct vmw_resource *res,
354                                   bool interruptible)
355 {
356         unsigned long size = PFN_ALIGN(res->backup_size);
357         struct vmw_buffer_object *backup;
358         int ret;
359
360         if (likely(res->backup)) {
361                 BUG_ON(res->backup->base.base.size < size);
362                 return 0;
363         }
364
365         backup = kzalloc(sizeof(*backup), GFP_KERNEL);
366         if (unlikely(!backup))
367                 return -ENOMEM;
368
369         ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
370                               res->func->backup_placement,
371                               interruptible, false,
372                               &vmw_bo_bo_free);
373         if (unlikely(ret != 0))
374                 goto out_no_bo;
375
376         res->backup = backup;
377
378 out_no_bo:
379         return ret;
380 }
381
382 /**
383  * vmw_resource_do_validate - Make a resource up-to-date and visible
384  *                            to the device.
385  *
386  * @res:            The resource to make visible to the device.
387  * @val_buf:        Information about a buffer possibly
388  *                  containing backup data if a bind operation is needed.
389  * @dirtying:       Transfer dirty regions.
390  *
391  * On hardware resource shortage, this function returns -EBUSY and
392  * should be retried once resources have been freed up.
393  */
394 static int vmw_resource_do_validate(struct vmw_resource *res,
395                                     struct ttm_validate_buffer *val_buf,
396                                     bool dirtying)
397 {
398         int ret = 0;
399         const struct vmw_res_func *func = res->func;
400
401         if (unlikely(res->id == -1)) {
402                 ret = func->create(res);
403                 if (unlikely(ret != 0))
404                         return ret;
405         }
406
407         if (func->bind &&
408             ((func->needs_backup && !vmw_resource_mob_attached(res) &&
409               val_buf->bo != NULL) ||
410              (!func->needs_backup && val_buf->bo != NULL))) {
411                 ret = func->bind(res, val_buf);
412                 if (unlikely(ret != 0))
413                         goto out_bind_failed;
414                 if (func->needs_backup)
415                         vmw_resource_mob_attach(res);
416         }
417
418         /*
419          * Handle the case where the backup mob is marked coherent but
420          * the resource isn't.
421          */
422         if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
423             !res->coherent) {
424                 if (res->backup->dirty && !res->dirty) {
425                         ret = func->dirty_alloc(res);
426                         if (ret)
427                                 return ret;
428                 } else if (!res->backup->dirty && res->dirty) {
429                         func->dirty_free(res);
430                 }
431         }
432
433         /*
434          * Transfer the dirty regions to the resource and update
435          * the resource.
436          */
437         if (res->dirty) {
438                 if (dirtying && !res->res_dirty) {
439                         pgoff_t start = res->backup_offset >> PAGE_SHIFT;
440                         pgoff_t end = __KERNEL_DIV_ROUND_UP
441                                 (res->backup_offset + res->backup_size,
442                                  PAGE_SIZE);
443
444                         vmw_bo_dirty_unmap(res->backup, start, end);
445                 }
446
447                 vmw_bo_dirty_transfer_to_res(res);
448                 return func->dirty_sync(res);
449         }
450
451         return 0;
452
453 out_bind_failed:
454         func->destroy(res);
455
456         return ret;
457 }
458
459 /**
460  * vmw_resource_unreserve - Unreserve a resource previously reserved for
461  * command submission.
462  *
463  * @res:               Pointer to the struct vmw_resource to unreserve.
464  * @dirty_set:         Change dirty status of the resource.
465  * @dirty:             When changing dirty status indicates the new status.
466  * @switch_backup:     Backup buffer has been switched.
467  * @new_backup:        Pointer to the new backup buffer if command submission
468  *                     switched backup buffers. May be NULL.
469  * @new_backup_offset: New backup offset if @switch_backup is true.
470  *
471  * Currently unreserving a resource means putting it back on the device's
472  * resource lru list, so that it can be evicted if necessary.
473  */
474 void vmw_resource_unreserve(struct vmw_resource *res,
475                             bool dirty_set,
476                             bool dirty,
477                             bool switch_backup,
478                             struct vmw_buffer_object *new_backup,
479                             unsigned long new_backup_offset)
480 {
481         struct vmw_private *dev_priv = res->dev_priv;
482
483         if (!list_empty(&res->lru_head))
484                 return;
485
486         if (switch_backup && new_backup != res->backup) {
487                 if (res->backup) {
488                         vmw_resource_mob_detach(res);
489                         if (res->coherent)
490                                 vmw_bo_dirty_release(res->backup);
491                         vmw_bo_unreference(&res->backup);
492                 }
493
494                 if (new_backup) {
495                         res->backup = vmw_bo_reference(new_backup);
496
497                         /*
498                          * The validation code should already have added a
499                          * dirty tracker here.
500                          */
501                         WARN_ON(res->coherent && !new_backup->dirty);
502
503                         vmw_resource_mob_attach(res);
504                 } else {
505                         res->backup = NULL;
506                 }
507         } else if (switch_backup && res->coherent) {
508                 vmw_bo_dirty_release(res->backup);
509         }
510
511         if (switch_backup)
512                 res->backup_offset = new_backup_offset;
513
514         if (dirty_set)
515                 res->res_dirty = dirty;
516
517         if (!res->func->may_evict || res->id == -1 || res->pin_count)
518                 return;
519
520         spin_lock(&dev_priv->resource_lock);
521         list_add_tail(&res->lru_head,
522                       &res->dev_priv->res_lru[res->func->res_type]);
523         spin_unlock(&dev_priv->resource_lock);
524 }
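
/*
 * Editorial sketch: the reserve -> validate -> unreserve lifecycle, modeled
 * on vmw_resource_pin() below but without the pinning. Hypothetical wrapper;
 * the backup buffer must be reserved across validation, placement validation
 * of the backup is omitted for brevity, and res->backup may only come into
 * existence once vmw_resource_reserve() has run.
 */
#if 0
static int example_make_resident(struct vmw_resource *res)
{
        struct vmw_buffer_object *vbo;
        int ret;

        ret = vmw_resource_reserve(res, true, false);   /* off LRU + backup */
        if (ret)
                return ret;

        vbo = res->backup;      /* may have been allocated by reserve */
        if (vbo) {
                ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
                if (ret)
                        goto out_unreserve;
        }

        ret = vmw_resource_validate(res, true, false);  /* create/bind */
        if (vbo)
                ttm_bo_unreserve(&vbo->base);

out_unreserve:
        /* Back on the LRU; no dirty-state or backup switch. */
        vmw_resource_unreserve(res, false, false, false, NULL, 0);
        return ret;
}
#endif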
525
526 /**
527  * vmw_resource_check_buffer - Check whether a backup buffer is needed
528  *                             for a resource and in that case, allocate
529  *                             one, reserve and validate it.
530  *
531  * @ticket:         The ww acquire context to use, or NULL if trylocking.
532  * @res:            The resource for which to allocate a backup buffer.
533  * @interruptible:  Whether any sleeps during allocation should be
534  *                  performed while interruptible.
535  * @val_buf:        On successful return contains data about the
536  *                  reserved and validated backup buffer.
537  */
538 static int
539 vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
540                           struct vmw_resource *res,
541                           bool interruptible,
542                           struct ttm_validate_buffer *val_buf)
543 {
544         struct ttm_operation_ctx ctx = { true, false };
545         struct list_head val_list;
546         bool backup_dirty = false;
547         int ret;
548
549         if (unlikely(res->backup == NULL)) {
550                 ret = vmw_resource_buf_alloc(res, interruptible);
551                 if (unlikely(ret != 0))
552                         return ret;
553         }
554
555         INIT_LIST_HEAD(&val_list);
556         ttm_bo_get(&res->backup->base);
557         val_buf->bo = &res->backup->base;
558         val_buf->num_shared = 0;
559         list_add_tail(&val_buf->head, &val_list);
560         ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
561         if (unlikely(ret != 0))
562                 goto out_no_reserve;
563
564         if (res->func->needs_backup && !vmw_resource_mob_attached(res))
565                 return 0;
566
567         backup_dirty = res->backup_dirty;
568         ret = ttm_bo_validate(&res->backup->base,
569                               res->func->backup_placement,
570                               &ctx);
571
572         if (unlikely(ret != 0))
573                 goto out_no_validate;
574
575         return 0;
576
577 out_no_validate:
578         ttm_eu_backoff_reservation(ticket, &val_list);
579 out_no_reserve:
580         ttm_bo_put(val_buf->bo);
581         val_buf->bo = NULL;
582         if (backup_dirty)
583                 vmw_bo_unreference(&res->backup);
584
585         return ret;
586 }
587
588 /*
589  * vmw_resource_reserve - Reserve a resource for command submission
590  *
591  * @res:            The resource to reserve.
592  *
593  * This function takes the resource off the LRU list and makes sure
594  * a backup buffer is present for guest-backed resources. However,
595  * the buffer may not be bound to the resource at this point.
596  *
597  */
598 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
599                          bool no_backup)
600 {
601         struct vmw_private *dev_priv = res->dev_priv;
602         int ret;
603
604         spin_lock(&dev_priv->resource_lock);
605         list_del_init(&res->lru_head);
606         spin_unlock(&dev_priv->resource_lock);
607
608         if (res->func->needs_backup && res->backup == NULL &&
609             !no_backup) {
610                 ret = vmw_resource_buf_alloc(res, interruptible);
611                 if (unlikely(ret != 0)) {
612                         DRM_ERROR("Failed to allocate a backup buffer "
613                                   "of size %lu bytes.\n",
614                                   (unsigned long) res->backup_size);
615                         return ret;
616                 }
617         }
618
619         return 0;
620 }
621
622 /**
623  * vmw_resource_backoff_reservation - Unreserve and unreference a
624  *                                    backup buffer
625  *
626  * @ticket:         The ww acquire ctx used for reservation.
627  * @val_buf:        Backup buffer information.
628  */
629 static void
630 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
631                                  struct ttm_validate_buffer *val_buf)
632 {
633         struct list_head val_list;
634
635         if (likely(val_buf->bo == NULL))
636                 return;
637
638         INIT_LIST_HEAD(&val_list);
639         list_add_tail(&val_buf->head, &val_list);
640         ttm_eu_backoff_reservation(ticket, &val_list);
641         ttm_bo_put(val_buf->bo);
642         val_buf->bo = NULL;
643 }
644
645 /**
646  * vmw_resource_do_evict - Evict a resource, and transfer its data
647  *                         to a backup buffer.
648  *
649  * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
650  * @res:            The resource to evict.
651  * @interruptible:  Whether to wait interruptible.
652  */
653 static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
654                                  struct vmw_resource *res, bool interruptible)
655 {
656         struct ttm_validate_buffer val_buf;
657         const struct vmw_res_func *func = res->func;
658         int ret;
659
660         BUG_ON(!func->may_evict);
661
662         val_buf.bo = NULL;
663         val_buf.num_shared = 0;
664         ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
665         if (unlikely(ret != 0))
666                 return ret;
667
668         if (unlikely(func->unbind != NULL &&
669                      (!func->needs_backup || vmw_resource_mob_attached(res)))) {
670                 ret = func->unbind(res, res->res_dirty, &val_buf);
671                 if (unlikely(ret != 0))
672                         goto out_no_unbind;
673                 vmw_resource_mob_detach(res);
674         }
675         ret = func->destroy(res);
676         res->backup_dirty = true;
677         res->res_dirty = false;
678 out_no_unbind:
679         vmw_resource_backoff_reservation(ticket, &val_buf);
680
681         return ret;
682 }
683
684
685 /**
686  * vmw_resource_validate - Make a resource up-to-date and visible
687  *                         to the device.
688  * @res: The resource to make visible to the device.
689  * @intr: Perform waits interruptible if possible.
690  * @dirtying: Pending GPU operation will dirty the resource
691  *
692  * On successful return, any backup DMA buffer pointed to by @res->backup will
693  * be reserved and validated.
694  * On hardware resource shortage, this function will repeatedly evict
695  * resources of the same type until the validation succeeds.
696  *
697  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
698  * on failure.
699  */
700 int vmw_resource_validate(struct vmw_resource *res, bool intr,
701                           bool dirtying)
702 {
703         int ret;
704         struct vmw_resource *evict_res;
705         struct vmw_private *dev_priv = res->dev_priv;
706         struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
707         struct ttm_validate_buffer val_buf;
708         unsigned err_count = 0;
709
710         if (!res->func->create)
711                 return 0;
712
713         val_buf.bo = NULL;
714         val_buf.num_shared = 0;
715         if (res->backup)
716                 val_buf.bo = &res->backup->base;
717         do {
718                 ret = vmw_resource_do_validate(res, &val_buf, dirtying);
719                 if (likely(ret != -EBUSY))
720                         break;
721
722                 spin_lock(&dev_priv->resource_lock);
723                 if (list_empty(lru_list) || !res->func->may_evict) {
724                         DRM_ERROR("Out of device resources "
725                                   "for %s.\n", res->func->type_name);
726                         ret = -EBUSY;
727                         spin_unlock(&dev_priv->resource_lock);
728                         break;
729                 }
730
731                 evict_res = vmw_resource_reference
732                         (list_first_entry(lru_list, struct vmw_resource,
733                                           lru_head));
734                 list_del_init(&evict_res->lru_head);
735
736                 spin_unlock(&dev_priv->resource_lock);
737
738                 /* Trylock backup buffers with a NULL ticket. */
739                 ret = vmw_resource_do_evict(NULL, evict_res, intr);
740                 if (unlikely(ret != 0)) {
741                         spin_lock(&dev_priv->resource_lock);
742                         list_add_tail(&evict_res->lru_head, lru_list);
743                         spin_unlock(&dev_priv->resource_lock);
744                         if (ret == -ERESTARTSYS ||
745                             ++err_count > VMW_RES_EVICT_ERR_COUNT) {
746                                 vmw_resource_unreference(&evict_res);
747                                 goto out_no_validate;
748                         }
749                 }
750
751                 vmw_resource_unreference(&evict_res);
752         } while (1);
753
754         if (unlikely(ret != 0))
755                 goto out_no_validate;
756         else if (!res->func->needs_backup && res->backup) {
757                 WARN_ON_ONCE(vmw_resource_mob_attached(res));
758                 vmw_bo_unreference(&res->backup);
759         }
760
761         return 0;
762
763 out_no_validate:
764         return ret;
765 }
766
767
768 /**
769  * vmw_resource_unbind_list - Unbind all resources attached to a MOB
770  *
771  * @vbo: Pointer to the current backing MOB.
772  *
773  * Evicts the Guest Backed hardware resource if the backup
774  * buffer is being moved out of MOB memory.
775  * Note that this function will not race with the resource
776  * validation code, since resource validation and eviction
777  * both require the backup buffer to be reserved.
778  */
779 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
780 {
781         struct ttm_validate_buffer val_buf = {
782                 .bo = &vbo->base,
783                 .num_shared = 0
784         };
785
786         dma_resv_assert_held(vbo->base.base.resv);
787         while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
788                 struct rb_node *node = vbo->res_tree.rb_node;
789                 struct vmw_resource *res =
790                         container_of(node, struct vmw_resource, mob_node);
791
792                 if (!WARN_ON_ONCE(!res->func->unbind))
793                         (void) res->func->unbind(res, res->res_dirty, &val_buf);
794
795                 res->backup_dirty = true;
796                 res->res_dirty = false;
797                 vmw_resource_mob_detach(res);
798         }
799
800         (void) ttm_bo_wait(&vbo->base, false, false);
801 }
802
803
804 /**
805  * vmw_query_readback_all - Read back cached query states
806  *
807  * @dx_query_mob: Buffer containing the DX query MOB
808  *
809  * Read back cached states from the device if they exist. This function
810  * assumes that binding_mutex is held.
811  */
812 int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
813 {
814         struct vmw_resource *dx_query_ctx;
815         struct vmw_private *dev_priv;
816         struct {
817                 SVGA3dCmdHeader header;
818                 SVGA3dCmdDXReadbackAllQuery body;
819         } *cmd;
820
821
822         /* No query bound, so do nothing */
823         if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
824                 return 0;
825
826         dx_query_ctx = dx_query_mob->dx_query_ctx;
827         dev_priv     = dx_query_ctx->dev_priv;
828
829         cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
830         if (unlikely(cmd == NULL))
831                 return -ENOMEM;
832
833         cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
834         cmd->header.size = sizeof(cmd->body);
835         cmd->body.cid    = dx_query_ctx->id;
836
837         vmw_cmd_commit(dev_priv, sizeof(*cmd));
838
839         /* Triggers a rebind the next time the affected context is bound */
840         dx_query_mob->dx_query_ctx = NULL;
841
842         return 0;
843 }
844
845
846
847 /**
848  * vmw_query_move_notify - Read back cached query states
849  *
850  * @bo: The TTM buffer object about to move.
851  * @old_mem: The memory region @bo is moving from.
852  * @new_mem: The memory region @bo is moving to.
853  *
854  * Called before the query MOB is swapped out to read back cached query
855  * states from the device.
856  */
857 void vmw_query_move_notify(struct ttm_buffer_object *bo,
858                            struct ttm_resource *old_mem,
859                            struct ttm_resource *new_mem)
860 {
861         struct vmw_buffer_object *dx_query_mob;
862         struct ttm_device *bdev = bo->bdev;
863         struct vmw_private *dev_priv;
864
865
866         dev_priv = container_of(bdev, struct vmw_private, bdev);
867
868         mutex_lock(&dev_priv->binding_mutex);
869
870         dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
871         if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
872                 mutex_unlock(&dev_priv->binding_mutex);
873                 return;
874         }
875
876         /* If BO is being moved from MOB to system memory */
877         if (new_mem->mem_type == TTM_PL_SYSTEM &&
878             old_mem->mem_type == VMW_PL_MOB) {
879                 struct vmw_fence_obj *fence;
880
881                 (void) vmw_query_readback_all(dx_query_mob);
882                 mutex_unlock(&dev_priv->binding_mutex);
883
884                 /* Create a fence and attach the BO to it */
885                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
886                 vmw_bo_fence_single(bo, fence);
887
888                 if (fence != NULL)
889                         vmw_fence_obj_unreference(&fence);
890
891                 (void) ttm_bo_wait(bo, false, false);
892         } else
893                 mutex_unlock(&dev_priv->binding_mutex);
894
895 }
896
897 /**
898  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
899  *
900  * @res:            The resource being queried.
901  */
902 bool vmw_resource_needs_backup(const struct vmw_resource *res)
903 {
904         return res->func->needs_backup;
905 }
906
907 /**
908  * vmw_resource_evict_type - Evict all resources of a specific type
909  *
910  * @dev_priv:       Pointer to a device private struct
911  * @type:           The resource type to evict
912  *
913  * To avoid thrashing or starvation, or as part of the hibernation sequence,
914  * try to evict all evictable resources of a specific type.
915  */
916 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
917                                     enum vmw_res_type type)
918 {
919         struct list_head *lru_list = &dev_priv->res_lru[type];
920         struct vmw_resource *evict_res;
921         unsigned err_count = 0;
922         int ret;
923         struct ww_acquire_ctx ticket;
924
925         do {
926                 spin_lock(&dev_priv->resource_lock);
927
928                 if (list_empty(lru_list))
929                         goto out_unlock;
930
931                 evict_res = vmw_resource_reference(
932                         list_first_entry(lru_list, struct vmw_resource,
933                                          lru_head));
934                 list_del_init(&evict_res->lru_head);
935                 spin_unlock(&dev_priv->resource_lock);
936
937                 /* Wait-lock backup buffers with a ticket. */
938                 ret = vmw_resource_do_evict(&ticket, evict_res, false);
939                 if (unlikely(ret != 0)) {
940                         spin_lock(&dev_priv->resource_lock);
941                         list_add_tail(&evict_res->lru_head, lru_list);
942                         spin_unlock(&dev_priv->resource_lock);
943                         if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
944                                 vmw_resource_unreference(&evict_res);
945                                 return;
946                         }
947                 }
948
949                 vmw_resource_unreference(&evict_res);
950         } while (1);
951
952 out_unlock:
953         spin_unlock(&dev_priv->resource_lock);
954 }
955
956 /**
957  * vmw_resource_evict_all - Evict all evictable resources
958  *
959  * @dev_priv:       Pointer to a device private struct
960  *
961  * To avoid thrashing or starvation, or as part of the hibernation sequence,
962  * evict all evictable resources. In particular this means that all
963  * guest-backed resources that are registered with the device are
964  * evicted and the OTable becomes clean.
965  */
966 void vmw_resource_evict_all(struct vmw_private *dev_priv)
967 {
968         enum vmw_res_type type;
969
970         mutex_lock(&dev_priv->cmdbuf_mutex);
971
972         for (type = 0; type < vmw_res_max; ++type)
973                 vmw_resource_evict_type(dev_priv, type);
974
975         mutex_unlock(&dev_priv->cmdbuf_mutex);
976 }
977
978 /*
979  * vmw_resource_pin - Add a pin reference on a resource
980  *
981  * @res: The resource to add a pin reference on
982  *
983  * This function adds a pin reference, and if needed validates the resource.
984  * Having a pin reference means that the resource can never be evicted, and
985  * its id will never change as long as there is a pin reference.
986  * This function returns 0 on success and a negative error code on failure.
987  */
988 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
989 {
990         struct ttm_operation_ctx ctx = { interruptible, false };
991         struct vmw_private *dev_priv = res->dev_priv;
992         int ret;
993
994         mutex_lock(&dev_priv->cmdbuf_mutex);
995         ret = vmw_resource_reserve(res, interruptible, false);
996         if (ret)
997                 goto out_no_reserve;
998
999         if (res->pin_count == 0) {
1000                 struct vmw_buffer_object *vbo = NULL;
1001
1002                 if (res->backup) {
1003                         vbo = res->backup;
1004
1005                         ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1006                         if (ret)
1007                                 goto out_no_validate;
1008                         if (!vbo->base.pin_count) {
1009                                 ret = ttm_bo_validate
1010                                         (&vbo->base,
1011                                          res->func->backup_placement,
1012                                          &ctx);
1013                                 if (ret) {
1014                                         ttm_bo_unreserve(&vbo->base);
1015                                         goto out_no_validate;
1016                                 }
1017                         }
1018
1019                         /* Do we really need to pin the MOB as well? */
1020                         vmw_bo_pin_reserved(vbo, true);
1021                 }
1022                 ret = vmw_resource_validate(res, interruptible, true);
1023                 if (vbo)
1024                         ttm_bo_unreserve(&vbo->base);
1025                 if (ret)
1026                         goto out_no_validate;
1027         }
1028         res->pin_count++;
1029
1030 out_no_validate:
1031         vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1032 out_no_reserve:
1033         mutex_unlock(&dev_priv->cmdbuf_mutex);
1034
1035         return ret;
1036 }
1037
1038 /**
1039  * vmw_resource_unpin - Remove a pin reference from a resource
1040  *
1041  * @res: The resource to remove a pin reference from
1042  *
1043  * Having a pin reference means that the resource can never be evicted, and
1044  * its id will never change as long as there is a pin reference.
1045  */
1046 void vmw_resource_unpin(struct vmw_resource *res)
1047 {
1048         struct vmw_private *dev_priv = res->dev_priv;
1049         int ret;
1050
1051         mutex_lock(&dev_priv->cmdbuf_mutex);
1052
1053         ret = vmw_resource_reserve(res, false, true);
1054         WARN_ON(ret);
1055
1056         WARN_ON(res->pin_count == 0);
1057         if (--res->pin_count == 0 && res->backup) {
1058                 struct vmw_buffer_object *vbo = res->backup;
1059
1060                 (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1061                 vmw_bo_pin_reserved(vbo, false);
1062                 ttm_bo_unreserve(&vbo->base);
1063         }
1064
1065         vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1066
1067         mutex_unlock(&dev_priv->cmdbuf_mutex);
1068 }
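
/*
 * Editorial sketch: pin references come in pairs and nest; while the count is
 * nonzero the resource keeps its device id and cannot be evicted. The caller
 * below is hypothetical.
 */
#if 0
static int example_with_pinned(struct vmw_resource *res)
{
        int ret;

        ret = vmw_resource_pin(res, true);
        if (ret)
                return ret;

        /* ... res->id is stable and the resource stays resident ... */

        vmw_resource_unpin(res);
        return 0;
}
#endif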
1069
1070 /**
1071  * vmw_res_type - Return the resource type
1072  *
1073  * @res: Pointer to the resource
1074  */
1075 enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1076 {
1077         return res->func->res_type;
1078 }
1079
1080 /**
1081  * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1082  * sequential range of touched backing store memory.
1083  * @res: The resource.
1084  * @start: The first page touched.
1085  * @end: The last page touched + 1.
1086  */
1087 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1088                                pgoff_t end)
1089 {
1090         if (res->dirty)
1091                 res->func->dirty_range_add(res, start << PAGE_SHIFT,
1092                                            end << PAGE_SHIFT);
1093 }
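
/*
 * Editorial sketch: marking a byte range dirty through the page-granular API
 * above. @end is exclusive ("last page touched + 1"), hence the round-up.
 * Hypothetical caller.
 */
#if 0
static void example_mark_dirty(struct vmw_resource *res,
                               unsigned long byte_start, unsigned long bytes)
{
        pgoff_t first = byte_start >> PAGE_SHIFT;       /* round down */
        pgoff_t end = PFN_UP(byte_start + bytes);       /* round up, exclusive */

        vmw_resource_dirty_update(res, first, end);
}
#endif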
1094
1095 /**
1096  * vmw_resources_clean - Clean resources intersecting a mob range
1097  * @vbo: The mob buffer object
1098  * @start: The mob page offset starting the range
1099  * @end: The mob page offset ending the range
1100  * @num_prefault: Returns the number of pages, including the first, that
1101  * have been cleaned and are OK to prefault
1102  */
1103 int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
1104                         pgoff_t end, pgoff_t *num_prefault)
1105 {
1106         struct rb_node *cur = vbo->res_tree.rb_node;
1107         struct vmw_resource *found = NULL;
1108         unsigned long res_start = start << PAGE_SHIFT;
1109         unsigned long res_end = end << PAGE_SHIFT;
1110         unsigned long last_cleaned = 0;
1111
1112         /*
1113          * Find the resource with lowest backup_offset that intersects the
1114          * range.
1115          */
1116         while (cur) {
1117                 struct vmw_resource *cur_res =
1118                         container_of(cur, struct vmw_resource, mob_node);
1119
1120                 if (cur_res->backup_offset >= res_end) {
1121                         cur = cur->rb_left;
1122                 } else if (cur_res->backup_offset + cur_res->backup_size <=
1123                            res_start) {
1124                         cur = cur->rb_right;
1125                 } else {
1126                         found = cur_res;
1127                         cur = cur->rb_left;
1128                         /* Continue to look for resources with lower offsets */
1129                 }
1130         }
1131
1132         /*
1133          * In order of increasing backup_offset, clean dirty resources
1134          * intersecting the range.
1135          */
1136         while (found) {
1137                 if (found->res_dirty) {
1138                         int ret;
1139
1140                         if (!found->func->clean)
1141                                 return -EINVAL;
1142
1143                         ret = found->func->clean(found);
1144                         if (ret)
1145                                 return ret;
1146
1147                         found->res_dirty = false;
1148                 }
1149                 last_cleaned = found->backup_offset + found->backup_size;
1150                 cur = rb_next(&found->mob_node);
1151                 if (!cur)
1152                         break;
1153
1154                 found = container_of(cur, struct vmw_resource, mob_node);
1155                 if (found->backup_offset >= res_end)
1156                         break;
1157         }
1158
1159         /*
1160          * Set number of pages allowed prefaulting and fence the buffer object
1161          */
1162         *num_prefault = 1;
1163         if (last_cleaned > res_start) {
1164                 struct ttm_buffer_object *bo = &vbo->base;
1165
1166                 *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1167                                                       PAGE_SIZE);
1168                 vmw_bo_fence_single(bo, NULL);
1169                 if (bo->moving)
1170                         dma_fence_put(bo->moving);
1171                 bo->moving = dma_fence_get
1172                         (dma_resv_excl_fence(bo->base.resv));
1173         }
1174
1175         return 0;
1176 }