/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root_cached	objects;
};
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
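/*
 * Bookkeeping overview (a summary of this file, not authoritative driver
 * documentation): one amdgpu_mn context exists per (device, mm_struct)
 * pair, is found through adev->mn_hash and is shared by all userptr BOs
 * of that process. rmn->objects is an interval tree keyed by userptr
 * address range; each amdgpu_mn_node collects the BOs whose ranges
 * overlap, so a single tree lookup finds every affected buffer. Lock
 * order throughout the file: adev->mn_lock before rmn->lock.
 */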
/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &rmn->objects.rb_root, it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}
/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier; the notifier
 * cannot be unregistered from inside its own callback.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}
/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * Block for all BOs of the node and unmap them by moving them back
 * into the system domain.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {
		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
			continue;
		}
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (r)
			DRM_ERROR("(%ld) failed to validate user bo\n", r);
		amdgpu_bo_unreserve(bo);
	}
}
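/*
 * Note on the eviction above (an interpretation of the code, not official
 * documentation): validating the BO into AMDGPU_GEM_DOMAIN_CPU tears down
 * its GTT binding, so the GPU stops referencing the userptr pages before
 * the kernel modifies or reclaims them.
 */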
/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * Block for all BOs between start and end to be idle and
 * unmap them by moving them back into the system domain.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);
	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		/* advance before invalidating; the node stays in the tree */
		it = interval_tree_iter_next(it, start, end);
		amdgpu_mn_invalidate_node(node, start, end);
	}
	mutex_unlock(&rmn->lock);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
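/*
 * Only .release and .invalidate_range_start are implemented. A sketch of
 * the reasoning, inferred from this file rather than stated by it: moving
 * the BOs back to the system domain is sufficient on its own, because the
 * next command submission re-validates the userptr pages, so nothing needs
 * to happen at invalidate_range_end time.
 */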
/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm, or returns the existing
 * one. On failure an ERR_PTR is returned.
 */
198 static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
200 struct mm_struct *mm = current->mm;
201 struct amdgpu_mn *rmn;
204 mutex_lock(&adev->mn_lock);
205 if (down_write_killable(&mm->mmap_sem)) {
206 mutex_unlock(&adev->mn_lock);
207 return ERR_PTR(-EINTR);
210 hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
214 rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
216 rmn = ERR_PTR(-ENOMEM);
222 rmn->mn.ops = &amdgpu_mn_ops;
223 mutex_init(&rmn->lock);
224 rmn->objects = RB_ROOT_CACHED;
226 r = __mmu_notifier_register(&rmn->mn, mm);
230 hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
233 up_write(&mm->mmap_sem);
234 mutex_unlock(&adev->mn_lock);
239 up_write(&mm->mmap_sem);
240 mutex_unlock(&adev->mn_lock);
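/*
 * Locking note for amdgpu_mn_get() (an assumption about the mm API of this
 * kernel generation): mmap_sem is taken for write because
 * __mmu_notifier_register(), unlike mmu_notifier_register(), expects the
 * caller to already hold it; adev->mn_lock makes the mn_hash lookup and
 * insertion atomic against concurrent callers.
 */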
/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);
	mutex_lock(&rmn->lock);

	/* merge all intervals overlapping [addr, end] into a single node */
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);	/* NULL on the first iteration */
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);
	mutex_unlock(&rmn->lock);

	return 0;
}
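/*
 * Illustrative call flow (a sketch; the ioctl-side names are from the GEM
 * userptr path and are not verified against this exact tree):
 *
 *	bo = amdgpu_gem_object_create(...);         // hypothetical context
 *	amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	r = amdgpu_mn_register(bo, args->addr);     // monitor the range
 *	...
 *	amdgpu_mn_unregister(bo);                   // on BO destruction
 */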
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del_init(&bo->mn_list);

	/* free the node if this was its last BO */
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}