/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <[email protected]>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512

struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};

struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
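
/*
 * Illustrative note (not in the original sources): the 16-bit devid packs
 * the PCI bus number into bits 15:8 and devfn (slot << 3 | function) into
 * bits 7:0, so device 01:02.3 yields devid 0x0113.
 */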

static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
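
/*
 * Illustrative example (not in the original sources): each table level
 * holds 512 pointers (one zeroed page) and resolves 9 bits of the PASID.
 * With pasid_levels == 1, pasid 0x12345 walks root[(0x12345 >> 9) & 0x1ff]
 * == root[0x91] down to the level-0 entry at index 0x12345 & 0x1ff == 0x145.
 */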

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid, no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
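
/*
 * Note on the single-page test above (explanatory, not in the original
 * sources): start ^ (end - 1) clears the bits the two addresses have in
 * common, so the result is below PAGE_SIZE exactly when start and end - 1
 * agree on everything above the page offset, i.e. the range lies within a
 * single page. A one-page invalidation then takes the cheaper per-page
 * flush instead of flushing the whole per-PASID TLB.
 */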

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_range	= mn_invalidate_range,
};
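
/*
 * Explanatory note (not in the original sources): these mmu_notifier hooks
 * keep the IOMMU's view of the address space coherent with the CPU page
 * tables - .clear_flush_young and .invalidate_range flush the IOTLB for
 * the affected range, and .release tears the binding down when the mm goes
 * away, e.g. on process exit.
 */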

static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags);
out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid;
	int ret;
	struct iommu_dev_data *dev_data;
	struct pci_dev *pdev = NULL;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	devid = iommu_fault->device_id;
	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;
	dev_data = get_dev_data(&pdev->dev);

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
	ret = NOTIFY_DONE;
	if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
		&& dev_data->defer_attach) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}

static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;
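
	/*
	 * Illustrative numbers (not in the original sources): each table
	 * level resolves 9 bits of the PASID, so pasids <= 512 fits in the
	 * root page alone (pasid_levels == 0), while 2^16 PASIDs need one
	 * extra table level (pasid_levels == 1).
	 */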

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
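
/*
 * Illustrative usage sketch (not taken from an in-tree driver): a client
 * driver that wants shared virtual addressing for its device would
 * typically call, in order:
 *
 *	ret = amd_iommu_init_device(pdev, pasids);
 *	ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *	...	(device issues PRI/ATS traffic tagged with the PASID)
 *	amd_iommu_unbind_pasid(pdev, pasid);
 *	amd_iommu_free_device(pdev);
 *
 * The optional callbacks below let such a driver handle unrecoverable PPR
 * faults and context invalidations itself.
 */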

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <[email protected]>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);