// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <[email protected]>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"

static const struct iommu_ops s390_iommu_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

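/*
 * A paging domain wraps the generic iommu_domain around the root of an
 * s390 DMA translation table. Readers walk @devices under RCU (see the
 * IOTLB flush paths below) while writers serialize on @list_lock;
 * @dma_table is the region-third table the hardware walks, and @rcu
 * defers freeing the domain until concurrent readers have finished.
 */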
struct s390_domain {
        struct iommu_domain     domain;
        struct list_head        devices;
        struct zpci_iommu_ctrs  ctrs;
        unsigned long           *dma_table;
        spinlock_t              list_lock;
        struct rcu_head         rcu;
};

static struct iommu_domain blocking_domain;

static inline unsigned int calc_rtx(dma_addr_t ptr)
{
        return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
        return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
        return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}
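
/*
 * The translation tables form a three-level radix tree: a region-third
 * table indexed by RTX, segment tables indexed by SX and page tables
 * indexed by PX. Assuming 4K pages and the shift values from
 * <asm/pci_dma.h> (segment index at bit 20, region index at bit 31),
 * an illustrative IOVA of 0x8012345000 decodes to RTX 0x100, SX 0x123
 * and PX 0x45.
 */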

static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
        *entry &= ZPCI_PTE_FLAG_MASK;
        *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
        *entry &= ZPCI_RTE_FLAG_MASK;
        *entry |= (sto & ZPCI_RTE_ADDR_MASK);
        *entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
        *entry &= ZPCI_STE_FLAG_MASK;
        *entry |= (pto & ZPCI_STE_ADDR_MASK);
        *entry |= ZPCI_TABLE_TYPE_SX;
}

static inline void validate_rt_entry(unsigned long *entry)
{
        *entry &= ~ZPCI_TABLE_VALID_MASK;
        *entry &= ~ZPCI_TABLE_OFFSET_MASK;
        *entry |= ZPCI_TABLE_VALID;
        *entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
        *entry &= ~ZPCI_TABLE_VALID_MASK;
        *entry |= ZPCI_TABLE_VALID;
}

static inline void invalidate_pt_entry(unsigned long *entry)
{
        WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
        *entry &= ~ZPCI_PTE_VALID_MASK;
        *entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
        WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
        *entry &= ~ZPCI_PTE_VALID_MASK;
        *entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
        *entry &= ~ZPCI_TABLE_PROT_MASK;
        *entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
        *entry &= ~ZPCI_TABLE_PROT_MASK;
        *entry |= ZPCI_TABLE_UNPROTECTED;
}

static inline int reg_entry_isvalid(unsigned long entry)
{
        return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
        return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
        if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
                return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
        else
                return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
        if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
                return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
        else
                return NULL;
}
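
/*
 * Table entries carry the next level's address in their high bits and
 * the type, length, validity and protection state in their low bits,
 * which is why the setters above mask with ZPCI_*_FLAG_MASK or
 * ZPCI_*_ADDR_MASK before OR-ing in the new state.
 */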

static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                                   ZPCI_TABLE_SIZE,
                                                   ZPCI_TABLE_ALIGN,
                                                   0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                                 ZPCI_PT_SIZE,
                                                 ZPCI_PT_ALIGN,
                                                 0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}

static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, gfp);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

static void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(unsigned long *table)
{
        int rtx;

        if (!table)
                return;

        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, gfp);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID;
        return table;
}

static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
        unsigned long old_rte, rte;
        unsigned long *sto;

        rte = READ_ONCE(*rtep);
        if (reg_entry_isvalid(rte)) {
                sto = get_rt_sto(rte);
        } else {
                sto = dma_alloc_cpu_table(gfp);
                if (!sto)
                        return NULL;

                set_rt_sto(&rte, virt_to_phys(sto));
                validate_rt_entry(&rte);
                entry_clr_protected(&rte);

                old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
                if (old_rte != ZPCI_TABLE_INVALID) {
                        /* Someone else was faster, use theirs */
                        dma_free_cpu_table(sto);
                        sto = get_rt_sto(old_rte);
                }
        }
        return sto;
}
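
/*
 * Table population is lockless: a new segment table is prepared in a
 * local entry, then cmpxchg()ed against ZPCI_TABLE_INVALID. If a
 * concurrent mapper won the race, the freshly allocated table is freed
 * again and the winner's table is used, so an entry is never visible
 * half-initialized. dma_get_page_table_origin() below follows the same
 * pattern for page tables.
 */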

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
        unsigned long old_ste, ste;
        unsigned long *pto;

        ste = READ_ONCE(*step);
        if (reg_entry_isvalid(ste)) {
                pto = get_st_pto(ste);
        } else {
                pto = dma_alloc_page_table(gfp);
                if (!pto)
                        return NULL;
                set_st_pto(&ste, virt_to_phys(pto));
                validate_st_entry(&ste);
                entry_clr_protected(&ste);

                old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
                if (old_ste != ZPCI_TABLE_INVALID) {
                        /* Someone else was faster, use theirs */
                        dma_free_page_table(pto);
                        pto = get_st_pto(old_ste);
                }
        }
        return pto;
}

static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx], gfp);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx], gfp);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}
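
/*
 * The walk allocates missing intermediate tables on demand, so a
 * successful return always points at a page-table slot (whose entry may
 * still be invalid). The unmap path calls this with GFP_ATOMIC, though
 * for an already-mapped range every table should be found in place.
 */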

static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
        unsigned long pte;

        pte = READ_ONCE(*ptep);
        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(&pte);
        } else {
                set_pt_pfaa(&pte, page_addr);
                validate_pt_entry(&pte);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(&pte);
        else
                entry_clr_protected(&pte);

        xchg(ptep, pte);
}
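
/*
 * The updated entry is built in a local copy and published with xchg(),
 * so the device, which may translate through these tables concurrently,
 * only ever observes either the old or the complete new entry.
 */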

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
        struct zpci_dev *zdev = to_zpci_dev(dev);

        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                return true;
        case IOMMU_CAP_DEFERRED_FLUSH:
                return zdev->pft != PCI_FUNC_TYPE_ISM;
        default:
                return false;
        }
}

static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
        struct s390_domain *s390_domain;

        s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
        if (!s390_domain)
                return NULL;

        s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
        if (!s390_domain->dma_table) {
                kfree(s390_domain);
                return NULL;
        }
        s390_domain->domain.geometry.force_aperture = true;
        s390_domain->domain.geometry.aperture_start = 0;
        s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;

        spin_lock_init(&s390_domain->list_lock);
        INIT_LIST_HEAD_RCU(&s390_domain->devices);

        return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
        struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

        dma_cleanup_tables(s390_domain->dma_table);
        kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);

        rcu_read_lock();
        WARN_ON(!list_empty(&s390_domain->devices));
        rcu_read_unlock();

        call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}
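
/*
 * Freeing is deferred via call_rcu() because the IOTLB flush paths walk
 * the domain's device list under rcu_read_lock(); both the domain and
 * its translation tables must survive until such readers are done.
 */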

static void zdev_s390_domain_update(struct zpci_dev *zdev,
                                    struct iommu_domain *domain)
{
        unsigned long flags;

        spin_lock_irqsave(&zdev->dom_lock, flags);
        zdev->s390_domain = domain;
        spin_unlock_irqrestore(&zdev->dom_lock, flags);
}

static int blocking_domain_attach_device(struct iommu_domain *domain,
                                         struct device *dev)
{
        struct zpci_dev *zdev = to_zpci_dev(dev);
        struct s390_domain *s390_domain;
        unsigned long flags;

        if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
                return 0;

        s390_domain = to_s390_domain(zdev->s390_domain);
        spin_lock_irqsave(&s390_domain->list_lock, flags);
        list_del_rcu(&zdev->iommu_list);
        spin_unlock_irqrestore(&s390_domain->list_lock, flags);

        zpci_unregister_ioat(zdev, 0);
        zdev->dma_table = NULL;
        zdev_s390_domain_update(zdev, domain);

        return 0;
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        struct zpci_dev *zdev = to_zpci_dev(dev);
        unsigned long flags;
        u8 status;
        int cc;

        if (!zdev)
                return -ENODEV;

        if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
                domain->geometry.aperture_end < zdev->start_dma))
                return -EINVAL;

        blocking_domain_attach_device(&blocking_domain, dev);

        /* If we fail now, DMA remains blocked via the blocking domain */
        cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                virt_to_phys(s390_domain->dma_table), &status);
        if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
                return -EIO;
        zdev->dma_table = s390_domain->dma_table;
        zdev_s390_domain_update(zdev, domain);

        spin_lock_irqsave(&s390_domain->list_lock, flags);
        list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
        spin_unlock_irqrestore(&s390_domain->list_lock, flags);

        return 0;
}
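
/*
 * Attach first detaches the device into the blocking domain, tearing
 * down any previous translation, and only then registers the new
 * dma_table via zpci_register_ioat(). A failure after that point leaves
 * DMA blocked instead of silently falling back to the old domain.
 */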

static void s390_iommu_get_resv_regions(struct device *dev,
                                        struct list_head *list)
{
        struct zpci_dev *zdev = to_zpci_dev(dev);
        struct iommu_resv_region *region;

        if (zdev->start_dma) {
                region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
                                                 IOMMU_RESV_RESERVED, GFP_KERNEL);
                if (!region)
                        return;
                list_add_tail(&region->list, list);
        }

        if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
                region = iommu_alloc_resv_region(zdev->end_dma + 1,
                                                 ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
                                                 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
                if (!region)
                        return;
                list_add_tail(&region->list, list);
        }
}
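
/*
 * IOVAs below zdev->start_dma or above zdev->end_dma cannot be
 * translated by this function, so they are reported as reserved regions
 * and the IOVA allocator keeps clear of them.
 */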

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
        struct zpci_dev *zdev;

        if (!dev_is_pci(dev))
                return ERR_PTR(-ENODEV);

        zdev = to_zpci_dev(dev);

        if (zdev->start_dma > zdev->end_dma ||
            zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
                return ERR_PTR(-EINVAL);

        if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
                zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;

        if (zdev->tlb_refresh)
                dev->iommu->shadow_on_flush = 1;

        /* Start with DMA blocked */
        spin_lock_init(&zdev->dom_lock);
        zdev_s390_domain_update(zdev, &blocking_domain);

        return &zdev->iommu_dev;
}

static int zpci_refresh_all(struct zpci_dev *zdev)
{
        return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
                                  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        struct zpci_dev *zdev;

        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
                atomic64_inc(&s390_domain->ctrs.global_rpcits);
                zpci_refresh_all(zdev);
        }
        rcu_read_unlock();
}

static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        size_t size = gather->end - gather->start + 1;
        struct zpci_dev *zdev;

        /* If nothing was ever added to the gather there is nothing to flush */
        if (!gather->end)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
                atomic64_inc(&s390_domain->ctrs.sync_rpcits);
                zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
                                   size);
        }
        rcu_read_unlock();
}

static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                     unsigned long iova, size_t size)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        struct zpci_dev *zdev;
        int ret = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
                if (!zdev->tlb_refresh)
                        continue;
                atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
                ret = zpci_refresh_trans((u64)zdev->fh << 32,
                                         iova, size);
                /*
                 * let the hypervisor discover invalidated entries
                 * allowing it to free IOVAs and unpin pages
                 */
                if (ret == -ENOMEM) {
                        ret = zpci_refresh_all(zdev);
                        if (ret)
                                break;
                }
        }
        rcu_read_unlock();

        return ret;
}
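
/*
 * All three flush paths end up in the RPCIT instruction via
 * zpci_refresh_trans(): flush_iotlb_all refreshes a device's entire
 * DMA range, iotlb_sync refreshes the range gathered during unmap, and
 * iotlb_sync_map only matters for devices with tlb_refresh set, whose
 * shadowed translations must also be refreshed on map (see
 * shadow_on_flush in s390_iommu_probe_device()). The -ENOMEM fallback
 * above refreshes the full range so a paging hypervisor can reclaim
 * resources for invalidated entries.
 */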

static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
                                     phys_addr_t pa, dma_addr_t dma_addr,
                                     unsigned long nr_pages, int flags,
                                     gfp_t gfp)
{
        phys_addr_t page_addr = pa & PAGE_MASK;
        unsigned long *entry;
        unsigned long i;
        int rc;

        for (i = 0; i < nr_pages; i++) {
                entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
                                           gfp);
                if (unlikely(!entry)) {
                        rc = -ENOMEM;
                        goto undo_cpu_trans;
                }
                dma_update_cpu_trans(entry, page_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

        return 0;

undo_cpu_trans:
        while (i-- > 0) {
                dma_addr -= PAGE_SIZE;
                entry = dma_walk_cpu_trans(s390_domain->dma_table,
                                           dma_addr, gfp);
                if (!entry)
                        break;
                dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
        }

        return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
                                       dma_addr_t dma_addr, unsigned long nr_pages)
{
        unsigned long *entry;
        unsigned long i;
        int rc = 0;

        for (i = 0; i < nr_pages; i++) {
                entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
                                           GFP_ATOMIC);
                if (unlikely(!entry)) {
                        rc = -EINVAL;
                        break;
                }
                dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
                dma_addr += PAGE_SIZE;
        }

        return rc;
}

static int s390_iommu_map_pages(struct iommu_domain *domain,
                                unsigned long iova, phys_addr_t paddr,
                                size_t pgsize, size_t pgcount,
                                int prot, gfp_t gfp, size_t *mapped)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        size_t size = pgcount << __ffs(pgsize);
        int flags = ZPCI_PTE_VALID, rc = 0;

        if (pgsize != SZ_4K)
                return -EINVAL;

        if (iova < s390_domain->domain.geometry.aperture_start ||
            (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
                return -EINVAL;

        if (!IS_ALIGNED(iova | paddr, pgsize))
                return -EINVAL;

        if (!(prot & IOMMU_WRITE))
                flags |= ZPCI_TABLE_PROTECTED;

        rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
                                     pgcount, flags, gfp);
        if (!rc) {
                *mapped = size;
                atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
        }

        return rc;
}
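
/*
 * Only 4K mappings are accepted here, matching the SZ_4K pgsize_bitmap
 * advertised in s390_iommu_ops below; the IOMMU core is expected to
 * split larger requests into pgcount 4K pages, making size always
 * pgcount << 12.
 */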

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        unsigned long *rto, *sto, *pto;
        unsigned long ste, pte, rte;
        unsigned int rtx, sx, px;
        phys_addr_t phys = 0;

        if (iova < domain->geometry.aperture_start ||
            iova > domain->geometry.aperture_end)
                return 0;

        rtx = calc_rtx(iova);
        sx = calc_sx(iova);
        px = calc_px(iova);
        rto = s390_domain->dma_table;

        rte = READ_ONCE(rto[rtx]);
        if (reg_entry_isvalid(rte)) {
                sto = get_rt_sto(rte);
                ste = READ_ONCE(sto[sx]);
                if (reg_entry_isvalid(ste)) {
                        pto = get_st_pto(ste);
                        pte = READ_ONCE(pto[px]);
                        if (pt_entry_isvalid(pte))
                                phys = pte & ZPCI_PTE_ADDR_MASK;
                }
        }

        return phys;
}

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
                                     unsigned long iova,
                                     size_t pgsize, size_t pgcount,
                                     struct iommu_iotlb_gather *gather)
{
        struct s390_domain *s390_domain = to_s390_domain(domain);
        size_t size = pgcount << __ffs(pgsize);
        int rc;

        if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
            (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
                return 0;

        rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
        if (rc)
                return 0;

        iommu_iotlb_gather_add_range(gather, iova, size);
        atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

        return size;
}

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
        struct s390_domain *s390_domain;

        lockdep_assert_held(&zdev->dom_lock);

        if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
                return NULL;

        s390_domain = to_s390_domain(zdev->s390_domain);
        return &s390_domain->ctrs;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
        u64 aperture_size;
        int rc = 0;

        rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
                                    "s390-iommu.%08x", zdev->fid);
        if (rc)
                goto out_err;

        rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
        if (rc)
                goto out_sysfs;

        zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
        aperture_size = min3(s390_iommu_aperture,
                             ZPCI_TABLE_SIZE_RT - zdev->start_dma,
                             zdev->end_dma - zdev->start_dma + 1);
        zdev->end_dma = zdev->start_dma + aperture_size - 1;

        return 0;

out_sysfs:
        iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
        return rc;
}
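
/*
 * An illustrative clamp, assuming 8G of memory, the default aperture
 * factor of 1, ZPCI_TABLE_SIZE_RT = 4T and a hypothetical device
 * advertising start_dma = 4G and end_dma = ULONG_MAX:
 * aperture_size = min3(8G, 4T - 4G, ULONG_MAX - 4G + 1) = 8G, so
 * end_dma is lowered to 12G - 1.
 */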

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
        iommu_device_unregister(&zdev->iommu_dev);
        iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static int __init s390_iommu_setup(char *str)
{
        if (!strcmp(str, "strict")) {
                pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
                iommu_set_dma_strict();
        }
        return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
        if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
                s390_iommu_aperture_factor = 1;
        return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
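
/*
 * Booting with s390_iommu_aperture=4, for example, sizes the aperture
 * at four times high_memory, while s390_iommu_aperture=0 removes the
 * limit entirely (see s390_iommu_init() below).
 */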

static int __init s390_iommu_init(void)
{
        int rc;

        iommu_dma_forcedac = true;
        s390_iommu_aperture = (u64)virt_to_phys(high_memory);
        if (!s390_iommu_aperture_factor)
                s390_iommu_aperture = ULONG_MAX;
        else
                s390_iommu_aperture *= s390_iommu_aperture_factor;

        rc = dma_alloc_cpu_table_caches();
        if (rc)
                return rc;

        return 0;
}
subsys_initcall(s390_iommu_init);

static struct iommu_domain blocking_domain = {
        .type = IOMMU_DOMAIN_BLOCKED,
        .ops = &(const struct iommu_domain_ops) {
                .attach_dev     = blocking_domain_attach_device,
        }
};

static const struct iommu_ops s390_iommu_ops = {
        .blocked_domain         = &blocking_domain,
        .release_domain         = &blocking_domain,
        .capable = s390_iommu_capable,
        .domain_alloc_paging = s390_domain_alloc_paging,
        .probe_device = s390_iommu_probe_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = SZ_4K,
        .get_resv_regions = s390_iommu_get_resv_regions,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = s390_iommu_attach_device,
                .map_pages      = s390_iommu_map_pages,
                .unmap_pages    = s390_iommu_unmap_pages,
                .flush_iotlb_all = s390_iommu_flush_iotlb_all,
                .iotlb_sync      = s390_iommu_iotlb_sync,
                .iotlb_sync_map  = s390_iommu_iotlb_sync_map,
                .iova_to_phys   = s390_iommu_iova_to_phys,
                .free           = s390_domain_free,
        }
};