// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"
/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}
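
/*
 * Illustrative pairing of the two virtual command helpers above. This is
 * a hypothetical sketch, not a caller in this file; a vIOMMU back end
 * emulating PASID allocation for a guest might do:
 *
 *	u32 pasid;
 *
 *	if (vcmd_alloc_pasid(iommu, &pasid))
 *		return -ENOSPC;
 *	...hand @pasid to the guest, and on teardown...
 *	vcmd_free_pasid(iommu, pasid);
 */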
/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	return 0;
}
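
/*
 * Sizing example, worked from the math above (assuming the 6-bit
 * PASID_PDE_SHIFT and 4 KiB pages used here): a full 20-bit PASID space
 * needs size = (1 << 20) >> 3 = 128 KiB of directory, i.e. 2^14
 * directory entries of 8 bytes each, so order = get_order(128K) = 5 and
 * max_pasid is stored back as 1 << (5 + 12 + 3) = 1 << 20.
 */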
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}
struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}
static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}
static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}
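
/*
 * Index math example for the lookup above (assuming the 6-bit split:
 * PASID_PDE_SHIFT = 6, PASID_PTE_MASK = 0x3f): pasid 0x1234 selects
 * directory slot 0x1234 >> 6 = 0x48, then entry 0x1234 & 0x3f = 0x34
 * inside the 4 KiB leaf table (64 entries of 64 bytes) that slot
 * points to.
 */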
/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
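
/*
 * Worked example of the read-modify-write helper above: programming
 * domain ID 42 calls pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), 42),
 * which clears the old 16-bit DID field and ORs in the new value while
 * leaving the other 48 bits of that qword untouched.
 */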
/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}
/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}
/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}
/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}
/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}
/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}
/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}
/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}
/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID (a DMA request
	 * without PASID), so a device TLB flush without PASID should be
	 * used. For a non-zero PASID under SVA usage, the device could
	 * do DMA with multiple PASIDs, so it is more efficient to flush
	 * only the device TLB entries specific to that PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
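
/*
 * A note on the size order used above: with 4 KiB pages,
 * 64 - VTD_PAGE_SHIFT = 52, so the flush covers 2^52 pages starting at
 * address 0, i.e. the device's entire 64-bit DMA address space.
 */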
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
/*
 * This function flushes cache for a newly setup pasid table entry.
 * Caller of it should not modify the in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}
/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
#ifdef CONFIG_X86
		unsigned long cr0 = read_cr0();

		/* CR0.WP is normally set but just to be sure */
		if (unlikely(!(cr0 & X86_CR0_WP))) {
			pr_err("No CPU write protect!\n");
			return -EINVAL;
		}
#endif
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		pasid_set_sre(pte);
		pasid_set_wpe(pte);
	}

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
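
/*
 * Illustrative caller sketch (hypothetical, no such caller lives in this
 * file): an SVA bind path sharing a process address space would pass the
 * mm's pgd and select the 5-level format when the CPU uses it:
 *
 *	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
 *			did, cpu_feature_enabled(X86_FEATURE_LA57) ?
 *			PASID_FLAG_FL5LP : 0);
 */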
/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}
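
/*
 * Worked example for the walk above, assuming the usual VT-d AW
 * encoding (agaw 1 = 39-bit/3-level, agaw 2 = 48-bit/4-level): a domain
 * built with agaw = 2 but attached through an IOMMU that only supports
 * agaw = 1 takes one loop iteration, descending past the top-level
 * table so the PASID entry points at a 3-level table this hardware can
 * actually walk.
 */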
/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	if (ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
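
/*
 * Quick reference for the three translation types set up in this file
 * (a summary of the helpers above, not of the full VT-d spec):
 *
 *	PGTT_FL_ONLY - input addresses go through the first-level table
 *		       (flptr) only, e.g. shared CPU page tables for SVA.
 *	PGTT_SL_ONLY - input addresses go through the second-level table
 *		       (slptr) only, e.g. DMA remapping for a domain.
 *	PGTT_PT      - addresses pass through untranslated; only the DID,
 *		       address width and control bits are programmed.
 */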
/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec (section 3.4, table 23) gives the following guidance
	 * for cache invalidation after changing a live PASID entry:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}