1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Intel Corporation
4  *
5  * Originally split from drivers/iommu/intel/svm.c
6  */
7
8 #include <linux/pci.h>
9 #include <linux/pci-ats.h>
10
11 #include "iommu.h"
12 #include "pasid.h"
13 #include "../iommu-pages.h"
14 #include "trace.h"
15
/*
 * Page request queue descriptor, mirroring the page request descriptor
 * layout defined by the VT-d specification. The anonymous unions allow
 * each 64-bit word to be accessed either field-by-field or as a raw
 * qword (qw_0..qw_3) for logging and tracing.
 */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;		/* descriptor type */
			u64 pasid_present:1;	/* pasid field below is valid */
			u64 rsvd:7;
			u64 rid:16;		/* requester ID (bus/devfn) */
			u64 pasid:20;
			u64 exe_req:1;		/* execute permission requested */
			u64 pm_req:1;		/* privileged mode request */
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;		/* read access requested */
			u64 wr_req:1;		/* write access requested */
			u64 lpig:1;		/* last page in group */
			u64 prg_index:9;	/* page request group index */
			u64 addr:52;		/* faulting address, page-aligned (see VTD_PAGE_SHIFT use) */
		};
		u64 qw_1;
	};
	u64 qw_2;
	u64 qw_3;
};
44
/**
 * intel_iommu_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	int head, tail;
	u16 sid, did;

	info = dev_iommu_priv_get(dev);
	/* Nothing to drain if PRI was never enabled for this device. */
	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	sid = PCI_DEVID(info->bus, info->devfn);
	/* With no attached domain, fall back to the default first-level DID. */
	did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		/* Skip requests that are not for @pasid. */
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		/*
		 * A request for @pasid is still queued: wait for the prq
		 * thread to signal progress, then rescan from the head.
		 */
		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	iopf_queue_flush_dev(dev);

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	/* desc[0]: fenced invalidation-wait ordering the drain operations. */
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	if (pasid == IOMMU_NO_PASID) {
		/* Non-PASID path: domain-selective IOTLB + device-TLB flush. */
		qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
				  MAX_AGAW_PFN_WIDTH, &desc[2]);
	} else {
		/* PASID path: PASID-based IOTLB + device-TLB flush. */
		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
					0, MAX_AGAW_PFN_WIDTH, &desc[2]);
	}
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	/*
	 * If a page request overflow was raised while draining, wait for
	 * the prq thread to clear it and redo the drain from scratch.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}
126
127 static bool is_canonical_address(u64 addr)
128 {
129         int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
130         long saddr = (long)addr;
131
132         return (((saddr << shift) >> shift) == saddr);
133 }
134
135 static void handle_bad_prq_event(struct intel_iommu *iommu,
136                                  struct page_req_dsc *req, int result)
137 {
138         struct qi_desc desc = { };
139
140         pr_err("%s: Invalid page request: %08llx %08llx\n",
141                iommu->name, ((unsigned long long *)req)[0],
142                ((unsigned long long *)req)[1]);
143
144         if (!req->lpig)
145                 return;
146
147         desc.qw0 = QI_PGRP_PASID(req->pasid) |
148                         QI_PGRP_DID(req->rid) |
149                         QI_PGRP_PASID_P(req->pasid_present) |
150                         QI_PGRP_RESP_CODE(result) |
151                         QI_PGRP_RESP_TYPE;
152         desc.qw1 = QI_PGRP_IDX(req->prg_index) |
153                         QI_PGRP_LPIG(req->lpig);
154
155         qi_submit_sync(iommu, &desc, 1, 0);
156 }
157
158 static int prq_to_iommu_prot(struct page_req_dsc *req)
159 {
160         int prot = 0;
161
162         if (req->rd_req)
163                 prot |= IOMMU_FAULT_PERM_READ;
164         if (req->wr_req)
165                 prot |= IOMMU_FAULT_PERM_WRITE;
166         if (req->exe_req)
167                 prot |= IOMMU_FAULT_PERM_EXEC;
168         if (req->pm_req)
169                 prot |= IOMMU_FAULT_PERM_PRIV;
170
171         return prot;
172 }
173
174 static void intel_prq_report(struct intel_iommu *iommu, struct device *dev,
175                              struct page_req_dsc *desc)
176 {
177         struct iopf_fault event = { };
178
179         /* Fill in event data for device specific processing */
180         event.fault.type = IOMMU_FAULT_PAGE_REQ;
181         event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
182         event.fault.prm.pasid = desc->pasid;
183         event.fault.prm.grpid = desc->prg_index;
184         event.fault.prm.perm = prq_to_iommu_prot(desc);
185
186         if (desc->lpig)
187                 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
188         if (desc->pasid_present) {
189                 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
190                 event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
191         }
192
193         iommu_report_device_fault(dev, &event);
194 }
195
/*
 * Threaded IRQ handler for the page request queue: walk every pending
 * descriptor between head and tail, reject malformed requests with an
 * invalid-response, report the valid ones to the fault framework, then
 * advance the hardware head pointer and recover from any overflow.
 */
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct page_req_dsc *req;
	int head, tail, handled;
	struct device *dev;
	u64 address;

	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	/* Consider the interrupt handled if any request was queued. */
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		/* Malformed requests are answered with QI_RESP_INVALID. */
		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
bad_req:
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}

		/* Read/write page requests in privileged mode are rejected. */
		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		/* Execute requests are not supported. */
		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		/* Drop Stop Marker message. No need for a response. */
		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
			goto prq_advance;

		/*
		 * If prq is to be handled outside iommu driver via receiver of
		 * the fault notifiers, we skip the page response here.
		 */
		mutex_lock(&iommu->iopf_lock);
		dev = device_rbtree_find(iommu, req->rid);
		/* Unknown requester ID: reject the request. */
		if (!dev) {
			mutex_unlock(&iommu->iopf_lock);
			goto bad_req;
		}

		intel_prq_report(iommu, dev, req);
		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
				 req->qw_2, req->qw_3,
				 iommu->prq_seq_number++);
		mutex_unlock(&iommu->iopf_lock);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	/* All descriptors up to the snapshot tail have been consumed. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		/* Only clear PRO once the queue has fully drained. */
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}
285
/*
 * Set up page request handling on @iommu: allocate the queue pages, a
 * hardware IRQ serviced by prq_event_thread(), and an iopf queue, then
 * program the PQH/PQT/PQA registers to point the hardware at the queue.
 *
 * Returns 0 on success or a negative errno; on failure every partially
 * acquired resource is released via the goto-cleanup chain.
 */
int intel_iommu_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	int irq, ret;

	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
	if (!iommu->prq) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}

	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
	/* Reset the ring pointers and hand the queue to the hardware. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	/*
	 * NOTE(review): prq_complete is initialized after the IRQ is
	 * requested and PQA is programmed; presumably no PRQ event can reach
	 * prq_event_thread() before this point — confirm against the code
	 * that enables page request interrupts.
	 */
	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}
346
/*
 * Tear down the page request queue set up by intel_iommu_enable_prq():
 * quiesce the hardware queue registers first, then release the IRQ, the
 * iopf queue, and finally the queue pages. Always returns 0.
 */
int intel_iommu_finish_prq(struct intel_iommu *iommu)
{
	/* Detach the hardware from the queue before freeing resources. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	/* Free the IRQ before the iopf queue the handler thread uses. */
	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
369
370 void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
371                                struct iommu_page_response *msg)
372 {
373         struct device_domain_info *info = dev_iommu_priv_get(dev);
374         struct intel_iommu *iommu = info->iommu;
375         u8 bus = info->bus, devfn = info->devfn;
376         struct iommu_fault_page_request *prm;
377         struct qi_desc desc;
378         bool pasid_present;
379         bool last_page;
380         u16 sid;
381
382         prm = &evt->fault.prm;
383         sid = PCI_DEVID(bus, devfn);
384         pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
385         last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
386
387         desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
388                         QI_PGRP_PASID_P(pasid_present) |
389                         QI_PGRP_RESP_CODE(msg->code) |
390                         QI_PGRP_RESP_TYPE;
391         desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
392         desc.qw2 = 0;
393         desc.qw3 = 0;
394
395         qi_submit_sync(iommu, &desc, 1, 0);
396 }