/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

#include <asm/debugfs.h>
#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * spinlock to protect initialisation of an npu_context for a particular
 * mm_struct.
 */
static DEFINE_SPINLOCK(npu_context_lock);

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);

	return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);
}

/* Given a NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

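/*
 * Illustrative sketch only (not part of this file): a hypothetical driver
 * can enumerate the NPU devices linked to a GPU by increasing the index
 * until the lookup fails; the helper setup_link() below is an assumption
 * used purely for illustration.
 *
 *	struct pci_dev *npdev;
 *	int i;
 *
 *	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)); i++)
 *		setup_link(npdev);	// hypothetical per-link setup
 *
 * Each index corresponds to one "ibm,npu" phandle in the GPU's
 * device-tree node.
 */
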
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked PCI device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the DMA window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * NVLink devices use the same TCE table configuration as
	 * their parent device so drivers shouldn't be doing DMA
	 * operations directly on these devices.
	 */
	set_dma_ops(&npe->pdev->dev, NULL);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either 32bit window or
	 * DMA bypass but never both. So we deconfigure 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;
	bool nmmu_flush;

	/* Callback to stop translation requests on a given GPU */
	void (*release_cb)(struct npu_context *context, void *priv);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_bit(i, &npu->mmio_atsd_usage))
			if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
				return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit_unlock(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_LAUNCH		0
#define XTS_ATSD_AVA		1
#define XTS_ATSD_STAT		2

static unsigned long get_atsd_launch_val(unsigned long pid, unsigned long psize)
{
	unsigned long launch = 0;

	if (psize == MMU_PAGE_COUNT) {
		/* IS set to invalidate entire matching PID */
		launch |= PPC_BIT(12);
	} else {
		/* AP set to invalidate region of psize */
		launch |= (u64)mmu_get_ap(psize) << PPC_BITLSHIFT(17);
	}

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* Leave "No flush" (bit 39) 0 so every ATSD performs a flush */

	return launch;
}

static void mmio_atsd_regs_write(struct mmio_atsd_reg
			mmio_atsd_reg[NV_MAX_NPUS], unsigned long offset,
			unsigned long val)
{
	struct npu *npu;
	int i, reg;

	for (i = 0; i <= max_npu2_index; i++) {
		reg = mmio_atsd_reg[i].reg;
		if (reg < 0)
			continue;

		npu = mmio_atsd_reg[i].npu;
		__raw_writeq_be(val, npu->mmio_atsd_regs[reg] + offset);
	}
}

static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
				unsigned long pid)
{
	unsigned long launch = get_atsd_launch_val(pid, MMU_PAGE_COUNT);

	/* Invalidating the entire process doesn't use a va */
	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
}

static void mmio_invalidate_range(struct mmio_atsd_reg
			mmio_atsd_reg[NV_MAX_NPUS], unsigned long pid,
			unsigned long start, unsigned long psize)
{
	unsigned long launch = get_atsd_launch_val(pid, psize);

	/* Write all VAs first */
	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_AVA, start);

	/* Issue one barrier for all address writes */
	eieio();

	/* Launch */
	mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();
	}
}

/*
 * Acquires all the address translation shootdown (ATSD) registers required to
 * launch an ATSD on all links this npu_context is active on.
 */
static void acquire_atsd_reg(struct npu_context *npu_context,
			struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	int i, j;
	struct npu *npu;
	struct pci_dev *npdev;
	struct pnv_phb *nphb;

	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			/*
			 * There are no ordering requirements with respect to
			 * the setup of struct npu_context, but to ensure
			 * consistent behaviour we need to ensure npdev[][] is
			 * only read once.
			 */
			npdev = READ_ONCE(npu_context->npdev[i][j]);
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;
			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
			while (mmio_atsd_reg[i].reg < 0) {
				mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
				cpu_relax();
			}
			break;
		}
	}
}

/*
 * Release previously acquired ATSD registers. To avoid deadlocks the registers
 * must be released in the same order they were acquired above in
 * acquire_atsd_reg.
 */
static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	int i;

	for (i = 0; i <= max_npu2_index; i++) {
		/*
		 * We can't rely on npu_context->npdev[][] being the same here
		 * as when acquire_atsd_reg() was called, hence we use the
		 * values stored in mmio_atsd_reg during the acquire phase
		 * rather than re-reading npdev[][].
		 */
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
	}
}

/*
 * Invalidate a virtual address range
 */
static void mmio_invalidate(struct npu_context *npu_context,
			unsigned long start, unsigned long size)
{
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;
	unsigned long atsd_start = 0;
	unsigned long end = start + size - 1;
	int atsd_psize = MMU_PAGE_COUNT;

	/*
	 * Convert the input range into one of the supported sizes. If the
	 * range doesn't fit, use the next larger supported size. Invalidation
	 * latency is high, so over-invalidation is preferred to issuing
	 * multiple invalidates.
	 *
	 * A 4K page size isn't supported by NPU/GPU ATS, so that case is
	 * ignored.
	 */
	if (size == SZ_64K) {
		atsd_start = start;
		atsd_psize = MMU_PAGE_64K;
	} else if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M)) {
		atsd_start = ALIGN_DOWN(start, SZ_2M);
		atsd_psize = MMU_PAGE_2M;
	} else if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G)) {
		atsd_start = ALIGN_DOWN(start, SZ_1G);
		atsd_psize = MMU_PAGE_1G;
	}

	if (npu_context->nmmu_flush)
		/*
		 * Unfortunately the nest mmu does not support flushing
		 * specific addresses so we have to flush the whole mm once
		 * before shooting down the GPU translation.
		 */
		flush_all_mm(npu_context->mm);

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * an invalidate.
	 */
	acquire_atsd_reg(npu_context, mmio_atsd_reg);

	if (atsd_psize == MMU_PAGE_COUNT)
		mmio_invalidate_pid(mmio_atsd_reg, pid);
	else
		mmio_invalidate_range(mmio_atsd_reg, pid, atsd_start,
				atsd_psize);

	mmio_invalidate_wait(mmio_atsd_reg);

	/*
	 * The GPU requires two flush ATSDs to ensure all entries have been
	 * flushed. We use PID 0 as it will never be used for a process on the
	 * GPU.
	 */
	mmio_invalidate_pid(mmio_atsd_reg, 0);
	mmio_invalidate_wait(mmio_atsd_reg);
	mmio_invalidate_pid(mmio_atsd_reg, 0);
	mmio_invalidate_wait(mmio_atsd_reg);

	release_atsd_reg(mmio_atsd_reg);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, ~0UL);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, address, PAGE_SIZE);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, start, end - start);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, otherwise a
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode and must not be called from interrupt
 * context.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			void (*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU. It is safe for these to be
	 * called concurrently with destroy as the OPAL call takes appropriate
	 * locks and refcounts on init/destroy.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	spin_lock(&npu_context_lock);
	npu_context = mm->context.npu_context;
	if (npu_context) {
		if (npu_context->release_cb != cb ||
			npu_context->priv != priv) {
			spin_unlock(&npu_context_lock);
			opal_npu_destroy_context(nphb->opal_id, mm->context.id,
						PCI_DEVID(gpdev->bus->number,
							gpdev->devfn));
			return ERR_PTR(-EINVAL);
		}

		WARN_ON(!kref_get_unless_zero(&npu_context->kref));
	}
	spin_unlock(&npu_context_lock);

	if (!npu_context) {
		/*
		 * We can set up these fields without holding the
		 * npu_context_lock as the npu_context hasn't been returned to
		 * the caller meaning it can't be destroyed. Parallel allocation
		 * is protected against by mmap_sem.
		 */
		rc = -ENOMEM;
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (npu_context) {
			kref_init(&npu_context->kref);
			npu_context->mm = mm;
			npu_context->mn.ops = &nv_nmmu_notifier_ops;
			rc = __mmu_notifier_register(&npu_context->mn, mm);
		}

		if (rc) {
			kfree(npu_context);
			opal_npu_destroy_context(nphb->opal_id, mm->context.id,
						PCI_DEVID(gpdev->bus->number,
							gpdev->devfn));
			return ERR_PTR(rc);
		}

		mm->context.npu_context = npu_context;
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;

	/*
	 * npdev is a pci_dev pointer setup by the PCI code. We assign it to
	 * npdev[][] to indicate to the mmu notifiers that an invalidation
	 * should also be sent over this nvlink. The notifiers don't use any
	 * other fields in npu_context, so we just need to ensure that when
	 * they dereference npu_context->npdev[][] it is either a valid pointer
	 * or NULL.
	 */
	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);

	if (!nphb->npu.nmmu_flush) {
		/*
		 * If we're not explicitly flushing ourselves we need to mark
		 * the thread for global flushes
		 */
		npu_context->nmmu_flush = false;
		mm_context_add_copro(mm);
	} else
		npu_context->nmmu_flush = true;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	if (!npu_context->nmmu_flush)
		mm_context_remove_copro(npu_context->mm);

	npu_context->mm->context.npu_context = NULL;
}

/*
 * Destroy a context on the given GPU. May free the npu_context if it is no
 * longer active on any GPUs. Must not be called from interrupt context.
 */
void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	int removed;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	spin_lock(&npu_context_lock);
	removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
	spin_unlock(&npu_context_lock);

	/*
	 * We need to do this outside of pnv_npu2_release_context so that it is
	 * outside the spinlock as mmu_notifier_destroy uses SRCU.
	 */
	if (removed) {
		mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

		kfree(npu_context);
	}
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);
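
/*
 * Illustrative sketch only (not part of this driver): how a hypothetical GPU
 * driver might pair the context calls above. The callback
 * my_stop_translations(), the flags value and the priv pointer are
 * assumptions used purely for illustration.
 *
 *	static void my_stop_translations(struct npu_context *ctx, void *priv)
 *	{
 *		// quiesce the GPU so it issues no new translation requests
 *	}
 *
 *	down_write(&current->mm->mmap_sem);
 *	ctx = pnv_npu2_init_context(gpdev, flags, my_stop_translations, priv);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	pnv_npu2_destroy_context(ctx, gpdev);
 */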

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the mm_struct must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported
		 * in other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);
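
/*
 * Illustrative sketch only (not part of this driver): a hypothetical caller
 * servicing GPU address translation faults would hold mmap_sem around the
 * call, e.g.:
 *
 *	down_read(&mm->mmap_sem);
 *	rc = pnv_npu2_handle_fault(ctx, ea, flags, status, count);
 *	up_read(&mm->mmap_sem);
 *
 * where ea, flags and status are caller-provided arrays of length count.
 */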

int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	phb->npu.nmmu_flush =
		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}