/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <[email protected]>
 * Author: Shaohua Li <[email protected]>
 * Author: Anil S Keshavamurthy <[email protected]>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list
	 * will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

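/*
 * Resolve one ACPI device-scope entry to a struct pci_dev. The scope
 * names a start bus and a path of (device, function) hops; each hop is
 * followed through the bridge's subordinate bus until the final device
 * is reached.
 */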
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			pr_warn("Device scope bus [%d] not found\n", scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			/* warning will be printed below */
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		pr_warn("Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

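/*
 * Parse a DMAR device-scope list in two passes: first count the
 * endpoint and bridge entries, then allocate the pci_dev array and
 * resolve each entry via dmar_parse_one_dev_scope().
 */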
int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			pr_warn("Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

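/*
 * An RHSA (Remapping Hardware Static Affinity) entry associates a
 * DRHD's register base address with an ACPI proximity domain. Record
 * the resulting NUMA node on the matching IOMMU so allocations for it
 * can be made node-local.
 */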
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * via the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

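/*
 * Return 1 if @dev, or any PCI bridge above it, appears in the given
 * device array; walking up the hierarchy makes devices behind a listed
 * bridge match as well.
 */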
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

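/*
 * Find the DRHD unit responsible for a PCI device. Virtual functions
 * are matched through their physical function, and a segment's
 * INCLUDE_ALL unit (kept at the list tail) serves as the fallback when
 * no explicit device scope matches.
 */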
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

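/*
 * Resolve the device scopes of all DRHD, RMRR and ATSR entries to live
 * pci_dev pointers. Runs once; the static flag caches the outcome for
 * repeat callers.
 */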
int __init dmar_dev_scope_init(void)
{
	static int dmar_dev_scope_initialized;
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	if (dmar_dev_scope_initialized)
		return dmar_dev_scope_initialized;

	if (list_empty(&dmar_drhd_units))
		goto fail;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			goto fail;
	}

	ret = dmar_parse_rmrr_atsr_dev();
	if (ret)
		goto fail;

	dmar_dev_scope_initialized = 1;
	return 0;

fail:
	dmar_dev_scope_initialized = ret;
	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			pr_info("Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		pr_info("No DMAR devices found\n");
		return -ENODEV;
	}

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

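/*
 * Sanity-check every DRHD entry before committing to IOMMU
 * initialization: reject a zero register base address, and probe the
 * capability registers to catch units whose MMIO space reads back as
 * all ones. Returns 1 if the table looks usable, 0 otherwise.
 */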
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}

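/*
 * Early detection entry point: map and validate the DMAR table, mark
 * the platform as having an IOMMU, and hook intel_iommu_init() into
 * the x86 init path. The early table mapping is dropped at the end;
 * parse_dmar_table() maps the table again later.
 */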
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;

		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
		    dmar->flags & 0x1)
			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}

#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

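/*
 * Allocate and initialize the struct intel_iommu for one DRHD: assign
 * a sequence id, map the register range, and cache the AGAW values the
 * hardware supports. The NUMA node stays -1 until an RHSA entry (if
 * any) fills it in.
 */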
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		unmap_iommu(iommu);

	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

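/*
 * Check the fault status register for invalidation-queue errors. An
 * IQE (invalid descriptor) at @index is overwritten with the wait
 * descriptor and reported as -EINVAL; an ITE (invalidation timeout)
 * aborts the in-flight wait descriptors and yields -EAGAIN so the
 * caller can resubmit.
 */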
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while one is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

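/*
 * Invalidate a device-IOTLB (ATS) range. A non-zero @mask requests
 * invalidation of 2^mask pages: the address must be aligned to that
 * span, and the size is encoded by setting the address bits below
 * bit (VTD_PAGE_SHIFT + mask - 1) in the descriptor.
 */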
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

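/*
 * Primary fault handler. Each 16-byte fault record holds the faulting
 * page address (bytes 0-7), the source id (low bytes of the dword at
 * offset 8) and the fault reason, type and F (fault) bit (dword at
 * offset 12); records are drained in order until one with the F bit
 * clear is found.
 */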
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

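/*
 * Allocate an interrupt for the fault-event MSI of one IOMMU, wire up
 * the architecture's DMAR MSI setup, and install dmar_fault() as the
 * handler. IRQF_NO_THREAD keeps the handler from being force-threaded.
 */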
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

1342 | /* | |
1343 | * Check interrupt remapping support in DMAR table description. | |
1344 | */ | |
0b8973a8 | 1345 | int __init dmar_ir_support(void) |
074835f0 YS |
1346 | { |
1347 | struct acpi_table_dmar *dmar; | |
1348 | dmar = (struct acpi_table_dmar *)dmar_tbl; | |
4f506e07 AP |
1349 | if (!dmar) |
1350 | return 0; | |
074835f0 YS |
1351 | return dmar->flags & 0x1; |
1352 | } | |
4db77ff3 | 1353 | IOMMU_INIT_POST(detect_intel_iommu); |