/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <[email protected]>
 * Author: Shaohua Li <[email protected]>
 * Author: Anil S Keshavamurthy <[email protected]>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

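/*
 * Illustrative sketch, not part of the driver: under the rules above, a
 * process-context caller brackets list updates with
 * down_write(&dmar_global_lock)/up_write(), while a non-sleeping reader
 * walks the RCU-protected list instead, roughly:
 *
 *	rcu_read_lock();
 *	for_each_drhd_unit(dmaru)
 *		...;
 *	rcu_read_unlock();
 *
 * (see dmar_find_matched_drhd_unit() below for the real thing).
 */
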
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

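/*
 * Worked example with a hypothetical topology: for a function 0000:01:00.0
 * behind a root port 00:1c.0, the first loop above counts two hops, and the
 * second loop (walking leaf to root while decrementing level) stores the
 * path root-first: path[0] = 00:1c.0, path[1] = 01:00.0, with info->bus = 0
 * taken from the root bus. dmar_match_pci_path() below compares entries in
 * that same root-first order.
 */
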
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

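/*
 * Hypothetical example of the fallback above: a BIOS writes an RMRR scope
 * as a single-hop path naming the device's final bus/devfn (say 02:00.0)
 * instead of the full path from the root bus. Strict matching fails, but
 * comparing the lone entry against the last hop of the notifier path still
 * identifies the device, so the broken entry is honoured with a warning.
 */
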
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope
		 * mismatch for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

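/*
 * Note on the removal path above: the dev pointer is cleared with
 * RCU_INIT_POINTER() and synchronize_rcu() runs before put_device(), so a
 * lockless reader traversing the scope array under rcu_read_lock() either
 * sees the device with its reference still held, or sees NULL - never a
 * stale pointer about to be freed.
 */
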
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

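/*
 * The .priority = INT_MIN above makes this the last notifier to run on PCI
 * bus events, so by the time the DMAR scope lists are updated the device
 * has been fully set up (on add) or torn down by everyone else (on remove).
 */
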
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

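/*
 * The "header->length - 8" above relies on device_name[] starting 8 bytes
 * into the ANDD structure (4-byte common DMAR header, 3 reserved bytes,
 * 1-byte device number), assuming the ACPICA layout of struct
 * acpi_dmar_andd; everything after that offset is the name.
 */
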
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		drhd->reg_base_addr,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

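/*
 * Because dmar_register_drhd_unit() keeps INCLUDE_ALL units at the list
 * tail, the walk above tries every unit that names the device (or one of
 * its parent bridges) explicitly before falling back to the catch-all
 * unit of the matching segment.
 */
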
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

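/*
 * dmar_table_initialized is effectively tri-state: 0 until the first call,
 * a negative errno if parsing failed, 1 on success. Later callers get the
 * cached verdict, so the ACPI table is only ever parsed once.
 */
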
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

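/*
 * The arg flag above doubles as a context marker: the early boot caller
 * passes no argument and therefore uses early_ioremap(), since the normal
 * ioremap() space is not usable that early, while the DMAR hotplug path
 * supplies a non-NULL arg and uses the regular ioremap()/iounmap() pair.
 */
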
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}


static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

		if (IS_ERR(iommu->iommu_dev)) {
			err = PTR_ERR(iommu->iommu_dev);
			goto err_unmap;
		}
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

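/*
 * Index parity, for reference: qi_submit_sync() below always queues a
 * work/wait descriptor pair and advances free_head by two, so even slots
 * hold work descriptors and odd slots hold wait descriptors. That is why
 * the ITE recovery above forces head to an odd index ("head |= 1") and
 * then steps backwards two slots at a time, aborting each pending wait
 * descriptor it passes.
 */
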
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

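/*
 * How completion is detected, sketched: the wait descriptor queued behind
 * each request carries QI_IWD_STATUS_WRITE plus the physical address of
 * its own status slot, so the hardware DMA-writes QI_DONE into
 * qi->desc_status[wait_index] once everything ahead of it has drained.
 * The submitting CPU simply spins on that slot, dropping q_lock between
 * polls so other CPUs can queue, and calling qi_check_fault() so IQE/ITE
 * conditions abort the wait instead of hanging it.
 */
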
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

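/*
 * Address/size encoding used above, sketched: a device-IOTLB invalidation
 * covering 2^mask pages does not carry the size in a separate field.
 * Instead the low bits of the (suitably aligned) address are filled with
 * ones:
 *
 *	addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
 *
 * and QI_DEV_IOTLB_SIZE marks the request as a ranged invalidation, letting
 * the hardware read the span size out of the address itself.
 */
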
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}


static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

5c2837fb | 1526 | void dmar_msi_unmask(struct irq_data *data) |
0ac2491f | 1527 | { |
dced35ae | 1528 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1208225c | 1529 | int reg = dmar_msi_reg(iommu, data->irq); |
0ac2491f SS |
1530 | unsigned long flag; |
1531 | ||
1532 | /* unmask it */ | |
1f5b3c3f | 1533 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
1208225c | 1534 | writel(0, iommu->reg + reg); |
0ac2491f | 1535 | /* Read the register back to flush the posted write */ |
1208225c | 1536 | readl(iommu->reg + reg); |
1f5b3c3f | 1537 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f SS |
1538 | } |
1539 | ||
5c2837fb | 1540 | void dmar_msi_mask(struct irq_data *data) |
0ac2491f | 1541 | { |
dced35ae | 1542 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1208225c DW |
1543 | int reg = dmar_msi_reg(iommu, data->irq); |
1544 | unsigned long flag; | |
0ac2491f SS |
1545 | |
1546 | /* mask it */ | |
1f5b3c3f | 1547 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
1208225c | 1548 | writel(DMA_FECTL_IM, iommu->reg + reg); |
0ac2491f | 1549 | /* Read the register back to flush the posted write */ |
1208225c | 1550 | readl(iommu->reg + reg); |
1f5b3c3f | 1551 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f SS |
1552 | } |
1553 | ||
1554 | void dmar_msi_write(int irq, struct msi_msg *msg) | |
1555 | { | |
dced35ae | 1556 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1208225c | 1557 | int reg = dmar_msi_reg(iommu, irq); |
0ac2491f SS |
1558 | unsigned long flag; |
1559 | ||
1f5b3c3f | 1560 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
1208225c DW |
1561 | writel(msg->data, iommu->reg + reg + 4); |
1562 | writel(msg->address_lo, iommu->reg + reg + 8); | |
1563 | writel(msg->address_hi, iommu->reg + reg + 12); | |
1f5b3c3f | 1564 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f SS |
1565 | } |
1566 | ||
1567 | void dmar_msi_read(int irq, struct msi_msg *msg) | |
1568 | { | |
dced35ae | 1569 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1208225c | 1570 | int reg = dmar_msi_reg(iommu, irq); |
0ac2491f SS |
1571 | unsigned long flag; |
1572 | ||
1f5b3c3f | 1573 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
1208225c DW |
1574 | msg->data = readl(iommu->reg + reg + 4); |
1575 | msg->address_lo = readl(iommu->reg + reg + 8); | |
1576 | msg->address_hi = readl(iommu->reg + reg + 12); | |
1f5b3c3f | 1577 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f SS |
1578 | } |
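/*
 * Register-layout note (illustrative): the fixed +4/+8/+12 offsets used
 * by dmar_msi_write()/dmar_msi_read() assume the VT-d event register
 * blocks are laid out as control, data, address, upper address:
 *
 *	reg + 0   event control (mask bit)        e.g. DMAR_FECTL_REG
 *	reg + 4   event data                      e.g. DMAR_FEDATA_REG
 *	reg + 8   event address (low 32 bits)     e.g. DMAR_FEADDR_REG
 *	reg + 12  event upper address             e.g. DMAR_FEUADDR_REG
 *
 * The page-request block starting at DMAR_PECTL_REG follows the same
 * layout, which is why dmar_msi_reg() can select either block and the
 * accessors can share one code path.
 */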
1579 | ||
1580 | static int dmar_fault_do_one(struct intel_iommu *iommu, int type, | |
1581 | u8 fault_reason, u16 source_id, unsigned long long addr) | |
1582 | { | |
1583 | const char *reason; | |
9d783ba0 | 1584 | int fault_type; |
0ac2491f | 1585 | |
9d783ba0 | 1586 | reason = dmar_get_fault_reason(fault_reason, &fault_type); |
0ac2491f | 1587 | |
9d783ba0 | 1588 | if (fault_type == INTR_REMAP) |
a0fe14d7 AW |
1589 | pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n", |
1590 | source_id >> 8, PCI_SLOT(source_id & 0xFF), | |
9d783ba0 SS |
1591 | PCI_FUNC(source_id & 0xFF), addr >> 48, |
1592 | fault_reason, reason); | |
1593 | else | |
a0fe14d7 AW |
1594 | pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n", |
1595 | type ? "DMA Read" : "DMA Write", | |
1596 | source_id >> 8, PCI_SLOT(source_id & 0xFF), | |
9d783ba0 | 1597 | PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); |
0ac2491f SS |
1598 | return 0; |
1599 | } | |
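/*
 * Decoding note with a made-up requester ID (for illustration only):
 * source_id is the PCI requester that issued the faulting request,
 * bus number in the high byte, devfn in the low byte:
 *
 *	source_id = 0x0310                 hypothetical value
 *	bus  = source_id >> 8              = 0x03
 *	slot = PCI_SLOT(source_id & 0xFF)  = (0x10 >> 3) & 0x1f = 2
 *	func = PCI_FUNC(source_id & 0xFF)  =  0x10 & 0x7        = 0
 *
 * which the DMA-remap message above would print as "[03:02.0]". For
 * interrupt-remap faults, bits 63:48 of the address field carry the
 * interrupt index, hence the "addr >> 48" in the INTR-REMAP branch.
 */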
1600 | ||
1601 | #define PRIMARY_FAULT_REG_LEN (16) | |
1531a6a6 | 1602 | irqreturn_t dmar_fault(int irq, void *dev_id) |
0ac2491f SS |
1603 | { |
1604 | struct intel_iommu *iommu = dev_id; | |
1605 | int reg, fault_index; | |
1606 | u32 fault_status; | |
1607 | unsigned long flag; | |
c43fce4e AW |
1608 | bool ratelimited; |
1609 | static DEFINE_RATELIMIT_STATE(rs, | |
1610 | DEFAULT_RATELIMIT_INTERVAL, | |
1611 | DEFAULT_RATELIMIT_BURST); | |
1612 | ||
1613 | /* Disable printing; simply clear the fault when ratelimited */ | |
1614 | ratelimited = !__ratelimit(&rs); | |
0ac2491f | 1615 | |
1f5b3c3f | 1616 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
0ac2491f | 1617 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
c43fce4e | 1618 | if (fault_status && !ratelimited) |
bf947fcb | 1619 | pr_err("DRHD: handling fault status reg %x\n", fault_status); |
0ac2491f SS |
1620 | |
1621 | /* TBD: the advanced fault log is ignored for now */ | |
1622 | if (!(fault_status & DMA_FSTS_PPF)) | |
bd5cdad0 | 1623 | goto unlock_exit; |
0ac2491f SS |
1624 | |
1625 | fault_index = dma_fsts_fault_record_index(fault_status); | |
1626 | reg = cap_fault_reg_offset(iommu->cap); | |
1627 | while (1) { | |
1628 | u8 fault_reason; | |
1629 | u16 source_id; | |
1630 | u64 guest_addr; | |
1631 | int type; | |
1632 | u32 data; | |
1633 | ||
1634 | /* highest 32 bits */ | |
1635 | data = readl(iommu->reg + reg + | |
1636 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | |
1637 | if (!(data & DMA_FRCD_F)) | |
1638 | break; | |
1639 | ||
c43fce4e AW |
1640 | if (!ratelimited) { |
1641 | fault_reason = dma_frcd_fault_reason(data); | |
1642 | type = dma_frcd_type(data); | |
0ac2491f | 1643 | |
c43fce4e AW |
1644 | data = readl(iommu->reg + reg + |
1645 | fault_index * PRIMARY_FAULT_REG_LEN + 8); | |
1646 | source_id = dma_frcd_source_id(data); | |
1647 | ||
1648 | guest_addr = dmar_readq(iommu->reg + reg + | |
1649 | fault_index * PRIMARY_FAULT_REG_LEN); | |
1650 | guest_addr = dma_frcd_page_addr(guest_addr); | |
1651 | } | |
0ac2491f | 1652 | |
0ac2491f SS |
1653 | /* clear the fault */ |
1654 | writel(DMA_FRCD_F, iommu->reg + reg + | |
1655 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | |
1656 | ||
1f5b3c3f | 1657 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f | 1658 | |
c43fce4e AW |
1659 | if (!ratelimited) |
1660 | dmar_fault_do_one(iommu, type, fault_reason, | |
1661 | source_id, guest_addr); | |
0ac2491f SS |
1662 | |
1663 | fault_index++; | |
8211a7b5 | 1664 | if (fault_index >= cap_num_fault_regs(iommu->cap)) |
0ac2491f | 1665 | fault_index = 0; |
1f5b3c3f | 1666 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
0ac2491f | 1667 | } |
0ac2491f | 1668 | |
bd5cdad0 LZH |
1669 | writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG); |
1670 | ||
1671 | unlock_exit: | |
1f5b3c3f | 1672 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); |
0ac2491f SS |
1673 | return IRQ_HANDLED; |
1674 | } | |
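/*
 * Record-layout note (illustrative): each primary fault record scanned
 * above is PRIMARY_FAULT_REG_LEN (16) bytes, read piecewise as:
 *
 *	record + 0   64-bit faulting page address (dma_frcd_page_addr)
 *	record + 8   32 bits holding the source id (dma_frcd_source_id)
 *	record + 12  32 bits holding the F bit (bit 31, DMA_FRCD_F),
 *	             the request type (dma_frcd_type) and the fault
 *	             reason (dma_frcd_fault_reason)
 *
 * The F bit is write-1-to-clear, so writing DMA_FRCD_F back to
 * record + 12 retires the entry before advancing the fault index.
 */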
1675 | ||
1676 | int dmar_set_interrupt(struct intel_iommu *iommu) | |
1677 | { | |
1678 | int irq, ret; | |
1679 | ||
9d783ba0 SS |
1680 | /* |
1681 | * Check if the fault interrupt is already initialized. | |
1682 | */ | |
1683 | if (iommu->irq) | |
1684 | return 0; | |
1685 | ||
34742db8 JL |
1686 | irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); |
1687 | if (irq > 0) { | |
1688 | iommu->irq = irq; | |
1689 | } else { | |
9f10e5bf | 1690 | pr_err("No free IRQ vectors\n"); |
0ac2491f SS |
1691 | return -EINVAL; |
1692 | } | |
1693 | ||
477694e7 | 1694 | ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); |
0ac2491f | 1695 | if (ret) |
9f10e5bf | 1696 | pr_err("Can't request irq\n"); |
0ac2491f SS |
1697 | return ret; |
1698 | } | |
9d783ba0 SS |
1699 | |
1700 | int __init enable_drhd_fault_handling(void) | |
1701 | { | |
1702 | struct dmar_drhd_unit *drhd; | |
7c919779 | 1703 | struct intel_iommu *iommu; |
9d783ba0 SS |
1704 | |
1705 | /* | |
1706 | * Enable fault control interrupt. | |
1707 | */ | |
7c919779 | 1708 | for_each_iommu(iommu, drhd) { |
bd5cdad0 | 1709 | u32 fault_status; |
7c919779 | 1710 | int ret = dmar_set_interrupt(iommu); |
9d783ba0 SS |
1711 | |
1712 | if (ret) { | |
e9071b0b | 1713 | pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
9d783ba0 SS |
1714 | (unsigned long long)drhd->reg_base_addr, ret); |
1715 | return -1; | |
1716 | } | |
7f99d946 SS |
1717 | |
1718 | /* | |
1719 | * Clear any previous faults. | |
1720 | */ | |
1721 | dmar_fault(iommu->irq, iommu); | |
bd5cdad0 LZH |
1722 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); |
1723 | writel(fault_status, iommu->reg + DMAR_FSTS_REG); | |
9d783ba0 SS |
1724 | } |
1725 | ||
1726 | return 0; | |
1727 | } | |
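/*
 * Note (illustrative): the read-then-write-back of DMAR_FSTS_REG above
 * relies on the latched fault-status bits being write-1-to-clear;
 * writing back exactly the value read acknowledges whatever was pending,
 * while read-only summary bits such as PPF simply ignore the write.
 */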
eb4a52bc FY |
1728 | |
1729 | /* | |
1730 | * Re-enable Queued Invalidation interface. | |
1731 | */ | |
1732 | int dmar_reenable_qi(struct intel_iommu *iommu) | |
1733 | { | |
1734 | if (!ecap_qis(iommu->ecap)) | |
1735 | return -ENOENT; | |
1736 | ||
1737 | if (!iommu->qi) | |
1738 | return -ENOENT; | |
1739 | ||
1740 | /* | |
1741 | * First disable queued invalidation. | |
1742 | */ | |
1743 | dmar_disable_qi(iommu); | |
1744 | /* | |
1745 | * Then enable queued invalidation again. Since there are no pending | |
1746 | * invalidation requests now, it's safe to re-enable queued | |
1747 | * invalidation. | |
1748 | */ | |
1749 | __dmar_enable_qi(iommu); | |
1750 | ||
1751 | return 0; | |
1752 | } | |
074835f0 YS |
1753 | |
1754 | /* | |
1755 | * Check interrupt remapping support in DMAR table description. | |
1756 | */ | |
0b8973a8 | 1757 | int __init dmar_ir_support(void) |
074835f0 YS |
1758 | { |
1759 | struct acpi_table_dmar *dmar; | |
1760 | dmar = (struct acpi_table_dmar *)dmar_tbl; | |
4f506e07 AP |
1761 | if (!dmar) |
1762 | return 0; | |
074835f0 YS |
1763 | return dmar->flags & 0x1; |
1764 | } | |
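/*
 * Flag note (illustrative): bit 0 of the DMAR table flags tested above
 * is the INTR_REMAP flag from the ACPI DMAR table definition; bit 1
 * (X2APIC_OPT_OUT) is not consulted here.
 */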
694835dc | 1765 | |
6b197249 JL |
1766 | /* Check whether DMAR units are in use */ |
1767 | static inline bool dmar_in_use(void) | |
1768 | { | |
1769 | return irq_remapping_enabled || intel_iommu_enabled; | |
1770 | } | |
1771 | ||
a868e6b7 JL |
1772 | static int __init dmar_free_unused_resources(void) |
1773 | { | |
1774 | struct dmar_drhd_unit *dmaru, *dmaru_n; | |
1775 | ||
6b197249 | 1776 | if (dmar_in_use()) |
a868e6b7 JL |
1777 | return 0; |
1778 | ||
2e455289 JL |
1779 | if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) |
1780 | bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb); | |
59ce0515 | 1781 | |
3a5670e8 | 1782 | down_write(&dmar_global_lock); |
a868e6b7 JL |
1783 | list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { |
1784 | list_del(&dmaru->list); | |
1785 | dmar_free_drhd(dmaru); | |
1786 | } | |
3a5670e8 | 1787 | up_write(&dmar_global_lock); |
a868e6b7 JL |
1788 | |
1789 | return 0; | |
1790 | } | |
1791 | ||
1792 | late_initcall(dmar_free_unused_resources); | |
4db77ff3 | 1793 | IOMMU_INIT_POST(detect_intel_iommu); |
6b197249 JL |
1794 | |
1795 | /* | |
1796 | * DMAR Hotplug Support | |
1797 | * For more details, please refer to Intel(R) Virtualization Technology | |
1798 | * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8 | |
1799 | * "Remapping Hardware Unit Hot Plug". | |
1800 | */ | |
1801 | static u8 dmar_hp_uuid[] = { | |
1802 | /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C, | |
1803 | /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF | |
1804 | }; | |
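/*
 * Encoding note (illustrative): the byte array above is assumed to be
 * the little-endian wire form of the hotplug _DSM GUID
 * D8C1A3A6-BE9B-4C9B-91BF-C3CB81FC5DAF - the first three GUID fields
 * stored byte-swapped, the final two verbatim.
 */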
1805 | ||
1806 | /* | |
1807 | * Currently there is only one revision, and the BIOS does not check the | |
1808 | * revision id, so use 0 for safety. | |
1809 | */ | |
1810 | #define DMAR_DSM_REV_ID 0 | |
1811 | #define DMAR_DSM_FUNC_DRHD 1 | |
1812 | #define DMAR_DSM_FUNC_ATSR 2 | |
1813 | #define DMAR_DSM_FUNC_RHSA 3 | |
1814 | ||
1815 | static inline bool dmar_detect_dsm(acpi_handle handle, int func) | |
1816 | { | |
1817 | return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func); | |
1818 | } | |
1819 | ||
1820 | static int dmar_walk_dsm_resource(acpi_handle handle, int func, | |
1821 | dmar_res_handler_t handler, void *arg) | |
1822 | { | |
1823 | int ret = -ENODEV; | |
1824 | union acpi_object *obj; | |
1825 | struct acpi_dmar_header *start; | |
1826 | struct dmar_res_callback callback; | |
1827 | static int res_type[] = { | |
1828 | [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT, | |
1829 | [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS, | |
1830 | [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY, | |
1831 | }; | |
1832 | ||
1833 | if (!dmar_detect_dsm(handle, func)) | |
1834 | return 0; | |
1835 | ||
1836 | obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, | |
1837 | func, NULL, ACPI_TYPE_BUFFER); | |
1838 | if (!obj) | |
1839 | return -ENODEV; | |
1840 | ||
1841 | memset(&callback, 0, sizeof(callback)); | |
1842 | callback.cb[res_type[func]] = handler; | |
1843 | callback.arg[res_type[func]] = arg; | |
1844 | start = (struct acpi_dmar_header *)obj->buffer.pointer; | |
1845 | ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); | |
1846 | ||
1847 | ACPI_FREE(obj); | |
1848 | ||
1849 | return ret; | |
1850 | } | |
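/*
 * A minimal, hypothetical usage sketch (count_drhd_cb and
 * count_hotplug_drhds are illustrative, not helpers this file provides):
 * the callback handed to dmar_walk_dsm_resource() sees each matching
 * DMAR sub-table plus the opaque argument, and returns 0 to keep walking.
 */
#if 0	/* example only */
static int count_drhd_cb(struct acpi_dmar_header *header, void *arg)
{
	int *count = arg;

	(*count)++;	/* one DRHD structure returned by _DSM */
	return 0;
}

static int count_hotplug_drhds(acpi_handle handle)
{
	int count = 0;
	int ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					 count_drhd_cb, &count);

	return ret ? ret : count;
}
#endif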
1851 | ||
1852 | static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg) | |
1853 | { | |
1854 | int ret; | |
1855 | struct dmar_drhd_unit *dmaru; | |
1856 | ||
1857 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | |
1858 | if (!dmaru) | |
1859 | return -ENODEV; | |
1860 | ||
1861 | ret = dmar_ir_hotplug(dmaru, true); | |
1862 | if (ret == 0) | |
1863 | ret = dmar_iommu_hotplug(dmaru, true); | |
1864 | ||
1865 | return ret; | |
1866 | } | |
1867 | ||
1868 | static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) | |
1869 | { | |
1870 | int i, ret; | |
1871 | struct device *dev; | |
1872 | struct dmar_drhd_unit *dmaru; | |
1873 | ||
1874 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | |
1875 | if (!dmaru) | |
1876 | return 0; | |
1877 | ||
1878 | /* | |
1879 | * All PCI devices managed by this unit should have been destroyed. | |
1880 | */ | |
194dc870 | 1881 | if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { |
6b197249 JL |
1882 | for_each_active_dev_scope(dmaru->devices, |
1883 | dmaru->devices_cnt, i, dev) | |
1884 | return -EBUSY; | |
194dc870 | 1885 | } |
6b197249 JL |
1886 | |
1887 | ret = dmar_ir_hotplug(dmaru, false); | |
1888 | if (ret == 0) | |
1889 | ret = dmar_iommu_hotplug(dmaru, false); | |
1890 | ||
1891 | return ret; | |
1892 | } | |
1893 | ||
1894 | static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg) | |
1895 | { | |
1896 | struct dmar_drhd_unit *dmaru; | |
1897 | ||
1898 | dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); | |
1899 | if (dmaru) { | |
1900 | list_del_rcu(&dmaru->list); | |
1901 | synchronize_rcu(); | |
1902 | dmar_free_drhd(dmaru); | |
1903 | } | |
1904 | ||
1905 | return 0; | |
1906 | } | |
1907 | ||
1908 | static int dmar_hotplug_insert(acpi_handle handle) | |
1909 | { | |
1910 | int ret; | |
1911 | int drhd_count = 0; | |
1912 | ||
1913 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1914 | &dmar_validate_one_drhd, (void *)1); | |
1915 | if (ret) | |
1916 | goto out; | |
1917 | ||
1918 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1919 | &dmar_parse_one_drhd, (void *)&drhd_count); | |
1920 | if (ret == 0 && drhd_count == 0) { | |
1921 | pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n"); | |
1922 | goto out; | |
1923 | } else if (ret) { | |
1924 | goto release_drhd; | |
1925 | } | |
1926 | ||
1927 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA, | |
1928 | &dmar_parse_one_rhsa, NULL); | |
1929 | if (ret) | |
1930 | goto release_drhd; | |
1931 | ||
1932 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | |
1933 | &dmar_parse_one_atsr, NULL); | |
1934 | if (ret) | |
1935 | goto release_atsr; | |
1936 | ||
1937 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1938 | &dmar_hp_add_drhd, NULL); | |
1939 | if (!ret) | |
1940 | return 0; | |
1941 | ||
1942 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1943 | &dmar_hp_remove_drhd, NULL); | |
1944 | release_atsr: | |
1945 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | |
1946 | &dmar_release_one_atsr, NULL); | |
1947 | release_drhd: | |
1948 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1949 | &dmar_hp_release_drhd, NULL); | |
1950 | out: | |
1951 | return ret; | |
1952 | } | |
1953 | ||
1954 | static int dmar_hotplug_remove(acpi_handle handle) | |
1955 | { | |
1956 | int ret; | |
1957 | ||
1958 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | |
1959 | &dmar_check_one_atsr, NULL); | |
1960 | if (ret) | |
1961 | return ret; | |
1962 | ||
1963 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1964 | &dmar_hp_remove_drhd, NULL); | |
1965 | if (ret == 0) { | |
1966 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | |
1967 | &dmar_release_one_atsr, NULL)); | |
1968 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1969 | &dmar_hp_release_drhd, NULL)); | |
1970 | } else { | |
1971 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | |
1972 | &dmar_hp_add_drhd, NULL); | |
1973 | } | |
1974 | ||
1975 | return ret; | |
1976 | } | |
1977 | ||
d35165a9 JL |
1978 | static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl, |
1979 | void *context, void **retval) | |
1980 | { | |
1981 | acpi_handle *phdl = retval; | |
1982 | ||
1983 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { | |
1984 | *phdl = handle; | |
1985 | return AE_CTRL_TERMINATE; | |
1986 | } | |
1987 | ||
1988 | return AE_OK; | |
1989 | } | |
1990 | ||
6b197249 JL |
1991 | static int dmar_device_hotplug(acpi_handle handle, bool insert) |
1992 | { | |
1993 | int ret; | |
d35165a9 JL |
1994 | acpi_handle tmp = NULL; |
1995 | acpi_status status; | |
6b197249 JL |
1996 | |
1997 | if (!dmar_in_use()) | |
1998 | return 0; | |
1999 | ||
d35165a9 JL |
2000 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { |
2001 | tmp = handle; | |
2002 | } else { | |
2003 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | |
2004 | ACPI_UINT32_MAX, | |
2005 | dmar_get_dsm_handle, | |
2006 | NULL, NULL, &tmp); | |
2007 | if (ACPI_FAILURE(status)) { | |
2008 | pr_warn("Failed to locate _DSM method.\n"); | |
2009 | return -ENXIO; | |
2010 | } | |
2011 | } | |
2012 | if (!tmp) | |
6b197249 JL |
2013 | return 0; |
2014 | ||
2015 | down_write(&dmar_global_lock); | |
2016 | if (insert) | |
d35165a9 | 2017 | ret = dmar_hotplug_insert(tmp); |
6b197249 | 2018 | else |
d35165a9 | 2019 | ret = dmar_hotplug_remove(tmp); |
6b197249 JL |
2020 | up_write(&dmar_global_lock); |
2021 | ||
2022 | return ret; | |
2023 | } | |
2024 | ||
2025 | int dmar_device_add(acpi_handle handle) | |
2026 | { | |
2027 | return dmar_device_hotplug(handle, true); | |
2028 | } | |
2029 | ||
2030 | int dmar_device_remove(acpi_handle handle) | |
2031 | { | |
2032 | return dmar_device_hotplug(handle, false); | |
2033 | } |
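/*
 * A hypothetical caller sketch (not part of this file): an ACPI hotplug
 * notifier that has resolved the handle of a hot-plugged unit would
 * drive the two entry points above roughly as follows; both return 0
 * immediately when DMAR is not in use.
 */
#if 0	/* example only */
static void example_acpi_hotplug(acpi_handle handle, bool add)
{
	int ret = add ? dmar_device_add(handle)
		      : dmar_device_remove(handle);

	if (ret)
		pr_warn("DMAR unit hot-%s failed: %d\n",
			add ? "add" : "remove", ret);
}
#endif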