/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <[email protected]>
 * Author: Shaohua Li <[email protected]>
 * Author: Anil S Keshavamurthy <[email protected]>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time, and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL units at the tail, so that a scan of the list
         * will find them at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

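/*
 * Resolve a single ACPI device-scope entry to a pci_dev.  The scope header
 * names the starting bus; it is followed by an array of acpi_dmar_pci_path
 * entries, each giving the (device, function) of the next hop.  The walk
 * below descends bridge by bridge (via pdev->subordinate) until the last
 * path entry, which should name the endpoint or bridge the scope describes.
 */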
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR table;
                 * just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                               PREFIX "Device scope bus [%d] not found\n",
                               scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                               "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                               segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                       segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) ||
            (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
             !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                       "Device scope type does not match for %s\n",
                       pci_name(pdev));
                /* drop the reference only after the name has been printed */
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}

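/*
 * Parse all device-scope entries in [start, end).  Two passes are made:
 * the first only counts the entries we can handle (endpoints and bridges)
 * so the pci_dev pointer array can be sized, the second resolves each
 * entry via dmar_parse_one_dev_scope().
 */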
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                               "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

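/*
 * dmar_parse_one_drhd() above runs while the DMAR table itself is being
 * parsed; device-scope entries are resolved later, from
 * dmar_dev_scope_init(), by this helper, once PCI devices can actually be
 * looked up (dmar_parse_one_dev_scope() relies on pci_find_bus() and
 * pci_get_slot()).  INCLUDE_ALL units carry no explicit scope and are
 * left alone.
 */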
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                   ((void *)drhd) + drhd->header.length,
                                   &dmaru->devices_cnt, &dmaru->devices,
                                   drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                                   ((void *)rmrr) + rmrr->header.length,
                                   &rmrru->devices_cnt, &rmrru->devices,
                                   rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = (struct acpi_dmar_hardware_unit *)header;
                printk(KERN_INFO PREFIX
                       "DRHD (flags: 0x%08x) base: 0x%016Lx\n",
                       drhd->flags, (unsigned long long)drhd->address);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = (struct acpi_dmar_reserved_memory *)header;

                printk(KERN_INFO PREFIX
                       "RMRR base: 0x%016Lx end: 0x%016Lx\n",
                       (unsigned long long)rmrr->base_address,
                       (unsigned long long)rmrr->end_address);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Map the table again here: the earlier dmar_tbl mapping done by
         * dmar_table_detect() may have used a temporary fixed mapping.
         */
        dmar_table_detect();

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
               dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_rmrr(entry_header);
#endif
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                               "Unknown DMAR structure type\n");
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

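/*
 * Find the DRHD unit that covers @dev: either a unit whose explicit device
 * scope contains the device (or one of its parent bridges), or, failing
 * that, the INCLUDE_ALL unit for the device's PCI segment.
 * dmar_register_drhd_unit() keeps INCLUDE_ALL units at the tail of the
 * list, so explicitly scoped units are always considered first.
 */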
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_DMAR
        {
                struct dmar_rmrr_unit *rmrr, *rmrr_n;
                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
                }
        }
#endif

        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

#ifdef CONFIG_DMAR
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
        parse_ioapics_under_ir();
#endif
        return 0;
}

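/*
 * detect_intel_iommu() only checks whether a DMAR ACPI table is present;
 * it is used early to decide whether the Intel IOMMU (and, with
 * CONFIG_INTR_REMAP, queued invalidation for x2apic) will be needed.
 * dmar_tbl is reset to NULL at the end because this early mapping may be
 * temporary; parse_dmar_table() maps the table again later.
 */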
void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();

        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we will disable DMA-remapping when interrupt
                 * remapping is enabled.
                 * When support for queued invalidation for IOTLB invalidation
                 * is added, we will not need this any more.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
                if (ret && !no_iommu && !iommu_detected && !swiotlb &&
                    !dmar_disabled)
                        iommu_detected = 1;
#endif
        }
        dmar_tbl = NULL;
}

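/*
 * Map a DRHD unit's register set and populate a struct intel_iommu for it:
 * read the capability and extended-capability registers, work out a valid
 * adjusted guest address width (agaw), and grow the mapping if the fault
 * and IOTLB register offsets show the registers span more than one page.
 */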
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto error;
        }
        iommu->agaw = agaw;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                 (unsigned long long)drhd->reg_base_addr,
                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                 (unsigned long long)iommu->cap,
                 (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;
error:
        /* don't leak the register mapping if a later step failed */
        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

#ifdef CONFIG_DMAR
        free_dmar_iommu(iommu);
#endif

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
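/*
 * Each submission takes two slots in the ring: the caller's descriptor and
 * a wait descriptor programmed to write QI_DONE into the corresponding
 * desc_status[] entry.  After the hardware tail register is bumped, we
 * spin (dropping the queue lock so others can make progress) until the
 * hardware completes the wait descriptor and the status word flips to
 * QI_DONE.
 */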
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return;

        hw = qi->desc;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        spin_lock(&iommu->register_lock);
        /*
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
        spin_unlock(&iommu->register_lock);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave interrupts disabled, to prevent interrupt
                 * context from queueing another cmd while a cmd is already
                 * submitted and waiting for completion on this cpu.  This
                 * avoids a deadlock where the interrupt context could wait
                 * indefinitely for free slots in the queue.
                 */
                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                     u64 type, int non_present_entry_flush)
{
        struct qi_desc desc;

        if (non_present_entry_flush) {
                if (!cap_caching_mode(iommu->cap))
                        return 1;
                else
                        did = 0;
        }

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);

        return 0;
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                   unsigned int size_order, u64 type,
                   int non_present_entry_flush)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (non_present_entry_flush) {
                if (!cap_caching_mode(iommu->cap))
                        return 1;
                else
                        did = 0;
        }

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);

        return 0;
}
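
/*
 * Illustrative only (callers live outside this file): the flush helpers
 * above are expected to be passed a granularity taken from the
 * DMA_CCMD_*_INVL / DMA_TLB_*_FLUSH definitions, e.g. something like
 *
 *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 *
 * to invalidate all IOTLB entries for domain @did.
 */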

/*
 * Enable the Queued Invalidation interface.  This is required to support
 * interrupt-remapping; it is also used by DMA-remapping, where it replaces
 * register-based IOTLB invalidation.
 */
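/*
 * Setup sequence: allocate one zeroed page for the descriptor ring and a
 * status array, zero the tail register (DMAR_IQT_REG), point DMAR_IQA_REG
 * at the ring, then set the QIE bit in the global command register and
 * wait for the QIES status bit to confirm the hardware has enabled the
 * interface.
 */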
int dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 cmd, sts;
        unsigned long flags;
        struct q_inval *qi;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * queued invalidation is already setup and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
        if (!qi->desc) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        /* zero the status array so unused slots read as QI_FREE, not stale data */
        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        spin_lock_irqsave(&iommu->register_lock, flags);
        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        cmd = iommu->gcmd | DMA_GCMD_QIE;
        iommu->gcmd |= DMA_GCMD_QIE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        return 0;
}