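/*
 * Intel VT-d interrupt remapping support: allocation and maintenance of
 * interrupt remap table entries (IRTEs) and enable/disable of the remapping
 * hardware on each DMAR unit (DRHD).
 */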
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

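/*
 * Per-IRQ bookkeeping: which IOMMU owns the IRQ's remap table entry, the
 * base index of that entry, the sub-handle used within a multi-entry
 * allocation, and the allocation mask (log2 of the number of contiguous
 * entries).
 */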
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
};

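/*
 * Lookup and allocation of the per-IRQ irq_2_iommu state.  With
 * CONFIG_GENERIC_HARDIRQS the state hangs off the irq_desc and is allocated
 * on demand on the requested NUMA node; otherwise a static NR_IRQS-sized
 * array is used.
 */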
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

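/*
 * Return the irq_2_iommu state for @irq if an IOMMU has been bound to it,
 * or NULL if the IRQ is not (yet) remapped.
 */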
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

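/* Copy the hardware IRTE currently backing @irq into @entry. */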
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

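/*
 * Reserve @count IRTEs in @iommu's remap table for @irq and record the base
 * index in the IRQ's irq_2_iommu state.  @count is rounded up to a power of
 * two so that the whole block can later be invalidated with a single masked
 * IEC flush (irte_mask holds log2 of the block size).  Returns the starting
 * index, or -1 on failure.  Callers (e.g. the x86 IO-APIC/MSI setup code)
 * are expected to fill the reserved entries via modify_irte() and release
 * them with free_irte().
 */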
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

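/*
 * Queue an Interrupt Entry Cache invalidation for @index (with @mask giving
 * how many entries the invalidation covers) and wait for it to complete.
 */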
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

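/*
 * The next three helpers manage the (index, sub_handle) pair that ties an
 * IRQ to a slot inside a previously allocated IRTE block, e.g. one vector
 * of a multi-vector MSI allocation.
 */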
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_2_iommu(irq)->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

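/*
 * Update the hardware IRTE for @irq with new contents, then flush the cache
 * line and the IOMMU's interrupt entry cache so the change takes effect.
 * flush_irte() performs only the IEC invalidation for the IRQ's whole IRTE
 * block.
 */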
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)&irte->low, irte_modified->low);
        set_64bit((unsigned long *)&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

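/*
 * Map an IO-APIC id or a PCI device to the interrupt-remapping hardware
 * (DRHD/IOMMU) that covers it.
 */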
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

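/*
 * Tear down an IRQ's remap state: zero the whole IRTE block (only when the
 * IRQ owns it, i.e. sub_handle == 0), flush the interrupt entry cache, and
 * clear the irq_2_iommu bookkeeping.
 */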
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit((unsigned long *)&entry->low, 0);
                set_64bit((unsigned long *)&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

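/*
 * Point the hardware at the remap table and turn remapping on: program
 * DMAR_IRTA_REG with the table's physical address, size and x2APIC mode,
 * latch it with SIRTP, globally invalidate the interrupt entry cache, and
 * finally set the IRE bit.
 */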
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

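/*
 * Allocate the per-IOMMU remap table (INTR_REMAP_PAGE_ORDER pages, zeroed)
 * and hand it to the hardware via iommu_set_intr_remapping().
 */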
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

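/*
 * Interrupt remapping is usable only if it was not disabled on the command
 * line ("nointremap") and every DRHD advertises IR support in its extended
 * capabilities.
 */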
int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

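/*
 * Boot-time bring-up: for each DRHD, clear stale faults and disable any
 * remapping/queued-invalidation state left over from before OS handover,
 * verify EIM (extended interrupt mode) support if requested, enable queued
 * invalidation, and then set up the remap table on every IR-capable unit.
 */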
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}
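
/*
 * Walk a DRHD's ACPI device scope and record which IO-APICs (by enumeration
 * id) are covered by this remapping unit.
 */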
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}
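
/*
 * Tear-down and re-enable paths used when the platform IOMMUs are switched
 * off and later brought back (for example around suspend/resume): remapping
 * is disabled on every IR-capable unit, and reenable_intr_remapping()
 * reprograms the previously allocated tables once queued invalidation has
 * been restarted.
 */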
void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu.*/
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}