/*
 * File: msi.c
 * Purpose: PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen ([email protected])
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS];
#endif

static struct msi_ops *msi_ops;

int
msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}

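/*
 * msi_ops, registered above via msi_register(), supplies the arch-specific
 * hooks used throughout this file: ->setup() composes the address/data pair
 * for a vector, ->target() redirects a vector at a destination CPU, and
 * ->teardown() undoes whatever ->setup() allocated.
 */
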
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
	memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
}

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache",
			NR_IRQS * sizeof(struct msi_desc),
			0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

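/*
 * Note on entry->mask_base, used by msi_set_mask_bit() below: for MSI it
 * holds the config-space offset of the mask bits register (stored as a
 * pointer-sized integer), while for MSI-X it is the ioremapped base of the
 * device's MSI-X table.
 */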
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			return;

		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
			&address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
			address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset_hi =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};

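/*
 * irq_handler_init() below picks one of the three interrupt types above
 * for a vector: msix_irq_type for MSI-X, and one of the two MSI variants
 * depending on whether the device implements mask bits.
 */
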
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that vectors allocated for MSI
	 * are unique across drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assigning */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}

static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed. MSI disabled.\n");
		return status;
	}

	if (!msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

#ifndef CONFIG_X86_IO_APIC
	irq_vector[0] = FIRST_DEVICE_VECTOR;
#endif

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].handler = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].handler = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}

static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}

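/*
 * The counters updated by pci_scan_msi_device() feed the vector budgeting
 * in pci_enable_msi()/pci_enable_msix() below: nr_reserved_vectors keeps one
 * vector in reserve per MSI-capable function, while nr_msix_devices is used
 * to divide the remaining free vectors among MSI-X capable devices.
 */
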
#ifdef CONFIG_PM
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	vector = head = dev->irq;
	while (head != tail) {
		int j;
		void __iomem *base;
		struct msi_desc *entry;

		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		entry->address_lo_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		entry->address_hi_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		entry->data_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	void __iomem *base;
	int j;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		writel(entry->address_lo_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(entry->address_hi_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(entry->data_save,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif

static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int pos, vector = dev->irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
	if (status < 0)
		return status;

	pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address_hi);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), data);
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), data);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* build a bitmask with one bit per message the function
		   advertises, then set them all in the mask register */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI vector, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI vector; a non-zero return indicates
 * failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_vector;
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X vectors. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated vectors; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int status;
	int vector, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		vector = get_msi_vector(dev);
		if (vector < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, vector,
					&address_hi,
					&address_lo,
					&data);
		if (status < 0)
			break;

		writel(address_lo,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address_hi,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI vector when its software driver requests MSI mode to be enabled on
 * the hardware device function. A return of zero indicates successful
 * setup of entry zero with the new MSI vector; a non-zero return indicates
 * failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status = -EINVAL;
	u16 control;

	if (!pci_msi_enable || !dev)
		return status;

	if (dev->no_msi)
		return status;

	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
		return -EINVAL;

	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 0;	/* Already in MSI mode */

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Lookup Success */
		unsigned long flags;

		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			msi_desc[dev->irq]->msi_attrib.state = 0;
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			status = msi_register_init(dev, msi_desc[dev->irq]);
			if (status == 0)
				enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return status;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether driver already requested MSI-X vectors */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X vectors assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0;	/* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
					PCI_CAP_ID_MSI);
	}
}

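/*
 * Illustrative driver-side use of the two calls above (sketch only; pdev,
 * foo_interrupt and foo_dev are hypothetical driver names):
 *
 *	if (pci_enable_msi(pdev) == 0) {
 *		// dev->irq now carries the allocated MSI vector
 *		if (request_irq(pdev->irq, foo_interrupt, 0, "foo", foo_dev))
 *			pci_disable_msi(pdev);
 *	}
 *	...
 *	free_irq(pdev->irq, foo_dev);	// must come before pci_disable_msi()
 *	pci_disable_msi(pdev);		// restores the pin-assertion vector in dev->irq
 */
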
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector) {
			/*
			 * Detect last MSI-X vector to be released.
			 * Release the MSI-X memory-mapped table.
			 */
#if 0
			int pos, nr_entries;
			unsigned long phys_addr;
			u32 table_offset;
			u16 control;
			u8 bir;

			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			nr_entries = multi_msix_capable(control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
			phys_addr = pci_resource_start(dev, bir) + table_offset;
			/*
			 * FIXME!  and what did you want to do with phys_addr?
			 */
#endif
			iounmap(base);
		}
	}

	return 0;
}

static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel(readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel((readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested vectors when its software driver requests MSI-X mode
 * to be enabled on the hardware device function. A return of zero indicates
 * successful configuration of the MSI-X capability structure with the newly
 * allocated MSI-X vectors. A return of < 0 indicates a failure. A return of
 * > 0 indicates that the driver requested more vectors than are available;
 * the driver should retry its request with the returned value.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev || !entries)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;		/* Already in MSI-X mode */

	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* Lookup Success */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previous-assigned */
			dev->irq = temp;
			return nr_entries;
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested an MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vector resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default, so that no single MSI-X driver can take all available
	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}

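/*
 * Illustrative MSI-X usage from a driver (sketch only; pdev, FOO_NVEC and
 * foo_request_irqs are hypothetical driver names):
 *
 *	struct msix_entry entries[FOO_NVEC];
 *	int i, err;
 *
 *	for (i = 0; i < FOO_NVEC; i++)
 *		entries[i].entry = i;		// MSI-X table slots wanted
 *	err = pci_enable_msix(pdev, entries, FOO_NVEC);
 *	if (err > 0) {
 *		// fewer vectors available: retry with at most 'err' entries
 *	} else if (err == 0) {
 *		// entries[i].vector now holds the vector for request_irq()
 *		foo_request_irqs(pdev, entries, FOO_NVEC);
 *	}
 */
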
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0;	/* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);

		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal, when the device function is hot-removed.
 * All MSI/MSI-X vectors previously assigned to this device function are
 * reclaimed to the unused state and may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force release of the MSI-X memory-mapped table */
#if 0
			unsigned long phys_addr;
			u32 table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
			phys_addr = pci_resource_start(dev, bir) + table_offset;
			/*
			 * FIXME!  and what did you want to do with phys_addr?
			 */
#endif
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);