/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen ([email protected])
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "pci.h"
#include "msi.h"

static int pci_msi_enable = 1;

/* Arch hooks */

#ifndef arch_msi_check_device
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif

#ifndef arch_setup_msi_irqs
# define arch_setup_msi_irqs default_setup_msi_irqs
# define HAVE_DEFAULT_MSI_SETUP_IRQS
#endif

#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs()
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}
#endif

#ifndef arch_teardown_msi_irqs
# define arch_teardown_msi_irqs default_teardown_msi_irqs
# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
#endif

#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
void default_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int i, nvec;
		if (entry->irq == 0)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
		for (i = 0; i < nvec; i++)
			arch_teardown_msi_irq(entry->irq + i);
	}
}
#endif

static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	BUG_ON(!pos);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

static inline __attribute_const__ u32 msi_capable_mask(u16 control)
{
	return msi_mask((control >> 1) & 7);
}

static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
{
	return msi_mask((control >> 4) & 7);
}

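/*
 * Worked example for the helpers above (illustrative): the Multiple Message
 * Capable/Enable fields of the MSI control word encode log2 of the vector
 * count, so a device advertising 8 vectors has MMC == 3 and
 * msi_capable_mask() returns msi_mask(3) == (1 << (1 << 3)) - 1 == 0xff,
 * i.e. one mask bit per possible vector.  msi_mask() saturates at 0xffffffff
 * for x >= 5 so we never shift by the full width of the type.
 */
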
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (!desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __msi_mask_irq(desc, mask, flag);
}

/*
 * This internal function does not flush PCI writes to the device.
 * All users must ensure that they read from the device before either
 * assuming that the device state is up to date, or returning out of this
 * file.  This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;
	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;
	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	writel(mask_bits, desc->mask_base + offset);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __msix_mask_irq(desc, flag);
}

static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* Flush write to device */
	} else {
		unsigned offset = data->irq - desc->dev->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}

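/*
 * mask_msi_irq() and unmask_msi_irq() below are deliberately non-static:
 * architecture code typically wires them up as the ->irq_mask()/->irq_unmask()
 * callbacks of its MSI irq_chip (the irq_chip itself lives in arch code, not
 * in this file).
 */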
void mask_msi_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}

void unmask_msi_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}

void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	BUG_ON(entry->dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__read_msi_msg(entry, msg);
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	/* Assert that the cache is valid, assuming that
	 * valid messages are not all-zeroes. */
	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
		 entry->msg.data));

	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}

void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	if (entry->dev->current_state != PCI_D0) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 msgctl;

		pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, msi_control_reg(pos), msgctl);

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
	}
	entry->msg = *msg;
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__write_msi_msg(entry, msg);
}

static void free_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int i, nvec;
		if (!entry->irq)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
		for (i = 0; i < nvec; i++)
			BUG_ON(irq_has_action(entry->irq + i));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}
}

static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;

	return desc;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, pos, 0);
	write_msi_msg(dev->irq, &entry->msg);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);

	/* route the table */
	pci_intx_for_msi(dev, 0);
	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);

	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msix_mask_irq(entry, entry->masked);
	}

	control &= ~PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}

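/*
 * pci_restore_msi_state() reprograms the cached MSI/MSI-X messages and mask
 * state into the device.  It is typically invoked from the PCI core's
 * state-restore path (e.g. on resume after a power transition); drivers do
 * not normally need to call it directly.
 */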
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI irq.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;
	unsigned mask;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	msi_set_enable(dev, pos, 0);	/* Disable MSI during set up */

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry(dev);
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.is_msix = 0;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;

	entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
	/* All MSIs are unmasked by default, Mask them all */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
	mask = msi_capable_mask(control);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	*/
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, pos, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
							unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	u8 bir;

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
				void __iomem *base, struct msix_entry *entries,
				int nvec)
{
	struct msi_desc *entry;
	int i;

	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry(dev);
		if (!entry) {
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* Not enough memory. Don't try again. */
			return -ENOMEM;
		}

		entry->msi_attrib.is_msix = 1;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = entries[i].entry;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	return 0;
}

static void msix_program_entries(struct pci_dev *dev,
					struct msix_entry *entries)
{
	struct msi_desc *entry;
	int i = 0;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

		entries[i].vector = entry->irq;
		irq_set_msi_desc(entry->irq, entry);
		entry->masked = readl(entry->mask_base + offset);
		msix_mask_irq(entry, 1);
		i++;
	}
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs.  A return of zero indicates the successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates an error.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	int pos, ret;
	u16 control;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);

	/* Ensure MSI-X is disabled while it is set up */
	control &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);

	/* Request & Map MSI-X table region */
	base = msix_map_region(dev, pos, multi_msix_capable(control));
	if (!base)
		return -ENOMEM;

	ret = msix_setup_entries(dev, pos, base, entries, nvec);
	if (ret)
		return ret;

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto error;

	/*
	 * Some devices require MSI-X to be enabled before we can touch the
	 * MSI-X registers.  We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);

	msix_program_entries(dev, entries);

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;

	control &= ~PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);

	return 0;

error:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

	free_msi_irqs(dev);

	return ret;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi_block - configure device's MSI capability structure
 * @dev: device to configure
 * @nvec: number of interrupts to configure
 *
 * Allocate IRQs for a device with the MSI capability.
 * This function returns a negative errno if an error occurs.  If it
 * is unable to allocate the number of interrupts requested, it returns
 * the number of interrupts it might be able to allocate.  If it successfully
 * allocates at least the number of interrupts requested, it returns 0 and
 * updates the @dev's irq member to the lowest new interrupt number; the
 * other interrupt numbers allocated to this device are consecutive.
 */
int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
	int status, pos, maxvec;
	u16 msgctl;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
	maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
	if (nvec > maxvec)
		return maxvec;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}

	status = msi_capability_init(dev, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi_block);

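/*
 * Usage sketch for pci_enable_msi_block() above (illustrative, not taken from
 * this file): a driver asking for several MSI vectors can treat a positive
 * return value as a hint and retry with a smaller request, roughly:
 *
 *	int rc, nvec = 4;			(4 is only an example)
 *	while (nvec >= 1) {
 *		rc = pci_enable_msi_block(pdev, nvec);
 *		if (rc == 0)
 *			break;			(pdev->irq .. pdev->irq + nvec - 1 are valid)
 *		if (rc < 0)
 *			return rc;		(hard failure - fall back to legacy INTx)
 *		nvec = min(nvec - 1, rc);	(always retry with a smaller count)
 *	}
 */
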
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;
	u16 ctrl;
	unsigned pos;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	BUG_ON(list_empty(&dev->msi_list));
	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
	pos = desc->msi_attrib.pos;

	msi_set_enable(dev, pos, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked as its initial state */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
	mask = msi_capable_mask(ctrl);
	/* Keep cached state to be restored */
	__msi_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = desc->msi_attrib.default_irq;
}

void pci_disable_msi(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/**
 * pci_msix_table_size - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 */
int pci_msix_table_size(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	return multi_msix_capable(control);
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of irqs requested by its device driver when asking for MSI-X mode
 * to be enabled on the hardware device function.  A return of zero indicates
 * the successful configuration of the MSI-X capability structure with newly
 * allocated MSI-X irqs.  A return of < 0 indicates a failure.  A return of
 * > 0 indicates that the request exceeds the number of irqs or MSI-X vectors
 * available; the driver should use the returned value to re-send its request.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, nr_entries;
	int i, j;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	nr_entries = pci_msix_table_size(dev);
	if (nvec > nr_entries)
		return nr_entries;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested an MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
			 "(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);

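/*
 * Usage sketch for pci_enable_msix() above (illustrative, not taken from this
 * file): a driver fills a struct msix_entry array with the table entries it
 * wants; on a positive return it may retry with the smaller count, roughly:
 *
 *	struct msix_entry entries[4];		(4 is only an example)
 *	int i, rc, nvec = 4;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *	rc = pci_enable_msix(pdev, entries, nvec);
 *	if (rc > 0)
 *		rc = pci_enable_msix(pdev, entries, rc);	(retry with the hint)
 *	if (rc == 0)
 *		request_irq() may now be called on each entries[i].vector
 */
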
void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	/* Return the device with MSI-X masked as its initial state */
	list_for_each_entry(entry, &dev->msi_list, list) {
		/* Keep cached states to be restored */
		__msix_mask_irq(entry, 1);
	}

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is being
 * hot-removed.  All previously assigned MSI/MSI-X irqs, if allocated
 * for this device function, are reclaimed to an unused state, and may
 * be used again later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled || dev->msix_enabled)
		free_msi_irqs(dev);
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

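/*
 * pci_msi_init_pci_dev() below is expected to be called by the PCI core while
 * a pci_dev is being set up during enumeration, so that dev->msi_list is
 * always a valid (initially empty) list before any MSI/MSI-X operation
 * touches it.
 */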
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}