| author | Matthew Wilcox <willy@linux.intel.com> | 2009-03-17 08:54:10 -0400 |
|---|---|---|
| committer | Jesse Barnes <jbarnes@virtuousgeek.org> | 2009-03-20 13:48:14 -0400 |
| commit | 1c8d7b0a562da06d3ebe83f01b1ed553205d1ae4 (patch) | |
| tree | 79c84432f5aed5a08b3bef262a10d933daae6a9b /drivers/pci/msi.c | |
| parent | f2440d9acbe866b917b16cc0f927366341ce9215 (diff) | |
PCI MSI: Add support for multiple MSI
Add a new API, pci_enable_msi_block(), to allow drivers to request
multiple MSI vectors, and reimplement pci_enable_msi() in terms of
pci_enable_msi_block(). Ensure that the architecture back ends don't
have to know about multiple MSI.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
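For context, this is how a driver might consume the new interface, based on the return-value contract documented in the pci_enable_msi_block() kerneldoc below: 0 on success (with dev->irq holding the lowest of nvec consecutive vectors), a negative errno on hard failure, or a positive count hinting how many vectors might be allocatable. This is a hedged sketch, not part of the patch; the foo_* name and the retry policy are illustrative only.

```c
#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Hypothetical driver helper (not part of this patch): try to enable a
 * block of 'nvec' MSI vectors, retrying with the hint returned on partial
 * failure.  On success the vectors are pdev->irq .. pdev->irq + nvec - 1.
 */
static int foo_request_vectors(struct pci_dev *pdev, int nvec)
{
        int rc;

        while (nvec >= 1) {
                rc = pci_enable_msi_block(pdev, nvec);
                if (rc == 0)
                        return nvec;    /* got nvec consecutive vectors */
                if (rc < 0)
                        return rc;      /* no MSI capability, MSI-X active, ... */
                /*
                 * rc > 0 is a hint: that many vectors might work.  Guard
                 * against a non-decreasing hint so the loop terminates.
                 */
                nvec = rc < nvec ? rc : nvec - 1;
        }
        return -ENOSPC;
}
```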
Diffstat (limited to 'drivers/pci/msi.c')

 drivers/pci/msi.c | 91
 1 file changed, 64 insertions(+), 27 deletions(-)
```diff
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index adcc78242571..6f2e6295e773 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -40,6 +40,13 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         struct msi_desc *entry;
         int ret;
 
+        /*
+         * If an architecture wants to support multiple MSI, it needs to
+         * override arch_setup_msi_irqs()
+         */
+        if (type == PCI_CAP_ID_MSI && nvec > 1)
+                return 1;
+
         list_for_each_entry(entry, &dev->msi_list, list) {
                 ret = arch_setup_msi_irq(dev, entry);
                 if (ret < 0)
```
```diff
@@ -58,8 +65,12 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
         struct msi_desc *entry;
 
         list_for_each_entry(entry, &dev->msi_list, list) {
-                if (entry->irq != 0)
-                        arch_teardown_msi_irq(entry->irq);
+                int i, nvec;
+                if (entry->irq == 0)
+                        continue;
+                nvec = 1 << entry->msi_attrib.multiple;
+                for (i = 0; i < nvec; i++)
+                        arch_teardown_msi_irq(entry->irq + i);
         }
 }
 #endif
```
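The teardown loop above recovers the block size with `1 << entry->msi_attrib.multiple` (and msi_free_irqs() later in this patch does the same), i.e. the `multiple` field stores log2 of the number of allocated vectors, mirroring the hardware's log2-encoded Multiple Message Enable field. A tiny helper, purely for illustration and not part of the patch, makes that encoding explicit:

```c
/*
 * Illustration only: msi_attrib.multiple holds log2 of the vector block
 * size, so the count is recovered by shifting (as the loops above do).
 */
static unsigned int msi_block_size(const struct msi_desc *entry)
{
        return 1u << entry->msi_attrib.multiple;
}
```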
```diff
@@ -163,7 +174,8 @@ static void msi_set_mask_bit(unsigned irq, u32 flag)
                 msix_mask_irq(desc, flag);
                 readl(desc->mask_base);         /* Flush write to device */
         } else {
-                msi_mask_irq(desc, 1, flag);
+                unsigned offset = irq - desc->dev->irq;
+                msi_mask_irq(desc, 1 << offset, flag << offset);
         }
 }
 
```
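With a block of vectors, dev->irq is vector 0 and the others follow consecutively, so the per-vector mask bit for a given irq is selected by its offset within the block; that is what the new `irq - desc->dev->irq` computation does. A sketch of that mapping (illustrative helper under that assumption, not code from the patch):

```c
/*
 * Illustration only: map an irq within an MSI block to its per-vector mask
 * bit.  Assumes the layout introduced by this patch: desc->dev->irq is
 * vector 0 and subsequent vectors are consecutive irq numbers.
 */
static u32 msi_vector_mask_bit(const struct msi_desc *desc, unsigned int irq)
{
        unsigned int offset = irq - desc->dev->irq;     /* 0 .. block size - 1 */

        return 1u << offset;
}
```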
```diff
@@ -229,6 +241,12 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
         } else {
                 struct pci_dev *dev = entry->dev;
                 int pos = entry->msi_attrib.pos;
+                u16 msgctl;
+
+                pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
+                msgctl &= ~PCI_MSI_FLAGS_QSIZE;
+                msgctl |= entry->msi_attrib.multiple << 4;
+                pci_write_config_word(dev, msi_control_reg(pos), msgctl);
 
                 pci_write_config_dword(dev, msi_lower_address_reg(pos),
                                         msg->address_lo);
```
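The new writes to the message control register use two log2-encoded fields: PCI_MSI_FLAGS_QSIZE (Multiple Message Enable, bits 6:4, hence the `<< 4` above and in the restore path) and, in pci_enable_msi_block() below, PCI_MSI_FLAGS_QMASK (Multiple Message Capable, bits 3:1, hence the `>> 1`). The helpers below are a sketch of that encoding, not code from the patch:

```c
/* Illustration only: decode/encode the log2 vector-count fields of the MSI
 * Message Control register manipulated by this patch. */
static unsigned int msi_max_vectors(u16 msgctl)
{
        /* Multiple Message Capable, bits 3:1 (read-only) */
        return 1u << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
}

static u16 msi_set_block_size(u16 msgctl, u8 multiple)
{
        /* Multiple Message Enable, bits 6:4 (written by the PCI core) */
        msgctl &= ~PCI_MSI_FLAGS_QSIZE;
        return msgctl | (multiple << 4);
}
```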
```diff
@@ -291,7 +309,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
         pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
         msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
         control &= ~PCI_MSI_FLAGS_QSIZE;
-        control |= PCI_MSI_FLAGS_ENABLE;
+        control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
         pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
 }
 
```
```diff
@@ -332,13 +350,15 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
  *
- * Setup the MSI capability structure of device function with a single
- * MSI irq, regardless of device function is capable of handling
- * multiple messages. A return of zero indicates the successful setup
- * of an entry zero with the new MSI irq or non-zero for otherwise.
- **/
-static int msi_capability_init(struct pci_dev *dev)
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI irq. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec)
 {
         struct msi_desc *entry;
         int pos, ret;
```
```diff
@@ -371,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev)
         list_add_tail(&entry->list, &dev->msi_list);
 
         /* Configure MSI capability structure */
-        ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
+        ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
         if (ret) {
                 msi_free_irqs(dev);
                 return ret;
```
```diff
@@ -524,35 +544,48 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
 }
 
 /**
- * pci_enable_msi - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
+ * pci_enable_msi_block - configure device's MSI capability structure
+ * @dev: device to configure
+ * @nvec: number of interrupts to configure
  *
- * Setup the MSI capability structure of device function with
- * a single MSI irq upon its software driver call to request for
- * MSI mode enabled on its hardware device function. A return of zero
- * indicates the successful setup of an entry zero with the new MSI
- * irq or non-zero for otherwise.
- **/
-int pci_enable_msi(struct pci_dev* dev)
+ * Allocate IRQs for a device with the MSI capability.
+ * This function returns a negative errno if an error occurs. If it
+ * is unable to allocate the number of interrupts requested, it returns
+ * the number of interrupts it might be able to allocate. If it successfully
+ * allocates at least the number of interrupts requested, it returns 0 and
+ * updates the @dev's irq member to the lowest new interrupt number; the
+ * other interrupt numbers allocated to this device are consecutive.
+ */
+int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
 {
-        int status;
+        int status, pos, maxvec;
+        u16 msgctl;
+
+        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+        if (!pos)
+                return -EINVAL;
+        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+        maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+        if (nvec > maxvec)
+                return maxvec;
 
-        status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
+        status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
         if (status)
                 return status;
 
         WARN_ON(!!dev->msi_enabled);
 
-        /* Check whether driver already requested for MSI-X irqs */
+        /* Check whether driver already requested MSI-X irqs */
         if (dev->msix_enabled) {
                 dev_info(&dev->dev, "can't enable MSI "
                         "(MSI-X already enabled)\n");
                 return -EINVAL;
         }
-        status = msi_capability_init(dev);
+
+        status = msi_capability_init(dev, nvec);
         return status;
 }
-EXPORT_SYMBOL(pci_enable_msi);
+EXPORT_SYMBOL(pci_enable_msi_block);
 
 void pci_msi_shutdown(struct pci_dev *dev)
 {
```
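The old pci_enable_msi() export disappears from this file; per the commit message it is reimplemented in terms of pci_enable_msi_block(). That change lives in a header outside this diffstat-limited view, so the following is only a sketch of the likely shape, an assumption rather than a quote of the patch:

```c
/* Sketch (assumption): the single-MSI entry point becomes a thin wrapper
 * around the new block interface, requesting exactly one vector. */
static inline int pci_enable_msi(struct pci_dev *dev)
{
        return pci_enable_msi_block(dev, 1);
}
```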
```diff
@@ -599,8 +632,12 @@ static int msi_free_irqs(struct pci_dev* dev)
         struct msi_desc *entry, *tmp;
 
         list_for_each_entry(entry, &dev->msi_list, list) {
-                if (entry->irq)
-                        BUG_ON(irq_has_action(entry->irq));
+                int i, nvec;
+                if (!entry->irq)
+                        continue;
+                nvec = 1 << entry->msi_attrib.multiple;
+                for (i = 0; i < nvec; i++)
+                        BUG_ON(irq_has_action(entry->irq + i));
         }
 
         arch_teardown_msi_irqs(dev);
```
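Since msi_free_irqs() now asserts that no handler is still attached to any vector in the block, a driver that used pci_enable_msi_block() must free every vector before disabling MSI. A hedged teardown sketch to mirror foo_request_vectors() above; the name and the nvec bookkeeping are the driver's own, not part of the patch:

```c
#include <linux/interrupt.h>

/*
 * Hypothetical teardown: release each vector in the block before
 * pci_disable_msi(), otherwise the BUG_ON in msi_free_irqs() fires.
 */
static void foo_release_vectors(struct pci_dev *pdev, int nvec, void *drvdata)
{
        int i;

        for (i = 0; i < nvec; i++)
                free_irq(pdev->irq + i, drvdata);
        pci_disable_msi(pdev);
}
```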