author     Paul Mackerras <paulus@samba.org>    2007-05-07 23:37:51 -0400
committer  Paul Mackerras <paulus@samba.org>    2007-05-07 23:37:51 -0400
commit     02bbc0f09c90cefdb2837605c96a66c5ce4ba2e1 (patch)
tree       04ef573cd4de095c500c9fc3477f4278c0b36300 /drivers/pci/msi.c
parent     7487a2245b8841c77ba9db406cf99a483b9334e9 (diff)
parent     5b94f675f57e4ff16c8fda09088d7480a84dcd91 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/pci/msi.c')
-rw-r--r--  drivers/pci/msi.c  398
1 file changed, 188 insertions(+), 210 deletions(-)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 435c1958a7b7..9e1321d0d5e6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -24,20 +24,8 @@
 #include "pci.h"
 #include "msi.h"
 
-static struct kmem_cache* msi_cachep;
-
 static int pci_msi_enable = 1;
 
-static int msi_cache_init(void)
-{
-        msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
-                                       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-        if (!msi_cachep)
-                return -ENOMEM;
-
-        return 0;
-}
-
 static void msi_set_enable(struct pci_dev *dev, int enable)
 {
         int pos;
@@ -68,6 +56,29 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
         }
 }
 
+static void msix_flush_writes(unsigned int irq)
+{
+        struct msi_desc *entry;
+
+        entry = get_irq_msi(irq);
+        BUG_ON(!entry || !entry->dev);
+        switch (entry->msi_attrib.type) {
+        case PCI_CAP_ID_MSI:
+                /* nothing to do */
+                break;
+        case PCI_CAP_ID_MSIX:
+        {
+                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+                readl(entry->mask_base + offset);
+                break;
+        }
+        default:
+                BUG();
+                break;
+        }
+}
+
 static void msi_set_mask_bit(unsigned int irq, int flag)
 {
         struct msi_desc *entry;
@@ -187,41 +198,28 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 void mask_msi_irq(unsigned int irq)
 {
         msi_set_mask_bit(irq, 1);
+        msix_flush_writes(irq);
 }
 
 void unmask_msi_irq(unsigned int irq)
 {
         msi_set_mask_bit(irq, 0);
+        msix_flush_writes(irq);
 }
 
-static int msi_free_irq(struct pci_dev* dev, int irq);
-
-static int msi_init(void)
-{
-        static int status = -ENOMEM;
-
-        if (!status)
-                return status;
+static int msi_free_irqs(struct pci_dev* dev);
 
-        status = msi_cache_init();
-        if (status < 0) {
-                pci_msi_enable = 0;
-                printk(KERN_WARNING "PCI: MSI cache init failed\n");
-                return status;
-        }
-
-        return status;
-}
 
 static struct msi_desc* alloc_msi_entry(void)
 {
         struct msi_desc *entry;
 
-        entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
+        entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
         if (!entry)
                 return NULL;
 
-        entry->link.tail = entry->link.head = 0;        /* single message */
+        INIT_LIST_HEAD(&entry->list);
+        entry->irq = 0;
         entry->dev = NULL;
 
         return entry;
@@ -256,7 +254,6 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 static void __pci_restore_msix_state(struct pci_dev *dev)
 {
         int pos;
-        int irq, head, tail = 0;
         struct msi_desc *entry;
         u16 control;
 
@@ -266,18 +263,15 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
         /* route the table */
         pci_intx(dev, 0);               /* disable intx */
         msix_set_enable(dev, 0);
-        irq = head = dev->first_msi_irq;
-        entry = get_irq_msi(irq);
-        pos = entry->msi_attrib.pos;
-        while (head != tail) {
-                entry = get_irq_msi(irq);
-                write_msi_msg(irq, &entry->msg);
-                msi_set_mask_bit(irq, entry->msi_attrib.masked);
 
-                tail = entry->link.tail;
-                irq = tail;
+        list_for_each_entry(entry, &dev->msi_list, list) {
+                write_msi_msg(entry->irq, &entry->msg);
+                msi_set_mask_bit(entry->irq, entry->msi_attrib.masked);
         }
 
+        BUG_ON(list_empty(&dev->msi_list));
+        entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+        pos = entry->msi_attrib.pos;
         pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
         control &= ~PCI_MSIX_FLAGS_MASKALL;
         control |= PCI_MSIX_FLAGS_ENABLE;
@@ -303,7 +297,7 @@ void pci_restore_msi_state(struct pci_dev *dev)
 static int msi_capability_init(struct pci_dev *dev)
 {
         struct msi_desc *entry;
-        int pos, irq;
+        int pos, ret;
         u16 control;
 
         msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
@@ -340,23 +334,21 @@ static int msi_capability_init(struct pci_dev *dev)
                                 msi_mask_bits_reg(pos, is_64bit_address(control)),
                                 maskbits);
         }
+        list_add(&entry->list, &dev->msi_list);
+
         /* Configure MSI capability structure */
-        irq = arch_setup_msi_irq(dev, entry);
-        if (irq < 0) {
-                kmem_cache_free(msi_cachep, entry);
-                return irq;
+        ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
+        if (ret) {
+                msi_free_irqs(dev);
+                return ret;
         }
-        entry->link.head = irq;
-        entry->link.tail = irq;
-        dev->first_msi_irq = irq;
-        set_irq_msi(irq, entry);
 
         /* Set MSI enabled bits */
         pci_intx(dev, 0);               /* disable intx */
         msi_set_enable(dev, 1);
         dev->msi_enabled = 1;
 
-        dev->irq = irq;
+        dev->irq = entry->irq;
         return 0;
 }
 
@@ -373,8 +365,8 @@ static int msi_capability_init(struct pci_dev *dev)
 static int msix_capability_init(struct pci_dev *dev,
                                 struct msix_entry *entries, int nvec)
 {
-        struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
-        int irq, pos, i, j, nr_entries, temp = 0;
+        struct msi_desc *entry;
+        int pos, i, j, nr_entries, ret;
         unsigned long phys_addr;
         u32 table_offset;
         u16 control;
@@ -413,44 +405,34 @@ static int msix_capability_init(struct pci_dev *dev,
                 entry->dev = dev;
                 entry->mask_base = base;
 
-                /* Configure MSI-X capability structure */
-                irq = arch_setup_msi_irq(dev, entry);
-                if (irq < 0) {
-                        kmem_cache_free(msi_cachep, entry);
-                        break;
-                }
-                entries[i].vector = irq;
-                if (!head) {
-                        entry->link.head = irq;
-                        entry->link.tail = irq;
-                        head = entry;
-                } else {
-                        entry->link.head = temp;
-                        entry->link.tail = tail->link.tail;
-                        tail->link.tail = irq;
-                        head->link.head = irq;
-                }
-                temp = irq;
-                tail = entry;
-
-                set_irq_msi(irq, entry);
+                list_add(&entry->list, &dev->msi_list);
         }
-        if (i != nvec) {
-                int avail = i - 1;
-                i--;
-                for (; i >= 0; i--) {
-                        irq = (entries + i)->vector;
-                        msi_free_irq(dev, irq);
-                        (entries + i)->vector = 0;
+
+        ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+        if (ret) {
+                int avail = 0;
+                list_for_each_entry(entry, &dev->msi_list, list) {
+                        if (entry->irq != 0) {
+                                avail++;
+                        }
                 }
+
+                msi_free_irqs(dev);
+
                 /* If we had some success report the number of irqs
                  * we succeeded in setting up.
                  */
-                if (avail <= 0)
-                        avail = -EBUSY;
+                if (avail == 0)
+                        avail = ret;
                 return avail;
         }
-        dev->first_msi_irq = entries[0].vector;
+
+        i = 0;
+        list_for_each_entry(entry, &dev->msi_list, list) {
+                entries[i].vector = entry->irq;
+                set_irq_msi(entry->irq, entry);
+                i++;
+        }
         /* Set MSI-X enabled bits */
         pci_intx(dev, 0);               /* disable intx */
         msix_set_enable(dev, 1);
@@ -460,21 +442,32 @@ static int msix_capability_init(struct pci_dev *dev,
 }
 
 /**
- * pci_msi_supported - check whether MSI may be enabled on device
+ * pci_msi_check_device - check whether MSI may be enabled on a device
  * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: how many MSIs have been requested ?
+ * @type: are we checking for MSI or MSI-X ?
  *
  * Look at global flags, the device itself, and its parent busses
- * to return 0 if MSI are supported for the device.
+ * to determine if MSI/-X are supported for the device. If MSI/-X is
+ * supported return 0, else return an error code.
  **/
-static
-int pci_msi_supported(struct pci_dev * dev)
+static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
 {
         struct pci_bus *bus;
+        int ret;
 
         /* MSI must be globally enabled and supported by the device */
         if (!pci_msi_enable || !dev || dev->no_msi)
                 return -EINVAL;
 
+        /*
+         * You can't ask to have 0 or less MSIs configured.
+         *  a) it's stupid ..
+         *  b) the list manipulation code assumes nvec >= 1.
+         */
+        if (nvec < 1)
+                return -ERANGE;
+
         /* Any bridge which does NOT route MSI transactions from it's
          * secondary bus to it's primary bus must set NO_MSI flag on
          * the secondary pci_bus.
@@ -485,6 +478,13 @@ int pci_msi_supported(struct pci_dev * dev)
                 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                         return -EINVAL;
 
+        ret = arch_msi_check_device(dev, nvec, type);
+        if (ret)
+                return ret;
+
+        if (!pci_find_capability(dev, type))
+                return -EINVAL;
+
         return 0;
 }
 
@@ -500,19 +500,12 @@ int pci_msi_supported(struct pci_dev * dev)
  **/
 int pci_enable_msi(struct pci_dev* dev)
 {
-        int pos, status;
-
-        if (pci_msi_supported(dev) < 0)
-                return -EINVAL;
+        int status;
 
-        status = msi_init();
-        if (status < 0)
+        status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
+        if (status)
                 return status;
 
-        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
-        if (!pos)
-                return -EINVAL;
-
         WARN_ON(!!dev->msi_enabled);
 
         /* Check whether driver already requested for MSI-X irqs */
@@ -525,69 +518,54 @@ int pci_enable_msi(struct pci_dev* dev)
         status = msi_capability_init(dev);
         return status;
 }
+EXPORT_SYMBOL(pci_enable_msi);
 
 void pci_disable_msi(struct pci_dev* dev)
 {
         struct msi_desc *entry;
         int default_irq;
 
-        if (!pci_msi_enable)
-                return;
-        if (!dev)
-                return;
-
-        if (!dev->msi_enabled)
+        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                 return;
 
         msi_set_enable(dev, 0);
         pci_intx(dev, 1);               /* enable intx */
         dev->msi_enabled = 0;
 
-        entry = get_irq_msi(dev->first_msi_irq);
-        if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
+        BUG_ON(list_empty(&dev->msi_list));
+        entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+        if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
                 return;
         }
-        if (irq_has_action(dev->first_msi_irq)) {
-                printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
-                       "free_irq() on MSI irq %d\n",
-                       pci_name(dev), dev->first_msi_irq);
-                BUG_ON(irq_has_action(dev->first_msi_irq));
-        } else {
-                default_irq = entry->msi_attrib.default_irq;
-                msi_free_irq(dev, dev->first_msi_irq);
-
-                /* Restore dev->irq to its default pin-assertion irq */
-                dev->irq = default_irq;
-        }
-        dev->first_msi_irq = 0;
+
+        default_irq = entry->msi_attrib.default_irq;
+        msi_free_irqs(dev);
+
+        /* Restore dev->irq to its default pin-assertion irq */
+        dev->irq = default_irq;
 }
+EXPORT_SYMBOL(pci_disable_msi);
 
-static int msi_free_irq(struct pci_dev* dev, int irq)
+static int msi_free_irqs(struct pci_dev* dev)
 {
-        struct msi_desc *entry;
-        int head, entry_nr, type;
-        void __iomem *base;
+        struct msi_desc *entry, *tmp;
 
-        entry = get_irq_msi(irq);
-        if (!entry || entry->dev != dev) {
-                return -EINVAL;
-        }
-        type = entry->msi_attrib.type;
-        entry_nr = entry->msi_attrib.entry_nr;
-        head = entry->link.head;
-        base = entry->mask_base;
-        get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
-        get_irq_msi(entry->link.tail)->link.head = entry->link.head;
-
-        arch_teardown_msi_irq(irq);
-        kmem_cache_free(msi_cachep, entry);
-
-        if (type == PCI_CAP_ID_MSIX) {
-                writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
-                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
-                if (head == irq)
-                        iounmap(base);
+        list_for_each_entry(entry, &dev->msi_list, list)
+                BUG_ON(irq_has_action(entry->irq));
+
+        arch_teardown_msi_irqs(dev);
+
+        list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+                if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
+                        if (list_is_last(&entry->list, &dev->msi_list))
+                                iounmap(entry->mask_base);
+
+                        writel(1, entry->mask_base + entry->msi_attrib.entry_nr
+                                  * PCI_MSIX_ENTRY_SIZE
+                                  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+                }
+                list_del(&entry->list);
+                kfree(entry);
         }
 
         return 0;
@@ -614,17 +592,14 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
         int i, j;
         u16 control;
 
-        if (!entries || pci_msi_supported(dev) < 0)
+        if (!entries)
                 return -EINVAL;
 
-        status = msi_init();
-        if (status < 0)
+        status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
+        if (status)
                 return status;
 
         pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-        if (!pos)
-                return -EINVAL;
-
         pci_read_config_word(dev, msi_control_reg(pos), &control);
         nr_entries = multi_msix_capable(control);
         if (nvec > nr_entries)
@@ -651,41 +626,25 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
         status = msix_capability_init(dev, entries, nvec);
         return status;
 }
+EXPORT_SYMBOL(pci_enable_msix);
 
-void pci_disable_msix(struct pci_dev* dev)
+static void msix_free_all_irqs(struct pci_dev *dev)
 {
-        int irq, head, tail = 0, warning = 0;
-
-        if (!pci_msi_enable)
-                return;
-        if (!dev)
-                return;
+        msi_free_irqs(dev);
+}
 
-        if (!dev->msix_enabled)
+void pci_disable_msix(struct pci_dev* dev)
+{
+        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                 return;
 
         msix_set_enable(dev, 0);
         pci_intx(dev, 1);               /* enable intx */
         dev->msix_enabled = 0;
 
-        irq = head = dev->first_msi_irq;
-        while (head != tail) {
-                tail = get_irq_msi(irq)->link.tail;
-                if (irq_has_action(irq))
-                        warning = 1;
-                else if (irq != head) /* Release MSI-X irq */
-                        msi_free_irq(dev, irq);
-                irq = tail;
-        }
-        msi_free_irq(dev, irq);
-        if (warning) {
-                printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
-                        "free_irq() on all MSI-X irqs\n",
-                        pci_name(dev));
-                BUG_ON(warning > 0);
-        }
-        dev->first_msi_irq = 0;
+        msix_free_all_irqs(dev);
 }
+EXPORT_SYMBOL(pci_disable_msix);
 
 /**
  * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
@@ -701,38 +660,11 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
         if (!pci_msi_enable || !dev)
                 return;
 
-        if (dev->msi_enabled) {
-                if (irq_has_action(dev->first_msi_irq)) {
-                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
-                               "called without free_irq() on MSI irq %d\n",
-                               pci_name(dev), dev->first_msi_irq);
-                        BUG_ON(irq_has_action(dev->first_msi_irq));
-                } else /* Release MSI irq assigned to this device */
-                        msi_free_irq(dev, dev->first_msi_irq);
-        }
-        if (dev->msix_enabled) {
-                int irq, head, tail = 0, warning = 0;
-                void __iomem *base = NULL;
-
-                irq = head = dev->first_msi_irq;
-                while (head != tail) {
-                        tail = get_irq_msi(irq)->link.tail;
-                        base = get_irq_msi(irq)->mask_base;
-                        if (irq_has_action(irq))
-                                warning = 1;
-                        else if (irq != head) /* Release MSI-X irq */
-                                msi_free_irq(dev, irq);
-                        irq = tail;
-                }
-                msi_free_irq(dev, irq);
-                if (warning) {
-                        iounmap(base);
-                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
-                               "called without free_irq() on all MSI-X irqs\n",
-                               pci_name(dev));
-                        BUG_ON(warning > 0);
-                }
-        }
+        if (dev->msi_enabled)
+                msi_free_irqs(dev);
+
+        if (dev->msix_enabled)
+                msix_free_all_irqs(dev);
 }
 
 void pci_no_msi(void)
@@ -740,7 +672,53 @@ void pci_no_msi(void)
         pci_msi_enable = 0;
 }
 
-EXPORT_SYMBOL(pci_enable_msi);
-EXPORT_SYMBOL(pci_disable_msi);
-EXPORT_SYMBOL(pci_enable_msix);
-EXPORT_SYMBOL(pci_disable_msix);
+void pci_msi_init_pci_dev(struct pci_dev *dev)
+{
+        INIT_LIST_HEAD(&dev->msi_list);
+}
+
+
+/* Arch hooks */
+
+int __attribute__ ((weak))
+arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
+{
+        return 0;
+}
+
+int __attribute__ ((weak))
+arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
+{
+        return 0;
+}
+
+int __attribute__ ((weak))
+arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+        struct msi_desc *entry;
+        int ret;
+
+        list_for_each_entry(entry, &dev->msi_list, list) {
+                ret = arch_setup_msi_irq(dev, entry);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
+{
+        return;
+}
+
+void __attribute__ ((weak))
+arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+        struct msi_desc *entry;
+
+        list_for_each_entry(entry, &dev->msi_list, list) {
+                if (entry->irq != 0)
+                        arch_teardown_msi_irq(entry->irq);
+        }
+}