 Documentation/DMA-API.txt         |  49
 Documentation/DMA-mapping.txt     |  22
 arch/i386/pci/irq.c               |   3
 drivers/pci/hotplug/rpaphp_core.c |   3
 drivers/pci/msi.c                 | 227
 drivers/pci/pci.c                 |   6
 drivers/pci/pci.h                 |  11
 drivers/pci/quirks.c              |   4
 include/linux/pci.h               |  33
 include/linux/pci_ids.h           |   3
 include/linux/pm_legacy.h         |   7
 kernel/power/pm.c                 |  20
 12 files changed, 308 insertions(+), 80 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 1af0f2d50220..2ffb0d62f0fe 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -33,7 +33,9 @@ pci_alloc_consistent(struct pci_dev *dev, size_t size,
 
 Consistent memory is memory for which a write by either the device or
 the processor can immediately be read by the processor or device
-without having to worry about caching effects.
+without having to worry about caching effects.  (You may however need
+to make sure to flush the processor's write buffers before telling
+devices to read that memory.)
 
 This routine allocates a region of <size> bytes of consistent memory.
 it also returns a <dma_handle> which may be cast to an unsigned
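
As a minimal sketch of the write-buffer point above (illustrative only, not
part of this patch; desc, dev_priv, DESC_VALID and DOORBELL are made-up
names):

	/* Fill a descriptor that lives in consistent memory... */
	desc->addr  = cpu_to_le64(buf_dma);
	desc->len   = cpu_to_le32(len);
	desc->flags = cpu_to_le32(DESC_VALID);

	/* ...drain the CPU write buffers... */
	wmb();

	/* ...and only then tell the device to go read the descriptor. */
	writel(1, dev_priv->mmio + DOORBELL);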
@@ -304,12 +306,12 @@ dma address with dma_mapping_error(). A non zero return value means the mapping
 could not be created and the driver should take appropriate action (eg
 reduce current DMA mapping usage or delay and try again later).
 
 int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
 int
 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 	int nents, int direction)
 
 Maps a scatter gather list from the block layer.
 
@@ -327,12 +329,33 @@ critical that the driver do something, in the case of a block driver
 aborting the request or even oopsing is better than doing nothing and
 corrupting the filesystem.
 
-void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	enum dma_data_direction direction)
-void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	int nents, int direction)
+With scatterlists, you use the resulting mapping like this:
+
+	int i, count = dma_map_sg(dev, sglist, nents, direction);
+	struct scatterlist *sg;
+
+	for (i = 0, sg = sglist; i < count; i++, sg++) {
+		hw_address[i] = sg_dma_address(sg);
+		hw_len[i] = sg_dma_len(sg);
+	}
+
+where nents is the number of entries in the sglist.
+
+The implementation is free to merge several consecutive sglist entries
+into one (e.g. with an IOMMU, or if several pages just happen to be
+physically contiguous) and returns the actual number of sg entries it
+mapped them to. On failure 0, is returned.
+
+Then you should loop count times (note: this can be less than nents times)
+and use sg_dma_address() and sg_dma_len() macros where you previously
+accessed sg->address and sg->length as shown above.
+
+void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nhwentries, enum dma_data_direction direction)
+void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+	int nents, int direction)
 
 unmap the previously mapped scatter/gather list. All the parameters
 must be the same as those and passed in to the scatter/gather mapping
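
Tying the map and unmap calls together, a driver's transfer path might look
roughly like the following sketch (not part of the patch; hw_address[],
hw_len[] and the device-programming step are placeholders):

	int i, count;
	struct scatterlist *sg;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -EIO;	/* mapping failed; back off or retry later */

	for (i = 0, sg = sglist; i < count; i++, sg++) {
		hw_address[i] = sg_dma_address(sg);
		hw_len[i] = sg_dma_len(sg);
	}

	/* ... program the device with hw_address[]/hw_len[], run the DMA ... */

	/* Unmap with the original nents, not the count returned above. */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);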
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index 10bf4deb96aa..7c717699032c 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -58,11 +58,15 @@ translating each of those pages back to a kernel address using
 something like __va(). [ EDIT: Update this when we integrate
 Gerd Knorr's generic code which does this. ]
 
-This rule also means that you may not use kernel image addresses
-(ie. items in the kernel's data/text/bss segment, or your driver's)
-nor may you use kernel stack addresses for DMA.  Both of these items
-might be mapped somewhere entirely different than the rest of physical
-memory.
+This rule also means that you may use neither kernel image addresses
+(items in data/text/bss segments), nor module image addresses, nor
+stack addresses for DMA.  These could all be mapped somewhere entirely
+different than the rest of physical memory.  Even if those classes of
+memory could physically work with DMA, you'd need to ensure the I/O
+buffers were cacheline-aligned.  Without that, you'd see cacheline
+sharing problems (data corruption) on CPUs with DMA-incoherent caches.
+(The CPU could write to one word, DMA would write to a different one
+in the same cache line, and one of them could be overwritten.)
 
 Also, this means that you cannot take the return of a kmap()
 call and DMA to/from that.  This is similar to vmalloc().
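
In practice that means DMA buffers come from the page allocator or kmalloc(),
never from the stack, static data or the module image; a sketch (BUF_LEN and
the surrounding driver code are invented):

	/* Wrong: an on-stack or static buffer may not be in the kernel's
	 * linear mapping and can share a cache line with unrelated data. */

	/* OK: kmalloc()ed memory is DMA'able and suitably aligned. */
	u8 *buf = kmalloc(BUF_LEN, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;
	handle = pci_map_single(pdev, buf, BUF_LEN, PCI_DMA_FROMDEVICE);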
@@ -284,6 +288,11 @@ There are two types of DMA mappings:
 
              in order to get correct behavior on all platforms.
 
+             Also, on some platforms your driver may need to flush CPU write
+             buffers in much the same way as it needs to flush write buffers
+             found in PCI bridges (such as by reading a register's value
+             after writing it).
+
 - Streaming DMA mappings which are usually mapped for one DMA transfer,
   unmapped right after it (unless you use pci_dma_sync_* below) and for which
   hardware can optimize for sequential accesses.
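
The read-back idiom mentioned above looks like this in a driver (a sketch
only; the TX_TAIL register offset is made up):

	/* Kick the device... */
	writel(tail, ioaddr + TX_TAIL);

	/* ...then read any register on the same device back.  The read
	 * cannot complete until the posted writes ahead of it (in CPU
	 * write buffers and PCI bridges) have been flushed out. */
	readl(ioaddr + TX_TAIL);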
@@ -303,6 +312,9 @@ There are two types of DMA mappings:
 
 Neither type of DMA mapping has alignment restrictions that come
 from PCI, although some devices may have such restrictions.
+Also, systems with caches that aren't DMA-coherent will work better
+when the underlying buffers don't share cache lines with other data.
+
 
 Using Consistent DMA mappings.
 
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 3ca59cad05f3..73235443fda7 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -588,7 +588,10 @@ static __init int via_router_probe(struct irq_router *r,
 		case PCI_DEVICE_ID_VIA_82C596:
 		case PCI_DEVICE_ID_VIA_82C686:
 		case PCI_DEVICE_ID_VIA_8231:
+		case PCI_DEVICE_ID_VIA_8233A:
 		case PCI_DEVICE_ID_VIA_8235:
+		case PCI_DEVICE_ID_VIA_8237:
+		case PCI_DEVICE_ID_VIA_8237_SATA:
 		/* FIXME: add new ones for 8233/5 */
 			r->name = "VIA";
 			r->get = pirq_via_get;
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 6e79f5675b0d..638004546700 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -360,9 +360,6 @@ static int __init rpaphp_init(void)
 	while ((dn = of_find_node_by_type(dn, "pci")))
 		rpaphp_add_slot(dn);
 
-	if (!num_slots)
-		return -ENODEV;
-
 	return 0;
 }
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a77e79c8c82e..2087a397ef16 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -504,6 +504,201 @@ void pci_scan_msi_device(struct pci_dev *dev)
 		nr_reserved_vectors++;
 }
 
+#ifdef CONFIG_PM
+int pci_save_msi_state(struct pci_dev *dev)
+{
+	int pos, i = 0;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+	u32 *cap;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (pos <= 0 || dev->no_msi)
+		return 0;
+
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	if (!(control & PCI_MSI_FLAGS_ENABLE))
+		return 0;
+
+	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
+		GFP_KERNEL);
+	if (!save_state) {
+		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
+		return -ENOMEM;
+	}
+	cap = &save_state->data[0];
+
+	pci_read_config_dword(dev, pos, &cap[i++]);
+	control = cap[0] >> 16;
+	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
+	if (control & PCI_MSI_FLAGS_64BIT) {
+		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
+		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
+	} else
+		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
+	if (control & PCI_MSI_FLAGS_MASKBIT)
+		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
+	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+	save_state->cap_nr = PCI_CAP_ID_MSI;
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+void pci_restore_msi_state(struct pci_dev *dev)
+{
+	int i = 0, pos;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+	u32 *cap;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (!save_state || pos <= 0)
+		return;
+	cap = &save_state->data[0];
+
+	control = cap[i++] >> 16;
+	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
+	if (control & PCI_MSI_FLAGS_64BIT) {
+		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
+		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
+	} else
+		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
+	if (control & PCI_MSI_FLAGS_MASKBIT)
+		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+}
+
+int pci_save_msix_state(struct pci_dev *dev)
+{
+	int pos;
+	u16 control;
+	struct pci_cap_saved_state *save_state;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	if (pos <= 0 || dev->no_msi)
+		return 0;
+
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	if (!(control & PCI_MSIX_FLAGS_ENABLE))
+		return 0;
+	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
+		GFP_KERNEL);
+	if (!save_state) {
+		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
+		return -ENOMEM;
+	}
+	*((u16 *)&save_state->data[0]) = control;
+
+	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+	save_state->cap_nr = PCI_CAP_ID_MSIX;
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+void pci_restore_msix_state(struct pci_dev *dev)
+{
+	u16 save;
+	int pos;
+	int vector, head, tail = 0;
+	void __iomem *base;
+	int j;
+	struct msg_address address;
+	struct msg_data data;
+	struct msi_desc *entry;
+	int temp;
+	struct pci_cap_saved_state *save_state;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
+	if (!save_state)
+		return;
+	save = *((u16 *)&save_state->data[0]);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	if (pos <= 0)
+		return;
+
+	/* route the table */
+	temp = dev->irq;
+	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
+		return;
+	vector = head = dev->irq;
+	while (head != tail) {
+		entry = msi_desc[vector];
+		base = entry->mask_base;
+		j = entry->msi_attrib.entry_nr;
+
+		msi_address_init(&address);
+		msi_data_init(&data, vector);
+
+		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
+		address.lo_address.value |= entry->msi_attrib.current_cpu <<
+			MSI_TARGET_CPU_SHIFT;
+
+		writel(address.lo_address.value,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+		writel(address.hi_address,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+		writel(*(u32*)&data,
+			base + j * PCI_MSIX_ENTRY_SIZE +
+			PCI_MSIX_ENTRY_DATA_OFFSET);
+
+		tail = msi_desc[vector]->link.tail;
+		vector = tail;
+	}
+	dev->irq = temp;
+
+	pci_write_config_word(dev, msi_control_reg(pos), save);
+	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+}
+#endif
+
+static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
+{
+	struct msg_address address;
+	struct msg_data data;
+	int pos, vector = dev->irq;
+	u16 control;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	pci_read_config_word(dev, msi_control_reg(pos), &control);
+	/* Configure MSI capability structure */
+	msi_address_init(&address);
+	msi_data_init(&data, vector);
+	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
+				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+	pci_write_config_dword(dev, msi_lower_address_reg(pos),
+			address.lo_address.value);
+	if (is_64bit_address(control)) {
+		pci_write_config_dword(dev,
+			msi_upper_address_reg(pos), address.hi_address);
+		pci_write_config_word(dev,
+			msi_data_reg(pos, 1), *((u32*)&data));
+	} else
+		pci_write_config_word(dev,
+			msi_data_reg(pos, 0), *((u32*)&data));
+	if (entry->msi_attrib.maskbit) {
+		unsigned int maskbits, temp;
+		/* All MSIs are unmasked by default, Mask them all */
+		pci_read_config_dword(dev,
+			msi_mask_bits_reg(pos, is_64bit_address(control)),
+			&maskbits);
+		temp = (1 << multi_msi_capable(control));
+		temp = ((temp - 1) & ~temp);
+		maskbits |= temp;
+		pci_write_config_dword(dev,
+			msi_mask_bits_reg(pos, is_64bit_address(control)),
+			maskbits);
+	}
+}
+
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -516,8 +711,6 @@ void pci_scan_msi_device(struct pci_dev *dev)
 static int msi_capability_init(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
-	struct msg_address address;
-	struct msg_data data;
 	int pos, vector;
 	u16 control;
 
@@ -549,33 +742,8 @@ static int msi_capability_init(struct pci_dev *dev)
 	/* Replace with MSI handler */
 	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
 	/* Configure MSI capability structure */
-	msi_address_init(&address);
-	msi_data_init(&data, vector);
-	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
-				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
-	pci_write_config_dword(dev, msi_lower_address_reg(pos),
-			address.lo_address.value);
-	if (is_64bit_address(control)) {
-		pci_write_config_dword(dev,
-			msi_upper_address_reg(pos), address.hi_address);
-		pci_write_config_word(dev,
-			msi_data_reg(pos, 1), *((u32*)&data));
-	} else
-		pci_write_config_word(dev,
-			msi_data_reg(pos, 0), *((u32*)&data));
-	if (entry->msi_attrib.maskbit) {
-		unsigned int maskbits, temp;
-		/* All MSIs are unmasked by default, Mask them all */
-		pci_read_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
-			&maskbits);
-		temp = (1 << multi_msi_capable(control));
-		temp = ((temp - 1) & ~temp);
-		maskbits |= temp;
-		pci_write_config_dword(dev,
-			msi_mask_bits_reg(pos, is_64bit_address(control)),
-			maskbits);
-	}
+	msi_register_init(dev, entry);
+
 	attach_msi_entry(entry, vector);
 	/* Set MSI enabled bits */
 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
@@ -731,6 +899,7 @@ int pci_enable_msi(struct pci_dev* dev)
 		vector_irq[dev->irq] = -1;
 		nr_released_vectors--;
 		spin_unlock_irqrestore(&msi_lock, flags);
+		msi_register_init(dev, msi_desc[dev->irq]);
 		enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
 		return 0;
 	}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 042fa5265cf6..2329f941a0dc 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -446,6 +446,10 @@ pci_save_state(struct pci_dev *dev)
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+	if ((i = pci_save_msi_state(dev)) != 0)
+		return i;
+	if ((i = pci_save_msix_state(dev)) != 0)
+		return i;
 	return 0;
 }
 
@@ -460,6 +464,8 @@ pci_restore_state(struct pci_dev *dev)
 
 	for (i = 0; i < 16; i++)
 		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+	pci_restore_msi_state(dev);
+	pci_restore_msix_state(dev);
 	return 0;
 }
 
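
With the hooks above, MSI/MSI-X state rides along with the normal
config-space save, so a driver's power-management methods keep the usual
pattern (a sketch only; the foo_* names and the quiesce step are
placeholders):

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		/* quiesce device-specific activity first ... */
		pci_save_state(pdev);		/* now also saves MSI/MSI-X state */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

	static int foo_resume(struct pci_dev *pdev)
	{
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);	/* re-arms MSI/MSI-X if it was enabled */
		return pci_enable_device(pdev);
	}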
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8f3fb47ea671..30630cbe2fe3 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -55,6 +55,17 @@ void pci_no_msi(void);
 static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
 static inline void pci_no_msi(void) { }
 #endif
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM)
+int pci_save_msi_state(struct pci_dev *dev);
+int pci_save_msix_state(struct pci_dev *dev);
+void pci_restore_msi_state(struct pci_dev *dev);
+void pci_restore_msix_state(struct pci_dev *dev);
+#else
+static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; }
+static inline int pci_save_msix_state(struct pci_dev *dev) { return 0; }
+static inline void pci_restore_msi_state(struct pci_dev *dev) {}
+static inline void pci_restore_msix_state(struct pci_dev *dev) {}
+#endif
 
 extern int pcie_mch_quirk;
 extern struct device_attribute pci_dev_attrs[];
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4970f47be72c..827550d25c9e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -592,7 +592,7 @@ static void __init quirk_amd_8131_ioapic(struct pci_dev *dev)
 		pci_write_config_byte( dev, AMD8131_MISC, tmp);
 	}
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_APIC, quirk_amd_8131_ioapic );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
 
 static void __init quirk_svw_msi(struct pci_dev *dev)
 {
@@ -921,6 +921,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 		if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) {
 			switch (dev->subsystem_device) {
 			case 0x1882: /* M6V notebook */
+			case 0x1977: /* A6VA notebook */
 				asus_hides_smbus = 1;
 			}
 		}
@@ -999,6 +1000,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
 
 static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
 {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0aad5a378e95..3a6a4e37a482 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -97,7 +97,13 @@ enum pci_channel_state {
 
 typedef unsigned short __bitwise pci_bus_flags_t;
 enum pci_bus_flags {
-	PCI_BUS_FLAGS_NO_MSI = (pci_bus_flags_t) 1,
+	PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+};
+
+struct pci_cap_saved_state {
+	struct hlist_node next;
+	char cap_nr;
+	u32 data[0];
 };
 
 /*
@@ -159,6 +165,7 @@ struct pci_dev {
 	unsigned int	block_ucfg_access:1;	/* userspace config space access is blocked */
 
 	u32		saved_config_space[16]; /* config space saved at suspend time */
+	struct hlist_head saved_cap_space;
 	struct bin_attribute *rom_attr;		/* attribute descriptor for sysfs ROM entry */
 	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
@@ -169,6 +176,30 @@ struct pci_dev {
 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
 
+static inline struct pci_cap_saved_state *pci_find_saved_cap(
+	struct pci_dev *pci_dev,char cap)
+{
+	struct pci_cap_saved_state *tmp;
+	struct hlist_node *pos;
+
+	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+		if (tmp->cap_nr == cap)
+			return tmp;
+	}
+	return NULL;
+}
+
+static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
+	struct pci_cap_saved_state *new_cap)
+{
+	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
+}
+
+static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
+{
+	hlist_del(&cap->next);
+}
+
 /*
  * For PCI devices, the region numbers are assigned this way:
  *
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 870fe38378b1..8d03e10212f5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -497,7 +497,8 @@
 #define PCI_DEVICE_ID_AMD_8111_SMBUS	0x746b
 #define PCI_DEVICE_ID_AMD_8111_AUDIO	0x746d
 #define PCI_DEVICE_ID_AMD_8151_0	0x7454
-#define PCI_DEVICE_ID_AMD_8131_APIC	0x7450
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE	0x7450
+#define PCI_DEVICE_ID_AMD_8131_APIC	0x7451
 #define PCI_DEVICE_ID_AMD_CS5536_ISA	0x2090
 #define PCI_DEVICE_ID_AMD_CS5536_FLASH	0x2091
 #define PCI_DEVICE_ID_AMD_CS5536_AUDIO	0x2093
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
index 1252b45face1..008932d73c35 100644
--- a/include/linux/pm_legacy.h
+++ b/include/linux/pm_legacy.h
@@ -16,11 +16,6 @@ struct pm_dev __deprecated *
 pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
 
 /*
- * Unregister a device with power management
- */
-void __deprecated pm_unregister(struct pm_dev *dev);
-
-/*
  * Unregister all devices with matching callback
  */
 void __deprecated pm_unregister_all(pm_callback callback);
@@ -41,8 +36,6 @@ static inline struct pm_dev *pm_register(pm_dev_t type,
 	return NULL;
 }
 
-static inline void pm_unregister(struct pm_dev *dev) {}
-
 static inline void pm_unregister_all(pm_callback callback) {}
 
 static inline int pm_send_all(pm_request_t rqst, void *data)
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 0f6908cce1dd..84063ac8fcfc 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
 	return dev;
 }
 
-/**
- * pm_unregister - unregister a device with power management
- * @dev: device to unregister
- *
- * Remove a device from the power management notification lists. The
- * dev passed must be a handle previously returned by pm_register.
- */
-
-void pm_unregister(struct pm_dev *dev)
-{
-	if (dev) {
-		mutex_lock(&pm_devs_lock);
-		list_del(&dev->entry);
-		mutex_unlock(&pm_devs_lock);
-
-		kfree(dev);
-	}
-}
-
 static void __pm_unregister(struct pm_dev *dev)
 {
 	if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
 }
 
 EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
 EXPORT_SYMBOL(pm_unregister_all);
 EXPORT_SYMBOL(pm_send_all);
 EXPORT_SYMBOL(pm_active);
