Diffstat (limited to 'drivers/pci')
44 files changed, 1468 insertions, 1167 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index fdc864f9cf23..b1ecefa2a23d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
| @@ -27,10 +27,10 @@ config PCI_LEGACY | |||
| 27 | default y | 27 | default y |
| 28 | help | 28 | help |
| 29 | Say Y here if you want to include support for the deprecated | 29 | Say Y here if you want to include support for the deprecated |
| 30 | pci_find_slot() and pci_find_device() APIs. Most drivers have | 30 | pci_find_device() API. Most drivers have been converted over |
| 31 | been converted over to using the proper hotplug APIs, so this | 31 | to using the proper hotplug APIs, so this option serves to |
| 32 | option serves to include/exclude only a few drivers that are | 32 | include/exclude only a few drivers that are still using this |
| 33 | still using this API. | 33 | API. |
| 34 | 34 | ||
| 35 | config PCI_DEBUG | 35 | config PCI_DEBUG |
| 36 | bool "PCI Debugging" | 36 | bool "PCI Debugging" |
| @@ -69,3 +69,10 @@ config PCI_IOV | |||
| 69 | physical resources. | 69 | physical resources. |
| 70 | 70 | ||
| 71 | If unsure, say N. | 71 | If unsure, say N. |
| 72 | |||
| 73 | config PCI_IOAPIC | ||
| 74 | bool | ||
| 75 | depends on PCI | ||
| 76 | depends on ACPI | ||
| 77 | depends on HOTPLUG | ||
| 78 | default y | ||
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4a7f11d8f432..4df48d58eaa6 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
| @@ -14,6 +14,8 @@ CFLAGS_legacy.o += -Wno-deprecated-declarations | |||
| 14 | # Build PCI Express stuff if needed | 14 | # Build PCI Express stuff if needed |
| 15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 15 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
| 16 | 16 | ||
| 17 | obj-$(CONFIG_PCI_IOAPIC) += ioapic.o | ||
| 18 | |||
| 17 | obj-$(CONFIG_HOTPLUG) += hotplug.o | 19 | obj-$(CONFIG_HOTPLUG) += hotplug.o |
| 18 | 20 | ||
| 19 | # Build the PCI Hotplug drivers if we were asked to | 21 | # Build the PCI Hotplug drivers if we were asked to |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 14bbaa17e2ca..83aae4747594 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
| @@ -175,15 +175,6 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | |||
| 175 | int ret = 0; | 175 | int ret = 0; |
| 176 | 176 | ||
| 177 | drhd = (struct acpi_dmar_hardware_unit *)header; | 177 | drhd = (struct acpi_dmar_hardware_unit *)header; |
| 178 | if (!drhd->address) { | ||
| 179 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
| 180 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
| 181 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 182 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 183 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 184 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 185 | return -ENODEV; | ||
| 186 | } | ||
| 187 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); | 178 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); |
| 188 | if (!dmaru) | 179 | if (!dmaru) |
| 189 | return -ENOMEM; | 180 | return -ENOMEM; |
| @@ -329,7 +320,7 @@ found: | |||
| 329 | for (bus = dev->bus; bus; bus = bus->parent) { | 320 | for (bus = dev->bus; bus; bus = bus->parent) { |
| 330 | struct pci_dev *bridge = bus->self; | 321 | struct pci_dev *bridge = bus->self; |
| 331 | 322 | ||
| 332 | if (!bridge || !bridge->is_pcie || | 323 | if (!bridge || !pci_is_pcie(bridge) || |
| 333 | bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) | 324 | bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) |
| 334 | return 0; | 325 | return 0; |
| 335 | 326 | ||
| @@ -348,12 +339,42 @@ found: | |||
| 348 | } | 339 | } |
| 349 | #endif | 340 | #endif |
| 350 | 341 | ||
| 342 | #ifdef CONFIG_ACPI_NUMA | ||
| 343 | static int __init | ||
| 344 | dmar_parse_one_rhsa(struct acpi_dmar_header *header) | ||
| 345 | { | ||
| 346 | struct acpi_dmar_rhsa *rhsa; | ||
| 347 | struct dmar_drhd_unit *drhd; | ||
| 348 | |||
| 349 | rhsa = (struct acpi_dmar_rhsa *)header; | ||
| 350 | for_each_drhd_unit(drhd) { | ||
| 351 | if (drhd->reg_base_addr == rhsa->base_address) { | ||
| 352 | int node = acpi_map_pxm_to_node(rhsa->proximity_domain); | ||
| 353 | |||
| 354 | if (!node_online(node)) | ||
| 355 | node = -1; | ||
| 356 | drhd->iommu->node = node; | ||
| 357 | return 0; | ||
| 358 | } | ||
| 359 | } | ||
| 360 | WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" | ||
| 361 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 362 | drhd->reg_base_addr, | ||
| 363 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 364 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 365 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 366 | |||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | #endif | ||
| 370 | |||
| 351 | static void __init | 371 | static void __init |
| 352 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | 372 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) |
| 353 | { | 373 | { |
| 354 | struct acpi_dmar_hardware_unit *drhd; | 374 | struct acpi_dmar_hardware_unit *drhd; |
| 355 | struct acpi_dmar_reserved_memory *rmrr; | 375 | struct acpi_dmar_reserved_memory *rmrr; |
| 356 | struct acpi_dmar_atsr *atsr; | 376 | struct acpi_dmar_atsr *atsr; |
| 377 | struct acpi_dmar_rhsa *rhsa; | ||
| 357 | 378 | ||
| 358 | switch (header->type) { | 379 | switch (header->type) { |
| 359 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: | 380 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
| @@ -375,6 +396,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | |||
| 375 | atsr = container_of(header, struct acpi_dmar_atsr, header); | 396 | atsr = container_of(header, struct acpi_dmar_atsr, header); |
| 376 | printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); | 397 | printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); |
| 377 | break; | 398 | break; |
| 399 | case ACPI_DMAR_HARDWARE_AFFINITY: | ||
| 400 | rhsa = container_of(header, struct acpi_dmar_rhsa, header); | ||
| 401 | printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", | ||
| 402 | (unsigned long long)rhsa->base_address, | ||
| 403 | rhsa->proximity_domain); | ||
| 404 | break; | ||
| 378 | } | 405 | } |
| 379 | } | 406 | } |
| 380 | 407 | ||
| @@ -459,9 +486,15 @@ parse_dmar_table(void) | |||
| 459 | ret = dmar_parse_one_atsr(entry_header); | 486 | ret = dmar_parse_one_atsr(entry_header); |
| 460 | #endif | 487 | #endif |
| 461 | break; | 488 | break; |
| 489 | case ACPI_DMAR_HARDWARE_AFFINITY: | ||
| 490 | #ifdef CONFIG_ACPI_NUMA | ||
| 491 | ret = dmar_parse_one_rhsa(entry_header); | ||
| 492 | #endif | ||
| 493 | break; | ||
| 462 | default: | 494 | default: |
| 463 | printk(KERN_WARNING PREFIX | 495 | printk(KERN_WARNING PREFIX |
| 464 | "Unknown DMAR structure type\n"); | 496 | "Unknown DMAR structure type %d\n", |
| 497 | entry_header->type); | ||
| 465 | ret = 0; /* for forward compatibility */ | 498 | ret = 0; /* for forward compatibility */ |
| 466 | break; | 499 | break; |
| 467 | } | 500 | } |
| @@ -580,12 +613,81 @@ int __init dmar_table_init(void) | |||
| 580 | return 0; | 613 | return 0; |
| 581 | } | 614 | } |
| 582 | 615 | ||
| 616 | static int bios_warned; | ||
| 617 | |||
| 618 | int __init check_zero_address(void) | ||
| 619 | { | ||
| 620 | struct acpi_table_dmar *dmar; | ||
| 621 | struct acpi_dmar_header *entry_header; | ||
| 622 | struct acpi_dmar_hardware_unit *drhd; | ||
| 623 | |||
| 624 | dmar = (struct acpi_table_dmar *)dmar_tbl; | ||
| 625 | entry_header = (struct acpi_dmar_header *)(dmar + 1); | ||
| 626 | |||
| 627 | while (((unsigned long)entry_header) < | ||
| 628 | (((unsigned long)dmar) + dmar_tbl->length)) { | ||
| 629 | /* Avoid looping forever on bad ACPI tables */ | ||
| 630 | if (entry_header->length == 0) { | ||
| 631 | printk(KERN_WARNING PREFIX | ||
| 632 | "Invalid 0-length structure\n"); | ||
| 633 | return 0; | ||
| 634 | } | ||
| 635 | |||
| 636 | if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { | ||
| 637 | void __iomem *addr; | ||
| 638 | u64 cap, ecap; | ||
| 639 | |||
| 640 | drhd = (void *)entry_header; | ||
| 641 | if (!drhd->address) { | ||
| 642 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
| 643 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
| 644 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 645 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 646 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 647 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 648 | bios_warned = 1; | ||
| 649 | goto failed; | ||
| 650 | } | ||
| 651 | |||
| 652 | addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); | ||
| 653 | if (!addr ) { | ||
| 654 | printk("IOMMU: can't validate: %llx\n", drhd->address); | ||
| 655 | goto failed; | ||
| 656 | } | ||
| 657 | cap = dmar_readq(addr + DMAR_CAP_REG); | ||
| 658 | ecap = dmar_readq(addr + DMAR_ECAP_REG); | ||
| 659 | early_iounmap(addr, VTD_PAGE_SIZE); | ||
| 660 | if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { | ||
| 661 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
| 662 | WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | ||
| 663 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 664 | drhd->address, | ||
| 665 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 666 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 667 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 668 | bios_warned = 1; | ||
| 669 | goto failed; | ||
| 670 | } | ||
| 671 | } | ||
| 672 | |||
| 673 | entry_header = ((void *)entry_header + entry_header->length); | ||
| 674 | } | ||
| 675 | return 1; | ||
| 676 | |||
| 677 | failed: | ||
| 678 | #ifdef CONFIG_DMAR | ||
| 679 | dmar_disabled = 1; | ||
| 680 | #endif | ||
| 681 | return 0; | ||
| 682 | } | ||
| 683 | |||
| 583 | void __init detect_intel_iommu(void) | 684 | void __init detect_intel_iommu(void) |
| 584 | { | 685 | { |
| 585 | int ret; | 686 | int ret; |
| 586 | 687 | ||
| 587 | ret = dmar_table_detect(); | 688 | ret = dmar_table_detect(); |
| 588 | 689 | if (ret) | |
| 690 | ret = check_zero_address(); | ||
| 589 | { | 691 | { |
| 590 | #ifdef CONFIG_INTR_REMAP | 692 | #ifdef CONFIG_INTR_REMAP |
| 591 | struct acpi_table_dmar *dmar; | 693 | struct acpi_table_dmar *dmar; |
| @@ -602,9 +704,15 @@ void __init detect_intel_iommu(void) | |||
| 602 | "x2apic and Intr-remapping.\n"); | 704 | "x2apic and Intr-remapping.\n"); |
| 603 | #endif | 705 | #endif |
| 604 | #ifdef CONFIG_DMAR | 706 | #ifdef CONFIG_DMAR |
| 605 | if (ret && !no_iommu && !iommu_detected && !swiotlb && | 707 | if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { |
| 606 | !dmar_disabled) | ||
| 607 | iommu_detected = 1; | 708 | iommu_detected = 1; |
| 709 | /* Make sure ACS will be enabled */ | ||
| 710 | pci_request_acs(); | ||
| 711 | } | ||
| 712 | #endif | ||
| 713 | #ifdef CONFIG_X86 | ||
| 714 | if (ret) | ||
| 715 | x86_init.iommu.iommu_init = intel_iommu_init; | ||
| 608 | #endif | 716 | #endif |
| 609 | } | 717 | } |
| 610 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); | 718 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); |
| @@ -621,6 +729,18 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 621 | int agaw = 0; | 729 | int agaw = 0; |
| 622 | int msagaw = 0; | 730 | int msagaw = 0; |
| 623 | 731 | ||
| 732 | if (!drhd->reg_base_addr) { | ||
| 733 | if (!bios_warned) { | ||
| 734 | WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
| 735 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 736 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 737 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 738 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 739 | bios_warned = 1; | ||
| 740 | } | ||
| 741 | return -EINVAL; | ||
| 742 | } | ||
| 743 | |||
| 624 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | 744 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); |
| 625 | if (!iommu) | 745 | if (!iommu) |
| 626 | return -ENOMEM; | 746 | return -ENOMEM; |
| @@ -637,13 +757,16 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 637 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | 757 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); |
| 638 | 758 | ||
| 639 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { | 759 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { |
| 640 | /* Promote an attitude of violence to a BIOS engineer today */ | 760 | if (!bios_warned) { |
| 641 | WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | 761 | /* Promote an attitude of violence to a BIOS engineer today */ |
| 642 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | 762 | WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" |
| 643 | drhd->reg_base_addr, | 763 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
| 644 | dmi_get_system_info(DMI_BIOS_VENDOR), | 764 | drhd->reg_base_addr, |
| 645 | dmi_get_system_info(DMI_BIOS_VERSION), | 765 | dmi_get_system_info(DMI_BIOS_VENDOR), |
| 646 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | 766 | dmi_get_system_info(DMI_BIOS_VERSION), |
| 767 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 768 | bios_warned = 1; | ||
| 769 | } | ||
| 647 | goto err_unmap; | 770 | goto err_unmap; |
| 648 | } | 771 | } |
| 649 | 772 | ||
| @@ -666,6 +789,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
| 666 | iommu->agaw = agaw; | 789 | iommu->agaw = agaw; |
| 667 | iommu->msagaw = msagaw; | 790 | iommu->msagaw = msagaw; |
| 668 | 791 | ||
| 792 | iommu->node = -1; | ||
| 793 | |||
| 669 | /* the registers might be more than one page */ | 794 | /* the registers might be more than one page */ |
| 670 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | 795 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), |
| 671 | cap_max_fault_reg_offset(iommu->cap)); | 796 | cap_max_fault_reg_offset(iommu->cap)); |
| @@ -1007,6 +1132,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) | |||
| 1007 | int dmar_enable_qi(struct intel_iommu *iommu) | 1132 | int dmar_enable_qi(struct intel_iommu *iommu) |
| 1008 | { | 1133 | { |
| 1009 | struct q_inval *qi; | 1134 | struct q_inval *qi; |
| 1135 | struct page *desc_page; | ||
| 1010 | 1136 | ||
| 1011 | if (!ecap_qis(iommu->ecap)) | 1137 | if (!ecap_qis(iommu->ecap)) |
| 1012 | return -ENOENT; | 1138 | return -ENOENT; |
| @@ -1023,13 +1149,16 @@ int dmar_enable_qi(struct intel_iommu *iommu) | |||
| 1023 | 1149 | ||
| 1024 | qi = iommu->qi; | 1150 | qi = iommu->qi; |
| 1025 | 1151 | ||
| 1026 | qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC)); | 1152 | |
| 1027 | if (!qi->desc) { | 1153 | desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); |
| 1154 | if (!desc_page) { | ||
| 1028 | kfree(qi); | 1155 | kfree(qi); |
| 1029 | iommu->qi = 0; | 1156 | iommu->qi = 0; |
| 1030 | return -ENOMEM; | 1157 | return -ENOMEM; |
| 1031 | } | 1158 | } |
| 1032 | 1159 | ||
| 1160 | qi->desc = page_address(desc_page); | ||
| 1161 | |||
| 1033 | qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); | 1162 | qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); |
| 1034 | if (!qi->desc_status) { | 1163 | if (!qi->desc_status) { |
| 1035 | free_page((unsigned long) qi->desc); | 1164 | free_page((unsigned long) qi->desc); |
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3625b094bf7e..6cd9f3c9887d 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
| @@ -6,18 +6,22 @@ obj-$(CONFIG_HOTPLUG_PCI) += pci_hotplug.o | |||
| 6 | obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o | 6 | obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o |
| 7 | obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o | 7 | obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o |
| 8 | 8 | ||
| 9 | # pciehp should be linked before acpiphp in order to allow the native driver | 9 | # native drivers should be linked before acpiphp in order to allow the |
| 10 | # to attempt to bind first. We can then fall back to generic support. | 10 | # native driver to attempt to bind first. We can then fall back to |
| 11 | # generic support. | ||
| 11 | 12 | ||
| 12 | obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o | 13 | obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o |
| 13 | obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o | ||
| 14 | obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o | ||
| 15 | obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o | 14 | obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o |
| 16 | obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o | 15 | obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o |
| 17 | obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o | 16 | obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o |
| 18 | obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o | 17 | obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o |
| 19 | obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o | 18 | obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o |
| 20 | obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o | 19 | obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o |
| 20 | obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o | ||
| 21 | |||
| 22 | # acpiphp_ibm extends acpiphp, so should be linked afterwards. | ||
| 23 | |||
| 24 | obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o | ||
| 21 | 25 | ||
| 22 | # Link this last so it doesn't claim devices that have a real hotplug driver | 26 | # Link this last so it doesn't claim devices that have a real hotplug driver |
| 23 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o | 27 | obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o |
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index a73028ec52e5..3c76fc67cf0e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
| @@ -362,6 +362,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
| 362 | status = acpi_pci_osc_control_set(handle, flags); | 362 | status = acpi_pci_osc_control_set(handle, flags); |
| 363 | if (ACPI_SUCCESS(status)) | 363 | if (ACPI_SUCCESS(status)) |
| 364 | goto got_one; | 364 | goto got_one; |
| 365 | if (status == AE_SUPPORT) | ||
| 366 | goto no_control; | ||
| 365 | kfree(string.pointer); | 367 | kfree(string.pointer); |
| 366 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; | 368 | string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; |
| 367 | } | 369 | } |
| @@ -394,10 +396,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) | |||
| 394 | if (ACPI_FAILURE(status)) | 396 | if (ACPI_FAILURE(status)) |
| 395 | break; | 397 | break; |
| 396 | } | 398 | } |
| 397 | 399 | no_control: | |
| 398 | dbg("Cannot get control of hotplug hardware for pci %s\n", | 400 | dbg("Cannot get control of hotplug hardware for pci %s\n", |
| 399 | pci_name(pdev)); | 401 | pci_name(pdev)); |
| 400 | |||
| 401 | kfree(string.pointer); | 402 | kfree(string.pointer); |
| 402 | return -ENODEV; | 403 | return -ENODEV; |
| 403 | got_one: | 404 | got_one: |
| @@ -471,7 +472,7 @@ int acpi_pci_detect_ejectable(acpi_handle handle) | |||
| 471 | return found; | 472 | return found; |
| 472 | 473 | ||
| 473 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | 474 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, |
| 474 | check_hotplug, (void *)&found, NULL); | 475 | check_hotplug, NULL, (void *)&found, NULL); |
| 475 | return found; | 476 | return found; |
| 476 | } | 477 | } |
| 477 | EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); | 478 | EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); |
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7d938df79206..bab52047baa8 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
| @@ -146,12 +146,6 @@ struct acpiphp_attention_info | |||
| 146 | struct module *owner; | 146 | struct module *owner; |
| 147 | }; | 147 | }; |
| 148 | 148 | ||
| 149 | struct acpiphp_ioapic { | ||
| 150 | struct pci_dev *dev; | ||
| 151 | u32 gsi_base; | ||
| 152 | struct list_head list; | ||
| 153 | }; | ||
| 154 | |||
| 155 | /* PCI bus bridge HID */ | 149 | /* PCI bus bridge HID */ |
| 156 | #define ACPI_PCI_HOST_HID "PNP0A03" | 150 | #define ACPI_PCI_HOST_HID "PNP0A03" |
| 157 | 151 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 58d25a163a8b..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
| @@ -52,8 +52,6 @@ | |||
| 52 | #include "acpiphp.h" | 52 | #include "acpiphp.h" |
| 53 | 53 | ||
| 54 | static LIST_HEAD(bridge_list); | 54 | static LIST_HEAD(bridge_list); |
| 55 | static LIST_HEAD(ioapic_list); | ||
| 56 | static DEFINE_SPINLOCK(ioapic_list_lock); | ||
| 57 | 55 | ||
| 58 | #define MY_NAME "acpiphp_glue" | 56 | #define MY_NAME "acpiphp_glue" |
| 59 | 57 | ||
| @@ -266,7 +264,7 @@ static int detect_ejectable_slots(acpi_handle handle) | |||
| 266 | int found = acpi_pci_detect_ejectable(handle); | 264 | int found = acpi_pci_detect_ejectable(handle); |
| 267 | if (!found) { | 265 | if (!found) { |
| 268 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 266 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
| 269 | is_pci_dock_device, (void *)&found, NULL); | 267 | is_pci_dock_device, NULL, (void *)&found, NULL); |
| 270 | } | 268 | } |
| 271 | return found; | 269 | return found; |
| 272 | } | 270 | } |
| @@ -281,7 +279,7 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge) | |||
| 281 | 279 | ||
| 282 | /* register all slot objects under this bridge */ | 280 | /* register all slot objects under this bridge */ |
| 283 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, | 281 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, |
| 284 | register_slot, bridge, NULL); | 282 | register_slot, NULL, bridge, NULL); |
| 285 | if (ACPI_FAILURE(status)) { | 283 | if (ACPI_FAILURE(status)) { |
| 286 | list_del(&bridge->list); | 284 | list_del(&bridge->list); |
| 287 | return; | 285 | return; |
| @@ -311,17 +309,13 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge) | |||
| 311 | /* find acpiphp_func from acpiphp_bridge */ | 309 | /* find acpiphp_func from acpiphp_bridge */ |
| 312 | static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle) | 310 | static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle) |
| 313 | { | 311 | { |
| 314 | struct list_head *node, *l; | ||
| 315 | struct acpiphp_bridge *bridge; | 312 | struct acpiphp_bridge *bridge; |
| 316 | struct acpiphp_slot *slot; | 313 | struct acpiphp_slot *slot; |
| 317 | struct acpiphp_func *func; | 314 | struct acpiphp_func *func; |
| 318 | 315 | ||
| 319 | list_for_each(node, &bridge_list) { | 316 | list_for_each_entry(bridge, &bridge_list, list) { |
| 320 | bridge = list_entry(node, struct acpiphp_bridge, list); | ||
| 321 | for (slot = bridge->slots; slot; slot = slot->next) { | 317 | for (slot = bridge->slots; slot; slot = slot->next) { |
| 322 | list_for_each(l, &slot->funcs) { | 318 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 323 | func = list_entry(l, struct acpiphp_func, | ||
| 324 | sibling); | ||
| 325 | if (func->handle == handle) | 319 | if (func->handle == handle) |
| 326 | return func; | 320 | return func; |
| 327 | } | 321 | } |
| @@ -447,7 +441,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
| 447 | 441 | ||
| 448 | /* search P2P bridges under this p2p bridge */ | 442 | /* search P2P bridges under this p2p bridge */ |
| 449 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 443 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
| 450 | find_p2p_bridge, NULL, NULL); | 444 | find_p2p_bridge, NULL, NULL, NULL); |
| 451 | if (ACPI_FAILURE(status)) | 445 | if (ACPI_FAILURE(status)) |
| 452 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 446 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
| 453 | 447 | ||
| @@ -485,7 +479,7 @@ static int add_bridge(acpi_handle handle) | |||
| 485 | 479 | ||
| 486 | /* search P2P bridges under this host bridge */ | 480 | /* search P2P bridges under this host bridge */ |
| 487 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 481 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
| 488 | find_p2p_bridge, NULL, NULL); | 482 | find_p2p_bridge, NULL, NULL, NULL); |
| 489 | 483 | ||
| 490 | if (ACPI_FAILURE(status)) | 484 | if (ACPI_FAILURE(status)) |
| 491 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); | 485 | warn("find_p2p_bridge failed (error code = 0x%x)\n", status); |
| @@ -495,21 +489,19 @@ static int add_bridge(acpi_handle handle) | |||
| 495 | 489 | ||
| 496 | static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) | 490 | static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) |
| 497 | { | 491 | { |
| 498 | struct list_head *head; | 492 | struct acpiphp_bridge *bridge; |
| 499 | list_for_each(head, &bridge_list) { | 493 | |
| 500 | struct acpiphp_bridge *bridge = list_entry(head, | 494 | list_for_each_entry(bridge, &bridge_list, list) |
| 501 | struct acpiphp_bridge, list); | ||
| 502 | if (bridge->handle == handle) | 495 | if (bridge->handle == handle) |
| 503 | return bridge; | 496 | return bridge; |
| 504 | } | ||
| 505 | 497 | ||
| 506 | return NULL; | 498 | return NULL; |
| 507 | } | 499 | } |
| 508 | 500 | ||
| 509 | static void cleanup_bridge(struct acpiphp_bridge *bridge) | 501 | static void cleanup_bridge(struct acpiphp_bridge *bridge) |
| 510 | { | 502 | { |
| 511 | struct list_head *list, *tmp; | 503 | struct acpiphp_slot *slot, *next; |
| 512 | struct acpiphp_slot *slot; | 504 | struct acpiphp_func *func, *tmp; |
| 513 | acpi_status status; | 505 | acpi_status status; |
| 514 | acpi_handle handle = bridge->handle; | 506 | acpi_handle handle = bridge->handle; |
| 515 | 507 | ||
| @@ -530,10 +522,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) | |||
| 530 | 522 | ||
| 531 | slot = bridge->slots; | 523 | slot = bridge->slots; |
| 532 | while (slot) { | 524 | while (slot) { |
| 533 | struct acpiphp_slot *next = slot->next; | 525 | next = slot->next; |
| 534 | list_for_each_safe (list, tmp, &slot->funcs) { | 526 | list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) { |
| 535 | struct acpiphp_func *func; | ||
| 536 | func = list_entry(list, struct acpiphp_func, sibling); | ||
| 537 | if (is_dock_device(func->handle)) { | 527 | if (is_dock_device(func->handle)) { |
| 538 | unregister_hotplug_dock_device(func->handle); | 528 | unregister_hotplug_dock_device(func->handle); |
| 539 | unregister_dock_notifier(&func->nb); | 529 | unregister_dock_notifier(&func->nb); |
| @@ -545,7 +535,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge) | |||
| 545 | if (ACPI_FAILURE(status)) | 535 | if (ACPI_FAILURE(status)) |
| 546 | err("failed to remove notify handler\n"); | 536 | err("failed to remove notify handler\n"); |
| 547 | } | 537 | } |
| 548 | list_del(list); | 538 | list_del(&func->sibling); |
| 549 | kfree(func); | 539 | kfree(func); |
| 550 | } | 540 | } |
| 551 | acpiphp_unregister_hotplug_slot(slot); | 541 | acpiphp_unregister_hotplug_slot(slot); |
| @@ -573,7 +563,7 @@ cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
| 573 | /* cleanup p2p bridges under this P2P bridge | 563 | /* cleanup p2p bridges under this P2P bridge |
| 574 | in a depth-first manner */ | 564 | in a depth-first manner */ |
| 575 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, | 565 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, |
| 576 | cleanup_p2p_bridge, NULL, NULL); | 566 | cleanup_p2p_bridge, NULL, NULL, NULL); |
| 577 | 567 | ||
| 578 | bridge = acpiphp_handle_to_bridge(handle); | 568 | bridge = acpiphp_handle_to_bridge(handle); |
| 579 | if (bridge) | 569 | if (bridge) |
| @@ -589,7 +579,7 @@ static void remove_bridge(acpi_handle handle) | |||
| 589 | /* cleanup p2p bridges under this host bridge | 579 | /* cleanup p2p bridges under this host bridge |
| 590 | in a depth-first manner */ | 580 | in a depth-first manner */ |
| 591 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | 581 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, |
| 592 | (u32)1, cleanup_p2p_bridge, NULL, NULL); | 582 | (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL); |
| 593 | 583 | ||
| 594 | /* | 584 | /* |
| 595 | * On root bridges with hotplug slots directly underneath (ie, | 585 | * On root bridges with hotplug slots directly underneath (ie, |
| @@ -606,204 +596,17 @@ static void remove_bridge(acpi_handle handle) | |||
| 606 | handle_hotplug_event_bridge); | 596 | handle_hotplug_event_bridge); |
| 607 | } | 597 | } |
| 608 | 598 | ||
| 609 | static struct pci_dev * get_apic_pci_info(acpi_handle handle) | ||
| 610 | { | ||
| 611 | struct pci_dev *dev; | ||
| 612 | |||
| 613 | dev = acpi_get_pci_dev(handle); | ||
| 614 | if (!dev) | ||
| 615 | return NULL; | ||
| 616 | |||
| 617 | if ((dev->class != PCI_CLASS_SYSTEM_PIC_IOAPIC) && | ||
| 618 | (dev->class != PCI_CLASS_SYSTEM_PIC_IOXAPIC)) | ||
| 619 | { | ||
| 620 | pci_dev_put(dev); | ||
| 621 | return NULL; | ||
| 622 | } | ||
| 623 | |||
| 624 | return dev; | ||
| 625 | } | ||
| 626 | |||
| 627 | static int get_gsi_base(acpi_handle handle, u32 *gsi_base) | ||
| 628 | { | ||
| 629 | acpi_status status; | ||
| 630 | int result = -1; | ||
| 631 | unsigned long long gsb; | ||
| 632 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
| 633 | union acpi_object *obj; | ||
| 634 | void *table; | ||
| 635 | |||
| 636 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb); | ||
| 637 | if (ACPI_SUCCESS(status)) { | ||
| 638 | *gsi_base = (u32)gsb; | ||
| 639 | return 0; | ||
| 640 | } | ||
| 641 | |||
| 642 | status = acpi_evaluate_object(handle, "_MAT", NULL, &buffer); | ||
| 643 | if (ACPI_FAILURE(status) || !buffer.length || !buffer.pointer) | ||
| 644 | return -1; | ||
| 645 | |||
| 646 | obj = buffer.pointer; | ||
| 647 | if (obj->type != ACPI_TYPE_BUFFER) | ||
| 648 | goto out; | ||
| 649 | |||
| 650 | table = obj->buffer.pointer; | ||
| 651 | switch (((struct acpi_subtable_header *)table)->type) { | ||
| 652 | case ACPI_MADT_TYPE_IO_SAPIC: | ||
| 653 | *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base; | ||
| 654 | result = 0; | ||
| 655 | break; | ||
| 656 | case ACPI_MADT_TYPE_IO_APIC: | ||
| 657 | *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base; | ||
| 658 | result = 0; | ||
| 659 | break; | ||
| 660 | default: | ||
| 661 | break; | ||
| 662 | } | ||
| 663 | out: | ||
| 664 | kfree(buffer.pointer); | ||
| 665 | return result; | ||
| 666 | } | ||
| 667 | |||
| 668 | static acpi_status | ||
| 669 | ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv) | ||
| 670 | { | ||
| 671 | acpi_status status; | ||
| 672 | unsigned long long sta; | ||
| 673 | acpi_handle tmp; | ||
| 674 | struct pci_dev *pdev; | ||
| 675 | u32 gsi_base; | ||
| 676 | u64 phys_addr; | ||
| 677 | struct acpiphp_ioapic *ioapic; | ||
| 678 | |||
| 679 | /* Evaluate _STA if present */ | ||
| 680 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | ||
| 681 | if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL) | ||
| 682 | return AE_CTRL_DEPTH; | ||
| 683 | |||
| 684 | /* Scan only PCI bus scope */ | ||
| 685 | status = acpi_get_handle(handle, "_HID", &tmp); | ||
| 686 | if (ACPI_SUCCESS(status)) | ||
| 687 | return AE_CTRL_DEPTH; | ||
| 688 | |||
| 689 | if (get_gsi_base(handle, &gsi_base)) | ||
| 690 | return AE_OK; | ||
| 691 | |||
| 692 | ioapic = kmalloc(sizeof(*ioapic), GFP_KERNEL); | ||
| 693 | if (!ioapic) | ||
| 694 | return AE_NO_MEMORY; | ||
| 695 | |||
| 696 | pdev = get_apic_pci_info(handle); | ||
| 697 | if (!pdev) | ||
| 698 | goto exit_kfree; | ||
| 699 | |||
| 700 | if (pci_enable_device(pdev)) | ||
| 701 | goto exit_pci_dev_put; | ||
| 702 | |||
| 703 | pci_set_master(pdev); | ||
| 704 | |||
| 705 | if (pci_request_region(pdev, 0, "I/O APIC(acpiphp)")) | ||
| 706 | goto exit_pci_disable_device; | ||
| 707 | |||
| 708 | phys_addr = pci_resource_start(pdev, 0); | ||
| 709 | if (acpi_register_ioapic(handle, phys_addr, gsi_base)) | ||
| 710 | goto exit_pci_release_region; | ||
| 711 | |||
| 712 | ioapic->gsi_base = gsi_base; | ||
| 713 | ioapic->dev = pdev; | ||
| 714 | spin_lock(&ioapic_list_lock); | ||
| 715 | list_add_tail(&ioapic->list, &ioapic_list); | ||
| 716 | spin_unlock(&ioapic_list_lock); | ||
| 717 | |||
| 718 | return AE_OK; | ||
| 719 | |||
| 720 | exit_pci_release_region: | ||
| 721 | pci_release_region(pdev, 0); | ||
| 722 | exit_pci_disable_device: | ||
| 723 | pci_disable_device(pdev); | ||
| 724 | exit_pci_dev_put: | ||
| 725 | pci_dev_put(pdev); | ||
| 726 | exit_kfree: | ||
| 727 | kfree(ioapic); | ||
| 728 | |||
| 729 | return AE_OK; | ||
| 730 | } | ||
| 731 | |||
| 732 | static acpi_status | ||
| 733 | ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv) | ||
| 734 | { | ||
| 735 | acpi_status status; | ||
| 736 | unsigned long long sta; | ||
| 737 | acpi_handle tmp; | ||
| 738 | u32 gsi_base; | ||
| 739 | struct acpiphp_ioapic *pos, *n, *ioapic = NULL; | ||
| 740 | |||
| 741 | /* Evaluate _STA if present */ | ||
| 742 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | ||
| 743 | if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL) | ||
| 744 | return AE_CTRL_DEPTH; | ||
| 745 | |||
| 746 | /* Scan only PCI bus scope */ | ||
| 747 | status = acpi_get_handle(handle, "_HID", &tmp); | ||
| 748 | if (ACPI_SUCCESS(status)) | ||
| 749 | return AE_CTRL_DEPTH; | ||
| 750 | |||
| 751 | if (get_gsi_base(handle, &gsi_base)) | ||
| 752 | return AE_OK; | ||
| 753 | |||
| 754 | acpi_unregister_ioapic(handle, gsi_base); | ||
| 755 | |||
| 756 | spin_lock(&ioapic_list_lock); | ||
| 757 | list_for_each_entry_safe(pos, n, &ioapic_list, list) { | ||
| 758 | if (pos->gsi_base != gsi_base) | ||
| 759 | continue; | ||
| 760 | ioapic = pos; | ||
| 761 | list_del(&ioapic->list); | ||
| 762 | break; | ||
| 763 | } | ||
| 764 | spin_unlock(&ioapic_list_lock); | ||
| 765 | |||
| 766 | if (!ioapic) | ||
| 767 | return AE_OK; | ||
| 768 | |||
| 769 | pci_release_region(ioapic->dev, 0); | ||
| 770 | pci_disable_device(ioapic->dev); | ||
| 771 | pci_dev_put(ioapic->dev); | ||
| 772 | kfree(ioapic); | ||
| 773 | |||
| 774 | return AE_OK; | ||
| 775 | } | ||
| 776 | |||
| 777 | static int acpiphp_configure_ioapics(acpi_handle handle) | ||
| 778 | { | ||
| 779 | ioapic_add(handle, 0, NULL, NULL); | ||
| 780 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | ||
| 781 | ACPI_UINT32_MAX, ioapic_add, NULL, NULL); | ||
| 782 | return 0; | ||
| 783 | } | ||
| 784 | |||
| 785 | static int acpiphp_unconfigure_ioapics(acpi_handle handle) | ||
| 786 | { | ||
| 787 | ioapic_remove(handle, 0, NULL, NULL); | ||
| 788 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | ||
| 789 | ACPI_UINT32_MAX, ioapic_remove, NULL, NULL); | ||
| 790 | return 0; | ||
| 791 | } | ||
| 792 | |||
| 793 | static int power_on_slot(struct acpiphp_slot *slot) | 599 | static int power_on_slot(struct acpiphp_slot *slot) |
| 794 | { | 600 | { |
| 795 | acpi_status status; | 601 | acpi_status status; |
| 796 | struct acpiphp_func *func; | 602 | struct acpiphp_func *func; |
| 797 | struct list_head *l; | ||
| 798 | int retval = 0; | 603 | int retval = 0; |
| 799 | 604 | ||
| 800 | /* if already enabled, just skip */ | 605 | /* if already enabled, just skip */ |
| 801 | if (slot->flags & SLOT_POWEREDON) | 606 | if (slot->flags & SLOT_POWEREDON) |
| 802 | goto err_exit; | 607 | goto err_exit; |
| 803 | 608 | ||
| 804 | list_for_each (l, &slot->funcs) { | 609 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 805 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 806 | |||
| 807 | if (func->flags & FUNC_HAS_PS0) { | 610 | if (func->flags & FUNC_HAS_PS0) { |
| 808 | dbg("%s: executing _PS0\n", __func__); | 611 | dbg("%s: executing _PS0\n", __func__); |
| 809 | status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL); | 612 | status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL); |
| @@ -829,7 +632,6 @@ static int power_off_slot(struct acpiphp_slot *slot) | |||
| 829 | { | 632 | { |
| 830 | acpi_status status; | 633 | acpi_status status; |
| 831 | struct acpiphp_func *func; | 634 | struct acpiphp_func *func; |
| 832 | struct list_head *l; | ||
| 833 | 635 | ||
| 834 | int retval = 0; | 636 | int retval = 0; |
| 835 | 637 | ||
| @@ -837,9 +639,7 @@ static int power_off_slot(struct acpiphp_slot *slot) | |||
| 837 | if ((slot->flags & SLOT_POWEREDON) == 0) | 639 | if ((slot->flags & SLOT_POWEREDON) == 0) |
| 838 | goto err_exit; | 640 | goto err_exit; |
| 839 | 641 | ||
| 840 | list_for_each (l, &slot->funcs) { | 642 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 841 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 842 | |||
| 843 | if (func->flags & FUNC_HAS_PS3) { | 643 | if (func->flags & FUNC_HAS_PS3) { |
| 844 | status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); | 644 | status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); |
| 845 | if (ACPI_FAILURE(status)) { | 645 | if (ACPI_FAILURE(status)) { |
| @@ -920,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func) | |||
| 920 | -ret_val); | 720 | -ret_val); |
| 921 | goto acpiphp_bus_add_out; | 721 | goto acpiphp_bus_add_out; |
| 922 | } | 722 | } |
| 923 | /* | ||
| 924 | * try to start anyway. We could have failed to add | ||
| 925 | * simply because this bus had previously been added | ||
| 926 | * on another add. Don't bother with the return value | ||
| 927 | * we just keep going. | ||
| 928 | */ | ||
| 929 | ret_val = acpi_bus_start(device); | 723 | ret_val = acpi_bus_start(device); |
| 930 | 724 | ||
| 931 | acpiphp_bus_add_out: | 725 | acpiphp_bus_add_out: |
| @@ -966,7 +760,6 @@ static int __ref enable_device(struct acpiphp_slot *slot) | |||
| 966 | { | 760 | { |
| 967 | struct pci_dev *dev; | 761 | struct pci_dev *dev; |
| 968 | struct pci_bus *bus = slot->bridge->pci_bus; | 762 | struct pci_bus *bus = slot->bridge->pci_bus; |
| 969 | struct list_head *l; | ||
| 970 | struct acpiphp_func *func; | 763 | struct acpiphp_func *func; |
| 971 | int retval = 0; | 764 | int retval = 0; |
| 972 | int num, max, pass; | 765 | int num, max, pass; |
| @@ -1006,21 +799,16 @@ static int __ref enable_device(struct acpiphp_slot *slot) | |||
| 1006 | } | 799 | } |
| 1007 | } | 800 | } |
| 1008 | 801 | ||
| 1009 | list_for_each (l, &slot->funcs) { | 802 | list_for_each_entry(func, &slot->funcs, sibling) |
| 1010 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 1011 | acpiphp_bus_add(func); | 803 | acpiphp_bus_add(func); |
| 1012 | } | ||
| 1013 | 804 | ||
| 1014 | pci_bus_assign_resources(bus); | 805 | pci_bus_assign_resources(bus); |
| 1015 | acpiphp_sanitize_bus(bus); | 806 | acpiphp_sanitize_bus(bus); |
| 1016 | acpiphp_set_hpp_values(bus); | 807 | acpiphp_set_hpp_values(bus); |
| 1017 | list_for_each_entry(func, &slot->funcs, sibling) | ||
| 1018 | acpiphp_configure_ioapics(func->handle); | ||
| 1019 | pci_enable_bridges(bus); | 808 | pci_enable_bridges(bus); |
| 1020 | pci_bus_add_devices(bus); | 809 | pci_bus_add_devices(bus); |
| 1021 | 810 | ||
| 1022 | list_for_each (l, &slot->funcs) { | 811 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 1023 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 1024 | dev = pci_get_slot(bus, PCI_DEVFN(slot->device, | 812 | dev = pci_get_slot(bus, PCI_DEVFN(slot->device, |
| 1025 | func->function)); | 813 | func->function)); |
| 1026 | if (!dev) | 814 | if (!dev) |
| @@ -1091,7 +879,6 @@ static int disable_device(struct acpiphp_slot *slot) | |||
| 1091 | } | 879 | } |
| 1092 | 880 | ||
| 1093 | list_for_each_entry(func, &slot->funcs, sibling) { | 881 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 1094 | acpiphp_unconfigure_ioapics(func->handle); | ||
| 1095 | acpiphp_bus_trim(func->handle); | 882 | acpiphp_bus_trim(func->handle); |
| 1096 | } | 883 | } |
| 1097 | 884 | ||
| @@ -1119,12 +906,9 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) | |||
| 1119 | acpi_status status; | 906 | acpi_status status; |
| 1120 | unsigned long long sta = 0; | 907 | unsigned long long sta = 0; |
| 1121 | u32 dvid; | 908 | u32 dvid; |
| 1122 | struct list_head *l; | ||
| 1123 | struct acpiphp_func *func; | 909 | struct acpiphp_func *func; |
| 1124 | 910 | ||
| 1125 | list_for_each (l, &slot->funcs) { | 911 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 1126 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 1127 | |||
| 1128 | if (func->flags & FUNC_HAS_STA) { | 912 | if (func->flags & FUNC_HAS_STA) { |
| 1129 | status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); | 913 | status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); |
| 1130 | if (ACPI_SUCCESS(status) && sta) | 914 | if (ACPI_SUCCESS(status) && sta) |
| @@ -1152,13 +936,10 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot) | |||
| 1152 | { | 936 | { |
| 1153 | acpi_status status; | 937 | acpi_status status; |
| 1154 | struct acpiphp_func *func; | 938 | struct acpiphp_func *func; |
| 1155 | struct list_head *l; | ||
| 1156 | struct acpi_object_list arg_list; | 939 | struct acpi_object_list arg_list; |
| 1157 | union acpi_object arg; | 940 | union acpi_object arg; |
| 1158 | 941 | ||
| 1159 | list_for_each (l, &slot->funcs) { | 942 | list_for_each_entry(func, &slot->funcs, sibling) { |
| 1160 | func = list_entry(l, struct acpiphp_func, sibling); | ||
| 1161 | |||
| 1162 | /* We don't want to call _EJ0 on non-existing functions. */ | 943 | /* We don't want to call _EJ0 on non-existing functions. */ |
| 1163 | if ((func->flags & FUNC_HAS_EJ0)) { | 944 | if ((func->flags & FUNC_HAS_EJ0)) { |
| 1164 | /* _EJ0 method take one argument */ | 945 | /* _EJ0 method take one argument */ |
| @@ -1275,7 +1056,6 @@ static int acpiphp_configure_bridge (acpi_handle handle) | |||
| 1275 | acpiphp_sanitize_bus(bus); | 1056 | acpiphp_sanitize_bus(bus); |
| 1276 | acpiphp_set_hpp_values(bus); | 1057 | acpiphp_set_hpp_values(bus); |
| 1277 | pci_enable_bridges(bus); | 1058 | pci_enable_bridges(bus); |
| 1278 | acpiphp_configure_ioapics(handle); | ||
| 1279 | return 0; | 1059 | return 0; |
| 1280 | } | 1060 | } |
| 1281 | 1061 | ||
| @@ -1367,7 +1147,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
| 1367 | bridge = acpiphp_handle_to_bridge(handle); | 1147 | bridge = acpiphp_handle_to_bridge(handle); |
| 1368 | if (type == ACPI_NOTIFY_BUS_CHECK) { | 1148 | if (type == ACPI_NOTIFY_BUS_CHECK) { |
| 1369 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX, | 1149 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX, |
| 1370 | count_sub_bridges, &num_sub_bridges, NULL); | 1150 | count_sub_bridges, NULL, &num_sub_bridges, NULL); |
| 1371 | } | 1151 | } |
| 1372 | 1152 | ||
| 1373 | if (!bridge && !num_sub_bridges) { | 1153 | if (!bridge && !num_sub_bridges) { |
| @@ -1388,7 +1168,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont | |||
| 1388 | } | 1168 | } |
| 1389 | if (num_sub_bridges) | 1169 | if (num_sub_bridges) |
| 1390 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, | 1170 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, |
| 1391 | ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL); | 1171 | ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); |
| 1392 | break; | 1172 | break; |
| 1393 | 1173 | ||
| 1394 | case ACPI_NOTIFY_DEVICE_CHECK: | 1174 | case ACPI_NOTIFY_DEVICE_CHECK: |
| @@ -1512,7 +1292,7 @@ int __init acpiphp_glue_init(void) | |||
| 1512 | int num = 0; | 1292 | int num = 0; |
| 1513 | 1293 | ||
| 1514 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | 1294 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
| 1515 | ACPI_UINT32_MAX, find_root_bridges, &num, NULL); | 1295 | ACPI_UINT32_MAX, find_root_bridges, NULL, &num, NULL); |
| 1516 | 1296 | ||
| 1517 | if (num <= 0) | 1297 | if (num <= 0) |
| 1518 | return -1; | 1298 | return -1; |
| @@ -1542,7 +1322,7 @@ int __init acpiphp_get_num_slots(void) | |||
| 1542 | struct acpiphp_bridge *bridge; | 1322 | struct acpiphp_bridge *bridge; |
| 1543 | int num_slots = 0; | 1323 | int num_slots = 0; |
| 1544 | 1324 | ||
| 1545 | list_for_each_entry (bridge, &bridge_list, list) { | 1325 | list_for_each_entry(bridge, &bridge_list, list) { |
| 1546 | dbg("Bus %04x:%02x has %d slot%s\n", | 1326 | dbg("Bus %04x:%02x has %d slot%s\n", |
| 1547 | pci_domain_nr(bridge->pci_bus), | 1327 | pci_domain_nr(bridge->pci_bus), |
| 1548 | bridge->pci_bus->number, bridge->nr_slots, | 1328 | bridge->pci_bus->number, bridge->nr_slots, |
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index e7be66dbac21..aa5df485f8cf 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
| @@ -434,7 +434,7 @@ static int __init ibm_acpiphp_init(void) | |||
| 434 | dbg("%s\n", __func__); | 434 | dbg("%s\n", __func__); |
| 435 | 435 | ||
| 436 | if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | 436 | if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
| 437 | ACPI_UINT32_MAX, ibm_find_acpi_device, | 437 | ACPI_UINT32_MAX, ibm_find_acpi_device, NULL, |
| 438 | &ibm_acpi_handle, NULL) != FOUND_APCI) { | 438 | &ibm_acpi_handle, NULL) != FOUND_APCI) { |
| 439 | err("%s: acpi_walk_namespace failed\n", __func__); | 439 | err("%s: acpi_walk_namespace failed\n", __func__); |
| 440 | retval = -ENODEV; | 440 | retval = -ENODEV; |
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 53836001d511..9c6a9fd26812 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <asm/io.h> /* for read? and write? functions */ | 32 | #include <asm/io.h> /* for read? and write? functions */ |
| 33 | #include <linux/delay.h> /* for delays */ | 33 | #include <linux/delay.h> /* for delays */ |
| 34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
| 35 | #include <linux/sched.h> /* for signal_pending() */ | ||
| 35 | 36 | ||
| 36 | #define MY_NAME "cpqphp" | 37 | #define MY_NAME "cpqphp" |
| 37 | 38 | ||
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 83f337c891a9..c7084f0eca5a 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
| @@ -890,7 +890,7 @@ static int poll_hpc(void *data) | |||
| 890 | msleep(POLL_INTERVAL_SEC * 1000); | 890 | msleep(POLL_INTERVAL_SEC * 1000); |
| 891 | 891 | ||
| 892 | if (kthread_should_stop()) | 892 | if (kthread_should_stop()) |
| 893 | break; | 893 | goto out_sleep; |
| 894 | 894 | ||
| 895 | down (&semOperations); | 895 | down (&semOperations); |
| 896 | 896 | ||
| @@ -904,6 +904,7 @@ static int poll_hpc(void *data) | |||
| 904 | /* give up the hardware semaphore */ | 904 | /* give up the hardware semaphore */ |
| 905 | up (&semOperations); | 905 | up (&semOperations); |
| 906 | /* sleep for a short time just for good measure */ | 906 | /* sleep for a short time just for good measure */ |
| 907 | out_sleep: | ||
| 907 | msleep(100); | 908 | msleep(100); |
| 908 | } | 909 | } |
| 909 | up (&sem_exit); | 910 | up (&sem_exit); |
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 0325d989bb46..38183a534b65 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
| @@ -68,26 +68,26 @@ static DEFINE_MUTEX(pci_hp_mutex); | |||
| 68 | static char *pci_bus_speed_strings[] = { | 68 | static char *pci_bus_speed_strings[] = { |
| 69 | "33 MHz PCI", /* 0x00 */ | 69 | "33 MHz PCI", /* 0x00 */ |
| 70 | "66 MHz PCI", /* 0x01 */ | 70 | "66 MHz PCI", /* 0x01 */ |
| 71 | "66 MHz PCIX", /* 0x02 */ | 71 | "66 MHz PCI-X", /* 0x02 */ |
| 72 | "100 MHz PCIX", /* 0x03 */ | 72 | "100 MHz PCI-X", /* 0x03 */ |
| 73 | "133 MHz PCIX", /* 0x04 */ | 73 | "133 MHz PCI-X", /* 0x04 */ |
| 74 | NULL, /* 0x05 */ | 74 | NULL, /* 0x05 */ |
| 75 | NULL, /* 0x06 */ | 75 | NULL, /* 0x06 */ |
| 76 | NULL, /* 0x07 */ | 76 | NULL, /* 0x07 */ |
| 77 | NULL, /* 0x08 */ | 77 | NULL, /* 0x08 */ |
| 78 | "66 MHz PCIX 266", /* 0x09 */ | 78 | "66 MHz PCI-X 266", /* 0x09 */ |
| 79 | "100 MHz PCIX 266", /* 0x0a */ | 79 | "100 MHz PCI-X 266", /* 0x0a */ |
| 80 | "133 MHz PCIX 266", /* 0x0b */ | 80 | "133 MHz PCI-X 266", /* 0x0b */ |
| 81 | NULL, /* 0x0c */ | 81 | NULL, /* 0x0c */ |
| 82 | NULL, /* 0x0d */ | 82 | NULL, /* 0x0d */ |
| 83 | NULL, /* 0x0e */ | 83 | NULL, /* 0x0e */ |
| 84 | NULL, /* 0x0f */ | 84 | NULL, /* 0x0f */ |
| 85 | NULL, /* 0x10 */ | 85 | NULL, /* 0x10 */ |
| 86 | "66 MHz PCIX 533", /* 0x11 */ | 86 | "66 MHz PCI-X 533", /* 0x11 */ |
| 87 | "100 MHz PCIX 533", /* 0x12 */ | 87 | "100 MHz PCI-X 533", /* 0x12 */ |
| 88 | "133 MHz PCIX 533", /* 0x13 */ | 88 | "133 MHz PCI-X 533", /* 0x13 */ |
| 89 | "2.5 GT/s PCI-E", /* 0x14 */ | 89 | "2.5 GT/s PCIe", /* 0x14 */ |
| 90 | "5.0 GT/s PCI-E", /* 0x15 */ | 90 | "5.0 GT/s PCIe", /* 0x15 */ |
| 91 | }; | 91 | }; |
| 92 | 92 | ||
| 93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI | 93 | #ifdef CONFIG_HOTPLUG_PCI_CPCI |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 3070f77eb56a..4ed76b47b6dc 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
| @@ -91,7 +91,6 @@ struct controller { | |||
| 91 | struct slot *slot; | 91 | struct slot *slot; |
| 92 | wait_queue_head_t queue; /* sleep & wake process */ | 92 | wait_queue_head_t queue; /* sleep & wake process */ |
| 93 | u32 slot_cap; | 93 | u32 slot_cap; |
| 94 | u8 cap_base; | ||
| 95 | struct timer_list poll_timer; | 94 | struct timer_list poll_timer; |
| 96 | unsigned int cmd_busy:1; | 95 | unsigned int cmd_busy:1; |
| 97 | unsigned int no_cmd_complete:1; | 96 | unsigned int no_cmd_complete:1; |
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 37c8d3d0323e..b09b083011d6 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
| @@ -87,7 +87,8 @@ static int __init dummy_probe(struct pcie_device *dev) | |||
| 87 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ | 87 | /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ |
| 88 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 88 | if (pciehp_get_hp_hw_control_from_firmware(pdev)) |
| 89 | return -ENODEV; | 89 | return -ENODEV; |
| 90 | if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) | 90 | pos = pci_pcie_cap(pdev); |
| 91 | if (!pos) | ||
| 91 | return -ENODEV; | 92 | return -ENODEV; |
| 92 | pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); | 93 | pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); |
| 93 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | 94 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); |
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bc234719b1df..5674b2075bdc 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
| @@ -72,18 +72,6 @@ static int get_adapter_status (struct hotplug_slot *slot, u8 *value); | |||
| 72 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | 72 | static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); |
| 73 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); | 73 | static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); |
| 74 | 74 | ||
| 75 | static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { | ||
| 76 | .set_attention_status = set_attention_status, | ||
| 77 | .enable_slot = enable_slot, | ||
| 78 | .disable_slot = disable_slot, | ||
| 79 | .get_power_status = get_power_status, | ||
| 80 | .get_attention_status = get_attention_status, | ||
| 81 | .get_latch_status = get_latch_status, | ||
| 82 | .get_adapter_status = get_adapter_status, | ||
| 83 | .get_max_bus_speed = get_max_bus_speed, | ||
| 84 | .get_cur_bus_speed = get_cur_bus_speed, | ||
| 85 | }; | ||
| 86 | |||
| 87 | /** | 75 | /** |
| 88 | * release_slot - free up the memory used by a slot | 76 | * release_slot - free up the memory used by a slot |
| 89 | * @hotplug_slot: slot to free | 77 | * @hotplug_slot: slot to free |
| @@ -95,6 +83,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot) | |||
| 95 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 83 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 96 | __func__, hotplug_slot_name(hotplug_slot)); | 84 | __func__, hotplug_slot_name(hotplug_slot)); |
| 97 | 85 | ||
| 86 | kfree(hotplug_slot->ops); | ||
| 98 | kfree(hotplug_slot->info); | 87 | kfree(hotplug_slot->info); |
| 99 | kfree(hotplug_slot); | 88 | kfree(hotplug_slot); |
| 100 | } | 89 | } |
| @@ -104,6 +93,7 @@ static int init_slot(struct controller *ctrl) | |||
| 104 | struct slot *slot = ctrl->slot; | 93 | struct slot *slot = ctrl->slot; |
| 105 | struct hotplug_slot *hotplug = NULL; | 94 | struct hotplug_slot *hotplug = NULL; |
| 106 | struct hotplug_slot_info *info = NULL; | 95 | struct hotplug_slot_info *info = NULL; |
| 96 | struct hotplug_slot_ops *ops = NULL; | ||
| 107 | char name[SLOT_NAME_SIZE]; | 97 | char name[SLOT_NAME_SIZE]; |
| 108 | int retval = -ENOMEM; | 98 | int retval = -ENOMEM; |
| 109 | 99 | ||
| @@ -115,11 +105,28 @@ static int init_slot(struct controller *ctrl) | |||
| 115 | if (!info) | 105 | if (!info) |
| 116 | goto out; | 106 | goto out; |
| 117 | 107 | ||
| 108 | /* Setup hotplug slot ops */ | ||
| 109 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | ||
| 110 | if (!ops) | ||
| 111 | goto out; | ||
| 112 | ops->enable_slot = enable_slot; | ||
| 113 | ops->disable_slot = disable_slot; | ||
| 114 | ops->get_power_status = get_power_status; | ||
| 115 | ops->get_adapter_status = get_adapter_status; | ||
| 116 | ops->get_max_bus_speed = get_max_bus_speed; | ||
| 117 | ops->get_cur_bus_speed = get_cur_bus_speed; | ||
| 118 | if (MRL_SENS(ctrl)) | ||
| 119 | ops->get_latch_status = get_latch_status; | ||
| 120 | if (ATTN_LED(ctrl)) { | ||
| 121 | ops->get_attention_status = get_attention_status; | ||
| 122 | ops->set_attention_status = set_attention_status; | ||
| 123 | } | ||
| 124 | |||
| 118 | /* register this slot with the hotplug pci core */ | 125 | /* register this slot with the hotplug pci core */ |
| 119 | hotplug->info = info; | 126 | hotplug->info = info; |
| 120 | hotplug->private = slot; | 127 | hotplug->private = slot; |
| 121 | hotplug->release = &release_slot; | 128 | hotplug->release = &release_slot; |
| 122 | hotplug->ops = &pciehp_hotplug_slot_ops; | 129 | hotplug->ops = ops; |
| 123 | slot->hotplug_slot = hotplug; | 130 | slot->hotplug_slot = hotplug; |
| 124 | snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); | 131 | snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); |
| 125 | 132 | ||
| @@ -128,17 +135,12 @@ static int init_slot(struct controller *ctrl) | |||
| 128 | ctrl->pcie->port->subordinate->number, PSN(ctrl)); | 135 | ctrl->pcie->port->subordinate->number, PSN(ctrl)); |
| 129 | retval = pci_hp_register(hotplug, | 136 | retval = pci_hp_register(hotplug, |
| 130 | ctrl->pcie->port->subordinate, 0, name); | 137 | ctrl->pcie->port->subordinate, 0, name); |
| 131 | if (retval) { | 138 | if (retval) |
| 132 | ctrl_err(ctrl, | 139 | ctrl_err(ctrl, |
| 133 | "pci_hp_register failed with error %d\n", retval); | 140 | "pci_hp_register failed with error %d\n", retval); |
| 134 | goto out; | ||
| 135 | } | ||
| 136 | get_power_status(hotplug, &info->power_status); | ||
| 137 | get_attention_status(hotplug, &info->attention_status); | ||
| 138 | get_latch_status(hotplug, &info->latch_status); | ||
| 139 | get_adapter_status(hotplug, &info->adapter_status); | ||
| 140 | out: | 141 | out: |
| 141 | if (retval) { | 142 | if (retval) { |
| 143 | kfree(ops); | ||
| 142 | kfree(info); | 144 | kfree(info); |
| 143 | kfree(hotplug); | 145 | kfree(hotplug); |
| 144 | } | 146 | } |
| @@ -160,12 +162,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) | |||
| 160 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 162 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 161 | __func__, slot_name(slot)); | 163 | __func__, slot_name(slot)); |
| 162 | 164 | ||
| 163 | hotplug_slot->info->attention_status = status; | 165 | return pciehp_set_attention_status(slot, status); |
| 164 | |||
| 165 | if (ATTN_LED(slot->ctrl)) | ||
| 166 | pciehp_set_attention_status(slot, status); | ||
| 167 | |||
| 168 | return 0; | ||
| 169 | } | 166 | } |
| 170 | 167 | ||
| 171 | 168 | ||
| @@ -193,92 +190,62 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) | |||
| 193 | static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) | 190 | static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) |
| 194 | { | 191 | { |
| 195 | struct slot *slot = hotplug_slot->private; | 192 | struct slot *slot = hotplug_slot->private; |
| 196 | int retval; | ||
| 197 | 193 | ||
| 198 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 194 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 199 | __func__, slot_name(slot)); | 195 | __func__, slot_name(slot)); |
| 200 | 196 | ||
| 201 | retval = pciehp_get_power_status(slot, value); | 197 | return pciehp_get_power_status(slot, value); |
| 202 | if (retval < 0) | ||
| 203 | *value = hotplug_slot->info->power_status; | ||
| 204 | |||
| 205 | return 0; | ||
| 206 | } | 198 | } |
| 207 | 199 | ||
| 208 | static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) | 200 | static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) |
| 209 | { | 201 | { |
| 210 | struct slot *slot = hotplug_slot->private; | 202 | struct slot *slot = hotplug_slot->private; |
| 211 | int retval; | ||
| 212 | 203 | ||
| 213 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 204 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 214 | __func__, slot_name(slot)); | 205 | __func__, slot_name(slot)); |
| 215 | 206 | ||
| 216 | retval = pciehp_get_attention_status(slot, value); | 207 | return pciehp_get_attention_status(slot, value); |
| 217 | if (retval < 0) | ||
| 218 | *value = hotplug_slot->info->attention_status; | ||
| 219 | |||
| 220 | return 0; | ||
| 221 | } | 208 | } |
| 222 | 209 | ||
| 223 | static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) | 210 | static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) |
| 224 | { | 211 | { |
| 225 | struct slot *slot = hotplug_slot->private; | 212 | struct slot *slot = hotplug_slot->private; |
| 226 | int retval; | ||
| 227 | 213 | ||
| 228 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 214 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 229 | __func__, slot_name(slot)); | 215 | __func__, slot_name(slot)); |
| 230 | 216 | ||
| 231 | retval = pciehp_get_latch_status(slot, value); | 217 | return pciehp_get_latch_status(slot, value); |
| 232 | if (retval < 0) | ||
| 233 | *value = hotplug_slot->info->latch_status; | ||
| 234 | |||
| 235 | return 0; | ||
| 236 | } | 218 | } |
| 237 | 219 | ||
| 238 | static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) | 220 | static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) |
| 239 | { | 221 | { |
| 240 | struct slot *slot = hotplug_slot->private; | 222 | struct slot *slot = hotplug_slot->private; |
| 241 | int retval; | ||
| 242 | 223 | ||
| 243 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 224 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 244 | __func__, slot_name(slot)); | 225 | __func__, slot_name(slot)); |
| 245 | 226 | ||
| 246 | retval = pciehp_get_adapter_status(slot, value); | 227 | return pciehp_get_adapter_status(slot, value); |
| 247 | if (retval < 0) | ||
| 248 | *value = hotplug_slot->info->adapter_status; | ||
| 249 | |||
| 250 | return 0; | ||
| 251 | } | 228 | } |
| 252 | 229 | ||
| 253 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, | 230 | static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, |
| 254 | enum pci_bus_speed *value) | 231 | enum pci_bus_speed *value) |
| 255 | { | 232 | { |
| 256 | struct slot *slot = hotplug_slot->private; | 233 | struct slot *slot = hotplug_slot->private; |
| 257 | int retval; | ||
| 258 | 234 | ||
| 259 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 235 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 260 | __func__, slot_name(slot)); | 236 | __func__, slot_name(slot)); |
| 261 | 237 | ||
| 262 | retval = pciehp_get_max_link_speed(slot, value); | 238 | return pciehp_get_max_link_speed(slot, value); |
| 263 | if (retval < 0) | ||
| 264 | *value = PCI_SPEED_UNKNOWN; | ||
| 265 | |||
| 266 | return 0; | ||
| 267 | } | 239 | } |
| 268 | 240 | ||
| 269 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) | 241 | static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) |
| 270 | { | 242 | { |
| 271 | struct slot *slot = hotplug_slot->private; | 243 | struct slot *slot = hotplug_slot->private; |
| 272 | int retval; | ||
| 273 | 244 | ||
| 274 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", | 245 | ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", |
| 275 | __func__, slot_name(slot)); | 246 | __func__, slot_name(slot)); |
| 276 | 247 | ||
| 277 | retval = pciehp_get_cur_link_speed(slot, value); | 248 | return pciehp_get_cur_link_speed(slot, value); |
| 278 | if (retval < 0) | ||
| 279 | *value = PCI_SPEED_UNKNOWN; | ||
| 280 | |||
| 281 | return 0; | ||
| 282 | } | 249 | } |
| 283 | 250 | ||
| 284 | static int pciehp_probe(struct pcie_device *dev) | 251 | static int pciehp_probe(struct pcie_device *dev) |
| @@ -286,14 +253,13 @@ static int pciehp_probe(struct pcie_device *dev) | |||
| 286 | int rc; | 253 | int rc; |
| 287 | struct controller *ctrl; | 254 | struct controller *ctrl; |
| 288 | struct slot *slot; | 255 | struct slot *slot; |
| 289 | u8 value; | 256 | u8 occupied, poweron; |
| 290 | struct pci_dev *pdev = dev->port; | ||
| 291 | 257 | ||
| 292 | if (pciehp_force) | 258 | if (pciehp_force) |
| 293 | dev_info(&dev->device, | 259 | dev_info(&dev->device, |
| 294 | "Bypassing BIOS check for pciehp use on %s\n", | 260 | "Bypassing BIOS check for pciehp use on %s\n", |
| 295 | pci_name(pdev)); | 261 | pci_name(dev->port)); |
| 296 | else if (pciehp_get_hp_hw_control_from_firmware(pdev)) | 262 | else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) |
| 297 | goto err_out_none; | 263 | goto err_out_none; |
| 298 | 264 | ||
| 299 | ctrl = pcie_init(dev); | 265 | ctrl = pcie_init(dev); |
| @@ -318,23 +284,18 @@ static int pciehp_probe(struct pcie_device *dev) | |||
| 318 | rc = pcie_init_notification(ctrl); | 284 | rc = pcie_init_notification(ctrl); |
| 319 | if (rc) { | 285 | if (rc) { |
| 320 | ctrl_err(ctrl, "Notification initialization failed\n"); | 286 | ctrl_err(ctrl, "Notification initialization failed\n"); |
| 321 | goto err_out_release_ctlr; | 287 | goto err_out_free_ctrl_slot; |
| 322 | } | 288 | } |
| 323 | 289 | ||
| 324 | /* Check if slot is occupied */ | 290 | /* Check if slot is occupied */ |
| 325 | slot = ctrl->slot; | 291 | slot = ctrl->slot; |
| 326 | pciehp_get_adapter_status(slot, &value); | 292 | pciehp_get_adapter_status(slot, &occupied); |
| 327 | if (value) { | 293 | pciehp_get_power_status(slot, &poweron); |
| 328 | if (pciehp_force) | 294 | if (occupied && pciehp_force) |
| 329 | pciehp_enable_slot(slot); | 295 | pciehp_enable_slot(slot); |
| 330 | } else { | 296 | /* If empty slot's power status is on, turn power off */ |
| 331 | /* Power off slot if not occupied */ | 297 | if (!occupied && poweron && POWER_CTRL(ctrl)) |
| 332 | if (POWER_CTRL(ctrl)) { | 298 | pciehp_power_off_slot(slot); |
| 333 | rc = pciehp_power_off_slot(slot); | ||
| 334 | if (rc) | ||
| 335 | goto err_out_free_ctrl_slot; | ||
| 336 | } | ||
| 337 | } | ||
| 338 | 299 | ||
| 339 | return 0; | 300 | return 0; |
| 340 | 301 | ||
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 84487d126e4d..d6ac1b261dd9 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
| @@ -142,23 +142,9 @@ u8 pciehp_handle_power_fault(struct slot *p_slot) | |||
| 142 | 142 | ||
| 143 | /* power fault */ | 143 | /* power fault */ |
| 144 | ctrl_dbg(ctrl, "Power fault interrupt received\n"); | 144 | ctrl_dbg(ctrl, "Power fault interrupt received\n"); |
| 145 | 145 | ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); | |
| 146 | if (!pciehp_query_power_fault(p_slot)) { | 146 | event_type = INT_POWER_FAULT; |
| 147 | /* | 147 | ctrl_info(ctrl, "Power fault bit %x set\n", 0); |
| 148 | * power fault Cleared | ||
| 149 | */ | ||
| 150 | ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", | ||
| 151 | slot_name(p_slot)); | ||
| 152 | event_type = INT_POWER_FAULT_CLEAR; | ||
| 153 | } else { | ||
| 154 | /* | ||
| 155 | * power fault | ||
| 156 | */ | ||
| 157 | ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot)); | ||
| 158 | event_type = INT_POWER_FAULT; | ||
| 159 | ctrl_info(ctrl, "Power fault bit %x set\n", 0); | ||
| 160 | } | ||
| 161 | |||
| 162 | queue_interrupt_event(p_slot, event_type); | 148 | queue_interrupt_event(p_slot, event_type); |
| 163 | 149 | ||
| 164 | return 1; | 150 | return 1; |
| @@ -224,13 +210,12 @@ static int board_added(struct slot *p_slot) | |||
| 224 | retval = pciehp_check_link_status(ctrl); | 210 | retval = pciehp_check_link_status(ctrl); |
| 225 | if (retval) { | 211 | if (retval) { |
| 226 | ctrl_err(ctrl, "Failed to check link status\n"); | 212 | ctrl_err(ctrl, "Failed to check link status\n"); |
| 227 | set_slot_off(ctrl, p_slot); | 213 | goto err_exit; |
| 228 | return retval; | ||
| 229 | } | 214 | } |
| 230 | 215 | ||
| 231 | /* Check for a power fault */ | 216 | /* Check for a power fault */ |
| 232 | if (pciehp_query_power_fault(p_slot)) { | 217 | if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { |
| 233 | ctrl_dbg(ctrl, "Power fault detected\n"); | 218 | ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); |
| 234 | retval = -EIO; | 219 | retval = -EIO; |
| 235 | goto err_exit; | 220 | goto err_exit; |
| 236 | } | 221 | } |
| @@ -363,25 +348,6 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) | |||
| 363 | mutex_unlock(&p_slot->lock); | 348 | mutex_unlock(&p_slot->lock); |
| 364 | } | 349 | } |
| 365 | 350 | ||
| 366 | static int update_slot_info(struct slot *slot) | ||
| 367 | { | ||
| 368 | struct hotplug_slot_info *info; | ||
| 369 | int result; | ||
| 370 | |||
| 371 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
| 372 | if (!info) | ||
| 373 | return -ENOMEM; | ||
| 374 | |||
| 375 | pciehp_get_power_status(slot, &info->power_status); | ||
| 376 | pciehp_get_attention_status(slot, &info->attention_status); | ||
| 377 | pciehp_get_latch_status(slot, &info->latch_status); | ||
| 378 | pciehp_get_adapter_status(slot, &info->adapter_status); | ||
| 379 | |||
| 380 | result = pci_hp_change_slot_info(slot->hotplug_slot, info); | ||
| 381 | kfree (info); | ||
| 382 | return result; | ||
| 383 | } | ||
| 384 | |||
| 385 | /* | 351 | /* |
| 386 | * Note: This function must be called with slot->lock held | 352 | * Note: This function must be called with slot->lock held |
| 387 | */ | 353 | */ |
| @@ -442,7 +408,6 @@ static void handle_button_press_event(struct slot *p_slot) | |||
| 442 | * to hot-add or hot-remove is in progress | 408 | * to hot-add or hot-remove is in progress |
| 443 | */ | 409 | */ |
| 444 | ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); | 410 | ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); |
| 445 | update_slot_info(p_slot); | ||
| 446 | break; | 411 | break; |
| 447 | default: | 412 | default: |
| 448 | ctrl_warn(ctrl, "Not a valid state\n"); | 413 | ctrl_warn(ctrl, "Not a valid state\n"); |
| @@ -500,11 +465,9 @@ static void interrupt_event_handler(struct work_struct *work) | |||
| 500 | if (!HP_SUPR_RM(ctrl)) | 465 | if (!HP_SUPR_RM(ctrl)) |
| 501 | break; | 466 | break; |
| 502 | ctrl_dbg(ctrl, "Surprise Removal\n"); | 467 | ctrl_dbg(ctrl, "Surprise Removal\n"); |
| 503 | update_slot_info(p_slot); | ||
| 504 | handle_surprise_event(p_slot); | 468 | handle_surprise_event(p_slot); |
| 505 | break; | 469 | break; |
| 506 | default: | 470 | default: |
| 507 | update_slot_info(p_slot); | ||
| 508 | break; | 471 | break; |
| 509 | } | 472 | } |
| 510 | mutex_unlock(&p_slot->lock); | 473 | mutex_unlock(&p_slot->lock); |
| @@ -547,9 +510,6 @@ int pciehp_enable_slot(struct slot *p_slot) | |||
| 547 | if (rc) { | 510 | if (rc) { |
| 548 | pciehp_get_latch_status(p_slot, &getstatus); | 511 | pciehp_get_latch_status(p_slot, &getstatus); |
| 549 | } | 512 | } |
| 550 | |||
| 551 | update_slot_info(p_slot); | ||
| 552 | |||
| 553 | return rc; | 513 | return rc; |
| 554 | } | 514 | } |
| 555 | 515 | ||
| @@ -590,10 +550,7 @@ int pciehp_disable_slot(struct slot *p_slot) | |||
| 590 | } | 550 | } |
| 591 | } | 551 | } |
| 592 | 552 | ||
| 593 | ret = remove_board(p_slot); | 553 | return remove_board(p_slot); |
| 594 | update_slot_info(p_slot); | ||
| 595 | |||
| 596 | return ret; | ||
| 597 | } | 554 | } |
| 598 | 555 | ||
| 599 | int pciehp_sysfs_enable_slot(struct slot *p_slot) | 556 | int pciehp_sysfs_enable_slot(struct slot *p_slot) |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 9ef4605c1ef6..10040d58c8ef 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -45,25 +45,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); | |||
| 45 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) | 45 | static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) |
| 46 | { | 46 | { |
| 47 | struct pci_dev *dev = ctrl->pcie->port; | 47 | struct pci_dev *dev = ctrl->pcie->port; |
| 48 | return pci_read_config_word(dev, ctrl->cap_base + reg, value); | 48 | return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) | 51 | static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) |
| 52 | { | 52 | { |
| 53 | struct pci_dev *dev = ctrl->pcie->port; | 53 | struct pci_dev *dev = ctrl->pcie->port; |
| 54 | return pci_read_config_dword(dev, ctrl->cap_base + reg, value); | 54 | return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) | 57 | static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) |
| 58 | { | 58 | { |
| 59 | struct pci_dev *dev = ctrl->pcie->port; | 59 | struct pci_dev *dev = ctrl->pcie->port; |
| 60 | return pci_write_config_word(dev, ctrl->cap_base + reg, value); | 60 | return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value); |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) | 63 | static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) |
| 64 | { | 64 | { |
| 65 | struct pci_dev *dev = ctrl->pcie->port; | 65 | struct pci_dev *dev = ctrl->pcie->port; |
| 66 | return pci_write_config_dword(dev, ctrl->cap_base + reg, value); | 66 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | /* Power Control Command */ | 69 | /* Power Control Command */ |
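These accessor changes rely on pci_pcie_cap(), which returns the PCI Express capability offset that the PCI core caches in struct pci_dev at enumeration time, so the driver no longer needs its own ctrl->cap_base copy of pci_find_capability(). A small illustration, assuming only a normally enumerated device:

/* Illustration only: pci_pcie_cap() is a cheap, cached capability lookup. */
static u16 read_slot_status(struct pci_dev *dev)
{
	u16 status = 0;

	if (pci_is_pcie(dev))	/* pci_pcie_cap() returns 0 for non-PCIe devices */
		pci_read_config_word(dev, pci_pcie_cap(dev) + PCI_EXP_SLTSTA,
				     &status);
	return status;
}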
| @@ -318,8 +318,8 @@ int pciehp_get_attention_status(struct slot *slot, u8 *status) | |||
| 318 | return retval; | 318 | return retval; |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", | 321 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__, |
| 322 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); | 322 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); |
| 323 | 323 | ||
| 324 | atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; | 324 | atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; |
| 325 | 325 | ||
| @@ -356,8 +356,8 @@ int pciehp_get_power_status(struct slot *slot, u8 *status) | |||
| 356 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); | 356 | ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); |
| 357 | return retval; | 357 | return retval; |
| 358 | } | 358 | } |
| 359 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", | 359 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__, |
| 360 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); | 360 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl); |
| 361 | 361 | ||
| 362 | pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; | 362 | pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; |
| 363 | 363 | ||
| @@ -427,27 +427,24 @@ int pciehp_set_attention_status(struct slot *slot, u8 value) | |||
| 427 | struct controller *ctrl = slot->ctrl; | 427 | struct controller *ctrl = slot->ctrl; |
| 428 | u16 slot_cmd; | 428 | u16 slot_cmd; |
| 429 | u16 cmd_mask; | 429 | u16 cmd_mask; |
| 430 | int rc; | ||
| 431 | 430 | ||
| 432 | cmd_mask = PCI_EXP_SLTCTL_AIC; | 431 | cmd_mask = PCI_EXP_SLTCTL_AIC; |
| 433 | switch (value) { | 432 | switch (value) { |
| 434 | case 0 : /* turn off */ | 433 | case 0 : /* turn off */ |
| 435 | slot_cmd = 0x00C0; | 434 | slot_cmd = 0x00C0; |
| 436 | break; | 435 | break; |
| 437 | case 1: /* turn on */ | 436 | case 1: /* turn on */ |
| 438 | slot_cmd = 0x0040; | 437 | slot_cmd = 0x0040; |
| 439 | break; | 438 | break; |
| 440 | case 2: /* turn blink */ | 439 | case 2: /* turn blink */ |
| 441 | slot_cmd = 0x0080; | 440 | slot_cmd = 0x0080; |
| 442 | break; | 441 | break; |
| 443 | default: | 442 | default: |
| 444 | return -1; | 443 | return -EINVAL; |
| 445 | } | 444 | } |
| 446 | rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 445 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 447 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 446 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 448 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 447 | return pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 449 | |||
| 450 | return rc; | ||
| 451 | } | 448 | } |
| 452 | 449 | ||
| 453 | void pciehp_green_led_on(struct slot *slot) | 450 | void pciehp_green_led_on(struct slot *slot) |
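For reference, the slot_cmd constants used here and in the LED helpers below are the indicator-control fields of the Slot Control register: Attention Indicator Control in bits 7:6 (the PCI_EXP_SLTCTL_AIC mask, 0x00C0) and Power Indicator Control in bits 9:8 (PCI_EXP_SLTCTL_PIC, 0x0300), with 01b = on, 10b = blink, 11b = off. The macro names below are illustrative only, not kernel definitions:

/* Attention Indicator Control, Slot Control bits 7:6 (mask 0x00C0) */
#define ATTN_IND_ON	(1 << 6)	/* 0x0040 */
#define ATTN_IND_BLINK	(2 << 6)	/* 0x0080 */
#define ATTN_IND_OFF	(3 << 6)	/* 0x00C0 */
/* Power Indicator Control, Slot Control bits 9:8 (mask 0x0300) */
#define PWR_IND_ON	(1 << 8)	/* 0x0100 */
#define PWR_IND_BLINK	(2 << 8)	/* 0x0200 */
#define PWR_IND_OFF	(3 << 8)	/* 0x0300 */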
| @@ -459,8 +456,8 @@ void pciehp_green_led_on(struct slot *slot) | |||
| 459 | slot_cmd = 0x0100; | 456 | slot_cmd = 0x0100; |
| 460 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 457 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
| 461 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 458 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 462 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 459 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 463 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 460 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 464 | } | 461 | } |
| 465 | 462 | ||
| 466 | void pciehp_green_led_off(struct slot *slot) | 463 | void pciehp_green_led_off(struct slot *slot) |
| @@ -472,8 +469,8 @@ void pciehp_green_led_off(struct slot *slot) | |||
| 472 | slot_cmd = 0x0300; | 469 | slot_cmd = 0x0300; |
| 473 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 470 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
| 474 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 471 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 475 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 472 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 476 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 473 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 477 | } | 474 | } |
| 478 | 475 | ||
| 479 | void pciehp_green_led_blink(struct slot *slot) | 476 | void pciehp_green_led_blink(struct slot *slot) |
| @@ -485,8 +482,8 @@ void pciehp_green_led_blink(struct slot *slot) | |||
| 485 | slot_cmd = 0x0200; | 482 | slot_cmd = 0x0200; |
| 486 | cmd_mask = PCI_EXP_SLTCTL_PIC; | 483 | cmd_mask = PCI_EXP_SLTCTL_PIC; |
| 487 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 484 | pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 488 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 485 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 489 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 486 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 490 | } | 487 | } |
| 491 | 488 | ||
| 492 | int pciehp_power_on_slot(struct slot * slot) | 489 | int pciehp_power_on_slot(struct slot * slot) |
| @@ -514,97 +511,38 @@ int pciehp_power_on_slot(struct slot * slot) | |||
| 514 | return retval; | 511 | return retval; |
| 515 | } | 512 | } |
| 516 | } | 513 | } |
| 514 | ctrl->power_fault_detected = 0; | ||
| 517 | 515 | ||
| 518 | slot_cmd = POWER_ON; | 516 | slot_cmd = POWER_ON; |
| 519 | cmd_mask = PCI_EXP_SLTCTL_PCC; | 517 | cmd_mask = PCI_EXP_SLTCTL_PCC; |
| 520 | if (!pciehp_poll_mode) { | ||
| 521 | /* Enable power fault detection turned off at power off time */ | ||
| 522 | slot_cmd |= PCI_EXP_SLTCTL_PFDE; | ||
| 523 | cmd_mask |= PCI_EXP_SLTCTL_PFDE; | ||
| 524 | } | ||
| 525 | |||
| 526 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 518 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 527 | if (retval) { | 519 | if (retval) { |
| 528 | ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); | 520 | ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); |
| 529 | return retval; | 521 | return retval; |
| 530 | } | 522 | } |
| 531 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 523 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 532 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 524 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 533 | 525 | ||
| 534 | ctrl->power_fault_detected = 0; | ||
| 535 | return retval; | 526 | return retval; |
| 536 | } | 527 | } |
| 537 | 528 | ||
| 538 | static inline int pcie_mask_bad_dllp(struct controller *ctrl) | ||
| 539 | { | ||
| 540 | struct pci_dev *dev = ctrl->pcie->port; | ||
| 541 | int pos; | ||
| 542 | u32 reg; | ||
| 543 | |||
| 544 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
| 545 | if (!pos) | ||
| 546 | return 0; | ||
| 547 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®); | ||
| 548 | if (reg & PCI_ERR_COR_BAD_DLLP) | ||
| 549 | return 0; | ||
| 550 | reg |= PCI_ERR_COR_BAD_DLLP; | ||
| 551 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); | ||
| 552 | return 1; | ||
| 553 | } | ||
| 554 | |||
| 555 | static inline void pcie_unmask_bad_dllp(struct controller *ctrl) | ||
| 556 | { | ||
| 557 | struct pci_dev *dev = ctrl->pcie->port; | ||
| 558 | u32 reg; | ||
| 559 | int pos; | ||
| 560 | |||
| 561 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
| 562 | if (!pos) | ||
| 563 | return; | ||
| 564 | pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®); | ||
| 565 | if (!(reg & PCI_ERR_COR_BAD_DLLP)) | ||
| 566 | return; | ||
| 567 | reg &= ~PCI_ERR_COR_BAD_DLLP; | ||
| 568 | pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg); | ||
| 569 | } | ||
| 570 | |||
| 571 | int pciehp_power_off_slot(struct slot * slot) | 529 | int pciehp_power_off_slot(struct slot * slot) |
| 572 | { | 530 | { |
| 573 | struct controller *ctrl = slot->ctrl; | 531 | struct controller *ctrl = slot->ctrl; |
| 574 | u16 slot_cmd; | 532 | u16 slot_cmd; |
| 575 | u16 cmd_mask; | 533 | u16 cmd_mask; |
| 576 | int retval = 0; | 534 | int retval; |
| 577 | int changed; | ||
| 578 | |||
| 579 | /* | ||
| 580 | * Set Bad DLLP Mask bit in Correctable Error Mask | ||
| 581 | * Register. This is the workaround against Bad DLLP error | ||
| 582 | * that sometimes happens during turning power off the slot | ||
| 583 | * which conforms to PCI Express 1.0a spec. | ||
| 584 | */ | ||
| 585 | changed = pcie_mask_bad_dllp(ctrl); | ||
| 586 | 535 | ||
| 587 | slot_cmd = POWER_OFF; | 536 | slot_cmd = POWER_OFF; |
| 588 | cmd_mask = PCI_EXP_SLTCTL_PCC; | 537 | cmd_mask = PCI_EXP_SLTCTL_PCC; |
| 589 | if (!pciehp_poll_mode) { | ||
| 590 | /* Disable power fault detection */ | ||
| 591 | slot_cmd &= ~PCI_EXP_SLTCTL_PFDE; | ||
| 592 | cmd_mask |= PCI_EXP_SLTCTL_PFDE; | ||
| 593 | } | ||
| 594 | |||
| 595 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); | 538 | retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); |
| 596 | if (retval) { | 539 | if (retval) { |
| 597 | ctrl_err(ctrl, "Write command failed!\n"); | 540 | ctrl_err(ctrl, "Write command failed!\n"); |
| 598 | retval = -1; | 541 | return retval; |
| 599 | goto out; | ||
| 600 | } | 542 | } |
| 601 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", | 543 | ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, |
| 602 | __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); | 544 | pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); |
| 603 | out: | 545 | return 0; |
| 604 | if (changed) | ||
| 605 | pcie_unmask_bad_dllp(ctrl); | ||
| 606 | |||
| 607 | return retval; | ||
| 608 | } | 546 | } |
| 609 | 547 | ||
| 610 | static irqreturn_t pcie_isr(int irq, void *dev_id) | 548 | static irqreturn_t pcie_isr(int irq, void *dev_id) |
| @@ -840,11 +778,19 @@ int pcie_enable_notification(struct controller *ctrl) | |||
| 840 | { | 778 | { |
| 841 | u16 cmd, mask; | 779 | u16 cmd, mask; |
| 842 | 780 | ||
| 781 | /* | ||
| 782 | * TBD: Power fault detected software notification support. | ||
| 783 | * | ||
| 784 | * Power fault detected software notification is not enabled | ||
| 785 | * now, because it caused power fault detected interrupt storm | ||
| 786 | * on some machines. On those machines, power fault detected | ||
| 787 | * bit in the slot status register was set again immediately | ||
| 788 | * when it is cleared in the interrupt service routine, and | ||
| 789 | * next power fault detected interrupt was notified again. | ||
| 790 | */ | ||
| 843 | cmd = PCI_EXP_SLTCTL_PDCE; | 791 | cmd = PCI_EXP_SLTCTL_PDCE; |
| 844 | if (ATTN_BUTTN(ctrl)) | 792 | if (ATTN_BUTTN(ctrl)) |
| 845 | cmd |= PCI_EXP_SLTCTL_ABPE; | 793 | cmd |= PCI_EXP_SLTCTL_ABPE; |
| 846 | if (POWER_CTRL(ctrl)) | ||
| 847 | cmd |= PCI_EXP_SLTCTL_PFDE; | ||
| 848 | if (MRL_SENS(ctrl)) | 794 | if (MRL_SENS(ctrl)) |
| 849 | cmd |= PCI_EXP_SLTCTL_MRLSCE; | 795 | cmd |= PCI_EXP_SLTCTL_MRLSCE; |
| 850 | if (!pciehp_poll_mode) | 796 | if (!pciehp_poll_mode) |
| @@ -866,7 +812,8 @@ static void pcie_disable_notification(struct controller *ctrl) | |||
| 866 | u16 mask; | 812 | u16 mask; |
| 867 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | | 813 | mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | |
| 868 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | | 814 | PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | |
| 869 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); | 815 | PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | |
| 816 | PCI_EXP_SLTCTL_DLLSCE); | ||
| 870 | if (pcie_write_cmd(ctrl, 0, mask)) | 817 | if (pcie_write_cmd(ctrl, 0, mask)) |
| 871 | ctrl_warn(ctrl, "Cannot disable software notification\n"); | 818 | ctrl_warn(ctrl, "Cannot disable software notification\n"); |
| 872 | } | 819 | } |
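The TBD comment explains why PCI_EXP_SLTCTL_PFDE is no longer part of the notification mask; power faults are instead latched in ctrl->power_fault_detected and checked in board_added(). If the interrupt storm were resolved, re-enabling would only mean adding the bit back to the command/mask pair handed to pcie_write_cmd(), roughly as in this sketch (mirroring the removed lines, not current code):

u16 cmd  = PCI_EXP_SLTCTL_PDCE;
u16 mask = PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_PFDE;

if (POWER_CTRL(ctrl))
	cmd |= PCI_EXP_SLTCTL_PFDE;	/* request power-fault notification */
pcie_write_cmd(ctrl, cmd, mask);	/* only bits set in "mask" are rewritten */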
| @@ -934,7 +881,8 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
| 934 | pdev->subsystem_device); | 881 | pdev->subsystem_device); |
| 935 | ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", | 882 | ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", |
| 936 | pdev->subsystem_vendor); | 883 | pdev->subsystem_vendor); |
| 937 | ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base); | 884 | ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", |
| 885 | pci_pcie_cap(pdev)); | ||
| 938 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 886 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
| 939 | if (!pci_resource_len(pdev, i)) | 887 | if (!pci_resource_len(pdev, i)) |
| 940 | continue; | 888 | continue; |
| @@ -978,8 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev) | |||
| 978 | goto abort; | 926 | goto abort; |
| 979 | } | 927 | } |
| 980 | ctrl->pcie = dev; | 928 | ctrl->pcie = dev; |
| 981 | ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 929 | if (!pci_pcie_cap(pdev)) { |
| 982 | if (!ctrl->cap_base) { | ||
| 983 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); | 930 | ctrl_err(ctrl, "Cannot find PCI Express capability\n"); |
| 984 | goto abort_ctrl; | 931 | goto abort_ctrl; |
| 985 | } | 932 | } |
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index cc8ec3aa41a7..80b461c98557 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c | |||
| @@ -43,7 +43,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | |||
| 43 | * Perhaps we *should* use default settings for PCIe, but | 43 | * Perhaps we *should* use default settings for PCIe, but |
| 44 | * pciehp didn't, so we won't either. | 44 | * pciehp didn't, so we won't either. |
| 45 | */ | 45 | */ |
| 46 | if (dev->is_pcie) | 46 | if (pci_is_pcie(dev)) |
| 47 | return; | 47 | return; |
| 48 | dev_info(&dev->dev, "using default PCI settings\n"); | 48 | dev_info(&dev->dev, "using default PCI settings\n"); |
| 49 | hpp = &pci_default_type0; | 49 | hpp = &pci_default_type0; |
| @@ -102,7 +102,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
| 102 | return; | 102 | return; |
| 103 | 103 | ||
| 104 | /* Find PCI Express capability */ | 104 | /* Find PCI Express capability */ |
| 105 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 105 | pos = pci_pcie_cap(dev); |
| 106 | if (!pos) | 106 | if (!pos) |
| 107 | return; | 107 | return; |
| 108 | 108 | ||
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index bd588eb8e922..8e210cd76e55 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
| @@ -121,7 +121,7 @@ struct controller { | |||
| 121 | #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 | 121 | #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 |
| 122 | #define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 | 122 | #define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 |
| 123 | 123 | ||
| 124 | /* AMD PCIX bridge registers */ | 124 | /* AMD PCI-X bridge registers */ |
| 125 | #define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C | 125 | #define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C |
| 126 | #define PCIX_MISCII_OFFSET 0x48 | 126 | #define PCIX_MISCII_OFFSET 0x48 |
| 127 | #define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 | 127 | #define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 855dd7ca47f3..417312528ddf 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | 48 | ||
| 49 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) | 49 | #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) |
| 50 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) | 50 | #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) |
| 51 | #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) | ||
| 51 | 52 | ||
| 52 | #define IOAPIC_RANGE_START (0xfee00000) | 53 | #define IOAPIC_RANGE_START (0xfee00000) |
| 53 | #define IOAPIC_RANGE_END (0xfeefffff) | 54 | #define IOAPIC_RANGE_END (0xfeefffff) |
| @@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p) | |||
| 94 | /* global iommu list, set NULL for ignored DMAR units */ | 95 | /* global iommu list, set NULL for ignored DMAR units */ |
| 95 | static struct intel_iommu **g_iommus; | 96 | static struct intel_iommu **g_iommus; |
| 96 | 97 | ||
| 98 | static void __init check_tylersburg_isoch(void); | ||
| 97 | static int rwbf_quirk; | 99 | static int rwbf_quirk; |
| 98 | 100 | ||
| 99 | /* | 101 | /* |
| @@ -275,6 +277,7 @@ static int hw_pass_through = 1; | |||
| 275 | 277 | ||
| 276 | struct dmar_domain { | 278 | struct dmar_domain { |
| 277 | int id; /* domain id */ | 279 | int id; /* domain id */ |
| 280 | int nid; /* node id */ | ||
| 278 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ | 281 | unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ |
| 279 | 282 | ||
| 280 | struct list_head devices; /* all devices' list */ | 283 | struct list_head devices; /* all devices' list */ |
| @@ -302,7 +305,7 @@ struct device_domain_info { | |||
| 302 | int segment; /* PCI domain */ | 305 | int segment; /* PCI domain */ |
| 303 | u8 bus; /* PCI bus number */ | 306 | u8 bus; /* PCI bus number */ |
| 304 | u8 devfn; /* PCI devfn number */ | 307 | u8 devfn; /* PCI devfn number */ |
| 305 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | 308 | struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */ |
| 306 | struct intel_iommu *iommu; /* IOMMU used by this device */ | 309 | struct intel_iommu *iommu; /* IOMMU used by this device */ |
| 307 | struct dmar_domain *domain; /* pointer to domain */ | 310 | struct dmar_domain *domain; /* pointer to domain */ |
| 308 | }; | 311 | }; |
| @@ -384,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache; | |||
| 384 | static struct kmem_cache *iommu_devinfo_cache; | 387 | static struct kmem_cache *iommu_devinfo_cache; |
| 385 | static struct kmem_cache *iommu_iova_cache; | 388 | static struct kmem_cache *iommu_iova_cache; |
| 386 | 389 | ||
| 387 | static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep) | 390 | static inline void *alloc_pgtable_page(int node) |
| 388 | { | 391 | { |
| 389 | unsigned int flags; | 392 | struct page *page; |
| 390 | void *vaddr; | 393 | void *vaddr = NULL; |
| 391 | |||
| 392 | /* trying to avoid low memory issues */ | ||
| 393 | flags = current->flags & PF_MEMALLOC; | ||
| 394 | current->flags |= PF_MEMALLOC; | ||
| 395 | vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC); | ||
| 396 | current->flags &= (~PF_MEMALLOC | flags); | ||
| 397 | return vaddr; | ||
| 398 | } | ||
| 399 | |||
| 400 | 394 | ||
| 401 | static inline void *alloc_pgtable_page(void) | 395 | page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0); |
| 402 | { | 396 | if (page) |
| 403 | unsigned int flags; | 397 | vaddr = page_address(page); |
| 404 | void *vaddr; | ||
| 405 | |||
| 406 | /* trying to avoid low memory issues */ | ||
| 407 | flags = current->flags & PF_MEMALLOC; | ||
| 408 | current->flags |= PF_MEMALLOC; | ||
| 409 | vaddr = (void *)get_zeroed_page(GFP_ATOMIC); | ||
| 410 | current->flags &= (~PF_MEMALLOC | flags); | ||
| 411 | return vaddr; | 398 | return vaddr; |
| 412 | } | 399 | } |
| 413 | 400 | ||
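alloc_pgtable_page() is now NUMA-aware: translation-table pages are allocated on the node of the IOMMU (or domain) that will walk them, and the PF_MEMALLOC juggling is replaced by a plain GFP_ATOMIC | __GFP_ZERO allocation. The call sites in the later hunks simply pass the owning node, where -1 means no preference; illustrative variable names below:

/* Illustrative call sites, as converted later in this patch */
root_entry = (struct root_entry *)alloc_pgtable_page(iommu->node);
domain_pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);	/* nid may be -1 */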
| @@ -418,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr) | |||
| 418 | 405 | ||
| 419 | static inline void *alloc_domain_mem(void) | 406 | static inline void *alloc_domain_mem(void) |
| 420 | { | 407 | { |
| 421 | return iommu_kmem_cache_alloc(iommu_domain_cache); | 408 | return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC); |
| 422 | } | 409 | } |
| 423 | 410 | ||
| 424 | static void free_domain_mem(void *vaddr) | 411 | static void free_domain_mem(void *vaddr) |
| @@ -428,7 +415,7 @@ static void free_domain_mem(void *vaddr) | |||
| 428 | 415 | ||
| 429 | static inline void * alloc_devinfo_mem(void) | 416 | static inline void * alloc_devinfo_mem(void) |
| 430 | { | 417 | { |
| 431 | return iommu_kmem_cache_alloc(iommu_devinfo_cache); | 418 | return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC); |
| 432 | } | 419 | } |
| 433 | 420 | ||
| 434 | static inline void free_devinfo_mem(void *vaddr) | 421 | static inline void free_devinfo_mem(void *vaddr) |
| @@ -438,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr) | |||
| 438 | 425 | ||
| 439 | struct iova *alloc_iova_mem(void) | 426 | struct iova *alloc_iova_mem(void) |
| 440 | { | 427 | { |
| 441 | return iommu_kmem_cache_alloc(iommu_iova_cache); | 428 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); |
| 442 | } | 429 | } |
| 443 | 430 | ||
| 444 | void free_iova_mem(struct iova *iova) | 431 | void free_iova_mem(struct iova *iova) |
| @@ -587,7 +574,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | |||
| 587 | root = &iommu->root_entry[bus]; | 574 | root = &iommu->root_entry[bus]; |
| 588 | context = get_context_addr_from_root(root); | 575 | context = get_context_addr_from_root(root); |
| 589 | if (!context) { | 576 | if (!context) { |
| 590 | context = (struct context_entry *)alloc_pgtable_page(); | 577 | context = (struct context_entry *) |
| 578 | alloc_pgtable_page(iommu->node); | ||
| 591 | if (!context) { | 579 | if (!context) { |
| 592 | spin_unlock_irqrestore(&iommu->lock, flags); | 580 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 593 | return NULL; | 581 | return NULL; |
| @@ -730,7 +718,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | |||
| 730 | if (!dma_pte_present(pte)) { | 718 | if (!dma_pte_present(pte)) { |
| 731 | uint64_t pteval; | 719 | uint64_t pteval; |
| 732 | 720 | ||
| 733 | tmp_page = alloc_pgtable_page(); | 721 | tmp_page = alloc_pgtable_page(domain->nid); |
| 734 | 722 | ||
| 735 | if (!tmp_page) | 723 | if (!tmp_page) |
| 736 | return NULL; | 724 | return NULL; |
| @@ -866,7 +854,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
| 866 | struct root_entry *root; | 854 | struct root_entry *root; |
| 867 | unsigned long flags; | 855 | unsigned long flags; |
| 868 | 856 | ||
| 869 | root = (struct root_entry *)alloc_pgtable_page(); | 857 | root = (struct root_entry *)alloc_pgtable_page(iommu->node); |
| 870 | if (!root) | 858 | if (!root) |
| 871 | return -ENOMEM; | 859 | return -ENOMEM; |
| 872 | 860 | ||
| @@ -1261,6 +1249,7 @@ static struct dmar_domain *alloc_domain(void) | |||
| 1261 | if (!domain) | 1249 | if (!domain) |
| 1262 | return NULL; | 1250 | return NULL; |
| 1263 | 1251 | ||
| 1252 | domain->nid = -1; | ||
| 1264 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | 1253 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
| 1265 | domain->flags = 0; | 1254 | domain->flags = 0; |
| 1266 | 1255 | ||
| @@ -1418,9 +1407,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
| 1418 | domain->iommu_snooping = 0; | 1407 | domain->iommu_snooping = 0; |
| 1419 | 1408 | ||
| 1420 | domain->iommu_count = 1; | 1409 | domain->iommu_count = 1; |
| 1410 | domain->nid = iommu->node; | ||
| 1421 | 1411 | ||
| 1422 | /* always allocate the top pgd */ | 1412 | /* always allocate the top pgd */ |
| 1423 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 1413 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); |
| 1424 | if (!domain->pgd) | 1414 | if (!domain->pgd) |
| 1425 | return -ENOMEM; | 1415 | return -ENOMEM; |
| 1426 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); | 1416 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
| @@ -1521,12 +1511,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1521 | 1511 | ||
| 1522 | /* Skip top levels of page tables for | 1512 | /* Skip top levels of page tables for |
| 1523 | * iommu which has less agaw than default. | 1513 | * iommu which has less agaw than default. |
| 1514 | * Unnecessary for PT mode. | ||
| 1524 | */ | 1515 | */ |
| 1525 | for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { | 1516 | if (translation != CONTEXT_TT_PASS_THROUGH) { |
| 1526 | pgd = phys_to_virt(dma_pte_addr(pgd)); | 1517 | for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { |
| 1527 | if (!dma_pte_present(pgd)) { | 1518 | pgd = phys_to_virt(dma_pte_addr(pgd)); |
| 1528 | spin_unlock_irqrestore(&iommu->lock, flags); | 1519 | if (!dma_pte_present(pgd)) { |
| 1529 | return -ENOMEM; | 1520 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1521 | return -ENOMEM; | ||
| 1522 | } | ||
| 1530 | } | 1523 | } |
| 1531 | } | 1524 | } |
| 1532 | } | 1525 | } |
| @@ -1575,6 +1568,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
| 1575 | spin_lock_irqsave(&domain->iommu_lock, flags); | 1568 | spin_lock_irqsave(&domain->iommu_lock, flags); |
| 1576 | if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { | 1569 | if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { |
| 1577 | domain->iommu_count++; | 1570 | domain->iommu_count++; |
| 1571 | if (domain->iommu_count == 1) | ||
| 1572 | domain->nid = iommu->node; | ||
| 1578 | domain_update_iommu_cap(domain); | 1573 | domain_update_iommu_cap(domain); |
| 1579 | } | 1574 | } |
| 1580 | spin_unlock_irqrestore(&domain->iommu_lock, flags); | 1575 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
| @@ -1609,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, | |||
| 1609 | return ret; | 1604 | return ret; |
| 1610 | parent = parent->bus->self; | 1605 | parent = parent->bus->self; |
| 1611 | } | 1606 | } |
| 1612 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ | 1607 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ |
| 1613 | return domain_context_mapping_one(domain, | 1608 | return domain_context_mapping_one(domain, |
| 1614 | pci_domain_nr(tmp->subordinate), | 1609 | pci_domain_nr(tmp->subordinate), |
| 1615 | tmp->subordinate->number, 0, | 1610 | tmp->subordinate->number, 0, |
| @@ -1649,7 +1644,7 @@ static int domain_context_mapped(struct pci_dev *pdev) | |||
| 1649 | return ret; | 1644 | return ret; |
| 1650 | parent = parent->bus->self; | 1645 | parent = parent->bus->self; |
| 1651 | } | 1646 | } |
| 1652 | if (tmp->is_pcie) | 1647 | if (pci_is_pcie(tmp)) |
| 1653 | return device_context_mapped(iommu, tmp->subordinate->number, | 1648 | return device_context_mapped(iommu, tmp->subordinate->number, |
| 1654 | 0); | 1649 | 0); |
| 1655 | else | 1650 | else |
| @@ -1819,7 +1814,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1819 | 1814 | ||
| 1820 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); | 1815 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1821 | if (dev_tmp) { | 1816 | if (dev_tmp) { |
| 1822 | if (dev_tmp->is_pcie) { | 1817 | if (pci_is_pcie(dev_tmp)) { |
| 1823 | bus = dev_tmp->subordinate->number; | 1818 | bus = dev_tmp->subordinate->number; |
| 1824 | devfn = 0; | 1819 | devfn = 0; |
| 1825 | } else { | 1820 | } else { |
| @@ -1934,6 +1929,9 @@ error: | |||
| 1934 | } | 1929 | } |
| 1935 | 1930 | ||
| 1936 | static int iommu_identity_mapping; | 1931 | static int iommu_identity_mapping; |
| 1932 | #define IDENTMAP_ALL 1 | ||
| 1933 | #define IDENTMAP_GFX 2 | ||
| 1934 | #define IDENTMAP_AZALIA 4 | ||
| 1937 | 1935 | ||
| 1938 | static int iommu_domain_identity_map(struct dmar_domain *domain, | 1936 | static int iommu_domain_identity_map(struct dmar_domain *domain, |
| 1939 | unsigned long long start, | 1937 | unsigned long long start, |
| @@ -1986,6 +1984,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | |||
| 1986 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | 1984 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
| 1987 | pci_name(pdev), start, end); | 1985 | pci_name(pdev), start, end); |
| 1988 | 1986 | ||
| 1987 | if (end < start) { | ||
| 1988 | WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" | ||
| 1989 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 1990 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 1991 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 1992 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 1993 | ret = -EIO; | ||
| 1994 | goto error; | ||
| 1995 | } | ||
| 1996 | |||
| 1989 | if (end >> agaw_to_width(domain->agaw)) { | 1997 | if (end >> agaw_to_width(domain->agaw)) { |
| 1990 | WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" | 1998 | WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" |
| 1991 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | 1999 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
| @@ -2151,8 +2159,14 @@ static int domain_add_dev_info(struct dmar_domain *domain, | |||
| 2151 | 2159 | ||
| 2152 | static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | 2160 | static int iommu_should_identity_map(struct pci_dev *pdev, int startup) |
| 2153 | { | 2161 | { |
| 2154 | if (iommu_identity_mapping == 2) | 2162 | if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) |
| 2155 | return IS_GFX_DEVICE(pdev); | 2163 | return 1; |
| 2164 | |||
| 2165 | if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) | ||
| 2166 | return 1; | ||
| 2167 | |||
| 2168 | if (!(iommu_identity_mapping & IDENTMAP_ALL)) | ||
| 2169 | return 0; | ||
| 2156 | 2170 | ||
| 2157 | /* | 2171 | /* |
| 2158 | * We want to start off with all devices in the 1:1 domain, and | 2172 | * We want to start off with all devices in the 1:1 domain, and |
| @@ -2171,7 +2185,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) | |||
| 2171 | * the 1:1 domain, just in _case_ one of their siblings turns out | 2185 | * the 1:1 domain, just in _case_ one of their siblings turns out |
| 2172 | * not to be able to map all of memory. | 2186 | * not to be able to map all of memory. |
| 2173 | */ | 2187 | */ |
| 2174 | if (!pdev->is_pcie) { | 2188 | if (!pci_is_pcie(pdev)) { |
| 2175 | if (!pci_is_root_bus(pdev->bus)) | 2189 | if (!pci_is_root_bus(pdev->bus)) |
| 2176 | return 0; | 2190 | return 0; |
| 2177 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) | 2191 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) |
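Here iommu_identity_mapping changes from a single mode value to a bitmask, so the pass-through policy, the broken-graphics workaround, and the Tylersburg Azalia quirk can all request identity mapping independently. A condensed sketch of the resulting decision order (placeholder helper, not kernel code):

static bool wants_identity_map(struct pci_dev *pdev, unsigned int flags)
{
	if ((flags & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return true;		/* quirked Azalia sound device */
	if ((flags & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return true;		/* broken-GFX workaround */
	return flags & IDENTMAP_ALL;	/* global policy; the real code applies
					   further per-device checks */
}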
| @@ -2332,11 +2346,14 @@ int __init init_dmars(void) | |||
| 2332 | } | 2346 | } |
| 2333 | 2347 | ||
| 2334 | if (iommu_pass_through) | 2348 | if (iommu_pass_through) |
| 2335 | iommu_identity_mapping = 1; | 2349 | iommu_identity_mapping |= IDENTMAP_ALL; |
| 2350 | |||
| 2336 | #ifdef CONFIG_DMAR_BROKEN_GFX_WA | 2351 | #ifdef CONFIG_DMAR_BROKEN_GFX_WA |
| 2337 | else | 2352 | iommu_identity_mapping |= IDENTMAP_GFX; |
| 2338 | iommu_identity_mapping = 2; | ||
| 2339 | #endif | 2353 | #endif |
| 2354 | |||
| 2355 | check_tylersburg_isoch(); | ||
| 2356 | |||
| 2340 | /* | 2357 | /* |
| 2341 | * If pass through is not set or not enabled, setup context entries for | 2358 | * If pass through is not set or not enabled, setup context entries for |
| 2342 | * identity mappings for rmrr, gfx, and isa and may fall back to static | 2359 | * identity mappings for rmrr, gfx, and isa and may fall back to static |
| @@ -2753,7 +2770,15 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
| 2753 | 2770 | ||
| 2754 | size = PAGE_ALIGN(size); | 2771 | size = PAGE_ALIGN(size); |
| 2755 | order = get_order(size); | 2772 | order = get_order(size); |
| 2756 | flags &= ~(GFP_DMA | GFP_DMA32); | 2773 | |
| 2774 | if (!iommu_no_mapping(hwdev)) | ||
| 2775 | flags &= ~(GFP_DMA | GFP_DMA32); | ||
| 2776 | else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { | ||
| 2777 | if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) | ||
| 2778 | flags |= GFP_DMA; | ||
| 2779 | else | ||
| 2780 | flags |= GFP_DMA32; | ||
| 2781 | } | ||
| 2757 | 2782 | ||
| 2758 | vaddr = (void *)__get_free_pages(flags, order); | 2783 | vaddr = (void *)__get_free_pages(flags, order); |
| 2759 | if (!vaddr) | 2784 | if (!vaddr) |
| @@ -3193,6 +3218,36 @@ static int __init init_iommu_sysfs(void) | |||
| 3193 | } | 3218 | } |
| 3194 | #endif /* CONFIG_PM */ | 3219 | #endif /* CONFIG_PM */ |
| 3195 | 3220 | ||
| 3221 | /* | ||
| 3222 | * Here we only respond to the action of a device being unbound from its driver. | ||
| 3223 | * | ||
| 3224 | * A newly added device is not attached to its DMAR domain here yet. That happens | ||
| 3225 | * when the device is mapped to an iova. | ||
| 3226 | */ | ||
| 3227 | static int device_notifier(struct notifier_block *nb, | ||
| 3228 | unsigned long action, void *data) | ||
| 3229 | { | ||
| 3230 | struct device *dev = data; | ||
| 3231 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 3232 | struct dmar_domain *domain; | ||
| 3233 | |||
| 3234 | if (iommu_no_mapping(dev)) | ||
| 3235 | return 0; | ||
| 3236 | |||
| 3237 | domain = find_domain(pdev); | ||
| 3238 | if (!domain) | ||
| 3239 | return 0; | ||
| 3240 | |||
| 3241 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) | ||
| 3242 | domain_remove_one_dev_info(domain, pdev); | ||
| 3243 | |||
| 3244 | return 0; | ||
| 3245 | } | ||
| 3246 | |||
| 3247 | static struct notifier_block device_nb = { | ||
| 3248 | .notifier_call = device_notifier, | ||
| 3249 | }; | ||
| 3250 | |||
| 3196 | int __init intel_iommu_init(void) | 3251 | int __init intel_iommu_init(void) |
| 3197 | { | 3252 | { |
| 3198 | int ret = 0; | 3253 | int ret = 0; |
| @@ -3217,7 +3272,7 @@ int __init intel_iommu_init(void) | |||
| 3217 | * Check the need for DMA-remapping initialization now. | 3272 | * Check the need for DMA-remapping initialization now. |
| 3218 | * Above initialization will also be used by Interrupt-remapping. | 3273 | * Above initialization will also be used by Interrupt-remapping. |
| 3219 | */ | 3274 | */ |
| 3220 | if (no_iommu || swiotlb || dmar_disabled) | 3275 | if (no_iommu || dmar_disabled) |
| 3221 | return -ENODEV; | 3276 | return -ENODEV; |
| 3222 | 3277 | ||
| 3223 | iommu_init_mempool(); | 3278 | iommu_init_mempool(); |
| @@ -3238,13 +3293,17 @@ int __init intel_iommu_init(void) | |||
| 3238 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); | 3293 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
| 3239 | 3294 | ||
| 3240 | init_timer(&unmap_timer); | 3295 | init_timer(&unmap_timer); |
| 3241 | force_iommu = 1; | 3296 | #ifdef CONFIG_SWIOTLB |
| 3297 | swiotlb = 0; | ||
| 3298 | #endif | ||
| 3242 | dma_ops = &intel_dma_ops; | 3299 | dma_ops = &intel_dma_ops; |
| 3243 | 3300 | ||
| 3244 | init_iommu_sysfs(); | 3301 | init_iommu_sysfs(); |
| 3245 | 3302 | ||
| 3246 | register_iommu(&intel_iommu_ops); | 3303 | register_iommu(&intel_iommu_ops); |
| 3247 | 3304 | ||
| 3305 | bus_register_notifier(&pci_bus_type, &device_nb); | ||
| 3306 | |||
| 3248 | return 0; | 3307 | return 0; |
| 3249 | } | 3308 | } |
| 3250 | 3309 | ||
| @@ -3266,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
| 3266 | parent->devfn); | 3325 | parent->devfn); |
| 3267 | parent = parent->bus->self; | 3326 | parent = parent->bus->self; |
| 3268 | } | 3327 | } |
| 3269 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ | 3328 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ |
| 3270 | iommu_detach_dev(iommu, | 3329 | iommu_detach_dev(iommu, |
| 3271 | tmp->subordinate->number, 0); | 3330 | tmp->subordinate->number, 0); |
| 3272 | else /* this is a legacy PCI bridge */ | 3331 | else /* this is a legacy PCI bridge */ |
| @@ -3402,6 +3461,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) | |||
| 3402 | return NULL; | 3461 | return NULL; |
| 3403 | 3462 | ||
| 3404 | domain->id = vm_domid++; | 3463 | domain->id = vm_domid++; |
| 3464 | domain->nid = -1; | ||
| 3405 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | 3465 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
| 3406 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | 3466 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; |
| 3407 | 3467 | ||
| @@ -3428,9 +3488,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
| 3428 | domain->iommu_coherency = 0; | 3488 | domain->iommu_coherency = 0; |
| 3429 | domain->iommu_snooping = 0; | 3489 | domain->iommu_snooping = 0; |
| 3430 | domain->max_addr = 0; | 3490 | domain->max_addr = 0; |
| 3491 | domain->nid = -1; | ||
| 3431 | 3492 | ||
| 3432 | /* always allocate the top pgd */ | 3493 | /* always allocate the top pgd */ |
| 3433 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | 3494 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); |
| 3434 | if (!domain->pgd) | 3495 | if (!domain->pgd) |
| 3435 | return -ENOMEM; | 3496 | return -ENOMEM; |
| 3436 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); | 3497 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); |
| @@ -3670,3 +3731,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) | |||
| 3670 | } | 3731 | } |
| 3671 | 3732 | ||
| 3672 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); | 3733 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); |
| 3734 | |||
| 3735 | /* On Tylersburg chipsets, some BIOSes have been known to enable the | ||
| 3736 | ISOCH DMAR unit for the Azalia sound device, but not give it any | ||
| 3737 | TLB entries, which causes it to deadlock. Check for that. We do | ||
| 3738 | this in a function called from init_dmars(), instead of in a PCI | ||
| 3739 | quirk, because we don't want to print the obnoxious "BIOS broken" | ||
| 3740 | message if VT-d is actually disabled. | ||
| 3741 | */ | ||
| 3742 | static void __init check_tylersburg_isoch(void) | ||
| 3743 | { | ||
| 3744 | struct pci_dev *pdev; | ||
| 3745 | uint32_t vtisochctrl; | ||
| 3746 | |||
| 3747 | /* If there's no Azalia in the system anyway, forget it. */ | ||
| 3748 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); | ||
| 3749 | if (!pdev) | ||
| 3750 | return; | ||
| 3751 | pci_dev_put(pdev); | ||
| 3752 | |||
| 3753 | /* System Management Registers. Might be hidden, in which case | ||
| 3754 | we can't do the sanity check. But that's OK, because the | ||
| 3755 | known-broken BIOSes _don't_ actually hide it, so far. */ | ||
| 3756 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); | ||
| 3757 | if (!pdev) | ||
| 3758 | return; | ||
| 3759 | |||
| 3760 | if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { | ||
| 3761 | pci_dev_put(pdev); | ||
| 3762 | return; | ||
| 3763 | } | ||
| 3764 | |||
| 3765 | pci_dev_put(pdev); | ||
| 3766 | |||
| 3767 | /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ | ||
| 3768 | if (vtisochctrl & 1) | ||
| 3769 | return; | ||
| 3770 | |||
| 3771 | /* Drop all bits other than the number of TLB entries */ | ||
| 3772 | vtisochctrl &= 0x1c; | ||
| 3773 | |||
| 3774 | /* If we have the recommended number of TLB entries (16), fine. */ | ||
| 3775 | if (vtisochctrl == 0x10) | ||
| 3776 | return; | ||
| 3777 | |||
| 3778 | /* Zero TLB entries? You get to ride the short bus to school. */ | ||
| 3779 | if (!vtisochctrl) { | ||
| 3780 | WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" | ||
| 3781 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
| 3782 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
| 3783 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
| 3784 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
| 3785 | iommu_identity_mapping |= IDENTMAP_AZALIA; | ||
| 3786 | return; | ||
| 3787 | } | ||
| 3788 | |||
| 3789 | printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", | ||
| 3790 | vtisochctrl); | ||
| 3791 | } | ||
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 0ed78a764ded..95b849130ad4 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/dmar.h> | 2 | #include <linux/dmar.h> |
| 3 | #include <linux/spinlock.h> | 3 | #include <linux/spinlock.h> |
| 4 | #include <linux/jiffies.h> | 4 | #include <linux/jiffies.h> |
| 5 | #include <linux/hpet.h> | ||
| 5 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
| 6 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
| 7 | #include <asm/io_apic.h> | 8 | #include <asm/io_apic.h> |
| @@ -14,7 +15,8 @@ | |||
| 14 | #include "pci.h" | 15 | #include "pci.h" |
| 15 | 16 | ||
| 16 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 17 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
| 17 | static int ir_ioapic_num; | 18 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
| 19 | static int ir_ioapic_num, ir_hpet_num; | ||
| 18 | int intr_remapping_enabled; | 20 | int intr_remapping_enabled; |
| 19 | 21 | ||
| 20 | static int disable_intremap; | 22 | static int disable_intremap; |
| @@ -343,6 +345,16 @@ int flush_irte(int irq) | |||
| 343 | return rc; | 345 | return rc; |
| 344 | } | 346 | } |
| 345 | 347 | ||
| 348 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) | ||
| 349 | { | ||
| 350 | int i; | ||
| 351 | |||
| 352 | for (i = 0; i < MAX_HPET_TBS; i++) | ||
| 353 | if (ir_hpet[i].id == hpet_id) | ||
| 354 | return ir_hpet[i].iommu; | ||
| 355 | return NULL; | ||
| 356 | } | ||
| 357 | |||
| 346 | struct intel_iommu *map_ioapic_to_ir(int apic) | 358 | struct intel_iommu *map_ioapic_to_ir(int apic) |
| 347 | { | 359 | { |
| 348 | int i; | 360 | int i; |
| @@ -470,6 +482,36 @@ int set_ioapic_sid(struct irte *irte, int apic) | |||
| 470 | return 0; | 482 | return 0; |
| 471 | } | 483 | } |
| 472 | 484 | ||
| 485 | int set_hpet_sid(struct irte *irte, u8 id) | ||
| 486 | { | ||
| 487 | int i; | ||
| 488 | u16 sid = 0; | ||
| 489 | |||
| 490 | if (!irte) | ||
| 491 | return -1; | ||
| 492 | |||
| 493 | for (i = 0; i < MAX_HPET_TBS; i++) { | ||
| 494 | if (ir_hpet[i].id == id) { | ||
| 495 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | ||
| 496 | break; | ||
| 497 | } | ||
| 498 | } | ||
| 499 | |||
| 500 | if (sid == 0) { | ||
| 501 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); | ||
| 502 | return -1; | ||
| 503 | } | ||
| 504 | |||
| 505 | /* | ||
| 506 | * Should really use SQ_ALL_16. Some platforms are broken. | ||
| 507 | * While we figure out the right quirks for these broken platforms, use | ||
| 508 | * SQ_13_IGNORE_3 for now. | ||
| 509 | */ | ||
| 510 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); | ||
| 511 | |||
| 512 | return 0; | ||
| 513 | } | ||
| 514 | |||
| 473 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) | 515 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) |
| 474 | { | 516 | { |
| 475 | struct pci_dev *bridge; | 517 | struct pci_dev *bridge; |
| @@ -478,7 +520,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 478 | return -1; | 520 | return -1; |
| 479 | 521 | ||
| 480 | /* PCIe device or Root Complex integrated PCI device */ | 522 | /* PCIe device or Root Complex integrated PCI device */ |
| 481 | if (dev->is_pcie || !dev->bus->parent) { | 523 | if (pci_is_pcie(dev) || !dev->bus->parent) { |
| 482 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | 524 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, |
| 483 | (dev->bus->number << 8) | dev->devfn); | 525 | (dev->bus->number << 8) | dev->devfn); |
| 484 | return 0; | 526 | return 0; |
| @@ -486,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 486 | 528 | ||
| 487 | bridge = pci_find_upstream_pcie_bridge(dev); | 529 | bridge = pci_find_upstream_pcie_bridge(dev); |
| 488 | if (bridge) { | 530 | if (bridge) { |
| 489 | if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */ | 531 | if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ |
| 490 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | 532 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, |
| 491 | (bridge->bus->number << 8) | dev->bus->number); | 533 | (bridge->bus->number << 8) | dev->bus->number); |
| 492 | else /* this is a legacy PCI bridge */ | 534 | else /* this is a legacy PCI bridge */ |
| @@ -548,7 +590,8 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | |||
| 548 | if (!iommu->ir_table) | 590 | if (!iommu->ir_table) |
| 549 | return -ENOMEM; | 591 | return -ENOMEM; |
| 550 | 592 | ||
| 551 | pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); | 593 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, |
| 594 | INTR_REMAP_PAGE_ORDER); | ||
| 552 | 595 | ||
| 553 | if (!pages) { | 596 | if (!pages) { |
| 554 | printk(KERN_ERR "failed to allocate pages of order %d\n", | 597 | printk(KERN_ERR "failed to allocate pages of order %d\n", |
| @@ -711,6 +754,34 @@ error: | |||
| 711 | return -1; | 754 | return -1; |
| 712 | } | 755 | } |
| 713 | 756 | ||
| 757 | static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, | ||
| 758 | struct intel_iommu *iommu) | ||
| 759 | { | ||
| 760 | struct acpi_dmar_pci_path *path; | ||
| 761 | u8 bus; | ||
| 762 | int count; | ||
| 763 | |||
| 764 | bus = scope->bus; | ||
| 765 | path = (struct acpi_dmar_pci_path *)(scope + 1); | ||
| 766 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | ||
| 767 | / sizeof(struct acpi_dmar_pci_path); | ||
| 768 | |||
| 769 | while (--count > 0) { | ||
| 770 | /* | ||
| 771 | * Access PCI directly due to the PCI | ||
| 772 | * subsystem isn't initialized yet. | ||
| 773 | */ | ||
| 774 | bus = read_pci_config_byte(bus, path->dev, path->fn, | ||
| 775 | PCI_SECONDARY_BUS); | ||
| 776 | path++; | ||
| 777 | } | ||
| 778 | ir_hpet[ir_hpet_num].bus = bus; | ||
| 779 | ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn); | ||
| 780 | ir_hpet[ir_hpet_num].iommu = iommu; | ||
| 781 | ir_hpet[ir_hpet_num].id = scope->enumeration_id; | ||
| 782 | ir_hpet_num++; | ||
| 783 | } | ||
| 784 | |||
| 714 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | 785 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, |
| 715 | struct intel_iommu *iommu) | 786 | struct intel_iommu *iommu) |
| 716 | { | 787 | { |
| @@ -740,8 +811,8 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, | |||
| 740 | ir_ioapic_num++; | 811 | ir_ioapic_num++; |
| 741 | } | 812 | } |
| 742 | 813 | ||
| 743 | static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | 814 | static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, |
| 744 | struct intel_iommu *iommu) | 815 | struct intel_iommu *iommu) |
| 745 | { | 816 | { |
| 746 | struct acpi_dmar_hardware_unit *drhd; | 817 | struct acpi_dmar_hardware_unit *drhd; |
| 747 | struct acpi_dmar_device_scope *scope; | 818 | struct acpi_dmar_device_scope *scope; |
| @@ -765,6 +836,17 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, | |||
| 765 | drhd->address); | 836 | drhd->address); |
| 766 | 837 | ||
| 767 | ir_parse_one_ioapic_scope(scope, iommu); | 838 | ir_parse_one_ioapic_scope(scope, iommu); |
| 839 | } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { | ||
| 840 | if (ir_hpet_num == MAX_HPET_TBS) { | ||
| 841 | printk(KERN_WARNING "Exceeded Max HPET blocks\n"); | ||
| 842 | return -1; | ||
| 843 | } | ||
| 844 | |||
| 845 | printk(KERN_INFO "HPET id %d under DRHD base" | ||
| 846 | " 0x%Lx\n", scope->enumeration_id, | ||
| 847 | drhd->address); | ||
| 848 | |||
| 849 | ir_parse_one_hpet_scope(scope, iommu); | ||
| 768 | } | 850 | } |
| 769 | start += scope->length; | 851 | start += scope->length; |
| 770 | } | 852 | } |
| @@ -785,7 +867,7 @@ int __init parse_ioapics_under_ir(void) | |||
| 785 | struct intel_iommu *iommu = drhd->iommu; | 867 | struct intel_iommu *iommu = drhd->iommu; |
| 786 | 868 | ||
| 787 | if (ecap_ir_support(iommu->ecap)) { | 869 | if (ecap_ir_support(iommu->ecap)) { |
| 788 | if (ir_parse_ioapic_scope(drhd->hdr, iommu)) | 870 | if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) |
| 789 | return -1; | 871 | return -1; |
| 790 | 872 | ||
| 791 | ir_supported = 1; | 873 | ir_supported = 1; |
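The HPET additions above mirror the existing I/O APIC handling: the DMAR device scope is parsed into ir_hpet[], map_hpet_to_ir() returns the IOMMU whose scope covers a given HPET block, and set_hpet_sid() programs the source-id fields of a remap entry. The expected consumer is the x86 HPET MSI setup path; a minimal sketch of such a caller, assuming the existing alloc_irte()/modify_irte() helpers in this file and abbreviating the rest of the irte field setup:

static int hpet_msi_setup_irte(unsigned int irq, u8 hpet_id,
                               u8 vector, u32 apic_dest)
{
        struct intel_iommu *iommu;
        struct irte irte;
        int index;

        iommu = map_hpet_to_ir(hpet_id);        /* DRHD that covers this HPET */
        if (!iommu)
                return -ENODEV;

        index = alloc_irte(iommu, irq, 1);      /* reserve one remap entry */
        if (index < 0)
                return -ENOSPC;

        memset(&irte, 0, sizeof(irte));
        irte.present = 1;
        irte.vector = vector;                   /* delivery/trigger bits omitted */
        irte.dest_id = apic_dest;

        set_hpet_sid(&irte, hpet_id);           /* SVT/SQ/SID from ir_hpet[] above */

        return modify_irte(irq, &irte);
}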
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h index 63a263c18415..5662fecfee60 100644 --- a/drivers/pci/intr_remapping.h +++ b/drivers/pci/intr_remapping.h | |||
| @@ -7,4 +7,11 @@ struct ioapic_scope { | |||
| 7 | unsigned int devfn; /* PCI devfn number */ | 7 | unsigned int devfn; /* PCI devfn number */ |
| 8 | }; | 8 | }; |
| 9 | 9 | ||
| 10 | struct hpet_scope { | ||
| 11 | struct intel_iommu *iommu; | ||
| 12 | u8 id; | ||
| 13 | unsigned int bus; | ||
| 14 | unsigned int devfn; | ||
| 15 | }; | ||
| 16 | |||
| 10 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | 17 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c new file mode 100644 index 000000000000..3e0d7b5dd1b9 --- /dev/null +++ b/drivers/pci/ioapic.c | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | /* | ||
| 2 | * IOAPIC/IOxAPIC/IOSAPIC driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Fujitsu Limited. | ||
| 5 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | |||
| 12 | /* | ||
| 13 | * This driver manages PCI I/O APICs added by hotplug after boot. We try to | ||
| 14 | * claim all I/O APIC PCI devices, but those present at boot were registered | ||
| 15 | * when we parsed the ACPI MADT, so we'll fail when we try to re-register | ||
| 16 | * them. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/pci.h> | ||
| 20 | #include <linux/acpi.h> | ||
| 21 | #include <acpi/acpi_bus.h> | ||
| 22 | |||
| 23 | struct ioapic { | ||
| 24 | acpi_handle handle; | ||
| 25 | u32 gsi_base; | ||
| 26 | }; | ||
| 27 | |||
| 28 | static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) | ||
| 29 | { | ||
| 30 | acpi_handle handle; | ||
| 31 | acpi_status status; | ||
| 32 | unsigned long long gsb; | ||
| 33 | struct ioapic *ioapic; | ||
| 34 | u64 addr; | ||
| 35 | int ret; | ||
| 36 | char *type; | ||
| 37 | |||
| 38 | handle = DEVICE_ACPI_HANDLE(&dev->dev); | ||
| 39 | if (!handle) | ||
| 40 | return -EINVAL; | ||
| 41 | |||
| 42 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb); | ||
| 43 | if (ACPI_FAILURE(status)) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | /* | ||
| 47 | * The previous code in acpiphp evaluated _MAT if _GSB failed, but | ||
| 48 | * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs. | ||
| 49 | */ | ||
| 50 | |||
| 51 | ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL); | ||
| 52 | if (!ioapic) | ||
| 53 | return -ENOMEM; | ||
| 54 | |||
| 55 | ioapic->handle = handle; | ||
| 56 | ioapic->gsi_base = (u32) gsb; | ||
| 57 | |||
| 58 | if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC) | ||
| 59 | type = "IOAPIC"; | ||
| 60 | else | ||
| 61 | type = "IOxAPIC"; | ||
| 62 | |||
| 63 | ret = pci_enable_device(dev); | ||
| 64 | if (ret < 0) | ||
| 65 | goto exit_free; | ||
| 66 | |||
| 67 | pci_set_master(dev); | ||
| 68 | |||
| 69 | if (pci_request_region(dev, 0, type)) | ||
| 70 | goto exit_disable; | ||
| 71 | |||
| 72 | addr = pci_resource_start(dev, 0); | ||
| 73 | if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base)) | ||
| 74 | goto exit_release; | ||
| 75 | |||
| 76 | pci_set_drvdata(dev, ioapic); | ||
| 77 | dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr, | ||
| 78 | ioapic->gsi_base); | ||
| 79 | return 0; | ||
| 80 | |||
| 81 | exit_release: | ||
| 82 | pci_release_region(dev, 0); | ||
| 83 | exit_disable: | ||
| 84 | pci_disable_device(dev); | ||
| 85 | exit_free: | ||
| 86 | kfree(ioapic); | ||
| 87 | return -ENODEV; | ||
| 88 | } | ||
| 89 | |||
| 90 | static void ioapic_remove(struct pci_dev *dev) | ||
| 91 | { | ||
| 92 | struct ioapic *ioapic = pci_get_drvdata(dev); | ||
| 93 | |||
| 94 | acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base); | ||
| 95 | pci_release_region(dev, 0); | ||
| 96 | pci_disable_device(dev); | ||
| 97 | kfree(ioapic); | ||
| 98 | } | ||
| 99 | |||
| 100 | |||
| 101 | static struct pci_device_id ioapic_devices[] = { | ||
| 102 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | ||
| 103 | PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, }, | ||
| 104 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | ||
| 105 | PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, }, | ||
| 106 | { } | ||
| 107 | }; | ||
| 108 | |||
| 109 | static struct pci_driver ioapic_driver = { | ||
| 110 | .name = "ioapic", | ||
| 111 | .id_table = ioapic_devices, | ||
| 112 | .probe = ioapic_probe, | ||
| 113 | .remove = __devexit_p(ioapic_remove), | ||
| 114 | }; | ||
| 115 | |||
| 116 | static int __init ioapic_init(void) | ||
| 117 | { | ||
| 118 | return pci_register_driver(&ioapic_driver); | ||
| 119 | } | ||
| 120 | |||
| 121 | static void __exit ioapic_exit(void) | ||
| 122 | { | ||
| 123 | pci_unregister_driver(&ioapic_driver); | ||
| 124 | } | ||
| 125 | |||
| 126 | module_init(ioapic_init); | ||
| 127 | module_exit(ioapic_exit); | ||
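The id table above matches on class codes only, with every vendor/device field wildcarded and a 0xffff00 class mask. For comparison, a class match is more commonly spelled with the PCI_DEVICE_CLASS() helper; this sketch (not part of the patch) matches the full 24-bit base-class/sub-class/prog-if value exactly:

static const struct pci_device_id ioapic_class_ids[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
        { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
        { }     /* terminating entry */
};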
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index e03fe98f0619..b2a448e19fe6 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
| @@ -555,7 +555,7 @@ int pci_iov_init(struct pci_dev *dev) | |||
| 555 | { | 555 | { |
| 556 | int pos; | 556 | int pos; |
| 557 | 557 | ||
| 558 | if (!dev->is_pcie) | 558 | if (!pci_is_pcie(dev)) |
| 559 | return -ENODEV; | 559 | return -ENODEV; |
| 560 | 560 | ||
| 561 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); | 561 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 33317df47699..7e2829538a4c 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev) | |||
| 112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | 112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) |
| 113 | { | 113 | { |
| 114 | while (bus->parent) { | 114 | while (bus->parent) { |
| 115 | struct pci_dev *bridge = bus->self; | 115 | if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) |
| 116 | int ret; | ||
| 117 | |||
| 118 | ret = acpi_pm_device_sleep_wake(&bridge->dev, enable); | ||
| 119 | if (!ret || bridge->is_pcie) | ||
| 120 | return; | 116 | return; |
| 121 | bus = bus->parent; | 117 | bus = bus->parent; |
| 122 | } | 118 | } |
| @@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
| 131 | if (acpi_pci_can_wakeup(dev)) | 127 | if (acpi_pci_can_wakeup(dev)) |
| 132 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | 128 | return acpi_pm_device_sleep_wake(&dev->dev, enable); |
| 133 | 129 | ||
| 134 | if (!dev->is_pcie) | 130 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); |
| 135 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); | ||
| 136 | |||
| 137 | return 0; | 131 | return 0; |
| 138 | } | 132 | } |
| 139 | 133 | ||
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 0f6382f090ee..807224ec8351 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -74,7 +74,12 @@ static ssize_t local_cpus_show(struct device *dev, | |||
| 74 | const struct cpumask *mask; | 74 | const struct cpumask *mask; |
| 75 | int len; | 75 | int len; |
| 76 | 76 | ||
| 77 | #ifdef CONFIG_NUMA | ||
| 78 | mask = (dev_to_node(dev) == -1) ? cpu_online_mask : | ||
| 79 | cpumask_of_node(dev_to_node(dev)); | ||
| 80 | #else | ||
| 77 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | 81 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); |
| 82 | #endif | ||
| 78 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | 83 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); |
| 79 | buf[len++] = '\n'; | 84 | buf[len++] = '\n'; |
| 80 | buf[len] = '\0'; | 85 | buf[len] = '\0'; |
| @@ -88,7 +93,12 @@ static ssize_t local_cpulist_show(struct device *dev, | |||
| 88 | const struct cpumask *mask; | 93 | const struct cpumask *mask; |
| 89 | int len; | 94 | int len; |
| 90 | 95 | ||
| 96 | #ifdef CONFIG_NUMA | ||
| 97 | mask = (dev_to_node(dev) == -1) ? cpu_online_mask : | ||
| 98 | cpumask_of_node(dev_to_node(dev)); | ||
| 99 | #else | ||
| 91 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | 100 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); |
| 101 | #endif | ||
| 92 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); | 102 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); |
| 93 | buf[len++] = '\n'; | 103 | buf[len++] = '\n'; |
| 94 | buf[len] = '\0'; | 104 | buf[len] = '\0'; |
| @@ -176,6 +186,21 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
| 176 | #endif | 186 | #endif |
| 177 | 187 | ||
| 178 | static ssize_t | 188 | static ssize_t |
| 189 | dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 190 | { | ||
| 191 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 192 | |||
| 193 | return sprintf (buf, "%d\n", fls64(pdev->dma_mask)); | ||
| 194 | } | ||
| 195 | |||
| 196 | static ssize_t | ||
| 197 | consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, | ||
| 198 | char *buf) | ||
| 199 | { | ||
| 200 | return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask)); | ||
| 201 | } | ||
| 202 | |||
| 203 | static ssize_t | ||
| 179 | msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) | 204 | msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) |
| 180 | { | 205 | { |
| 181 | struct pci_dev *pdev = to_pci_dev(dev); | 206 | struct pci_dev *pdev = to_pci_dev(dev); |
| @@ -306,6 +331,8 @@ struct device_attribute pci_dev_attrs[] = { | |||
| 306 | #ifdef CONFIG_NUMA | 331 | #ifdef CONFIG_NUMA |
| 307 | __ATTR_RO(numa_node), | 332 | __ATTR_RO(numa_node), |
| 308 | #endif | 333 | #endif |
| 334 | __ATTR_RO(dma_mask_bits), | ||
| 335 | __ATTR_RO(consistent_dma_mask_bits), | ||
| 309 | __ATTR(enable, 0600, is_enabled_show, is_enabled_store), | 336 | __ATTR(enable, 0600, is_enabled_show, is_enabled_store), |
| 310 | __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), | 337 | __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), |
| 311 | broken_parity_status_show,broken_parity_status_store), | 338 | broken_parity_status_show,broken_parity_status_store), |
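The two new attributes report each mask as a bit count: fls64() returns the 1-based index of the most significant set bit, so a device limited to 32-bit DMA should read back as 32 (for example via /sys/bus/pci/devices/<BDF>/dma_mask_bits). A userspace stand-in for the arithmetic, where fls64_demo() is a plain loop rather than the kernel helper:

#include <stdio.h>

static int fls64_demo(unsigned long long x)
{
        int r = 0;

        while (x) {             /* position of the highest set bit, 1-based */
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        printf("%d\n", fls64_demo(0xffffffffULL));      /* prints 32 */
        printf("%d\n", fls64_demo((1ULL << 44) - 1));    /* prints 44 */
        return 0;
}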
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6edecff0b419..315fea47e784 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -29,7 +29,17 @@ const char *pci_power_names[] = { | |||
| 29 | }; | 29 | }; |
| 30 | EXPORT_SYMBOL_GPL(pci_power_names); | 30 | EXPORT_SYMBOL_GPL(pci_power_names); |
| 31 | 31 | ||
| 32 | unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; | 32 | unsigned int pci_pm_d3_delay; |
| 33 | |||
| 34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | ||
| 35 | { | ||
| 36 | unsigned int delay = dev->d3_delay; | ||
| 37 | |||
| 38 | if (delay < pci_pm_d3_delay) | ||
| 39 | delay = pci_pm_d3_delay; | ||
| 40 | |||
| 41 | msleep(delay); | ||
| 42 | } | ||
| 33 | 43 | ||
| 34 | #ifdef CONFIG_PCI_DOMAINS | 44 | #ifdef CONFIG_PCI_DOMAINS |
| 35 | int pci_domains_supported = 1; | 45 | int pci_domains_supported = 1; |
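pci_dev_d3_sleep() takes the larger of the per-device d3_delay and the global pci_pm_d3_delay, so individual devices that need extra settle time after D3hot can be handled by a fixup rather than by raising the global delay. A sketch of such a fixup; the IDs and the 20 ms figure are placeholders, not an actual quirk from this series:

static void quirk_long_d3_delay(struct pci_dev *dev)
{
        /* Device needs more than the 10 ms PCI_PM_D3_WAIT after D3hot;
         * pci_dev_d3_sleep() uses the larger of this and the global value. */
        dev->d3_delay = 20;     /* milliseconds */
}
DECLARE_PCI_FIXUP_ENABLE(0x1234, 0x5678, quirk_long_d3_delay);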
| @@ -47,6 +57,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
| 47 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 57 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
| 48 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 58 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
| 49 | 59 | ||
| 60 | /* | ||
| 61 | * The default CLS is used if arch didn't set CLS explicitly and not | ||
| 62 | * all pci devices agree on the same value. Arch can override either | ||
| 63 | * the dfl or actual value as it sees fit. Don't forget this is | ||
| 64 | * measured in 32-bit words, not bytes. | ||
| 65 | */ | ||
| 66 | u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2; | ||
| 67 | u8 pci_cache_line_size; | ||
| 68 | |||
| 50 | /** | 69 | /** |
| 51 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children | 70 | * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children |
| 52 | * @bus: pointer to PCI bus structure to search | 71 | * @bus: pointer to PCI bus structure to search |
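As the comment notes, both variables are in units of 32-bit words: with 64-byte cache lines the default works out to 16. An arch that knows better can override the default early in its PCI setup; a sketch (the 64-byte figure is only an example, not taken from any particular architecture):

static void __init example_arch_set_pci_cls(void)
{
        /* 64-byte cache lines: program PCI_CACHE_LINE_SIZE with 16 dwords */
        pci_dfl_cache_line_size = 64 >> 2;
}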
| @@ -373,8 +392,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
| 373 | continue; /* Wrong type */ | 392 | continue; /* Wrong type */ |
| 374 | if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) | 393 | if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) |
| 375 | return r; /* Exact match */ | 394 | return r; /* Exact match */ |
| 376 | if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH)) | 395 | /* We can't insert a non-prefetch resource inside a prefetchable parent .. */ |
| 377 | best = r; /* Approximating prefetchable by non-prefetchable */ | 396 | if (r->flags & IORESOURCE_PREFETCH) |
| 397 | continue; | ||
| 398 | /* .. but we can put a prefetchable resource inside a non-prefetchable one */ | ||
| 399 | if (!best) | ||
| 400 | best = r; | ||
| 378 | } | 401 | } |
| 379 | return best; | 402 | return best; |
| 380 | } | 403 | } |
| @@ -509,11 +532,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 509 | /* Mandatory power management transition delays */ | 532 | /* Mandatory power management transition delays */ |
| 510 | /* see PCI PM 1.1 5.6.1 table 18 */ | 533 | /* see PCI PM 1.1 5.6.1 table 18 */ |
| 511 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) | 534 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
| 512 | msleep(pci_pm_d3_delay); | 535 | pci_dev_d3_sleep(dev); |
| 513 | else if (state == PCI_D2 || dev->current_state == PCI_D2) | 536 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
| 514 | udelay(PCI_PM_D2_DELAY); | 537 | udelay(PCI_PM_D2_DELAY); |
| 515 | 538 | ||
| 516 | dev->current_state = state; | 539 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| 540 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
| 541 | if (dev->current_state != state && printk_ratelimit()) | ||
| 542 | dev_info(&dev->dev, "Refused to change power state, " | ||
| 543 | "currently in D%d\n", dev->current_state); | ||
| 517 | 544 | ||
| 518 | /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT | 545 | /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT |
| 519 | * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning | 546 | * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning |
| @@ -724,8 +751,8 @@ static int pci_save_pcie_state(struct pci_dev *dev) | |||
| 724 | u16 *cap; | 751 | u16 *cap; |
| 725 | u16 flags; | 752 | u16 flags; |
| 726 | 753 | ||
| 727 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 754 | pos = pci_pcie_cap(dev); |
| 728 | if (pos <= 0) | 755 | if (!pos) |
| 729 | return 0; | 756 | return 0; |
| 730 | 757 | ||
| 731 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); | 758 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); |
| @@ -833,7 +860,7 @@ pci_save_state(struct pci_dev *dev) | |||
| 833 | int i; | 860 | int i; |
| 834 | /* XXX: 100% dword access ok here? */ | 861 | /* XXX: 100% dword access ok here? */ |
| 835 | for (i = 0; i < 16; i++) | 862 | for (i = 0; i < 16; i++) |
| 836 | pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); | 863 | pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); |
| 837 | dev->state_saved = true; | 864 | dev->state_saved = true; |
| 838 | if ((i = pci_save_pcie_state(dev)) != 0) | 865 | if ((i = pci_save_pcie_state(dev)) != 0) |
| 839 | return i; | 866 | return i; |
| @@ -1136,11 +1163,11 @@ pci_disable_device(struct pci_dev *dev) | |||
| 1136 | 1163 | ||
| 1137 | /** | 1164 | /** |
| 1138 | * pcibios_set_pcie_reset_state - set reset state for device dev | 1165 | * pcibios_set_pcie_reset_state - set reset state for device dev |
| 1139 | * @dev: the PCI-E device reset | 1166 | * @dev: the PCIe device reset |
| 1140 | * @state: Reset state to enter into | 1167 | * @state: Reset state to enter into |
| 1141 | * | 1168 | * |
| 1142 | * | 1169 | * |
| 1143 | * Sets the PCI-E reset state for the device. This is the default | 1170 | * Sets the PCIe reset state for the device. This is the default |
| 1144 | * implementation. Architecture implementations can override this. | 1171 | * implementation. Architecture implementations can override this. |
| 1145 | */ | 1172 | */ |
| 1146 | int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, | 1173 | int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, |
| @@ -1151,7 +1178,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, | |||
| 1151 | 1178 | ||
| 1152 | /** | 1179 | /** |
| 1153 | * pci_set_pcie_reset_state - set reset state for device dev | 1180 | * pci_set_pcie_reset_state - set reset state for device dev |
| 1154 | * @dev: the PCI-E device reset | 1181 | * @dev: the PCIe device reset |
| 1155 | * @state: Reset state to enter into | 1182 | * @state: Reset state to enter into |
| 1156 | * | 1183 | * |
| 1157 | * | 1184 | * |
| @@ -1198,7 +1225,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
| 1198 | 1225 | ||
| 1199 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | 1226 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| 1200 | 1227 | ||
| 1201 | dev_printk(KERN_INFO, &dev->dev, "PME# %s\n", | 1228 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", |
| 1202 | enable ? "enabled" : "disabled"); | 1229 | enable ? "enabled" : "disabled"); |
| 1203 | } | 1230 | } |
| 1204 | 1231 | ||
| @@ -1392,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
| 1392 | } | 1419 | } |
| 1393 | 1420 | ||
| 1394 | dev->pm_cap = pm; | 1421 | dev->pm_cap = pm; |
| 1422 | dev->d3_delay = PCI_PM_D3_WAIT; | ||
| 1395 | 1423 | ||
| 1396 | dev->d1_support = false; | 1424 | dev->d1_support = false; |
| 1397 | dev->d2_support = false; | 1425 | dev->d2_support = false; |
| @@ -1409,7 +1437,8 @@ void pci_pm_init(struct pci_dev *dev) | |||
| 1409 | 1437 | ||
| 1410 | pmc &= PCI_PM_CAP_PME_MASK; | 1438 | pmc &= PCI_PM_CAP_PME_MASK; |
| 1411 | if (pmc) { | 1439 | if (pmc) { |
| 1412 | dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", | 1440 | dev_printk(KERN_DEBUG, &dev->dev, |
| 1441 | "PME# supported from%s%s%s%s%s\n", | ||
| 1413 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", | 1442 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", |
| 1414 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", | 1443 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", |
| 1415 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", | 1444 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", |
| @@ -1506,7 +1535,7 @@ void pci_enable_ari(struct pci_dev *dev) | |||
| 1506 | u16 ctrl; | 1535 | u16 ctrl; |
| 1507 | struct pci_dev *bridge; | 1536 | struct pci_dev *bridge; |
| 1508 | 1537 | ||
| 1509 | if (!dev->is_pcie || dev->devfn) | 1538 | if (!pci_is_pcie(dev) || dev->devfn) |
| 1510 | return; | 1539 | return; |
| 1511 | 1540 | ||
| 1512 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); | 1541 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); |
| @@ -1514,10 +1543,10 @@ void pci_enable_ari(struct pci_dev *dev) | |||
| 1514 | return; | 1543 | return; |
| 1515 | 1544 | ||
| 1516 | bridge = dev->bus->self; | 1545 | bridge = dev->bus->self; |
| 1517 | if (!bridge || !bridge->is_pcie) | 1546 | if (!bridge || !pci_is_pcie(bridge)) |
| 1518 | return; | 1547 | return; |
| 1519 | 1548 | ||
| 1520 | pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); | 1549 | pos = pci_pcie_cap(bridge); |
| 1521 | if (!pos) | 1550 | if (!pos) |
| 1522 | return; | 1551 | return; |
| 1523 | 1552 | ||
| @@ -1532,6 +1561,54 @@ void pci_enable_ari(struct pci_dev *dev) | |||
| 1532 | bridge->ari_enabled = 1; | 1561 | bridge->ari_enabled = 1; |
| 1533 | } | 1562 | } |
| 1534 | 1563 | ||
| 1564 | static int pci_acs_enable; | ||
| 1565 | |||
| 1566 | /** | ||
| 1567 | * pci_request_acs - ask for ACS to be enabled if supported | ||
| 1568 | */ | ||
| 1569 | void pci_request_acs(void) | ||
| 1570 | { | ||
| 1571 | pci_acs_enable = 1; | ||
| 1572 | } | ||
| 1573 | |||
| 1574 | /** | ||
| 1575 | * pci_enable_acs - enable ACS if hardware support it | ||
| 1576 | * @dev: the PCI device | ||
| 1577 | */ | ||
| 1578 | void pci_enable_acs(struct pci_dev *dev) | ||
| 1579 | { | ||
| 1580 | int pos; | ||
| 1581 | u16 cap; | ||
| 1582 | u16 ctrl; | ||
| 1583 | |||
| 1584 | if (!pci_acs_enable) | ||
| 1585 | return; | ||
| 1586 | |||
| 1587 | if (!pci_is_pcie(dev)) | ||
| 1588 | return; | ||
| 1589 | |||
| 1590 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); | ||
| 1591 | if (!pos) | ||
| 1592 | return; | ||
| 1593 | |||
| 1594 | pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); | ||
| 1595 | pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); | ||
| 1596 | |||
| 1597 | /* Source Validation */ | ||
| 1598 | ctrl |= (cap & PCI_ACS_SV); | ||
| 1599 | |||
| 1600 | /* P2P Request Redirect */ | ||
| 1601 | ctrl |= (cap & PCI_ACS_RR); | ||
| 1602 | |||
| 1603 | /* P2P Completion Redirect */ | ||
| 1604 | ctrl |= (cap & PCI_ACS_CR); | ||
| 1605 | |||
| 1606 | /* Upstream Forwarding */ | ||
| 1607 | ctrl |= (cap & PCI_ACS_UF); | ||
| 1608 | |||
| 1609 | pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); | ||
| 1610 | } | ||
| 1611 | |||
| 1535 | /** | 1612 | /** |
| 1536 | * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge | 1613 | * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge |
| 1537 | * @dev: the PCI device | 1614 | * @dev: the PCI device |
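pci_request_acs() only latches a flag; pci_enable_acs() does the per-device work and is wired into device setup elsewhere in this series. The intended caller is an IOMMU driver that wants peer-to-peer traffic forced upstream for isolation; a sketch, with the policy check and function name being illustrative:

static int __init example_iommu_init(void)
{
        if (want_device_isolation)      /* placeholder for the driver's policy */
                pci_request_acs();      /* must run before devices are set up */

        /* ... IOMMU hardware initialization ... */
        return 0;
}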
| @@ -1665,9 +1742,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n | |||
| 1665 | return 0; | 1742 | return 0; |
| 1666 | 1743 | ||
| 1667 | err_out: | 1744 | err_out: |
| 1668 | dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n", | 1745 | dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, |
| 1669 | bar, | ||
| 1670 | pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", | ||
| 1671 | &pdev->resource[bar]); | 1746 | &pdev->resource[bar]); |
| 1672 | return -EBUSY; | 1747 | return -EBUSY; |
| 1673 | } | 1748 | } |
| @@ -1862,31 +1937,6 @@ void pci_clear_master(struct pci_dev *dev) | |||
| 1862 | __pci_set_master(dev, false); | 1937 | __pci_set_master(dev, false); |
| 1863 | } | 1938 | } |
| 1864 | 1939 | ||
| 1865 | #ifdef PCI_DISABLE_MWI | ||
| 1866 | int pci_set_mwi(struct pci_dev *dev) | ||
| 1867 | { | ||
| 1868 | return 0; | ||
| 1869 | } | ||
| 1870 | |||
| 1871 | int pci_try_set_mwi(struct pci_dev *dev) | ||
| 1872 | { | ||
| 1873 | return 0; | ||
| 1874 | } | ||
| 1875 | |||
| 1876 | void pci_clear_mwi(struct pci_dev *dev) | ||
| 1877 | { | ||
| 1878 | } | ||
| 1879 | |||
| 1880 | #else | ||
| 1881 | |||
| 1882 | #ifndef PCI_CACHE_LINE_BYTES | ||
| 1883 | #define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES | ||
| 1884 | #endif | ||
| 1885 | |||
| 1886 | /* This can be overridden by arch code. */ | ||
| 1887 | /* Don't forget this is measured in 32-bit words, not bytes */ | ||
| 1888 | u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4; | ||
| 1889 | |||
| 1890 | /** | 1940 | /** |
| 1891 | * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed | 1941 | * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed |
| 1892 | * @dev: the PCI device for which MWI is to be enabled | 1942 | * @dev: the PCI device for which MWI is to be enabled |
| @@ -1897,13 +1947,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4; | |||
| 1897 | * | 1947 | * |
| 1898 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. | 1948 | * RETURNS: An appropriate -ERRNO error value on error, or zero for success. |
| 1899 | */ | 1949 | */ |
| 1900 | static int | 1950 | int pci_set_cacheline_size(struct pci_dev *dev) |
| 1901 | pci_set_cacheline_size(struct pci_dev *dev) | ||
| 1902 | { | 1951 | { |
| 1903 | u8 cacheline_size; | 1952 | u8 cacheline_size; |
| 1904 | 1953 | ||
| 1905 | if (!pci_cache_line_size) | 1954 | if (!pci_cache_line_size) |
| 1906 | return -EINVAL; /* The system doesn't support MWI. */ | 1955 | return -EINVAL; |
| 1907 | 1956 | ||
| 1908 | /* Validate current setting: the PCI_CACHE_LINE_SIZE must be | 1957 | /* Validate current setting: the PCI_CACHE_LINE_SIZE must be |
| 1909 | equal to or multiple of the right value. */ | 1958 | equal to or multiple of the right value. */ |
| @@ -1924,6 +1973,24 @@ pci_set_cacheline_size(struct pci_dev *dev) | |||
| 1924 | 1973 | ||
| 1925 | return -EINVAL; | 1974 | return -EINVAL; |
| 1926 | } | 1975 | } |
| 1976 | EXPORT_SYMBOL_GPL(pci_set_cacheline_size); | ||
| 1977 | |||
| 1978 | #ifdef PCI_DISABLE_MWI | ||
| 1979 | int pci_set_mwi(struct pci_dev *dev) | ||
| 1980 | { | ||
| 1981 | return 0; | ||
| 1982 | } | ||
| 1983 | |||
| 1984 | int pci_try_set_mwi(struct pci_dev *dev) | ||
| 1985 | { | ||
| 1986 | return 0; | ||
| 1987 | } | ||
| 1988 | |||
| 1989 | void pci_clear_mwi(struct pci_dev *dev) | ||
| 1990 | { | ||
| 1991 | } | ||
| 1992 | |||
| 1993 | #else | ||
| 1927 | 1994 | ||
| 1928 | /** | 1995 | /** |
| 1929 | * pci_set_mwi - enables memory-write-invalidate PCI transaction | 1996 | * pci_set_mwi - enables memory-write-invalidate PCI transaction |
| @@ -2058,6 +2125,7 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask) | |||
| 2058 | return -EIO; | 2125 | return -EIO; |
| 2059 | 2126 | ||
| 2060 | dev->dma_mask = mask; | 2127 | dev->dma_mask = mask; |
| 2128 | dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask)); | ||
| 2061 | 2129 | ||
| 2062 | return 0; | 2130 | return 0; |
| 2063 | } | 2131 | } |
| @@ -2069,6 +2137,7 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | |||
| 2069 | return -EIO; | 2137 | return -EIO; |
| 2070 | 2138 | ||
| 2071 | dev->dev.coherent_dma_mask = mask; | 2139 | dev->dev.coherent_dma_mask = mask; |
| 2140 | dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask)); | ||
| 2072 | 2141 | ||
| 2073 | return 0; | 2142 | return 0; |
| 2074 | } | 2143 | } |
| @@ -2095,9 +2164,9 @@ static int pcie_flr(struct pci_dev *dev, int probe) | |||
| 2095 | int i; | 2164 | int i; |
| 2096 | int pos; | 2165 | int pos; |
| 2097 | u32 cap; | 2166 | u32 cap; |
| 2098 | u16 status; | 2167 | u16 status, control; |
| 2099 | 2168 | ||
| 2100 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 2169 | pos = pci_pcie_cap(dev); |
| 2101 | if (!pos) | 2170 | if (!pos) |
| 2102 | return -ENOTTY; | 2171 | return -ENOTTY; |
| 2103 | 2172 | ||
| @@ -2122,8 +2191,10 @@ static int pcie_flr(struct pci_dev *dev, int probe) | |||
| 2122 | "proceeding with reset anyway\n"); | 2191 | "proceeding with reset anyway\n"); |
| 2123 | 2192 | ||
| 2124 | clear: | 2193 | clear: |
| 2125 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | 2194 | pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control); |
| 2126 | PCI_EXP_DEVCTL_BCR_FLR); | 2195 | control |= PCI_EXP_DEVCTL_BCR_FLR; |
| 2196 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control); | ||
| 2197 | |||
| 2127 | msleep(100); | 2198 | msleep(100); |
| 2128 | 2199 | ||
| 2129 | return 0; | 2200 | return 0; |
| @@ -2187,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) | |||
| 2187 | csr &= ~PCI_PM_CTRL_STATE_MASK; | 2258 | csr &= ~PCI_PM_CTRL_STATE_MASK; |
| 2188 | csr |= PCI_D3hot; | 2259 | csr |= PCI_D3hot; |
| 2189 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); | 2260 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); |
| 2190 | msleep(pci_pm_d3_delay); | 2261 | pci_dev_d3_sleep(dev); |
| 2191 | 2262 | ||
| 2192 | csr &= ~PCI_PM_CTRL_STATE_MASK; | 2263 | csr &= ~PCI_PM_CTRL_STATE_MASK; |
| 2193 | csr |= PCI_D0; | 2264 | csr |= PCI_D0; |
| 2194 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); | 2265 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); |
| 2195 | msleep(pci_pm_d3_delay); | 2266 | pci_dev_d3_sleep(dev); |
| 2196 | 2267 | ||
| 2197 | return 0; | 2268 | return 0; |
| 2198 | } | 2269 | } |
| @@ -2236,6 +2307,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
| 2236 | down(&dev->dev.sem); | 2307 | down(&dev->dev.sem); |
| 2237 | } | 2308 | } |
| 2238 | 2309 | ||
| 2310 | rc = pci_dev_specific_reset(dev, probe); | ||
| 2311 | if (rc != -ENOTTY) | ||
| 2312 | goto done; | ||
| 2313 | |||
| 2239 | rc = pcie_flr(dev, probe); | 2314 | rc = pcie_flr(dev, probe); |
| 2240 | if (rc != -ENOTTY) | 2315 | if (rc != -ENOTTY) |
| 2241 | goto done; | 2316 | goto done; |
| @@ -2446,7 +2521,7 @@ int pcie_get_readrq(struct pci_dev *dev) | |||
| 2446 | int ret, cap; | 2521 | int ret, cap; |
| 2447 | u16 ctl; | 2522 | u16 ctl; |
| 2448 | 2523 | ||
| 2449 | cap = pci_find_capability(dev, PCI_CAP_ID_EXP); | 2524 | cap = pci_pcie_cap(dev); |
| 2450 | if (!cap) | 2525 | if (!cap) |
| 2451 | return -EINVAL; | 2526 | return -EINVAL; |
| 2452 | 2527 | ||
| @@ -2476,7 +2551,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
| 2476 | 2551 | ||
| 2477 | v = (ffs(rq) - 8) << 12; | 2552 | v = (ffs(rq) - 8) << 12; |
| 2478 | 2553 | ||
| 2479 | cap = pci_find_capability(dev, PCI_CAP_ID_EXP); | 2554 | cap = pci_pcie_cap(dev); |
| 2480 | if (!cap) | 2555 | if (!cap) |
| 2481 | goto out; | 2556 | goto out; |
| 2482 | 2557 | ||
| @@ -2536,16 +2611,16 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
| 2536 | return reg; | 2611 | return reg; |
| 2537 | } | 2612 | } |
| 2538 | 2613 | ||
| 2539 | dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); | 2614 | dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); |
| 2540 | return 0; | 2615 | return 0; |
| 2541 | } | 2616 | } |
| 2542 | 2617 | ||
| 2543 | /** | 2618 | /** |
| 2544 | * pci_set_vga_state - set VGA decode state on device and parents if requested | 2619 | * pci_set_vga_state - set VGA decode state on device and parents if requested |
| 2545 | * @dev the PCI device | 2620 | * @dev: the PCI device |
| 2546 | * @decode - true = enable decoding, false = disable decoding | 2621 | * @decode: true = enable decoding, false = disable decoding |
| 2547 | * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY | 2622 | * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY |
| 2548 | * @change_bridge - traverse ancestors and change bridges | 2623 | * @change_bridge: traverse ancestors and change bridges |
| 2549 | */ | 2624 | */ |
| 2550 | int pci_set_vga_state(struct pci_dev *dev, bool decode, | 2625 | int pci_set_vga_state(struct pci_dev *dev, bool decode, |
| 2551 | unsigned int command_bits, bool change_bridge) | 2626 | unsigned int command_bits, bool change_bridge) |
| @@ -2586,7 +2661,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
| 2586 | 2661 | ||
| 2587 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE | 2662 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
| 2588 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; | 2663 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
| 2589 | spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; | 2664 | static DEFINE_SPINLOCK(resource_alignment_lock); |
| 2590 | 2665 | ||
| 2591 | /** | 2666 | /** |
| 2592 | * pci_specified_resource_alignment - get resource alignment specified by user. | 2667 | * pci_specified_resource_alignment - get resource alignment specified by user. |
| @@ -2719,16 +2794,10 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) | |||
| 2719 | return 1; | 2794 | return 1; |
| 2720 | } | 2795 | } |
| 2721 | 2796 | ||
| 2722 | static int __devinit pci_init(void) | 2797 | void __weak pci_fixup_cardbus(struct pci_bus *bus) |
| 2723 | { | 2798 | { |
| 2724 | struct pci_dev *dev = NULL; | ||
| 2725 | |||
| 2726 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
| 2727 | pci_fixup_device(pci_fixup_final, dev); | ||
| 2728 | } | ||
| 2729 | |||
| 2730 | return 0; | ||
| 2731 | } | 2799 | } |
| 2800 | EXPORT_SYMBOL(pci_fixup_cardbus); | ||
| 2732 | 2801 | ||
| 2733 | static int __init pci_setup(char *str) | 2802 | static int __init pci_setup(char *str) |
| 2734 | { | 2803 | { |
| @@ -2767,8 +2836,6 @@ static int __init pci_setup(char *str) | |||
| 2767 | } | 2836 | } |
| 2768 | early_param("pci", pci_setup); | 2837 | early_param("pci", pci_setup); |
| 2769 | 2838 | ||
| 2770 | device_initcall(pci_init); | ||
| 2771 | |||
| 2772 | EXPORT_SYMBOL(pci_reenable_device); | 2839 | EXPORT_SYMBOL(pci_reenable_device); |
| 2773 | EXPORT_SYMBOL(pci_enable_device_io); | 2840 | EXPORT_SYMBOL(pci_enable_device_io); |
| 2774 | EXPORT_SYMBOL(pci_enable_device_mem); | 2841 | EXPORT_SYMBOL(pci_enable_device_mem); |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d92d1954a2fb..fbd0e3adbca3 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -311,4 +311,14 @@ static inline int pci_resource_alignment(struct pci_dev *dev, | |||
| 311 | return resource_alignment(res); | 311 | return resource_alignment(res); |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | extern void pci_enable_acs(struct pci_dev *dev); | ||
| 315 | |||
| 316 | struct pci_dev_reset_methods { | ||
| 317 | u16 vendor; | ||
| 318 | u16 device; | ||
| 319 | int (*reset)(struct pci_dev *dev, int probe); | ||
| 320 | }; | ||
| 321 | |||
| 322 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); | ||
| 323 | |||
| 314 | #endif /* DRIVERS_PCI_H */ | 324 | #endif /* DRIVERS_PCI_H */ |
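pci_dev_specific_reset() is implemented in quirks.c elsewhere in this series; the declaration above suggests the usual shape: a table of pci_dev_reset_methods entries searched by vendor/device before the generic FLR/PM/bridge resets are tried. A sketch of that shape, with a placeholder device ID and reset routine:

static int placeholder_reset(struct pci_dev *dev, int probe)
{
        if (probe)
                return 0;       /* method exists; report success without acting */
        /* device-specific reset sequence would go here */
        return 0;
}

static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
        { PCI_VENDOR_ID_INTEL, 0x1234, placeholder_reset },    /* placeholder ID */
        { 0 }
};

int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
        const struct pci_dev_reset_methods *i;

        for (i = pci_dev_reset_methods; i->reset; i++)
                if ((i->vendor == dev->vendor ||
                     i->vendor == (u16)PCI_ANY_ID) &&
                    (i->device == dev->device ||
                     i->device == (u16)PCI_ANY_ID))
                        return i->reset(dev, probe);

        return -ENOTTY;         /* caller falls through to FLR/PM/bridge reset */
}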
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug index b8c925c1f6aa..9142949734f5 100644 --- a/drivers/pci/pcie/aer/Kconfig.debug +++ b/drivers/pci/pcie/aer/Kconfig.debug | |||
| @@ -3,14 +3,14 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config PCIEAER_INJECT | 5 | config PCIEAER_INJECT |
| 6 | tristate "PCIE AER error injector support" | 6 | tristate "PCIe AER error injector support" |
| 7 | depends on PCIEAER | 7 | depends on PCIEAER |
| 8 | default n | 8 | default n |
| 9 | help | 9 | help |
| 10 | This enables PCI Express Root Port Advanced Error Reporting | 10 | This enables PCI Express Root Port Advanced Error Reporting |
| 11 | (AER) software error injector. | 11 | (AER) software error injector. |
| 12 | 12 | ||
| 13 | Debuging PCIE AER code is quite difficult because it is hard | 13 | Debugging PCIe AER code is quite difficult because it is hard |
| 14 | to trigger various real hardware errors. Software based | 14 | to trigger various real hardware errors. Software based |
| 15 | error injection can fake almost all kinds of errors with the | 15 | error injection can fake almost all kinds of errors with the |
| 16 | help of a user space helper tool aer-inject, which can be | 16 | help of a user space helper tool aer-inject, which can be |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 62d15f652bb6..223052b73563 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * PCIE AER software error injection support. | 2 | * PCIe AER software error injection support. |
| 3 | * | 3 | * |
| 4 | * Debuging PCIE AER code is quite difficult because it is hard to | 4 | * Debugging PCIe AER code is quite difficult because it is hard to |
| 5 | * trigger various real hardware errors. Software based error | 5 | * trigger various real hardware errors. Software based error |
| 6 | * injection can fake almost all kinds of errors with the help of a | 6 | * injection can fake almost all kinds of errors with the help of a |
| 7 | * user space helper tool aer-inject, which can be gotten from: | 7 | * user space helper tool aer-inject, which can be gotten from: |
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
| 24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
| 25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
| 26 | #include <linux/stddef.h> | ||
| 26 | #include "aerdrv.h" | 27 | #include "aerdrv.h" |
| 27 | 28 | ||
| 28 | struct aer_error_inj { | 29 | struct aer_error_inj { |
| @@ -35,10 +36,12 @@ struct aer_error_inj { | |||
| 35 | u32 header_log1; | 36 | u32 header_log1; |
| 36 | u32 header_log2; | 37 | u32 header_log2; |
| 37 | u32 header_log3; | 38 | u32 header_log3; |
| 39 | u16 domain; | ||
| 38 | }; | 40 | }; |
| 39 | 41 | ||
| 40 | struct aer_error { | 42 | struct aer_error { |
| 41 | struct list_head list; | 43 | struct list_head list; |
| 44 | u16 domain; | ||
| 42 | unsigned int bus; | 45 | unsigned int bus; |
| 43 | unsigned int devfn; | 46 | unsigned int devfn; |
| 44 | int pos_cap_err; | 47 | int pos_cap_err; |
| @@ -66,22 +69,27 @@ static LIST_HEAD(pci_bus_ops_list); | |||
| 66 | /* Protect einjected and pci_bus_ops_list */ | 69 | /* Protect einjected and pci_bus_ops_list */ |
| 67 | static DEFINE_SPINLOCK(inject_lock); | 70 | static DEFINE_SPINLOCK(inject_lock); |
| 68 | 71 | ||
| 69 | static void aer_error_init(struct aer_error *err, unsigned int bus, | 72 | static void aer_error_init(struct aer_error *err, u16 domain, |
| 70 | unsigned int devfn, int pos_cap_err) | 73 | unsigned int bus, unsigned int devfn, |
| 74 | int pos_cap_err) | ||
| 71 | { | 75 | { |
| 72 | INIT_LIST_HEAD(&err->list); | 76 | INIT_LIST_HEAD(&err->list); |
| 77 | err->domain = domain; | ||
| 73 | err->bus = bus; | 78 | err->bus = bus; |
| 74 | err->devfn = devfn; | 79 | err->devfn = devfn; |
| 75 | err->pos_cap_err = pos_cap_err; | 80 | err->pos_cap_err = pos_cap_err; |
| 76 | } | 81 | } |
| 77 | 82 | ||
| 78 | /* inject_lock must be held before calling */ | 83 | /* inject_lock must be held before calling */ |
| 79 | static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn) | 84 | static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, |
| 85 | unsigned int devfn) | ||
| 80 | { | 86 | { |
| 81 | struct aer_error *err; | 87 | struct aer_error *err; |
| 82 | 88 | ||
| 83 | list_for_each_entry(err, &einjected, list) { | 89 | list_for_each_entry(err, &einjected, list) { |
| 84 | if (bus == err->bus && devfn == err->devfn) | 90 | if (domain == err->domain && |
| 91 | bus == err->bus && | ||
| 92 | devfn == err->devfn) | ||
| 85 | return err; | 93 | return err; |
| 86 | } | 94 | } |
| 87 | return NULL; | 95 | return NULL; |
| @@ -90,7 +98,10 @@ static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn) | |||
| 90 | /* inject_lock must be held before calling */ | 98 | /* inject_lock must be held before calling */ |
| 91 | static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) | 99 | static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) |
| 92 | { | 100 | { |
| 93 | return __find_aer_error(dev->bus->number, dev->devfn); | 101 | int domain = pci_domain_nr(dev->bus); |
| 102 | if (domain < 0) | ||
| 103 | return NULL; | ||
| 104 | return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); | ||
| 94 | } | 105 | } |
| 95 | 106 | ||
| 96 | /* inject_lock must be held before calling */ | 107 | /* inject_lock must be held before calling */ |
| @@ -172,11 +183,15 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where, | |||
| 172 | struct aer_error *err; | 183 | struct aer_error *err; |
| 173 | unsigned long flags; | 184 | unsigned long flags; |
| 174 | struct pci_ops *ops; | 185 | struct pci_ops *ops; |
| 186 | int domain; | ||
| 175 | 187 | ||
| 176 | spin_lock_irqsave(&inject_lock, flags); | 188 | spin_lock_irqsave(&inject_lock, flags); |
| 177 | if (size != sizeof(u32)) | 189 | if (size != sizeof(u32)) |
| 178 | goto out; | 190 | goto out; |
| 179 | err = __find_aer_error(bus->number, devfn); | 191 | domain = pci_domain_nr(bus); |
| 192 | if (domain < 0) | ||
| 193 | goto out; | ||
| 194 | err = __find_aer_error((u16)domain, bus->number, devfn); | ||
| 180 | if (!err) | 195 | if (!err) |
| 181 | goto out; | 196 | goto out; |
| 182 | 197 | ||
| @@ -200,11 +215,15 @@ int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size, | |||
| 200 | unsigned long flags; | 215 | unsigned long flags; |
| 201 | int rw1cs; | 216 | int rw1cs; |
| 202 | struct pci_ops *ops; | 217 | struct pci_ops *ops; |
| 218 | int domain; | ||
| 203 | 219 | ||
| 204 | spin_lock_irqsave(&inject_lock, flags); | 220 | spin_lock_irqsave(&inject_lock, flags); |
| 205 | if (size != sizeof(u32)) | 221 | if (size != sizeof(u32)) |
| 206 | goto out; | 222 | goto out; |
| 207 | err = __find_aer_error(bus->number, devfn); | 223 | domain = pci_domain_nr(bus); |
| 224 | if (domain < 0) | ||
| 225 | goto out; | ||
| 226 | err = __find_aer_error((u16)domain, bus->number, devfn); | ||
| 208 | if (!err) | 227 | if (!err) |
| 209 | goto out; | 228 | goto out; |
| 210 | 229 | ||
| @@ -262,7 +281,7 @@ out: | |||
| 262 | static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) | 281 | static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) |
| 263 | { | 282 | { |
| 264 | while (1) { | 283 | while (1) { |
| 265 | if (!dev->is_pcie) | 284 | if (!pci_is_pcie(dev)) |
| 266 | break; | 285 | break; |
| 267 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | 286 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) |
| 268 | return dev; | 287 | return dev; |
| @@ -302,28 +321,31 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 302 | unsigned long flags; | 321 | unsigned long flags; |
| 303 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | 322 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); |
| 304 | int pos_cap_err, rp_pos_cap_err; | 323 | int pos_cap_err, rp_pos_cap_err; |
| 305 | u32 sever; | 324 | u32 sever, cor_mask, uncor_mask; |
| 306 | int ret = 0; | 325 | int ret = 0; |
| 307 | 326 | ||
| 308 | dev = pci_get_bus_and_slot(einj->bus, devfn); | 327 | dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); |
| 309 | if (!dev) | 328 | if (!dev) |
| 310 | return -EINVAL; | 329 | return -ENODEV; |
| 311 | rpdev = pcie_find_root_port(dev); | 330 | rpdev = pcie_find_root_port(dev); |
| 312 | if (!rpdev) { | 331 | if (!rpdev) { |
| 313 | ret = -EINVAL; | 332 | ret = -ENOTTY; |
| 314 | goto out_put; | 333 | goto out_put; |
| 315 | } | 334 | } |
| 316 | 335 | ||
| 317 | pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 336 | pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| 318 | if (!pos_cap_err) { | 337 | if (!pos_cap_err) { |
| 319 | ret = -EIO; | 338 | ret = -ENOTTY; |
| 320 | goto out_put; | 339 | goto out_put; |
| 321 | } | 340 | } |
| 322 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); | 341 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); |
| 342 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask); | ||
| 343 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | ||
| 344 | &uncor_mask); | ||
| 323 | 345 | ||
| 324 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); | 346 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); |
| 325 | if (!rp_pos_cap_err) { | 347 | if (!rp_pos_cap_err) { |
| 326 | ret = -EIO; | 348 | ret = -ENOTTY; |
| 327 | goto out_put; | 349 | goto out_put; |
| 328 | } | 350 | } |
| 329 | 351 | ||
| @@ -344,7 +366,8 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 344 | if (!err) { | 366 | if (!err) { |
| 345 | err = err_alloc; | 367 | err = err_alloc; |
| 346 | err_alloc = NULL; | 368 | err_alloc = NULL; |
| 347 | aer_error_init(err, einj->bus, devfn, pos_cap_err); | 369 | aer_error_init(err, einj->domain, einj->bus, devfn, |
| 370 | pos_cap_err); | ||
| 348 | list_add(&err->list, &einjected); | 371 | list_add(&err->list, &einjected); |
| 349 | } | 372 | } |
| 350 | err->uncor_status |= einj->uncor_status; | 373 | err->uncor_status |= einj->uncor_status; |
| @@ -354,11 +377,27 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 354 | err->header_log2 = einj->header_log2; | 377 | err->header_log2 = einj->header_log2; |
| 355 | err->header_log3 = einj->header_log3; | 378 | err->header_log3 = einj->header_log3; |
| 356 | 379 | ||
| 380 | if (einj->cor_status && !(einj->cor_status & ~cor_mask)) { | ||
| 381 | ret = -EINVAL; | ||
| 382 | printk(KERN_WARNING "The correctable error(s) is masked " | ||
| 383 | "by device\n"); | ||
| 384 | spin_unlock_irqrestore(&inject_lock, flags); | ||
| 385 | goto out_put; | ||
| 386 | } | ||
| 387 | if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) { | ||
| 388 | ret = -EINVAL; | ||
| 389 | printk(KERN_WARNING "The uncorrectable error(s) is masked " | ||
| 390 | "by device\n"); | ||
| 391 | spin_unlock_irqrestore(&inject_lock, flags); | ||
| 392 | goto out_put; | ||
| 393 | } | ||
| 394 | |||
| 357 | rperr = __find_aer_error_by_dev(rpdev); | 395 | rperr = __find_aer_error_by_dev(rpdev); |
| 358 | if (!rperr) { | 396 | if (!rperr) { |
| 359 | rperr = rperr_alloc; | 397 | rperr = rperr_alloc; |
| 360 | rperr_alloc = NULL; | 398 | rperr_alloc = NULL; |
| 361 | aer_error_init(rperr, rpdev->bus->number, rpdev->devfn, | 399 | aer_error_init(rperr, pci_domain_nr(rpdev->bus), |
| 400 | rpdev->bus->number, rpdev->devfn, | ||
| 362 | rp_pos_cap_err); | 401 | rp_pos_cap_err); |
| 363 | list_add(&rperr->list, &einjected); | 402 | list_add(&rperr->list, &einjected); |
| 364 | } | 403 | } |
| @@ -392,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 392 | if (ret) | 431 | if (ret) |
| 393 | goto out_put; | 432 | goto out_put; |
| 394 | 433 | ||
| 395 | if (find_aer_device(rpdev, &edev)) | 434 | if (find_aer_device(rpdev, &edev)) { |
| 435 | if (!get_service_data(edev)) { | ||
| 436 | printk(KERN_WARNING "AER service is not initialized\n"); | ||
| 437 | ret = -EINVAL; | ||
| 438 | goto out_put; | ||
| 439 | } | ||
| 396 | aer_irq(-1, edev); | 440 | aer_irq(-1, edev); |
| 441 | } | ||
| 397 | else | 442 | else |
| 398 | ret = -EINVAL; | 443 | ret = -EINVAL; |
| 399 | out_put: | 444 | out_put: |
| @@ -411,10 +456,11 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, | |||
| 411 | 456 | ||
| 412 | if (!capable(CAP_SYS_ADMIN)) | 457 | if (!capable(CAP_SYS_ADMIN)) |
| 413 | return -EPERM; | 458 | return -EPERM; |
| 414 | 459 | if (usize < offsetof(struct aer_error_inj, domain) || | |
| 415 | if (usize != sizeof(struct aer_error_inj)) | 460 | usize > sizeof(einj)) |
| 416 | return -EINVAL; | 461 | return -EINVAL; |
| 417 | 462 | ||
| 463 | memset(&einj, 0, sizeof(einj)); | ||
| 418 | if (copy_from_user(&einj, ubuf, usize)) | 464 | if (copy_from_user(&einj, ubuf, usize)) |
| 419 | return -EFAULT; | 465 | return -EFAULT; |
| 420 | 466 | ||
| @@ -452,7 +498,7 @@ static void __exit aer_inject_exit(void) | |||
| 452 | } | 498 | } |
| 453 | 499 | ||
| 454 | spin_lock_irqsave(&inject_lock, flags); | 500 | spin_lock_irqsave(&inject_lock, flags); |
| 455 | list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) { | 501 | list_for_each_entry_safe(err, err_next, &einjected, list) { |
| 456 | list_del(&err->list); | 502 | list_del(&err->list); |
| 457 | kfree(err); | 503 | kfree(err); |
| 458 | } | 504 | } |
| @@ -462,5 +508,5 @@ static void __exit aer_inject_exit(void) | |||
| 462 | module_init(aer_inject_init); | 508 | module_init(aer_inject_init); |
| 463 | module_exit(aer_inject_exit); | 509 | module_exit(aer_inject_exit); |
| 464 | 510 | ||
| 465 | MODULE_DESCRIPTION("PCIE AER software error injector"); | 511 | MODULE_DESCRIPTION("PCIe AER software error injector"); |
| 466 | MODULE_LICENSE("GPL"); | 512 | MODULE_LICENSE("GPL"); |
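The new size check in aer_inject_write() is what keeps older aer-inject binaries working: any write at least as long as the pre-domain layout is accepted, and because the buffer is zeroed first, a record without the trailing domain field is treated as PCI segment 0. A userspace-style illustration of that rule; the struct mirrors the layout above with illustrative type names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct aer_error_inj_compat {
        unsigned char  bus, dev, fn;
        unsigned int   uncor_status, cor_status;
        unsigned int   header_log0, header_log1, header_log2, header_log3;
        unsigned short domain;          /* new field added by this patch */
};

int main(void)
{
        struct aer_error_inj_compat einj;
        size_t old_size = offsetof(struct aer_error_inj_compat, domain);

        memset(&einj, 0, sizeof(einj)); /* what the kernel now does up front */
        /* an old-format write copies only old_size bytes; domain stays 0 */
        printf("old record is %zu bytes, domain defaults to %u\n",
               old_size, einj.domain);
        return 0;
}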
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 2ce8f9ccc66e..21f215f4daa3 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
| 20 | #include <linux/sched.h> | ||
| 20 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 21 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
| 22 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
| @@ -52,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = { | |||
| 52 | 53 | ||
| 53 | static struct pcie_port_service_driver aerdriver = { | 54 | static struct pcie_port_service_driver aerdriver = { |
| 54 | .name = "aer", | 55 | .name = "aer", |
| 55 | .port_type = PCIE_ANY_PORT, | 56 | .port_type = PCI_EXP_TYPE_ROOT_PORT, |
| 56 | .service = PCIE_PORT_SERVICE_AER, | 57 | .service = PCIE_PORT_SERVICE_AER, |
| 57 | 58 | ||
| 58 | .probe = aer_probe, | 59 | .probe = aer_probe, |
| @@ -154,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | |||
| 154 | mutex_init(&rpc->rpc_mutex); | 155 | mutex_init(&rpc->rpc_mutex); |
| 155 | init_waitqueue_head(&rpc->wait_release); | 156 | init_waitqueue_head(&rpc->wait_release); |
| 156 | 157 | ||
| 157 | /* Use PCIE bus function to store rpc into PCIE device */ | 158 | /* Use PCIe bus function to store rpc into PCIe device */ |
| 158 | set_service_data(dev, rpc); | 159 | set_service_data(dev, rpc); |
| 159 | 160 | ||
| 160 | return rpc; | 161 | return rpc; |
| @@ -294,7 +295,7 @@ static void aer_error_resume(struct pci_dev *dev) | |||
| 294 | u16 reg16; | 295 | u16 reg16; |
| 295 | 296 | ||
| 296 | /* Clean up Root device status */ | 297 | /* Clean up Root device status */ |
| 297 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 298 | pos = pci_pcie_cap(dev); |
| 298 | pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, ®16); | 299 | pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, ®16); |
| 299 | pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); | 300 | pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); |
| 300 | 301 | ||
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 8edb2f300e8f..04814087658d 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | * | 24 | * |
| 25 | * @return: Zero on success. Nonzero otherwise. | 25 | * @return: Zero on success. Nonzero otherwise. |
| 26 | * | 26 | * |
| 27 | * Invoked when PCIE bus loads AER service driver. To avoid conflict with | 27 | * Invoked when PCIe bus loads AER service driver. To avoid conflict with |
| 28 | * BIOS AER support requires BIOS to yield AER control to OS native driver. | 28 | * BIOS AER support requires BIOS to yield AER control to OS native driver. |
| 29 | **/ | 29 | **/ |
| 30 | int aer_osc_setup(struct pcie_device *pciedev) | 30 | int aer_osc_setup(struct pcie_device *pciedev) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 9f5ccbeb4fa5..c843a799814d 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -35,11 +35,14 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) | |||
| 35 | u16 reg16 = 0; | 35 | u16 reg16 = 0; |
| 36 | int pos; | 36 | int pos; |
| 37 | 37 | ||
| 38 | if (dev->aer_firmware_first) | ||
| 39 | return -EIO; | ||
| 40 | |||
| 38 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 41 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| 39 | if (!pos) | 42 | if (!pos) |
| 40 | return -EIO; | 43 | return -EIO; |
| 41 | 44 | ||
| 42 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 45 | pos = pci_pcie_cap(dev); |
| 43 | if (!pos) | 46 | if (!pos) |
| 44 | return -EIO; | 47 | return -EIO; |
| 45 | 48 | ||
| @@ -60,7 +63,10 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) | |||
| 60 | u16 reg16 = 0; | 63 | u16 reg16 = 0; |
| 61 | int pos; | 64 | int pos; |
| 62 | 65 | ||
| 63 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 66 | if (dev->aer_firmware_first) |
| 67 | return -EIO; | ||
| 68 | |||
| 69 | pos = pci_pcie_cap(dev); | ||
| 64 | if (!pos) | 70 | if (!pos) |
| 65 | return -EIO; | 71 | return -EIO; |
| 66 | 72 | ||
| @@ -78,48 +84,27 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); | |||
| 78 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | 84 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) |
| 79 | { | 85 | { |
| 80 | int pos; | 86 | int pos; |
| 81 | u32 status, mask; | 87 | u32 status; |
| 82 | 88 | ||
| 83 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 89 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| 84 | if (!pos) | 90 | if (!pos) |
| 85 | return -EIO; | 91 | return -EIO; |
| 86 | 92 | ||
| 87 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); | 93 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); |
| 88 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); | 94 | if (status) |
| 89 | if (dev->error_state == pci_channel_io_normal) | 95 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); |
| 90 | status &= ~mask; /* Clear corresponding nonfatal bits */ | ||
| 91 | else | ||
| 92 | status &= mask; /* Clear corresponding fatal bits */ | ||
| 93 | pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); | ||
| 94 | 96 | ||
| 95 | return 0; | 97 | return 0; |
| 96 | } | 98 | } |
| 97 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); | 99 | EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); |
| 98 | 100 | ||
| 99 | #if 0 | ||
| 100 | int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) | ||
| 101 | { | ||
| 102 | int pos; | ||
| 103 | u32 status; | ||
| 104 | |||
| 105 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | ||
| 106 | if (!pos) | ||
| 107 | return -EIO; | ||
| 108 | |||
| 109 | pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status); | ||
| 110 | pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status); | ||
| 111 | |||
| 112 | return 0; | ||
| 113 | } | ||
| 114 | #endif /* 0 */ | ||
| 115 | |||
| 116 | static int set_device_error_reporting(struct pci_dev *dev, void *data) | 101 | static int set_device_error_reporting(struct pci_dev *dev, void *data) |
| 117 | { | 102 | { |
| 118 | bool enable = *((bool *)data); | 103 | bool enable = *((bool *)data); |
| 119 | 104 | ||
| 120 | if (dev->pcie_type == PCIE_RC_PORT || | 105 | if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || |
| 121 | dev->pcie_type == PCIE_SW_UPSTREAM_PORT || | 106 | (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || |
| 122 | dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) { | 107 | (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { |
| 123 | if (enable) | 108 | if (enable) |
| 124 | pci_enable_pcie_error_reporting(dev); | 109 | pci_enable_pcie_error_reporting(dev); |
| 125 | else | 110 | else |
| @@ -218,7 +203,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) | |||
| 218 | */ | 203 | */ |
| 219 | if (atomic_read(&dev->enable_cnt) == 0) | 204 | if (atomic_read(&dev->enable_cnt) == 0) |
| 220 | return 0; | 205 | return 0; |
| 221 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 206 | pos = pci_pcie_cap(dev); |
| 222 | if (!pos) | 207 | if (!pos) |
| 223 | return 0; | 208 | return 0; |
| 224 | /* Check if AER is enabled */ | 209 | /* Check if AER is enabled */ |
| @@ -431,10 +416,9 @@ static int find_aer_service_iter(struct device *device, void *data) | |||
| 431 | result = (struct find_aer_service_data *) data; | 416 | result = (struct find_aer_service_data *) data; |
| 432 | 417 | ||
| 433 | if (device->bus == &pcie_port_bus_type) { | 418 | if (device->bus == &pcie_port_bus_type) { |
| 434 | struct pcie_port_data *port_data; | 419 | struct pcie_device *pcie = to_pcie_device(device); |
| 435 | 420 | ||
| 436 | port_data = pci_get_drvdata(to_pcie_device(device)->port); | 421 | if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) |
| 437 | if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT) | ||
| 438 | result->is_downstream = 1; | 422 | result->is_downstream = 1; |
| 439 | 423 | ||
| 440 | driver = device->driver; | 424 | driver = device->driver; |
| @@ -603,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev, | |||
| 603 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages | 587 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages |
| 604 | * @rpc: pointer to a Root Port data structure | 588 | * @rpc: pointer to a Root Port data structure |
| 605 | * | 589 | * |
| 606 | * Invoked when PCIE bus loads AER service driver. | 590 | * Invoked when PCIe bus loads AER service driver. |
| 607 | */ | 591 | */ |
| 608 | void aer_enable_rootport(struct aer_rpc *rpc) | 592 | void aer_enable_rootport(struct aer_rpc *rpc) |
| 609 | { | 593 | { |
| @@ -612,8 +596,8 @@ void aer_enable_rootport(struct aer_rpc *rpc) | |||
| 612 | u16 reg16; | 596 | u16 reg16; |
| 613 | u32 reg32; | 597 | u32 reg32; |
| 614 | 598 | ||
| 615 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 599 | pos = pci_pcie_cap(pdev); |
| 616 | /* Clear PCIE Capability's Device Status */ | 600 | /* Clear PCIe Capability's Device Status */ |
| 617 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); | 601 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); |
| 618 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); | 602 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); |
| 619 | 603 | ||
| @@ -647,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) | |||
| 647 | * disable_root_aer - disable Root Port's interrupts when receiving messages | 631 | * disable_root_aer - disable Root Port's interrupts when receiving messages |
| 648 | * @rpc: pointer to a Root Port data structure | 632 | * @rpc: pointer to a Root Port data structure |
| 649 | * | 633 | * |
| 650 | * Invoked when PCIE bus unloads AER service driver. | 634 | * Invoked when PCIe bus unloads AER service driver. |
| 651 | */ | 635 | */ |
| 652 | static void disable_root_aer(struct aer_rpc *rpc) | 636 | static void disable_root_aer(struct aer_rpc *rpc) |
| 653 | { | 637 | { |
| @@ -874,8 +858,22 @@ void aer_delete_rootport(struct aer_rpc *rpc) | |||
| 874 | */ | 858 | */ |
| 875 | int aer_init(struct pcie_device *dev) | 859 | int aer_init(struct pcie_device *dev) |
| 876 | { | 860 | { |
| 877 | if (aer_osc_setup(dev) && !forceload) | 861 | if (dev->port->aer_firmware_first) { |
| 878 | return -ENXIO; | 862 | dev_printk(KERN_DEBUG, &dev->device, |
| 863 | "PCIe errors handled by platform firmware.\n"); | ||
| 864 | goto out; | ||
| 865 | } | ||
| 866 | |||
| 867 | if (aer_osc_setup(dev)) | ||
| 868 | goto out; | ||
| 879 | 869 | ||
| 880 | return 0; | 870 | return 0; |
| 871 | out: | ||
| 872 | if (forceload) { | ||
| 873 | dev_printk(KERN_DEBUG, &dev->device, | ||
| 874 | "aerdrv forceload requested.\n"); | ||
| 875 | dev->port->aer_firmware_first = 0; | ||
| 876 | return 0; | ||
| 877 | } | ||
| 878 | return -ENXIO; | ||
| 881 | } | 879 | } |
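The aerdrv_core.c changes above wire in the new aer_firmware_first flag: when the ACPI HEST table (parsed via acpi_hest_firmware_first_pci() during device setup, see the probe.c hunks below) marks a device as firmware-first, pci_enable_pcie_error_reporting() and pci_disable_pcie_error_reporting() return -EIO, and aer_init() declines to take over the Root Port unless the forceload module parameter is set. A sketch of what a hypothetical endpoint driver sees (the message text is illustrative):

    /* With firmware-first AER the enable call now fails cleanly instead of
     * touching registers that the platform firmware owns. */
    if (pci_enable_pcie_error_reporting(pdev))
            dev_info(&pdev->dev,
                     "AER reporting not enabled (firmware-first or no AER capability)\n");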
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 44acde72294f..9d3e4c8d0184 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
| @@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
| 184 | 184 | ||
| 185 | if (info->status == 0) { | 185 | if (info->status == 0) { |
| 186 | AER_PR(info, dev, | 186 | AER_PR(info, dev, |
| 187 | "PCIE Bus Error: severity=%s, type=Unaccessible, " | 187 | "PCIe Bus Error: severity=%s, type=Unaccessible, " |
| 188 | "id=%04x(Unregistered Agent ID)\n", | 188 | "id=%04x(Unregistered Agent ID)\n", |
| 189 | aer_error_severity_string[info->severity], id); | 189 | aer_error_severity_string[info->severity], id); |
| 190 | } else { | 190 | } else { |
| @@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
| 194 | agent = AER_GET_AGENT(info->severity, info->status); | 194 | agent = AER_GET_AGENT(info->severity, info->status); |
| 195 | 195 | ||
| 196 | AER_PR(info, dev, | 196 | AER_PR(info, dev, |
| 197 | "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", | 197 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
| 198 | aer_error_severity_string[info->severity], | 198 | aer_error_severity_string[info->severity], |
| 199 | aer_error_layer[layer], id, aer_agent_string[agent]); | 199 | aer_error_layer[layer], id, aer_agent_string[agent]); |
| 200 | 200 | ||
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c index a928d8ab6bda..a2747a663bc9 100644 --- a/drivers/pci/pcie/aer/ecrc.c +++ b/drivers/pci/pcie/aer/ecrc.c | |||
| @@ -51,7 +51,7 @@ static int enable_ecrc_checking(struct pci_dev *dev) | |||
| 51 | int pos; | 51 | int pos; |
| 52 | u32 reg32; | 52 | u32 reg32; |
| 53 | 53 | ||
| 54 | if (!dev->is_pcie) | 54 | if (!pci_is_pcie(dev)) |
| 55 | return -ENODEV; | 55 | return -ENODEV; |
| 56 | 56 | ||
| 57 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 57 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| @@ -79,7 +79,7 @@ static int disable_ecrc_checking(struct pci_dev *dev) | |||
| 79 | int pos; | 79 | int pos; |
| 80 | u32 reg32; | 80 | u32 reg32; |
| 81 | 81 | ||
| 82 | if (!dev->is_pcie) | 82 | if (!pci_is_pcie(dev)) |
| 83 | return -ENODEV; | 83 | return -ENODEV; |
| 84 | 84 | ||
| 85 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 85 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 745402e8e498..be53d98fa384 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * File: drivers/pci/pcie/aspm.c | 2 | * File: drivers/pci/pcie/aspm.c |
| 3 | * Enabling PCIE link L0s/L1 state and Clock Power Management | 3 | * Enabling PCIe link L0s/L1 state and Clock Power Management |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2007 Intel | 5 | * Copyright (C) 2007 Intel |
| 6 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) | 6 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) |
| @@ -122,7 +122,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) | |||
| 122 | struct pci_bus *linkbus = link->pdev->subordinate; | 122 | struct pci_bus *linkbus = link->pdev->subordinate; |
| 123 | 123 | ||
| 124 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 124 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 125 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 125 | pos = pci_pcie_cap(child); |
| 126 | if (!pos) | 126 | if (!pos) |
| 127 | return; | 127 | return; |
| 128 | pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); | 128 | pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); |
| @@ -156,7 +156,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) | |||
| 156 | 156 | ||
| 157 | /* All functions should have the same cap and state, take the worst */ | 157 | /* All functions should have the same cap and state, take the worst */ |
| 158 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 158 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 159 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 159 | pos = pci_pcie_cap(child); |
| 160 | if (!pos) | 160 | if (!pos) |
| 161 | return; | 161 | return; |
| 162 | pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, ®32); | 162 | pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, ®32); |
| @@ -191,23 +191,23 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) | |||
| 191 | * Configuration, so just check one function | 191 | * Configuration, so just check one function |
| 192 | */ | 192 | */ |
| 193 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); | 193 | child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); |
| 194 | BUG_ON(!child->is_pcie); | 194 | BUG_ON(!pci_is_pcie(child)); |
| 195 | 195 | ||
| 196 | /* Check downstream component if bit Slot Clock Configuration is 1 */ | 196 | /* Check downstream component if bit Slot Clock Configuration is 1 */ |
| 197 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); | 197 | cpos = pci_pcie_cap(child); |
| 198 | pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, ®16); | 198 | pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, ®16); |
| 199 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) | 199 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) |
| 200 | same_clock = 0; | 200 | same_clock = 0; |
| 201 | 201 | ||
| 202 | /* Check upstream component if bit Slot Clock Configuration is 1 */ | 202 | /* Check upstream component if bit Slot Clock Configuration is 1 */ |
| 203 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | 203 | ppos = pci_pcie_cap(parent); |
| 204 | pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); | 204 | pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); |
| 205 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) | 205 | if (!(reg16 & PCI_EXP_LNKSTA_SLC)) |
| 206 | same_clock = 0; | 206 | same_clock = 0; |
| 207 | 207 | ||
| 208 | /* Configure downstream component, all functions */ | 208 | /* Configure downstream component, all functions */ |
| 209 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 209 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 210 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); | 210 | cpos = pci_pcie_cap(child); |
| 211 | pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, ®16); | 211 | pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, ®16); |
| 212 | child_reg[PCI_FUNC(child->devfn)] = reg16; | 212 | child_reg[PCI_FUNC(child->devfn)] = reg16; |
| 213 | if (same_clock) | 213 | if (same_clock) |
| @@ -247,7 +247,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) | |||
| 247 | dev_printk(KERN_ERR, &parent->dev, | 247 | dev_printk(KERN_ERR, &parent->dev, |
| 248 | "ASPM: Could not configure common clock\n"); | 248 | "ASPM: Could not configure common clock\n"); |
| 249 | list_for_each_entry(child, &linkbus->devices, bus_list) { | 249 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 250 | cpos = pci_find_capability(child, PCI_CAP_ID_EXP); | 250 | cpos = pci_pcie_cap(child); |
| 251 | pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, | 251 | pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, |
| 252 | child_reg[PCI_FUNC(child->devfn)]); | 252 | child_reg[PCI_FUNC(child->devfn)]); |
| 253 | } | 253 | } |
| @@ -300,7 +300,7 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev, | |||
| 300 | u16 reg16; | 300 | u16 reg16; |
| 301 | u32 reg32; | 301 | u32 reg32; |
| 302 | 302 | ||
| 303 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 303 | pos = pci_pcie_cap(pdev); |
| 304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); | 304 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); |
| 305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; | 305 | info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; |
| 306 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; | 306 | info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; |
| @@ -420,7 +420,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
| 420 | child->pcie_type != PCI_EXP_TYPE_LEG_END) | 420 | child->pcie_type != PCI_EXP_TYPE_LEG_END) |
| 421 | continue; | 421 | continue; |
| 422 | 422 | ||
| 423 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 423 | pos = pci_pcie_cap(child); |
| 424 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); | 424 | pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); |
| 425 | /* Calculate endpoint L0s acceptable latency */ | 425 | /* Calculate endpoint L0s acceptable latency */ |
| 426 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; | 426 | encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; |
| @@ -436,7 +436,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
| 436 | static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) | 436 | static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) |
| 437 | { | 437 | { |
| 438 | u16 reg16; | 438 | u16 reg16; |
| 439 | int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 439 | int pos = pci_pcie_cap(pdev); |
| 440 | 440 | ||
| 441 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); | 441 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
| 442 | reg16 &= ~0x3; | 442 | reg16 &= ~0x3; |
| @@ -499,11 +499,11 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
| 499 | int pos; | 499 | int pos; |
| 500 | u32 reg32; | 500 | u32 reg32; |
| 501 | /* | 501 | /* |
| 502 | * Some functions in a slot might not all be PCIE functions, | 502 | * Some functions in a slot might not all be PCIe functions, |
| 503 | * very strange. Disable ASPM for the whole slot | 503 | * very strange. Disable ASPM for the whole slot |
| 504 | */ | 504 | */ |
| 505 | list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { | 505 | list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { |
| 506 | pos = pci_find_capability(child, PCI_CAP_ID_EXP); | 506 | pos = pci_pcie_cap(child); |
| 507 | if (!pos) | 507 | if (!pos) |
| 508 | return -EINVAL; | 508 | return -EINVAL; |
| 509 | /* | 509 | /* |
| @@ -563,7 +563,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) | |||
| 563 | struct pcie_link_state *link; | 563 | struct pcie_link_state *link; |
| 564 | int blacklist = !!pcie_aspm_sanity_check(pdev); | 564 | int blacklist = !!pcie_aspm_sanity_check(pdev); |
| 565 | 565 | ||
| 566 | if (aspm_disabled || !pdev->is_pcie || pdev->link_state) | 566 | if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state) |
| 567 | return; | 567 | return; |
| 568 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 568 | if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && |
| 569 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) | 569 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) |
| @@ -629,7 +629,8 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
| 629 | struct pci_dev *parent = pdev->bus->self; | 629 | struct pci_dev *parent = pdev->bus->self; |
| 630 | struct pcie_link_state *link, *root, *parent_link; | 630 | struct pcie_link_state *link, *root, *parent_link; |
| 631 | 631 | ||
| 632 | if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state) | 632 | if (aspm_disabled || !pci_is_pcie(pdev) || |
| 633 | !parent || !parent->link_state) | ||
| 633 | return; | 634 | return; |
| 634 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && | 635 | if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
| 635 | (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) | 636 | (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
| @@ -656,8 +657,10 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) | |||
| 656 | free_link_state(link); | 657 | free_link_state(link); |
| 657 | 658 | ||
| 658 | /* Recheck latencies and configure upstream links */ | 659 | /* Recheck latencies and configure upstream links */ |
| 659 | pcie_update_aspm_capable(root); | 660 | if (parent_link) { |
| 660 | pcie_config_aspm_path(parent_link); | 661 | pcie_update_aspm_capable(root); |
| 662 | pcie_config_aspm_path(parent_link); | ||
| 663 | } | ||
| 661 | out: | 664 | out: |
| 662 | mutex_unlock(&aspm_lock); | 665 | mutex_unlock(&aspm_lock); |
| 663 | up_read(&pci_bus_sem); | 666 | up_read(&pci_bus_sem); |
| @@ -668,7 +671,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) | |||
| 668 | { | 671 | { |
| 669 | struct pcie_link_state *link = pdev->link_state; | 672 | struct pcie_link_state *link = pdev->link_state; |
| 670 | 673 | ||
| 671 | if (aspm_disabled || !pdev->is_pcie || !link) | 674 | if (aspm_disabled || !pci_is_pcie(pdev) || !link) |
| 672 | return; | 675 | return; |
| 673 | if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && | 676 | if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
| 674 | (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) | 677 | (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) |
| @@ -694,7 +697,7 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) | |||
| 694 | struct pci_dev *parent = pdev->bus->self; | 697 | struct pci_dev *parent = pdev->bus->self; |
| 695 | struct pcie_link_state *link; | 698 | struct pcie_link_state *link; |
| 696 | 699 | ||
| 697 | if (aspm_disabled || !pdev->is_pcie) | 700 | if (aspm_disabled || !pci_is_pcie(pdev)) |
| 698 | return; | 701 | return; |
| 699 | if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || | 702 | if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || |
| 700 | pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) | 703 | pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) |
| @@ -839,8 +842,9 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) | |||
| 839 | { | 842 | { |
| 840 | struct pcie_link_state *link_state = pdev->link_state; | 843 | struct pcie_link_state *link_state = pdev->link_state; |
| 841 | 844 | ||
| 842 | if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 845 | if (!pci_is_pcie(pdev) || |
| 843 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | 846 | (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && |
| 847 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | ||
| 844 | return; | 848 | return; |
| 845 | 849 | ||
| 846 | if (link_state->aspm_support) | 850 | if (link_state->aspm_support) |
| @@ -855,8 +859,9 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) | |||
| 855 | { | 859 | { |
| 856 | struct pcie_link_state *link_state = pdev->link_state; | 860 | struct pcie_link_state *link_state = pdev->link_state; |
| 857 | 861 | ||
| 858 | if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && | 862 | if (!pci_is_pcie(pdev) || |
| 859 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | 863 | (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && |
| 864 | pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) | ||
| 860 | return; | 865 | return; |
| 861 | 866 | ||
| 862 | if (link_state->aspm_support) | 867 | if (link_state->aspm_support) |
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 17ad53868f9f..aaeb9d21cba5 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
| @@ -11,31 +11,16 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| 13 | 13 | ||
| 14 | #if !defined(PCI_CAP_ID_PME) | 14 | #define PCIE_PORT_DEVICE_MAXSERVICES 4 |
| 15 | #define PCI_CAP_ID_PME 1 | ||
| 16 | #endif | ||
| 17 | |||
| 18 | #if !defined(PCI_CAP_ID_EXP) | ||
| 19 | #define PCI_CAP_ID_EXP 0x10 | ||
| 20 | #endif | ||
| 21 | |||
| 22 | #define PORT_TYPE_MASK 0xf | ||
| 23 | #define PORT_TO_SLOT_MASK 0x100 | ||
| 24 | #define SLOT_HP_CAPABLE_MASK 0x40 | ||
| 25 | #define PCIE_CAPABILITIES_REG 0x2 | ||
| 26 | #define PCIE_SLOT_CAPABILITIES_REG 0x14 | ||
| 27 | #define PCIE_PORT_DEVICE_MAXSERVICES 4 | ||
| 28 | #define PCIE_PORT_MSI_VECTOR_MASK 0x1f | ||
| 29 | /* | 15 | /* |
| 30 | * According to the PCI Express Base Specification 2.0, the indices of the MSI-X | 16 | * According to the PCI Express Base Specification 2.0, the indices of |
| 31 | * table entries used by port services must not exceed 31 | 17 | * the MSI-X table entries used by port services must not exceed 31 |
| 32 | */ | 18 | */ |
| 33 | #define PCIE_PORT_MAX_MSIX_ENTRIES 32 | 19 | #define PCIE_PORT_MAX_MSIX_ENTRIES 32 |
| 34 | 20 | ||
| 35 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) | 21 | #define get_descriptor_id(type, service) (((type - 4) << 4) | service) |
| 36 | 22 | ||
| 37 | extern struct bus_type pcie_port_bus_type; | 23 | extern struct bus_type pcie_port_bus_type; |
| 38 | extern int pcie_port_device_probe(struct pci_dev *dev); | ||
| 39 | extern int pcie_port_device_register(struct pci_dev *dev); | 24 | extern int pcie_port_device_register(struct pci_dev *dev); |
| 40 | #ifdef CONFIG_PM | 25 | #ifdef CONFIG_PM |
| 41 | extern int pcie_port_device_suspend(struct device *dev); | 26 | extern int pcie_port_device_suspend(struct device *dev); |
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index ef3a4eeaebb4..18bf90f748f6 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c | |||
| @@ -26,7 +26,6 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type); | |||
| 26 | static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) | 26 | static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) |
| 27 | { | 27 | { |
| 28 | struct pcie_device *pciedev; | 28 | struct pcie_device *pciedev; |
| 29 | struct pcie_port_data *port_data; | ||
| 30 | struct pcie_port_service_driver *driver; | 29 | struct pcie_port_service_driver *driver; |
| 31 | 30 | ||
| 32 | if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) | 31 | if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) |
| @@ -38,10 +37,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) | |||
| 38 | if (driver->service != pciedev->service) | 37 | if (driver->service != pciedev->service) |
| 39 | return 0; | 38 | return 0; |
| 40 | 39 | ||
| 41 | port_data = pci_get_drvdata(pciedev->port); | 40 | if ((driver->port_type != PCIE_ANY_PORT) && |
| 42 | 41 | (driver->port_type != pciedev->port->pcie_type)) | |
| 43 | if (driver->port_type != PCIE_ANY_PORT | ||
| 44 | && driver->port_type != port_data->port_type) | ||
| 45 | return 0; | 42 | return 0; |
| 46 | 43 | ||
| 47 | return 1; | 44 | return 1; |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 52f84fca9f7d..b174188ac121 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | */ | 27 | */ |
| 28 | static void release_pcie_device(struct device *dev) | 28 | static void release_pcie_device(struct device *dev) |
| 29 | { | 29 | { |
| 30 | kfree(to_pcie_device(dev)); | 30 | kfree(to_pcie_device(dev)); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | /** | 33 | /** |
| @@ -108,9 +108,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
| 108 | * the value in this field indicates which MSI-X Table entry is | 108 | * the value in this field indicates which MSI-X Table entry is |
| 109 | * used to generate the interrupt message." | 109 | * used to generate the interrupt message." |
| 110 | */ | 110 | */ |
| 111 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 111 | pos = pci_pcie_cap(dev); |
| 112 | pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, ®16); | 112 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); |
| 113 | entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK; | 113 | entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; |
| 114 | if (entry >= nr_entries) | 114 | if (entry >= nr_entries) |
| 115 | goto Error; | 115 | goto Error; |
| 116 | 116 | ||
| @@ -177,37 +177,40 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) | |||
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | /** | 179 | /** |
| 180 | * assign_interrupt_mode - choose interrupt mode for PCI Express port services | 180 | * init_service_irqs - initialize irqs for PCI Express port services |
| 181 | * (INTx, MSI-X, MSI) and set up vectors | ||
| 182 | * @dev: PCI Express port to handle | 181 | * @dev: PCI Express port to handle |
| 183 | * @vectors: Array of interrupt vectors to populate | 182 | * @irqs: Array of irqs to populate |
| 184 | * @mask: Bitmask of port capabilities returned by get_port_device_capability() | 183 | * @mask: Bitmask of port capabilities returned by get_port_device_capability() |
| 185 | * | 184 | * |
| 186 | * Return value: Interrupt mode associated with the port | 185 | * Return value: Interrupt mode associated with the port |
| 187 | */ | 186 | */ |
| 188 | static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) | 187 | static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) |
| 189 | { | 188 | { |
| 190 | int irq, interrupt_mode = PCIE_PORT_NO_IRQ; | 189 | int i, irq; |
| 191 | int i; | ||
| 192 | 190 | ||
| 193 | /* Try to use MSI-X if supported */ | 191 | /* Try to use MSI-X if supported */ |
| 194 | if (!pcie_port_enable_msix(dev, vectors, mask)) | 192 | if (!pcie_port_enable_msix(dev, irqs, mask)) |
| 195 | return PCIE_PORT_MSIX_MODE; | 193 | return 0; |
| 196 | |||
| 197 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ | 194 | /* We're not going to use MSI-X, so try MSI and fall back to INTx */ |
| 198 | if (!pci_enable_msi(dev)) | 195 | irq = -1; |
| 199 | interrupt_mode = PCIE_PORT_MSI_MODE; | 196 | if (!pci_enable_msi(dev) || dev->pin) |
| 200 | 197 | irq = dev->irq; | |
| 201 | if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin) | ||
| 202 | interrupt_mode = PCIE_PORT_INTx_MODE; | ||
| 203 | 198 | ||
| 204 | irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1; | ||
| 205 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) | 199 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) |
| 206 | vectors[i] = irq; | 200 | irqs[i] = irq; |
| 201 | irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; | ||
| 207 | 202 | ||
| 208 | vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1; | 203 | if (irq < 0) |
| 204 | return -ENODEV; | ||
| 205 | return 0; | ||
| 206 | } | ||
| 209 | 207 | ||
| 210 | return interrupt_mode; | 208 | static void cleanup_service_irqs(struct pci_dev *dev) |
| 209 | { | ||
| 210 | if (dev->msix_enabled) | ||
| 211 | pci_disable_msix(dev); | ||
| 212 | else if (dev->msi_enabled) | ||
| 213 | pci_disable_msi(dev); | ||
| 211 | } | 214 | } |
| 212 | 215 | ||
| 213 | /** | 216 | /** |
| @@ -226,13 +229,12 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
| 226 | u16 reg16; | 229 | u16 reg16; |
| 227 | u32 reg32; | 230 | u32 reg32; |
| 228 | 231 | ||
| 229 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 232 | pos = pci_pcie_cap(dev); |
| 230 | pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, ®16); | 233 | pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); |
| 231 | /* Hot-Plug Capable */ | 234 | /* Hot-Plug Capable */ |
| 232 | if (reg16 & PORT_TO_SLOT_MASK) { | 235 | if (reg16 & PCI_EXP_FLAGS_SLOT) { |
| 233 | pci_read_config_dword(dev, | 236 | pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); |
| 234 | pos + PCIE_SLOT_CAPABILITIES_REG, ®32); | 237 | if (reg32 & PCI_EXP_SLTCAP_HPC) |
| 235 | if (reg32 & SLOT_HP_CAPABLE_MASK) | ||
| 236 | services |= PCIE_PORT_SERVICE_HP; | 238 | services |= PCIE_PORT_SERVICE_HP; |
| 237 | } | 239 | } |
| 238 | /* AER capable */ | 240 | /* AER capable */ |
| @@ -241,80 +243,47 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
| 241 | /* VC support */ | 243 | /* VC support */ |
| 242 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) | 244 | if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) |
| 243 | services |= PCIE_PORT_SERVICE_VC; | 245 | services |= PCIE_PORT_SERVICE_VC; |
| 246 | /* Root ports are capable of generating PME too */ | ||
| 247 | if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) | ||
| 248 | services |= PCIE_PORT_SERVICE_PME; | ||
| 244 | 249 | ||
| 245 | return services; | 250 | return services; |
| 246 | } | 251 | } |
| 247 | 252 | ||
| 248 | /** | 253 | /** |
| 249 | * pcie_device_init - initialize PCI Express port service device | 254 | * pcie_device_init - allocate and initialize PCI Express port service device |
| 250 | * @dev: Port service device to initialize | 255 | * @pdev: PCI Express port to associate the service device with |
| 251 | * @parent: PCI Express port to associate the service device with | 256 | * @service: Type of service to associate with the service device |
| 252 | * @port_type: Type of the port | ||
| 253 | * @service_type: Type of service to associate with the service device | ||
| 254 | * @irq: Interrupt vector to associate with the service device | 257 | * @irq: Interrupt vector to associate with the service device |
| 255 | */ | 258 | */ |
| 256 | static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, | 259 | static int pcie_device_init(struct pci_dev *pdev, int service, int irq) |
| 257 | int service_type, int irq) | ||
| 258 | { | 260 | { |
| 259 | struct pcie_port_data *port_data = pci_get_drvdata(parent); | 261 | int retval; |
| 262 | struct pcie_device *pcie; | ||
| 260 | struct device *device; | 263 | struct device *device; |
| 261 | int port_type = port_data->port_type; | ||
| 262 | 264 | ||
| 263 | dev->port = parent; | 265 | pcie = kzalloc(sizeof(*pcie), GFP_KERNEL); |
| 264 | dev->irq = irq; | 266 | if (!pcie) |
| 265 | dev->service = service_type; | 267 | return -ENOMEM; |
| 268 | pcie->port = pdev; | ||
| 269 | pcie->irq = irq; | ||
| 270 | pcie->service = service; | ||
| 266 | 271 | ||
| 267 | /* Initialize generic device interface */ | 272 | /* Initialize generic device interface */ |
| 268 | device = &dev->device; | 273 | device = &pcie->device; |
| 269 | memset(device, 0, sizeof(struct device)); | ||
| 270 | device->bus = &pcie_port_bus_type; | 274 | device->bus = &pcie_port_bus_type; |
| 271 | device->driver = NULL; | ||
| 272 | dev_set_drvdata(device, NULL); | ||
| 273 | device->release = release_pcie_device; /* callback to free pcie dev */ | 275 | device->release = release_pcie_device; /* callback to free pcie dev */ |
| 274 | dev_set_name(device, "%s:pcie%02x", | 276 | dev_set_name(device, "%s:pcie%02x", |
| 275 | pci_name(parent), get_descriptor_id(port_type, service_type)); | 277 | pci_name(pdev), |
| 276 | device->parent = &parent->dev; | 278 | get_descriptor_id(pdev->pcie_type, service)); |
| 277 | } | 279 | device->parent = &pdev->dev; |
| 278 | 280 | ||
| 279 | /** | 281 | retval = device_register(device); |
| 280 | * alloc_pcie_device - allocate PCI Express port service device structure | 282 | if (retval) |
| 281 | * @parent: PCI Express port to associate the service device with | 283 | kfree(pcie); |
| 282 | * @port_type: Type of the port | 284 | else |
| 283 | * @service_type: Type of service to associate with the service device | 285 | get_device(device); |
| 284 | * @irq: Interrupt vector to associate with the service device | 286 | return retval; |
| 285 | */ | ||
| 286 | static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, | ||
| 287 | int service_type, int irq) | ||
| 288 | { | ||
| 289 | struct pcie_device *device; | ||
| 290 | |||
| 291 | device = kzalloc(sizeof(struct pcie_device), GFP_KERNEL); | ||
| 292 | if (!device) | ||
| 293 | return NULL; | ||
| 294 | |||
| 295 | pcie_device_init(parent, device, service_type, irq); | ||
| 296 | return device; | ||
| 297 | } | ||
| 298 | |||
| 299 | /** | ||
| 300 | * pcie_port_device_probe - check if device is a PCI Express port | ||
| 301 | * @dev: Device to check | ||
| 302 | */ | ||
| 303 | int pcie_port_device_probe(struct pci_dev *dev) | ||
| 304 | { | ||
| 305 | int pos, type; | ||
| 306 | u16 reg; | ||
| 307 | |||
| 308 | if (!(pos = pci_find_capability(dev, PCI_CAP_ID_EXP))) | ||
| 309 | return -ENODEV; | ||
| 310 | |||
| 311 | pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, ®); | ||
| 312 | type = (reg >> 4) & PORT_TYPE_MASK; | ||
| 313 | if ( type == PCIE_RC_PORT || type == PCIE_SW_UPSTREAM_PORT || | ||
| 314 | type == PCIE_SW_DOWNSTREAM_PORT ) | ||
| 315 | return 0; | ||
| 316 | |||
| 317 | return -ENODEV; | ||
| 318 | } | 287 | } |
| 319 | 288 | ||
| 320 | /** | 289 | /** |
| @@ -326,77 +295,49 @@ int pcie_port_device_probe(struct pci_dev *dev) | |||
| 326 | */ | 295 | */ |
| 327 | int pcie_port_device_register(struct pci_dev *dev) | 296 | int pcie_port_device_register(struct pci_dev *dev) |
| 328 | { | 297 | { |
| 329 | struct pcie_port_data *port_data; | 298 | int status, capabilities, i, nr_service; |
| 330 | int status, capabilities, irq_mode, i, nr_serv; | 299 | int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; |
| 331 | int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; | ||
| 332 | u16 reg16; | ||
| 333 | |||
| 334 | port_data = kzalloc(sizeof(*port_data), GFP_KERNEL); | ||
| 335 | if (!port_data) | ||
| 336 | return -ENOMEM; | ||
| 337 | pci_set_drvdata(dev, port_data); | ||
| 338 | |||
| 339 | /* Get port type */ | ||
| 340 | pci_read_config_word(dev, | ||
| 341 | pci_find_capability(dev, PCI_CAP_ID_EXP) + | ||
| 342 | PCIE_CAPABILITIES_REG, ®16); | ||
| 343 | port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK; | ||
| 344 | 300 | ||
| 301 | /* Get and check PCI Express port services */ | ||
| 345 | capabilities = get_port_device_capability(dev); | 302 | capabilities = get_port_device_capability(dev); |
| 346 | /* Root ports are capable of generating PME too */ | 303 | if (!capabilities) |
| 347 | if (port_data->port_type == PCIE_RC_PORT) | 304 | return -ENODEV; |
| 348 | capabilities |= PCIE_PORT_SERVICE_PME; | ||
| 349 | |||
| 350 | irq_mode = assign_interrupt_mode(dev, vectors, capabilities); | ||
| 351 | if (irq_mode == PCIE_PORT_NO_IRQ) { | ||
| 352 | /* | ||
| 353 | * Don't use service devices that require interrupts if there is | ||
| 354 | * no way to generate them. | ||
| 355 | */ | ||
| 356 | if (!(capabilities & PCIE_PORT_SERVICE_VC)) { | ||
| 357 | status = -ENODEV; | ||
| 358 | goto Error; | ||
| 359 | } | ||
| 360 | capabilities = PCIE_PORT_SERVICE_VC; | ||
| 361 | } | ||
| 362 | port_data->port_irq_mode = irq_mode; | ||
| 363 | 305 | ||
| 306 | /* Enable PCI Express port device */ | ||
| 364 | status = pci_enable_device(dev); | 307 | status = pci_enable_device(dev); |
| 365 | if (status) | 308 | if (status) |
| 366 | goto Error; | 309 | return status; |
| 367 | pci_set_master(dev); | 310 | pci_set_master(dev); |
| 311 | /* | ||
| 312 | * Initialize service irqs. Don't use service devices that | ||
| 313 | * require interrupts if there is no way to generate them. | ||
| 314 | */ | ||
| 315 | status = init_service_irqs(dev, irqs, capabilities); | ||
| 316 | if (status) { | ||
| 317 | capabilities &= PCIE_PORT_SERVICE_VC; | ||
| 318 | if (!capabilities) | ||
| 319 | goto error_disable; | ||
| 320 | } | ||
| 368 | 321 | ||
| 369 | /* Allocate child services if any */ | 322 | /* Allocate child services if any */ |
| 370 | for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { | 323 | status = -ENODEV; |
| 371 | struct pcie_device *child; | 324 | nr_service = 0; |
| 325 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { | ||
| 372 | int service = 1 << i; | 326 | int service = 1 << i; |
| 373 | |||
| 374 | if (!(capabilities & service)) | 327 | if (!(capabilities & service)) |
| 375 | continue; | 328 | continue; |
| 376 | 329 | if (!pcie_device_init(dev, service, irqs[i])) | |
| 377 | child = alloc_pcie_device(dev, service, vectors[i]); | 330 | nr_service++; |
| 378 | if (!child) | ||
| 379 | continue; | ||
| 380 | |||
| 381 | status = device_register(&child->device); | ||
| 382 | if (status) { | ||
| 383 | kfree(child); | ||
| 384 | continue; | ||
| 385 | } | ||
| 386 | |||
| 387 | get_device(&child->device); | ||
| 388 | nr_serv++; | ||
| 389 | } | ||
| 390 | if (!nr_serv) { | ||
| 391 | pci_disable_device(dev); | ||
| 392 | status = -ENODEV; | ||
| 393 | goto Error; | ||
| 394 | } | 331 | } |
| 332 | if (!nr_service) | ||
| 333 | goto error_cleanup_irqs; | ||
| 395 | 334 | ||
| 396 | return 0; | 335 | return 0; |
| 397 | 336 | ||
| 398 | Error: | 337 | error_cleanup_irqs: |
| 399 | kfree(port_data); | 338 | cleanup_service_irqs(dev); |
| 339 | error_disable: | ||
| 340 | pci_disable_device(dev); | ||
| 400 | return status; | 341 | return status; |
| 401 | } | 342 | } |
| 402 | 343 | ||
| @@ -405,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data) | |||
| 405 | { | 346 | { |
| 406 | struct pcie_port_service_driver *service_driver; | 347 | struct pcie_port_service_driver *service_driver; |
| 407 | 348 | ||
| 408 | if ((dev->bus == &pcie_port_bus_type) && | 349 | if ((dev->bus == &pcie_port_bus_type) && dev->driver) { |
| 409 | (dev->driver)) { | 350 | service_driver = to_service_driver(dev->driver); |
| 410 | service_driver = to_service_driver(dev->driver); | 351 | if (service_driver->suspend) |
| 411 | if (service_driver->suspend) | 352 | service_driver->suspend(to_pcie_device(dev)); |
| 412 | service_driver->suspend(to_pcie_device(dev)); | 353 | } |
| 413 | } | ||
| 414 | return 0; | 354 | return 0; |
| 415 | } | 355 | } |
| 416 | 356 | ||
| @@ -464,21 +404,9 @@ static int remove_iter(struct device *dev, void *data) | |||
| 464 | */ | 404 | */ |
| 465 | void pcie_port_device_remove(struct pci_dev *dev) | 405 | void pcie_port_device_remove(struct pci_dev *dev) |
| 466 | { | 406 | { |
| 467 | struct pcie_port_data *port_data = pci_get_drvdata(dev); | ||
| 468 | |||
| 469 | device_for_each_child(&dev->dev, NULL, remove_iter); | 407 | device_for_each_child(&dev->dev, NULL, remove_iter); |
| 408 | cleanup_service_irqs(dev); | ||
| 470 | pci_disable_device(dev); | 409 | pci_disable_device(dev); |
| 471 | |||
| 472 | switch (port_data->port_irq_mode) { | ||
| 473 | case PCIE_PORT_MSIX_MODE: | ||
| 474 | pci_disable_msix(dev); | ||
| 475 | break; | ||
| 476 | case PCIE_PORT_MSI_MODE: | ||
| 477 | pci_disable_msi(dev); | ||
| 478 | break; | ||
| 479 | } | ||
| 480 | |||
| 481 | kfree(port_data); | ||
| 482 | } | 410 | } |
| 483 | 411 | ||
| 484 | /** | 412 | /** |
| @@ -565,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) | |||
| 565 | 493 | ||
| 566 | return driver_register(&new->driver); | 494 | return driver_register(&new->driver); |
| 567 | } | 495 | } |
| 496 | EXPORT_SYMBOL(pcie_port_service_register); | ||
| 568 | 497 | ||
| 569 | /** | 498 | /** |
| 570 | * pcie_port_service_unregister - unregister PCI Express port service driver | 499 | * pcie_port_service_unregister - unregister PCI Express port service driver |
| @@ -574,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) | |||
| 574 | { | 503 | { |
| 575 | driver_unregister(&drv->driver); | 504 | driver_unregister(&drv->driver); |
| 576 | } | 505 | } |
| 577 | |||
| 578 | EXPORT_SYMBOL(pcie_port_service_register); | ||
| 579 | EXPORT_SYMBOL(pcie_port_service_unregister); | 506 | EXPORT_SYMBOL(pcie_port_service_unregister); |
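In portdrv_core.c above, assign_interrupt_mode() becomes init_service_irqs(): MSI-X is tried first, then MSI, then legacy INTx via dev->pin, and the chosen vector is copied into one irq slot per service. A sketch of how a port service driver consumes that slot (the probe and handler names are hypothetical):

    static irqreturn_t example_isr(int irq, void *data);   /* hypothetical handler */

    static int example_service_probe(struct pcie_device *pcie)
    {
            if (pcie->irq < 0)
                    return -ENODEV; /* port cannot generate interrupts for this service */
            /* services on the same port may share the vector, hence IRQF_SHARED */
            return request_irq(pcie->irq, example_isr, IRQF_SHARED,
                               "example_service", pcie);
    }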
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 6df5c984a791..13c8972886e6 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
| @@ -24,13 +24,12 @@ | |||
| 24 | */ | 24 | */ |
| 25 | #define DRIVER_VERSION "v1.0" | 25 | #define DRIVER_VERSION "v1.0" |
| 26 | #define DRIVER_AUTHOR "tom.l.nguyen@intel.com" | 26 | #define DRIVER_AUTHOR "tom.l.nguyen@intel.com" |
| 27 | #define DRIVER_DESC "PCIE Port Bus Driver" | 27 | #define DRIVER_DESC "PCIe Port Bus Driver" |
| 28 | MODULE_AUTHOR(DRIVER_AUTHOR); | 28 | MODULE_AUTHOR(DRIVER_AUTHOR); |
| 29 | MODULE_DESCRIPTION(DRIVER_DESC); | 29 | MODULE_DESCRIPTION(DRIVER_DESC); |
| 30 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
| 31 | 31 | ||
| 32 | /* global data */ | 32 | /* global data */ |
| 33 | static const char device_name[] = "pcieport-driver"; | ||
| 34 | 33 | ||
| 35 | static int pcie_portdrv_restore_config(struct pci_dev *dev) | 34 | static int pcie_portdrv_restore_config(struct pci_dev *dev) |
| 36 | { | 35 | { |
| @@ -44,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev) | |||
| 44 | } | 43 | } |
| 45 | 44 | ||
| 46 | #ifdef CONFIG_PM | 45 | #ifdef CONFIG_PM |
| 47 | static struct dev_pm_ops pcie_portdrv_pm_ops = { | 46 | static const struct dev_pm_ops pcie_portdrv_pm_ops = { |
| 48 | .suspend = pcie_port_device_suspend, | 47 | .suspend = pcie_port_device_suspend, |
| 49 | .resume = pcie_port_device_resume, | 48 | .resume = pcie_port_device_resume, |
| 50 | .freeze = pcie_port_device_suspend, | 49 | .freeze = pcie_port_device_suspend, |
| @@ -64,20 +63,22 @@ static struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
| 64 | * pcie_portdrv_probe - Probe PCI-Express port devices | 63 | * pcie_portdrv_probe - Probe PCI-Express port devices |
| 65 | * @dev: PCI-Express port device being probed | 64 | * @dev: PCI-Express port device being probed |
| 66 | * | 65 | * |
| 67 | * If detected invokes the pcie_port_device_register() method for | 66 | * If detected invokes the pcie_port_device_register() method for |
| 68 | * this port device. | 67 | * this port device. |
| 69 | * | 68 | * |
| 70 | */ | 69 | */ |
| 71 | static int __devinit pcie_portdrv_probe (struct pci_dev *dev, | 70 | static int __devinit pcie_portdrv_probe(struct pci_dev *dev, |
| 72 | const struct pci_device_id *id ) | 71 | const struct pci_device_id *id) |
| 73 | { | 72 | { |
| 74 | int status; | 73 | int status; |
| 75 | 74 | ||
| 76 | status = pcie_port_device_probe(dev); | 75 | if (!pci_is_pcie(dev) || |
| 77 | if (status) | 76 | ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && |
| 78 | return status; | 77 | (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) && |
| 78 | (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) | ||
| 79 | return -ENODEV; | ||
| 79 | 80 | ||
| 80 | if (!dev->irq && dev->pin) { | 81 | if (!dev->irq && dev->pin) { |
| 81 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " | 82 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " |
| 82 | "check vendor BIOS\n", dev->vendor, dev->device); | 83 | "check vendor BIOS\n", dev->vendor, dev->device); |
| 83 | } | 84 | } |
| @@ -90,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, | |||
| 90 | return 0; | 91 | return 0; |
| 91 | } | 92 | } |
| 92 | 93 | ||
| 93 | static void pcie_portdrv_remove (struct pci_dev *dev) | 94 | static void pcie_portdrv_remove(struct pci_dev *dev) |
| 94 | { | 95 | { |
| 95 | pcie_port_device_remove(dev); | 96 | pcie_port_device_remove(dev); |
| 96 | pci_disable_device(dev); | 97 | pci_disable_device(dev); |
| @@ -128,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data) | |||
| 128 | static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, | 129 | static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, |
| 129 | enum pci_channel_state error) | 130 | enum pci_channel_state error) |
| 130 | { | 131 | { |
| 131 | struct aer_broadcast_data result_data = | 132 | struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER}; |
| 132 | {error, PCI_ERS_RESULT_CAN_RECOVER}; | 133 | int ret; |
| 133 | int retval; | ||
| 134 | 134 | ||
| 135 | /* can not fail */ | 135 | /* can not fail */ |
| 136 | retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter); | 136 | ret = device_for_each_child(&dev->dev, &data, error_detected_iter); |
| 137 | 137 | ||
| 138 | return result_data.result; | 138 | return data.result; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static int mmio_enabled_iter(struct device *device, void *data) | 141 | static int mmio_enabled_iter(struct device *device, void *data) |
| @@ -262,7 +262,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = { | |||
| 262 | }; | 262 | }; |
| 263 | 263 | ||
| 264 | static struct pci_driver pcie_portdriver = { | 264 | static struct pci_driver pcie_portdriver = { |
| 265 | .name = (char *)device_name, | 265 | .name = "pcieport", |
| 266 | .id_table = &port_pci_ids[0], | 266 | .id_table = &port_pci_ids[0], |
| 267 | 267 | ||
| 268 | .probe = pcie_portdrv_probe, | 268 | .probe = pcie_portdrv_probe, |
| @@ -289,7 +289,7 @@ static int __init pcie_portdrv_init(void) | |||
| 289 | return retval; | 289 | return retval; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | static void __exit pcie_portdrv_exit(void) | 292 | static void __exit pcie_portdrv_exit(void) |
| 293 | { | 293 | { |
| 294 | pci_unregister_driver(&pcie_portdriver); | 294 | pci_unregister_driver(&pcie_portdriver); |
| 295 | pcie_port_bus_unregister(); | 295 | pcie_port_bus_unregister(); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8105e32117f6..446e4a94d7d3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/cpumask.h> | 11 | #include <linux/cpumask.h> |
| 12 | #include <linux/pci-aspm.h> | 12 | #include <linux/pci-aspm.h> |
| 13 | #include <acpi/acpi_hest.h> | ||
| 13 | #include "pci.h" | 14 | #include "pci.h" |
| 14 | 15 | ||
| 15 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ | 16 | #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ |
| @@ -163,12 +164,12 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 163 | { | 164 | { |
| 164 | u32 l, sz, mask; | 165 | u32 l, sz, mask; |
| 165 | 166 | ||
| 166 | mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0; | 167 | mask = type ? PCI_ROM_ADDRESS_MASK : ~0; |
| 167 | 168 | ||
| 168 | res->name = pci_name(dev); | 169 | res->name = pci_name(dev); |
| 169 | 170 | ||
| 170 | pci_read_config_dword(dev, pos, &l); | 171 | pci_read_config_dword(dev, pos, &l); |
| 171 | pci_write_config_dword(dev, pos, mask); | 172 | pci_write_config_dword(dev, pos, l | mask); |
| 172 | pci_read_config_dword(dev, pos, &sz); | 173 | pci_read_config_dword(dev, pos, &sz); |
| 173 | pci_write_config_dword(dev, pos, l); | 174 | pci_write_config_dword(dev, pos, l); |
| 174 | 175 | ||
| @@ -223,9 +224,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 223 | goto fail; | 224 | goto fail; |
| 224 | 225 | ||
| 225 | if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { | 226 | if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { |
| 226 | dev_err(&dev->dev, "can't handle 64-bit BAR\n"); | 227 | dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", |
| 228 | pos); | ||
| 227 | goto fail; | 229 | goto fail; |
| 228 | } else if ((sizeof(resource_size_t) < 8) && l) { | 230 | } |
| 231 | |||
| 232 | res->flags |= IORESOURCE_MEM_64; | ||
| 233 | if ((sizeof(resource_size_t) < 8) && l) { | ||
| 229 | /* Address above 32-bit boundary; disable the BAR */ | 234 | /* Address above 32-bit boundary; disable the BAR */ |
| 230 | pci_write_config_dword(dev, pos, 0); | 235 | pci_write_config_dword(dev, pos, 0); |
| 231 | pci_write_config_dword(dev, pos + 4, 0); | 236 | pci_write_config_dword(dev, pos + 4, 0); |
| @@ -234,14 +239,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 234 | } else { | 239 | } else { |
| 235 | res->start = l64; | 240 | res->start = l64; |
| 236 | res->end = l64 + sz64; | 241 | res->end = l64 + sz64; |
| 237 | dev_printk(KERN_DEBUG, &dev->dev, | 242 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", |
| 238 | "reg %x %s: %pR\n", pos, | 243 | pos, res); |
| 239 | (res->flags & IORESOURCE_PREFETCH) ? | ||
| 240 | "64bit mmio pref" : "64bit mmio", | ||
| 241 | res); | ||
| 242 | } | 244 | } |
| 243 | |||
| 244 | res->flags |= IORESOURCE_MEM_64; | ||
| 245 | } else { | 245 | } else { |
| 246 | sz = pci_size(l, sz, mask); | 246 | sz = pci_size(l, sz, mask); |
| 247 | 247 | ||
| @@ -251,11 +251,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 251 | res->start = l; | 251 | res->start = l; |
| 252 | res->end = l + sz; | 252 | res->end = l + sz; |
| 253 | 253 | ||
| 254 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, | 254 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); |
| 255 | (res->flags & IORESOURCE_IO) ? "io port" : | ||
| 256 | ((res->flags & IORESOURCE_PREFETCH) ? | ||
| 257 | "32bit mmio pref" : "32bit mmio"), | ||
| 258 | res); | ||
| 259 | } | 255 | } |
| 260 | 256 | ||
| 261 | out: | 257 | out: |
| @@ -297,8 +293,11 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
| 297 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ | 293 | if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ |
| 298 | return; | 294 | return; |
| 299 | 295 | ||
| 296 | dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", | ||
| 297 | child->secondary, child->subordinate, | ||
| 298 | dev->transparent ? " (subtractive decode)": ""); | ||
| 299 | |||
| 300 | if (dev->transparent) { | 300 | if (dev->transparent) { |
| 301 | dev_info(&dev->dev, "transparent bridge\n"); | ||
| 302 | for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) | 301 | for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) |
| 303 | child->resource[i] = child->parent->resource[i - 3]; | 302 | child->resource[i] = child->parent->resource[i - 3]; |
| 304 | } | 303 | } |
| @@ -323,7 +322,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
| 323 | res->start = base; | 322 | res->start = base; |
| 324 | if (!res->end) | 323 | if (!res->end) |
| 325 | res->end = limit + 0xfff; | 324 | res->end = limit + 0xfff; |
| 326 | dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res); | 325 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
| 327 | } | 326 | } |
| 328 | 327 | ||
| 329 | res = child->resource[1]; | 328 | res = child->resource[1]; |
| @@ -335,8 +334,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
| 335 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; | 334 | res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; |
| 336 | res->start = base; | 335 | res->start = base; |
| 337 | res->end = limit + 0xfffff; | 336 | res->end = limit + 0xfffff; |
| 338 | dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n", | 337 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
| 339 | res); | ||
| 340 | } | 338 | } |
| 341 | 339 | ||
| 342 | res = child->resource[2]; | 340 | res = child->resource[2]; |
| @@ -375,9 +373,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) | |||
| 375 | res->flags |= IORESOURCE_MEM_64; | 373 | res->flags |= IORESOURCE_MEM_64; |
| 376 | res->start = base; | 374 | res->start = base; |
| 377 | res->end = limit + 0xfffff; | 375 | res->end = limit + 0xfffff; |
| 378 | dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", | 376 | dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); |
| 379 | (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32", | ||
| 380 | res); | ||
| 381 | } | 377 | } |
| 382 | } | 378 | } |
| 383 | 379 | ||
| @@ -651,13 +647,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, | |||
| 651 | (child->number > bus->subordinate) || | 647 | (child->number > bus->subordinate) || |
| 652 | (child->number < bus->number) || | 648 | (child->number < bus->number) || |
| 653 | (child->subordinate < bus->number)) { | 649 | (child->subordinate < bus->number)) { |
| 654 | pr_debug("PCI: Bus #%02x (-#%02x) is %s " | 650 | dev_info(&child->dev, "[bus %02x-%02x] %s " |
| 655 | "hidden behind%s bridge #%02x (-#%02x)\n", | 651 | "hidden behind%s bridge %s [bus %02x-%02x]\n", |
| 656 | child->number, child->subordinate, | 652 | child->number, child->subordinate, |
| 657 | (bus->number > child->subordinate && | 653 | (bus->number > child->subordinate && |
| 658 | bus->subordinate < child->number) ? | 654 | bus->subordinate < child->number) ? |
| 659 | "wholly" : "partially", | 655 | "wholly" : "partially", |
| 660 | bus->self->transparent ? " transparent" : "", | 656 | bus->self->transparent ? " transparent" : "", |
| 657 | dev_name(&bus->dev), | ||
| 661 | bus->number, bus->subordinate); | 658 | bus->number, bus->subordinate); |
| 662 | } | 659 | } |
| 663 | bus = bus->parent; | 660 | bus = bus->parent; |
| @@ -684,7 +681,7 @@ static void pci_read_irq(struct pci_dev *dev) | |||
| 684 | dev->irq = irq; | 681 | dev->irq = irq; |
| 685 | } | 682 | } |
| 686 | 683 | ||
| 687 | static void set_pcie_port_type(struct pci_dev *pdev) | 684 | void set_pcie_port_type(struct pci_dev *pdev) |
| 688 | { | 685 | { |
| 689 | int pos; | 686 | int pos; |
| 690 | u16 reg16; | 687 | u16 reg16; |
| @@ -693,17 +690,18 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
| 693 | if (!pos) | 690 | if (!pos) |
| 694 | return; | 691 | return; |
| 695 | pdev->is_pcie = 1; | 692 | pdev->is_pcie = 1; |
| 693 | pdev->pcie_cap = pos; | ||
| 696 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 694 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
| 697 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 695 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
| 698 | } | 696 | } |
| 699 | 697 | ||
| 700 | static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 698 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
| 701 | { | 699 | { |
| 702 | int pos; | 700 | int pos; |
| 703 | u16 reg16; | 701 | u16 reg16; |
| 704 | u32 reg32; | 702 | u32 reg32; |
| 705 | 703 | ||
| 706 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 704 | pos = pci_pcie_cap(pdev); |
| 707 | if (!pos) | 705 | if (!pos) |
| 708 | return; | 706 | return; |
| 709 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | 707 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); |
| @@ -714,6 +712,12 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | |||
| 714 | pdev->is_hotplug_bridge = 1; | 712 | pdev->is_hotplug_bridge = 1; |
| 715 | } | 713 | } |
| 716 | 714 | ||
| 715 | static void set_pci_aer_firmware_first(struct pci_dev *pdev) | ||
| 716 | { | ||
| 717 | if (acpi_hest_firmware_first_pci(pdev)) | ||
| 718 | pdev->aer_firmware_first = 1; | ||
| 719 | } | ||
| 720 | |||
| 717 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 721 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
| 718 | 722 | ||
| 719 | /** | 723 | /** |
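The hunks above cache the PCI Express capability offset in pdev->pcie_cap when the port type is read, so later code can call pci_pcie_cap()/pci_is_pcie() instead of re-walking the capability list with pci_find_capability(). A rough sketch of what those accessors amount to, based only on the fields set in set_pcie_port_type() above; the real definitions live in include/linux/pci.h and may differ in detail:

    /* Sketch of the accessors this series converts callers to; shown
     * only to make the substitutions in the hunks above easier to follow. */
    static inline int pci_pcie_cap(struct pci_dev *dev)
    {
        return dev->pcie_cap;      /* offset cached by set_pcie_port_type() */
    }

    static inline int pci_is_pcie(struct pci_dev *dev)
    {
        return dev->is_pcie;       /* also set by set_pcie_port_type() */
    }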
| @@ -731,6 +735,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 731 | u32 class; | 735 | u32 class; |
| 732 | u8 hdr_type; | 736 | u8 hdr_type; |
| 733 | struct pci_slot *slot; | 737 | struct pci_slot *slot; |
| 738 | int pos = 0; | ||
| 734 | 739 | ||
| 735 | if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) | 740 | if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) |
| 736 | return -EIO; | 741 | return -EIO; |
| @@ -742,6 +747,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 742 | dev->multifunction = !!(hdr_type & 0x80); | 747 | dev->multifunction = !!(hdr_type & 0x80); |
| 743 | dev->error_state = pci_channel_io_normal; | 748 | dev->error_state = pci_channel_io_normal; |
| 744 | set_pcie_port_type(dev); | 749 | set_pcie_port_type(dev); |
| 750 | set_pci_aer_firmware_first(dev); | ||
| 745 | 751 | ||
| 746 | list_for_each_entry(slot, &dev->bus->slots, list) | 752 | list_for_each_entry(slot, &dev->bus->slots, list) |
| 747 | if (PCI_SLOT(dev->devfn) == slot->number) | 753 | if (PCI_SLOT(dev->devfn) == slot->number) |
| @@ -822,6 +828,11 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 822 | dev->transparent = ((dev->class & 0xff) == 1); | 828 | dev->transparent = ((dev->class & 0xff) == 1); |
| 823 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); | 829 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
| 824 | set_pcie_hotplug_bridge(dev); | 830 | set_pcie_hotplug_bridge(dev); |
| 831 | pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); | ||
| 832 | if (pos) { | ||
| 833 | pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); | ||
| 834 | pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); | ||
| 835 | } | ||
| 825 | break; | 836 | break; |
| 826 | 837 | ||
| 827 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ | 838 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
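The new bridge-header hunk above pulls the subsystem vendor/device IDs out of the SSVID capability, because a type-1 header has no subsystem ID registers of its own. A toy, self-contained model of that read against a fake config space, using the standard register offsets from pci_regs.h (PCI_SSVID_VENDOR_ID = 4, PCI_SSVID_DEVICE_ID = 6); the capability offset and ID values are made up:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t cfg_read16(const uint8_t *cfg, int off)
    {
        return (uint16_t)cfg[off] | ((uint16_t)cfg[off + 1] << 8);
    }

    int main(void)
    {
        uint8_t cfg[256] = {0};
        int pos = 0x80;                                 /* assumed capability offset */

        cfg[pos]     = 0x0d;                            /* PCI_CAP_ID_SSVID */
        cfg[pos + 4] = 0x86; cfg[pos + 5] = 0x80;       /* subsystem vendor 0x8086 */
        cfg[pos + 6] = 0x34; cfg[pos + 7] = 0x12;       /* subsystem device 0x1234 */

        printf("subsystem %04x:%04x\n",
               cfg_read16(cfg, pos + 4), cfg_read16(cfg, pos + 6));
        return 0;
    }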
| @@ -907,7 +918,7 @@ int pci_cfg_space_size(struct pci_dev *dev) | |||
| 907 | if (class == PCI_CLASS_BRIDGE_HOST) | 918 | if (class == PCI_CLASS_BRIDGE_HOST) |
| 908 | return pci_cfg_space_size_ext(dev); | 919 | return pci_cfg_space_size_ext(dev); |
| 909 | 920 | ||
| 910 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | 921 | pos = pci_pcie_cap(dev); |
| 911 | if (!pos) { | 922 | if (!pos) { |
| 912 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 923 | pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
| 913 | if (!pos) | 924 | if (!pos) |
| @@ -1014,6 +1025,9 @@ static void pci_init_capabilities(struct pci_dev *dev) | |||
| 1014 | 1025 | ||
| 1015 | /* Single Root I/O Virtualization */ | 1026 | /* Single Root I/O Virtualization */ |
| 1016 | pci_iov_init(dev); | 1027 | pci_iov_init(dev); |
| 1028 | |||
| 1029 | /* Enable ACS P2P upstream forwarding */ | ||
| 1030 | pci_enable_acs(dev); | ||
| 1017 | } | 1031 | } |
| 1018 | 1032 | ||
| 1019 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | 1033 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
| @@ -1110,7 +1124,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | |||
| 1110 | unsigned int devfn, pass, max = bus->secondary; | 1124 | unsigned int devfn, pass, max = bus->secondary; |
| 1111 | struct pci_dev *dev; | 1125 | struct pci_dev *dev; |
| 1112 | 1126 | ||
| 1113 | pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number); | 1127 | dev_dbg(&bus->dev, "scanning bus\n"); |
| 1114 | 1128 | ||
| 1115 | /* Go find them, Rover! */ | 1129 | /* Go find them, Rover! */ |
| 1116 | for (devfn = 0; devfn < 0x100; devfn += 8) | 1130 | for (devfn = 0; devfn < 0x100; devfn += 8) |
| @@ -1124,8 +1138,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | |||
| 1124 | * all PCI-to-PCI bridges on this bus. | 1138 | * all PCI-to-PCI bridges on this bus. |
| 1125 | */ | 1139 | */ |
| 1126 | if (!bus->is_added) { | 1140 | if (!bus->is_added) { |
| 1127 | pr_debug("PCI: Fixups for bus %04x:%02x\n", | 1141 | dev_dbg(&bus->dev, "fixups for bus\n"); |
| 1128 | pci_domain_nr(bus), bus->number); | ||
| 1129 | pcibios_fixup_bus(bus); | 1142 | pcibios_fixup_bus(bus); |
| 1130 | if (pci_is_root_bus(bus)) | 1143 | if (pci_is_root_bus(bus)) |
| 1131 | bus->is_added = 1; | 1144 | bus->is_added = 1; |
| @@ -1145,8 +1158,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) | |||
| 1145 | * | 1158 | * |
| 1146 | * Return how far we've got finding sub-buses. | 1159 | * Return how far we've got finding sub-buses. |
| 1147 | */ | 1160 | */ |
| 1148 | pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n", | 1161 | dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); |
| 1149 | pci_domain_nr(bus), bus->number, max); | ||
| 1150 | return max; | 1162 | return max; |
| 1151 | } | 1163 | } |
| 1152 | 1164 | ||
| @@ -1154,7 +1166,7 @@ struct pci_bus * pci_create_bus(struct device *parent, | |||
| 1154 | int bus, struct pci_ops *ops, void *sysdata) | 1166 | int bus, struct pci_ops *ops, void *sysdata) |
| 1155 | { | 1167 | { |
| 1156 | int error; | 1168 | int error; |
| 1157 | struct pci_bus *b; | 1169 | struct pci_bus *b, *b2; |
| 1158 | struct device *dev; | 1170 | struct device *dev; |
| 1159 | 1171 | ||
| 1160 | b = pci_alloc_bus(); | 1172 | b = pci_alloc_bus(); |
| @@ -1170,9 +1182,10 @@ struct pci_bus * pci_create_bus(struct device *parent, | |||
| 1170 | b->sysdata = sysdata; | 1182 | b->sysdata = sysdata; |
| 1171 | b->ops = ops; | 1183 | b->ops = ops; |
| 1172 | 1184 | ||
| 1173 | if (pci_find_bus(pci_domain_nr(b), bus)) { | 1185 | b2 = pci_find_bus(pci_domain_nr(b), bus); |
| 1186 | if (b2) { | ||
| 1174 | /* If we already got to this bus through a different bridge, ignore it */ | 1187 | /* If we already got to this bus through a different bridge, ignore it */ |
| 1175 | pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus); | 1188 | dev_dbg(&b2->dev, "bus already known\n"); |
| 1176 | goto err_out; | 1189 | goto err_out; |
| 1177 | } | 1190 | } |
| 1178 | 1191 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6099facecd79..d58b94030ef3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) | |||
| 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
| 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
| 340 | 340 | ||
| 341 | /* | ||
| 342 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | ||
| 343 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | ||
| 344 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | ||
| 345 | * (which conflicts w/ BAR1's memory range). | ||
| 346 | */ | ||
| 347 | static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) | ||
| 348 | { | ||
| 349 | if (pci_resource_len(dev, 0) != 8) { | ||
| 350 | struct resource *res = &dev->resource[0]; | ||
| 351 | res->end = res->start + 8 - 1; | ||
| 352 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected " | ||
| 353 | "(incorrect header); workaround applied.\n"); | ||
| 354 | } | ||
| 355 | } | ||
| 356 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | ||
| 357 | |||
| 341 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 358 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
| 342 | unsigned size, int nr, const char *name) | 359 | unsigned size, int nr, const char *name) |
| 343 | { | 360 | { |
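The CS5536 quirk above leans on the usual resource-length convention: a resource of length N spans start..start+N-1, so forcing an 8-byte BAR means end = start + 8 - 1. A small worked example of that arithmetic (addresses invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start = 0x6000;               /* assumed BAR0 base            */
        uint64_t end   = start + 0x2000 - 1;   /* bogus 8 KB window from BIOS  */

        printf("before: len=%llu\n", (unsigned long long)(end - start + 1));
        end = start + 8 - 1;                   /* what the quirk does          */
        printf("after:  [%#llx-%#llx] len=%llu\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)(end - start + 1));
        return 0;
    }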
| @@ -357,7 +374,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | |||
| 357 | pcibios_bus_to_resource(dev, res, &bus_region); | 374 | pcibios_bus_to_resource(dev, res, &bus_region); |
| 358 | 375 | ||
| 359 | pci_claim_resource(dev, nr); | 376 | pci_claim_resource(dev, nr); |
| 360 | dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); | 377 | dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); |
| 361 | } | 378 | } |
| 362 | } | 379 | } |
| 363 | 380 | ||
| @@ -670,6 +687,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) | |||
| 670 | } | 687 | } |
| 671 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); | 688 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); |
| 672 | 689 | ||
| 690 | /* | ||
| 691 | * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: | ||
| 692 | * Disable fast back-to-back on the secondary bus segment | ||
| 693 | */ | ||
| 694 | static void __devinit quirk_xio2000a(struct pci_dev *dev) | ||
| 695 | { | ||
| 696 | struct pci_dev *pdev; | ||
| 697 | u16 command; | ||
| 698 | |||
| 699 | dev_warn(&dev->dev, "TI XIO2000a quirk detected; " | ||
| 700 | "secondary bus fast back-to-back transfers disabled\n"); | ||
| 701 | list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { | ||
| 702 | pci_read_config_word(pdev, PCI_COMMAND, &command); | ||
| 703 | if (command & PCI_COMMAND_FAST_BACK) | ||
| 704 | pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); | ||
| 705 | } | ||
| 706 | } | ||
| 707 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, | ||
| 708 | quirk_xio2000a); | ||
| 673 | 709 | ||
| 674 | #ifdef CONFIG_X86_IO_APIC | 710 | #ifdef CONFIG_X86_IO_APIC |
| 675 | 711 | ||
| @@ -990,7 +1026,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, | |||
| 990 | 1026 | ||
| 991 | static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) | 1027 | static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) |
| 992 | { | 1028 | { |
| 993 | /* set SBX00 SATA in IDE mode to AHCI mode */ | 1029 | /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ |
| 994 | u8 tmp; | 1030 | u8 tmp; |
| 995 | 1031 | ||
| 996 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); | 1032 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); |
| @@ -1009,8 +1045,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk | |||
| 1009 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); | 1045 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); |
| 1010 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); | 1046 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); |
| 1011 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); | 1047 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); |
| 1012 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); | 1048 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); |
| 1013 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); | 1049 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); |
| 1014 | 1050 | ||
| 1015 | /* | 1051 | /* |
| 1016 | * Serverworks CSB5 IDE does not fully support native mode | 1052 | * Serverworks CSB5 IDE does not fully support native mode |
| @@ -1661,6 +1697,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_ | |||
| 1661 | */ | 1697 | */ |
| 1662 | #define AMD_813X_MISC 0x40 | 1698 | #define AMD_813X_MISC 0x40 |
| 1663 | #define AMD_813X_NOIOAMODE (1<<0) | 1699 | #define AMD_813X_NOIOAMODE (1<<0) |
| 1700 | #define AMD_813X_REV_B1 0x12 | ||
| 1664 | #define AMD_813X_REV_B2 0x13 | 1701 | #define AMD_813X_REV_B2 0x13 |
| 1665 | 1702 | ||
| 1666 | static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | 1703 | static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) |
| @@ -1669,7 +1706,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | |||
| 1669 | 1706 | ||
| 1670 | if (noioapicquirk) | 1707 | if (noioapicquirk) |
| 1671 | return; | 1708 | return; |
| 1672 | if (dev->revision == AMD_813X_REV_B2) | 1709 | if ((dev->revision == AMD_813X_REV_B1) || |
| 1710 | (dev->revision == AMD_813X_REV_B2)) | ||
| 1673 | return; | 1711 | return; |
| 1674 | 1712 | ||
| 1675 | pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); | 1713 | pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); |
| @@ -1679,8 +1717,10 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) | |||
| 1679 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", | 1717 | dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", |
| 1680 | dev->vendor, dev->device); | 1718 | dev->vendor, dev->device); |
| 1681 | } | 1719 | } |
| 1682 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1720 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
| 1683 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | 1721 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); |
| 1722 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
| 1723 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); | ||
| 1684 | 1724 | ||
| 1685 | #define AMD_8111_PCI_IRQ_ROUTING 0x56 | 1725 | #define AMD_8111_PCI_IRQ_ROUTING 0x56 |
| 1686 | 1726 | ||
| @@ -2572,7 +2612,120 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | |||
| 2572 | } | 2612 | } |
| 2573 | pci_do_fixups(dev, start, end); | 2613 | pci_do_fixups(dev, start, end); |
| 2574 | } | 2614 | } |
| 2615 | |||
| 2616 | static int __init pci_apply_final_quirks(void) | ||
| 2617 | { | ||
| 2618 | struct pci_dev *dev = NULL; | ||
| 2619 | u8 cls = 0; | ||
| 2620 | u8 tmp; | ||
| 2621 | |||
| 2622 | if (pci_cache_line_size) | ||
| 2623 | printk(KERN_DEBUG "PCI: CLS %u bytes\n", | ||
| 2624 | pci_cache_line_size << 2); | ||
| 2625 | |||
| 2626 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
| 2627 | pci_fixup_device(pci_fixup_final, dev); | ||
| 2628 | /* | ||
| 2629 | * If arch hasn't set it explicitly yet, use the CLS | ||
| 2630 | * value shared by all PCI devices. If there's a | ||
| 2631 | * mismatch, fall back to the default value. | ||
| 2632 | */ | ||
| 2633 | if (!pci_cache_line_size) { | ||
| 2634 | pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp); | ||
| 2635 | if (!cls) | ||
| 2636 | cls = tmp; | ||
| 2637 | if (!tmp || cls == tmp) | ||
| 2638 | continue; | ||
| 2639 | |||
| 2640 | printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), " | ||
| 2641 | "using %u bytes\n", cls << 2, tmp << 2, | ||
| 2642 | pci_dfl_cache_line_size << 2); | ||
| 2643 | pci_cache_line_size = pci_dfl_cache_line_size; | ||
| 2644 | } | ||
| 2645 | } | ||
| 2646 | if (!pci_cache_line_size) { | ||
| 2647 | printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", | ||
| 2648 | cls << 2, pci_dfl_cache_line_size << 2); | ||
| 2649 | pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; | ||
| 2650 | } | ||
| 2651 | |||
| 2652 | return 0; | ||
| 2653 | } | ||
| 2654 | |||
| 2655 | fs_initcall_sync(pci_apply_final_quirks); | ||
| 2656 | |||
| 2657 | /* | ||
| 2658 | * The following are device-specific reset methods which can be used to | ||
| 2659 | * reset a single function if other methods (e.g. FLR, PM D0->D3) are | ||
| 2660 | * not available. | ||
| 2661 | */ | ||
| 2662 | static int reset_intel_generic_dev(struct pci_dev *dev, int probe) | ||
| 2663 | { | ||
| 2664 | int pos; | ||
| 2665 | |||
| 2666 | /* only implement PCI_CLASS_SERIAL_USB at present */ | ||
| 2667 | if (dev->class == PCI_CLASS_SERIAL_USB) { | ||
| 2668 | pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); | ||
| 2669 | if (!pos) | ||
| 2670 | return -ENOTTY; | ||
| 2671 | |||
| 2672 | if (probe) | ||
| 2673 | return 0; | ||
| 2674 | |||
| 2675 | pci_write_config_byte(dev, pos + 0x4, 1); | ||
| 2676 | msleep(100); | ||
| 2677 | |||
| 2678 | return 0; | ||
| 2679 | } else { | ||
| 2680 | return -ENOTTY; | ||
| 2681 | } | ||
| 2682 | } | ||
| 2683 | |||
| 2684 | static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) | ||
| 2685 | { | ||
| 2686 | int pos; | ||
| 2687 | |||
| 2688 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
| 2689 | if (!pos) | ||
| 2690 | return -ENOTTY; | ||
| 2691 | |||
| 2692 | if (probe) | ||
| 2693 | return 0; | ||
| 2694 | |||
| 2695 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | ||
| 2696 | PCI_EXP_DEVCTL_BCR_FLR); | ||
| 2697 | msleep(100); | ||
| 2698 | |||
| 2699 | return 0; | ||
| 2700 | } | ||
| 2701 | |||
| 2702 | #define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed | ||
| 2703 | |||
| 2704 | static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { | ||
| 2705 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF, | ||
| 2706 | reset_intel_82599_sfp_virtfn }, | ||
| 2707 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, | ||
| 2708 | reset_intel_generic_dev }, | ||
| 2709 | { 0 } | ||
| 2710 | }; | ||
| 2711 | |||
| 2712 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) | ||
| 2713 | { | ||
| 2714 | const struct pci_dev_reset_methods *i; | ||
| 2715 | |||
| 2716 | for (i = pci_dev_reset_methods; i->reset; i++) { | ||
| 2717 | if ((i->vendor == dev->vendor || | ||
| 2718 | i->vendor == (u16)PCI_ANY_ID) && | ||
| 2719 | (i->device == dev->device || | ||
| 2720 | i->device == (u16)PCI_ANY_ID)) | ||
| 2721 | return i->reset(dev, probe); | ||
| 2722 | } | ||
| 2723 | |||
| 2724 | return -ENOTTY; | ||
| 2725 | } | ||
| 2726 | |||
| 2575 | #else | 2727 | #else |
| 2576 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | 2728 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} |
| 2729 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } | ||
| 2577 | #endif | 2730 | #endif |
| 2578 | EXPORT_SYMBOL(pci_fixup_device); | 2731 | EXPORT_SYMBOL(pci_fixup_device); |
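The new pci_dev_specific_reset() above is table-driven: probe=1 asks whether a device-specific method exists, probe=0 performs it, and -ENOTTY means "nothing here, try something else". A hedged sketch of how a caller might slot it into a reset ladder; the surrounding FLR/PM fallbacks are an assumption about code not shown in this diff, and this fragment assumes a kernel build environment:

    /* Sketch only: try the device-specific table first, then fall back. */
    static int sketch_reset_function(struct pci_dev *dev, int probe)
    {
        int rc;

        rc = pci_dev_specific_reset(dev, probe);
        if (rc != -ENOTTY)
            return rc;   /* a device-specific method exists (or failed) */

        /* ... fall back to FLR / PM D0->D3hot->D0 here ... */
        return -ENOTTY;
    }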
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index ec415352d9ba..4a471dc4f4b9 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
| @@ -15,9 +15,9 @@ | |||
| 15 | 15 | ||
| 16 | DECLARE_RWSEM(pci_bus_sem); | 16 | DECLARE_RWSEM(pci_bus_sem); |
| 17 | /* | 17 | /* |
| 18 | * find the upstream PCIE-to-PCI bridge of a PCI device | 18 | * find the upstream PCIe-to-PCI bridge of a PCI device |
| 19 | * if the device is PCIE, return NULL | 19 | * if the device is PCIE, return NULL |
| 20 | * if the device isn't connected to a PCIE bridge (that is its parent is a | 20 | * if the device isn't connected to a PCIe bridge (that is its parent is a |
| 21 | * legacy PCI bridge and the bridge is directly connected to bus 0), return its | 21 | * legacy PCI bridge and the bridge is directly connected to bus 0), return its |
| 22 | * parent | 22 | * parent |
| 23 | */ | 23 | */ |
| @@ -26,18 +26,18 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev) | |||
| 26 | { | 26 | { |
| 27 | struct pci_dev *tmp = NULL; | 27 | struct pci_dev *tmp = NULL; |
| 28 | 28 | ||
| 29 | if (pdev->is_pcie) | 29 | if (pci_is_pcie(pdev)) |
| 30 | return NULL; | 30 | return NULL; |
| 31 | while (1) { | 31 | while (1) { |
| 32 | if (pci_is_root_bus(pdev->bus)) | 32 | if (pci_is_root_bus(pdev->bus)) |
| 33 | break; | 33 | break; |
| 34 | pdev = pdev->bus->self; | 34 | pdev = pdev->bus->self; |
| 35 | /* a p2p bridge */ | 35 | /* a p2p bridge */ |
| 36 | if (!pdev->is_pcie) { | 36 | if (!pci_is_pcie(pdev)) { |
| 37 | tmp = pdev; | 37 | tmp = pdev; |
| 38 | continue; | 38 | continue; |
| 39 | } | 39 | } |
| 40 | /* PCI device should connect to a PCIE bridge */ | 40 | /* PCI device should connect to a PCIe bridge */ |
| 41 | if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { | 41 | if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { |
| 42 | /* Busted hardware? */ | 42 | /* Busted hardware? */ |
| 43 | WARN_ON_ONCE(1); | 43 | WARN_ON_ONCE(1); |
| @@ -149,32 +149,33 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) | |||
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | /** | 151 | /** |
| 152 | * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot | 152 | * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot |
| 153 | * @bus: number of PCI bus on which desired PCI device resides | 153 | * @domain: PCI domain/segment on which the PCI device resides. |
| 154 | * @devfn: encodes number of PCI slot in which the desired PCI | 154 | * @bus: PCI bus on which desired PCI device resides |
| 155 | * device resides and the logical device number within that slot | 155 | * @devfn: encodes number of PCI slot in which the desired PCI device |
| 156 | * in case of multi-function devices. | 156 | * resides and the logical device number within that slot in case of |
| 157 | * | 157 | * multi-function devices. |
| 158 | * Note: the bus/slot search is limited to PCI domain (segment) 0. | ||
| 159 | * | 158 | * |
| 160 | * Given a PCI bus and slot/function number, the desired PCI device | 159 | * Given a PCI domain, bus, and slot/function number, the desired PCI |
| 161 | * is located in system global list of PCI devices. If the device | 160 | * device is located in the list of PCI devices. If the device is |
| 162 | * is found, a pointer to its data structure is returned. If no | 161 | * found, its reference count is increased and this function returns a |
| 163 | * device is found, %NULL is returned. The returned device has its | 162 | * pointer to its data structure. The caller must decrement the |
| 164 | * reference count bumped by one. | 163 | * reference count by calling pci_dev_put(). If no device is found, |
| 164 | * %NULL is returned. | ||
| 165 | */ | 165 | */ |
| 166 | 166 | struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, | |
| 167 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) | 167 | unsigned int devfn) |
| 168 | { | 168 | { |
| 169 | struct pci_dev *dev = NULL; | 169 | struct pci_dev *dev = NULL; |
| 170 | 170 | ||
| 171 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 171 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
| 172 | if (pci_domain_nr(dev->bus) == 0 && | 172 | if (pci_domain_nr(dev->bus) == domain && |
| 173 | (dev->bus->number == bus && dev->devfn == devfn)) | 173 | (dev->bus->number == bus && dev->devfn == devfn)) |
| 174 | return dev; | 174 | return dev; |
| 175 | } | 175 | } |
| 176 | return NULL; | 176 | return NULL; |
| 177 | } | 177 | } |
| 178 | EXPORT_SYMBOL(pci_get_domain_bus_and_slot); | ||
| 178 | 179 | ||
| 179 | static int match_pci_dev_by_id(struct device *dev, void *data) | 180 | static int match_pci_dev_by_id(struct device *dev, void *data) |
| 180 | { | 181 | { |
| @@ -354,5 +355,4 @@ EXPORT_SYMBOL(pci_find_next_bus); | |||
| 354 | EXPORT_SYMBOL(pci_get_device); | 355 | EXPORT_SYMBOL(pci_get_device); |
| 355 | EXPORT_SYMBOL(pci_get_subsys); | 356 | EXPORT_SYMBOL(pci_get_subsys); |
| 356 | EXPORT_SYMBOL(pci_get_slot); | 357 | EXPORT_SYMBOL(pci_get_slot); |
| 357 | EXPORT_SYMBOL(pci_get_bus_and_slot); | ||
| 358 | EXPORT_SYMBOL(pci_get_class); | 358 | EXPORT_SYMBOL(pci_get_class); |
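The search.c change above replaces the domain-0-only pci_get_bus_and_slot() with a domain-aware lookup that takes a reference on the returned device, so callers must drop it with pci_dev_put(). A short usage sketch (kernel-style fragment; the domain, bus, and devfn values are invented):

    static void sketch_lookup(void)
    {
        struct pci_dev *pdev;

        pdev = pci_get_domain_bus_and_slot(0, 0x02, PCI_DEVFN(0x1f, 0));
        if (!pdev)
            return;

        dev_info(&pdev->dev, "found %04x:%04x\n", pdev->vendor, pdev->device);
        pci_dev_put(pdev);   /* drop the reference the lookup took */
    }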
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index cb1a027eb552..c48cd377b3f5 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -71,53 +71,50 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus) | |||
| 71 | void pci_setup_cardbus(struct pci_bus *bus) | 71 | void pci_setup_cardbus(struct pci_bus *bus) |
| 72 | { | 72 | { |
| 73 | struct pci_dev *bridge = bus->self; | 73 | struct pci_dev *bridge = bus->self; |
| 74 | struct resource *res; | ||
| 74 | struct pci_bus_region region; | 75 | struct pci_bus_region region; |
| 75 | 76 | ||
| 76 | dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n", | 77 | dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n", |
| 77 | pci_domain_nr(bus), bus->number); | 78 | bus->secondary, bus->subordinate); |
| 78 | 79 | ||
| 79 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); | 80 | res = bus->resource[0]; |
| 80 | if (bus->resource[0]->flags & IORESOURCE_IO) { | 81 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 82 | if (res->flags & IORESOURCE_IO) { | ||
| 81 | /* | 83 | /* |
| 82 | * The IO resource is allocated a range twice as large as it | 84 | * The IO resource is allocated a range twice as large as it |
| 83 | * would normally need. This allows us to set both IO regs. | 85 | * would normally need. This allows us to set both IO regs. |
| 84 | */ | 86 | */ |
| 85 | dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", | 87 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 86 | (unsigned long)region.start, | ||
| 87 | (unsigned long)region.end); | ||
| 88 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, | 88 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, |
| 89 | region.start); | 89 | region.start); |
| 90 | pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, | 90 | pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, |
| 91 | region.end); | 91 | region.end); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[1]); | 94 | res = bus->resource[1]; |
| 95 | if (bus->resource[1]->flags & IORESOURCE_IO) { | 95 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 96 | dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", | 96 | if (res->flags & IORESOURCE_IO) { |
| 97 | (unsigned long)region.start, | 97 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 98 | (unsigned long)region.end); | ||
| 99 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, | 98 | pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, |
| 100 | region.start); | 99 | region.start); |
| 101 | pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, | 100 | pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, |
| 102 | region.end); | 101 | region.end); |
| 103 | } | 102 | } |
| 104 | 103 | ||
| 105 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); | 104 | res = bus->resource[2]; |
| 106 | if (bus->resource[2]->flags & IORESOURCE_MEM) { | 105 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 107 | dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n", | 106 | if (res->flags & IORESOURCE_MEM) { |
| 108 | (unsigned long)region.start, | 107 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 109 | (unsigned long)region.end); | ||
| 110 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, | 108 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, |
| 111 | region.start); | 109 | region.start); |
| 112 | pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, | 110 | pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, |
| 113 | region.end); | 111 | region.end); |
| 114 | } | 112 | } |
| 115 | 113 | ||
| 116 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[3]); | 114 | res = bus->resource[3]; |
| 117 | if (bus->resource[3]->flags & IORESOURCE_MEM) { | 115 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 118 | dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", | 116 | if (res->flags & IORESOURCE_MEM) { |
| 119 | (unsigned long)region.start, | 117 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 120 | (unsigned long)region.end); | ||
| 121 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, | 118 | pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, |
| 122 | region.start); | 119 | region.start); |
| 123 | pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, | 120 | pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, |
| @@ -140,34 +137,33 @@ EXPORT_SYMBOL(pci_setup_cardbus); | |||
| 140 | static void pci_setup_bridge(struct pci_bus *bus) | 137 | static void pci_setup_bridge(struct pci_bus *bus) |
| 141 | { | 138 | { |
| 142 | struct pci_dev *bridge = bus->self; | 139 | struct pci_dev *bridge = bus->self; |
| 140 | struct resource *res; | ||
| 143 | struct pci_bus_region region; | 141 | struct pci_bus_region region; |
| 144 | u32 l, bu, lu, io_upper16; | 142 | u32 l, bu, lu, io_upper16; |
| 145 | int pref_mem64; | ||
| 146 | 143 | ||
| 147 | if (pci_is_enabled(bridge)) | 144 | if (pci_is_enabled(bridge)) |
| 148 | return; | 145 | return; |
| 149 | 146 | ||
| 150 | dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", | 147 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", |
| 151 | pci_domain_nr(bus), bus->number); | 148 | bus->secondary, bus->subordinate); |
| 152 | 149 | ||
| 153 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 150 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
| 154 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); | 151 | res = bus->resource[0]; |
| 155 | if (bus->resource[0]->flags & IORESOURCE_IO) { | 152 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 153 | if (res->flags & IORESOURCE_IO) { | ||
| 156 | pci_read_config_dword(bridge, PCI_IO_BASE, &l); | 154 | pci_read_config_dword(bridge, PCI_IO_BASE, &l); |
| 157 | l &= 0xffff0000; | 155 | l &= 0xffff0000; |
| 158 | l |= (region.start >> 8) & 0x00f0; | 156 | l |= (region.start >> 8) & 0x00f0; |
| 159 | l |= region.end & 0xf000; | 157 | l |= region.end & 0xf000; |
| 160 | /* Set up upper 16 bits of I/O base/limit. */ | 158 | /* Set up upper 16 bits of I/O base/limit. */ |
| 161 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 159 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
| 162 | dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n", | 160 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 163 | (unsigned long)region.start, | ||
| 164 | (unsigned long)region.end); | ||
| 165 | } | 161 | } |
| 166 | else { | 162 | else { |
| 167 | /* Clear upper 16 bits of I/O base/limit. */ | 163 | /* Clear upper 16 bits of I/O base/limit. */ |
| 168 | io_upper16 = 0; | 164 | io_upper16 = 0; |
| 169 | l = 0x00f0; | 165 | l = 0x00f0; |
| 170 | dev_info(&bridge->dev, " IO window: disabled\n"); | 166 | dev_info(&bridge->dev, " bridge window [io disabled]\n"); |
| 171 | } | 167 | } |
| 172 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ | 168 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ |
| 173 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); | 169 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); |
| @@ -178,17 +174,16 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
| 178 | 174 | ||
| 179 | /* Set up the top and bottom of the PCI Memory segment | 175 | /* Set up the top and bottom of the PCI Memory segment |
| 180 | for this bus. */ | 176 | for this bus. */ |
| 181 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[1]); | 177 | res = bus->resource[1]; |
| 182 | if (bus->resource[1]->flags & IORESOURCE_MEM) { | 178 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 179 | if (res->flags & IORESOURCE_MEM) { | ||
| 183 | l = (region.start >> 16) & 0xfff0; | 180 | l = (region.start >> 16) & 0xfff0; |
| 184 | l |= region.end & 0xfff00000; | 181 | l |= region.end & 0xfff00000; |
| 185 | dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", | 182 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 186 | (unsigned long)region.start, | ||
| 187 | (unsigned long)region.end); | ||
| 188 | } | 183 | } |
| 189 | else { | 184 | else { |
| 190 | l = 0x0000fff0; | 185 | l = 0x0000fff0; |
| 191 | dev_info(&bridge->dev, " MEM window: disabled\n"); | 186 | dev_info(&bridge->dev, " bridge window [mem disabled]\n"); |
| 192 | } | 187 | } |
| 193 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | 188 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); |
| 194 | 189 | ||
| @@ -198,34 +193,27 @@ static void pci_setup_bridge(struct pci_bus *bus) | |||
| 198 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); | 193 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); |
| 199 | 194 | ||
| 200 | /* Set up PREF base/limit. */ | 195 | /* Set up PREF base/limit. */ |
| 201 | pref_mem64 = 0; | ||
| 202 | bu = lu = 0; | 196 | bu = lu = 0; |
| 203 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); | 197 | res = bus->resource[2]; |
| 204 | if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { | 198 | pcibios_resource_to_bus(bridge, ®ion, res); |
| 205 | int width = 8; | 199 | if (res->flags & IORESOURCE_PREFETCH) { |
| 206 | l = (region.start >> 16) & 0xfff0; | 200 | l = (region.start >> 16) & 0xfff0; |
| 207 | l |= region.end & 0xfff00000; | 201 | l |= region.end & 0xfff00000; |
| 208 | if (bus->resource[2]->flags & IORESOURCE_MEM_64) { | 202 | if (res->flags & IORESOURCE_MEM_64) { |
| 209 | pref_mem64 = 1; | ||
| 210 | bu = upper_32_bits(region.start); | 203 | bu = upper_32_bits(region.start); |
| 211 | lu = upper_32_bits(region.end); | 204 | lu = upper_32_bits(region.end); |
| 212 | width = 16; | ||
| 213 | } | 205 | } |
| 214 | dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n", | 206 | dev_info(&bridge->dev, " bridge window %pR\n", res); |
| 215 | width, (unsigned long long)region.start, | ||
| 216 | width, (unsigned long long)region.end); | ||
| 217 | } | 207 | } |
| 218 | else { | 208 | else { |
| 219 | l = 0x0000fff0; | 209 | l = 0x0000fff0; |
| 220 | dev_info(&bridge->dev, " PREFETCH window: disabled\n"); | 210 | dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); |
| 221 | } | 211 | } |
| 222 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); | 212 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); |
| 223 | 213 | ||
| 224 | if (pref_mem64) { | 214 | /* Set the upper 32 bits of PREF base & limit. */ |
| 225 | /* Set the upper 32 bits of PREF base & limit. */ | 215 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); |
| 226 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | 216 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); |
| 227 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | ||
| 228 | } | ||
| 229 | 217 | ||
| 230 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 218 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
| 231 | } | 219 | } |
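The pci_setup_bridge() hunks above go the other direction from the probe-side decode: they pack a window back into the bridge's base/limit registers. A small standalone example of the I/O packing used above, with an invented [0x2000-0x3fff] window:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t start = 0x2000, end = 0x3fff;  /* assumed bridge I/O window */
        uint32_t l, io_upper16;

        l  = (start >> 8) & 0x00f0;             /* base[15:12]  -> reg bits 7:4   */
        l |= end & 0xf000;                      /* limit[15:12] -> reg bits 15:12 */
        io_upper16 = (end & 0xffff0000) | (start >> 16);

        printf("PCI_IO_BASE/LIMIT word = %#06x, upper16 = %#010x\n",
               l, io_upper16);
        return 0;
    }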
| @@ -345,6 +333,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size) | |||
| 345 | #endif | 333 | #endif |
| 346 | size = ALIGN(size + size1, 4096); | 334 | size = ALIGN(size + size1, 4096); |
| 347 | if (!size) { | 335 | if (!size) { |
| 336 | if (b_res->start || b_res->end) | ||
| 337 | dev_info(&bus->self->dev, "disabling bridge window " | ||
| 338 | "%pR to [bus %02x-%02x] (unused)\n", b_res, | ||
| 339 | bus->secondary, bus->subordinate); | ||
| 348 | b_res->flags = 0; | 340 | b_res->flags = 0; |
| 349 | return; | 341 | return; |
| 350 | } | 342 | } |
| @@ -390,8 +382,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 390 | align = pci_resource_alignment(dev, r); | 382 | align = pci_resource_alignment(dev, r); |
| 391 | order = __ffs(align) - 20; | 383 | order = __ffs(align) - 20; |
| 392 | if (order > 11) { | 384 | if (order > 11) { |
| 393 | dev_warn(&dev->dev, "BAR %d bad alignment %llx: " | 385 | dev_warn(&dev->dev, "disabling BAR %d: %pR " |
| 394 | "%pR\n", i, (unsigned long long)align, r); | 386 | "(bad alignment %#llx)\n", i, r, |
| 387 | (unsigned long long) align); | ||
| 395 | r->flags = 0; | 388 | r->flags = 0; |
| 396 | continue; | 389 | continue; |
| 397 | } | 390 | } |
| @@ -425,6 +418,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 425 | } | 418 | } |
| 426 | size = ALIGN(size, min_align); | 419 | size = ALIGN(size, min_align); |
| 427 | if (!size) { | 420 | if (!size) { |
| 421 | if (b_res->start || b_res->end) | ||
| 422 | dev_info(&bus->self->dev, "disabling bridge window " | ||
| 423 | "%pR to [bus %02x-%02x] (unused)\n", b_res, | ||
| 424 | bus->secondary, bus->subordinate); | ||
| 428 | b_res->flags = 0; | 425 | b_res->flags = 0; |
| 429 | return 1; | 426 | return 1; |
| 430 | } | 427 | } |
| @@ -582,10 +579,7 @@ static void pci_bus_dump_res(struct pci_bus *bus) | |||
| 582 | if (!res || !res->end) | 579 | if (!res || !res->end) |
| 583 | continue; | 580 | continue; |
| 584 | 581 | ||
| 585 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, | 582 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); |
| 586 | (res->flags & IORESOURCE_IO) ? "io: " : | ||
| 587 | ((res->flags & IORESOURCE_PREFETCH)? "pref mem":"mem:"), | ||
| 588 | res); | ||
| 589 | } | 583 | } |
| 590 | } | 584 | } |
| 591 | 585 | ||
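All of the resource messages in this series converge on the %pR printk extension, which prints a struct resource's type and range in one go instead of hand-formatting start/end. A minimal hedged fragment of such a call (kernel context assumed, resource values invented; the exact output formatting depends on the kernel version):

    /* Sketch only: %pR is the printk extension for struct resource. */
    struct resource win = {
        .start = 0xd0000000,
        .end   = 0xd7ffffff,
        .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH,
    };

    printk(KERN_DEBUG "bridge window %pR\n", &win);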
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 706f82d8111f..7d678bb15ffb 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
| @@ -51,12 +51,6 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
| 51 | 51 | ||
| 52 | pcibios_resource_to_bus(dev, ®ion, res); | 52 | pcibios_resource_to_bus(dev, ®ion, res); |
| 53 | 53 | ||
| 54 | dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] " | ||
| 55 | "flags %#lx\n", resno, res, | ||
| 56 | (unsigned long long)region.start, | ||
| 57 | (unsigned long long)region.end, | ||
| 58 | (unsigned long)res->flags); | ||
| 59 | |||
| 60 | new = region.start | (res->flags & PCI_REGION_FLAG_MASK); | 54 | new = region.start | (res->flags & PCI_REGION_FLAG_MASK); |
| 61 | if (res->flags & IORESOURCE_IO) | 55 | if (res->flags & IORESOURCE_IO) |
| 62 | mask = (u32)PCI_BASE_ADDRESS_IO_MASK; | 56 | mask = (u32)PCI_BASE_ADDRESS_IO_MASK; |
| @@ -91,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno) | |||
| 91 | } | 85 | } |
| 92 | } | 86 | } |
| 93 | res->flags &= ~IORESOURCE_UNSET; | 87 | res->flags &= ~IORESOURCE_UNSET; |
| 94 | dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n", | 88 | dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n", |
| 95 | resno, (unsigned long long)region.start, | 89 | resno, res, (unsigned long long)region.start, |
| 96 | (unsigned long long)region.end, res->flags); | 90 | (unsigned long long)region.end); |
| 97 | } | 91 | } |
| 98 | 92 | ||
| 99 | int pci_claim_resource(struct pci_dev *dev, int resource) | 93 | int pci_claim_resource(struct pci_dev *dev, int resource) |
| @@ -103,20 +97,17 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
| 103 | int err; | 97 | int err; |
| 104 | 98 | ||
| 105 | root = pci_find_parent_resource(dev, res); | 99 | root = pci_find_parent_resource(dev, res); |
| 106 | 100 | if (!root) { | |
| 107 | err = -EINVAL; | 101 | dev_err(&dev->dev, "no compatible bridge window for %pR\n", |
| 108 | if (root != NULL) | 102 | res); |
| 109 | err = request_resource(root, res); | 103 | return -EINVAL; |
| 110 | |||
| 111 | if (err) { | ||
| 112 | const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; | ||
| 113 | dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", | ||
| 114 | resource, | ||
| 115 | root ? "address space collision on" : | ||
| 116 | "no parent found for", | ||
| 117 | dtype, res); | ||
| 118 | } | 104 | } |
| 119 | 105 | ||
| 106 | err = request_resource(root, res); | ||
| 107 | if (err) | ||
| 108 | dev_err(&dev->dev, | ||
| 109 | "address space collision: %pR already in use\n", res); | ||
| 110 | |||
| 120 | return err; | 111 | return err; |
| 121 | } | 112 | } |
| 122 | EXPORT_SYMBOL(pci_claim_resource); | 113 | EXPORT_SYMBOL(pci_claim_resource); |
| @@ -124,7 +115,7 @@ EXPORT_SYMBOL(pci_claim_resource); | |||
| 124 | #ifdef CONFIG_PCI_QUIRKS | 115 | #ifdef CONFIG_PCI_QUIRKS |
| 125 | void pci_disable_bridge_window(struct pci_dev *dev) | 116 | void pci_disable_bridge_window(struct pci_dev *dev) |
| 126 | { | 117 | { |
| 127 | dev_dbg(&dev->dev, "Disabling bridge window.\n"); | 118 | dev_info(&dev->dev, "disabling bridge mem windows\n"); |
| 128 | 119 | ||
| 129 | /* MMIO Base/Limit */ | 120 | /* MMIO Base/Limit */ |
| 130 | pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); | 121 | pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); |
| @@ -165,6 +156,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
| 165 | 156 | ||
| 166 | if (!ret) { | 157 | if (!ret) { |
| 167 | res->flags &= ~IORESOURCE_STARTALIGN; | 158 | res->flags &= ~IORESOURCE_STARTALIGN; |
| 159 | dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); | ||
| 168 | if (resno < PCI_BRIDGE_RESOURCES) | 160 | if (resno < PCI_BRIDGE_RESOURCES) |
| 169 | pci_update_resource(dev, resno); | 161 | pci_update_resource(dev, resno); |
| 170 | } | 162 | } |
| @@ -178,12 +170,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
| 178 | resource_size_t align; | 170 | resource_size_t align; |
| 179 | struct pci_bus *bus; | 171 | struct pci_bus *bus; |
| 180 | int ret; | 172 | int ret; |
| 173 | char *type; | ||
| 181 | 174 | ||
| 182 | align = pci_resource_alignment(dev, res); | 175 | align = pci_resource_alignment(dev, res); |
| 183 | if (!align) { | 176 | if (!align) { |
| 184 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " | 177 | dev_info(&dev->dev, "BAR %d: can't assign %pR " |
| 185 | "alignment) %pR flags %#lx\n", | 178 | "(bogus alignment)\n", resno, res); |
| 186 | resno, res, res->flags); | ||
| 187 | return -EINVAL; | 179 | return -EINVAL; |
| 188 | } | 180 | } |
| 189 | 181 | ||
| @@ -198,49 +190,23 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
| 198 | break; | 190 | break; |
| 199 | } | 191 | } |
| 200 | 192 | ||
| 201 | if (ret) | ||
| 202 | dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", | ||
| 203 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); | ||
| 204 | |||
| 205 | return ret; | ||
| 206 | } | ||
| 207 | |||
| 208 | #if 0 | ||
| 209 | int pci_assign_resource_fixed(struct pci_dev *dev, int resno) | ||
| 210 | { | ||
| 211 | struct pci_bus *bus = dev->bus; | ||
| 212 | struct resource *res = dev->resource + resno; | ||
| 213 | unsigned int type_mask; | ||
| 214 | int i, ret = -EBUSY; | ||
| 215 | |||
| 216 | type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; | ||
| 217 | |||
| 218 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | ||
| 219 | struct resource *r = bus->resource[i]; | ||
| 220 | if (!r) | ||
| 221 | continue; | ||
| 222 | |||
| 223 | /* type_mask must match */ | ||
| 224 | if ((res->flags ^ r->flags) & type_mask) | ||
| 225 | continue; | ||
| 226 | |||
| 227 | ret = request_resource(r, res); | ||
| 228 | |||
| 229 | if (ret == 0) | ||
| 230 | break; | ||
| 231 | } | ||
| 232 | |||
| 233 | if (ret) { | 193 | if (ret) { |
| 234 | dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", | 194 | if (res->flags & IORESOURCE_MEM) |
| 235 | resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); | 195 | if (res->flags & IORESOURCE_PREFETCH) |
| 236 | } else if (resno < PCI_BRIDGE_RESOURCES) { | 196 | type = "mem pref"; |
| 237 | pci_update_resource(dev, resno); | 197 | else |
| 198 | type = "mem"; | ||
| 199 | else if (res->flags & IORESOURCE_IO) | ||
| 200 | type = "io"; | ||
| 201 | else | ||
| 202 | type = "unknown"; | ||
| 203 | dev_info(&dev->dev, | ||
| 204 | "BAR %d: can't assign %s (size %#llx)\n", | ||
| 205 | resno, type, (unsigned long long) resource_size(res)); | ||
| 238 | } | 206 | } |
| 239 | 207 | ||
| 240 | return ret; | 208 | return ret; |
| 241 | } | 209 | } |
| 242 | EXPORT_SYMBOL_GPL(pci_assign_resource_fixed); | ||
| 243 | #endif | ||
| 244 | 210 | ||
| 245 | /* Sort resources by alignment */ | 211 | /* Sort resources by alignment */ |
| 246 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | 212 | void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) |
| @@ -262,9 +228,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | |||
| 262 | 228 | ||
| 263 | r_align = pci_resource_alignment(dev, r); | 229 | r_align = pci_resource_alignment(dev, r); |
| 264 | if (!r_align) { | 230 | if (!r_align) { |
| 265 | dev_warn(&dev->dev, "BAR %d: bogus alignment " | 231 | dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n", |
| 266 | "%pR flags %#lx\n", | 232 | i, r); |
| 267 | i, r, r->flags); | ||
| 268 | continue; | 233 | continue; |
| 269 | } | 234 | } |
| 270 | for (list = head; ; list = list->next) { | 235 | for (list = head; ; list = list->next) { |
| @@ -311,8 +276,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask) | |||
| 311 | continue; | 276 | continue; |
| 312 | 277 | ||
| 313 | if (!r->parent) { | 278 | if (!r->parent) { |
| 314 | dev_err(&dev->dev, "device not available because of " | 279 | dev_err(&dev->dev, "device not available " |
| 315 | "BAR %d %pR collisions\n", i, r); | 280 | "(can't reserve %pR)\n", r); |
| 316 | return -EINVAL; | 281 | return -EINVAL; |
| 317 | } | 282 | } |
| 318 | 283 | ||
