commit 478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree a7f7952099da60d33032aed6de9c0c56c9f8779e /drivers/pci
parent 8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent 6bb597507f9839b13498781e481f5458aea33620 (diff)
author Len Brown <len.brown@intel.com> 2009-04-05 02:14:15 -0400
committer Len Brown <len.brown@intel.com> 2009-04-05 02:14:15 -0400

Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                  |   10
-rw-r--r--  drivers/pci/Makefile                 |    2
-rw-r--r--  drivers/pci/bus.c                    |    8
-rw-r--r--  drivers/pci/dmar.c                   |  303
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c     |   58
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c   |    3
-rw-r--r--  drivers/pci/hotplug/fakephp.c        |  444
-rw-r--r--  drivers/pci/hotplug/pciehp.h         |   13
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c    |   21
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c    |   18
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c     |   34
-rw-r--r--  drivers/pci/hotplug/shpchp.h         |   10
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c     |    2
-rw-r--r--  drivers/pci/intel-iommu.c            |  387
-rw-r--r--  drivers/pci/intr_remapping.c         |  113
-rw-r--r--  drivers/pci/iov.c                    |  680
-rw-r--r--  drivers/pci/msi.c                    |  426
-rw-r--r--  drivers/pci/msi.h                    |    6
-rw-r--r--  drivers/pci/pci-acpi.c               |  215
-rw-r--r--  drivers/pci/pci-driver.c             |  258
-rw-r--r--  drivers/pci/pci-sysfs.c              |  124
-rw-r--r--  drivers/pci/pci.c                    |  337
-rw-r--r--  drivers/pci/pci.h                    |   66
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c        |   28
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c   |    2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c   |   10
-rw-r--r--  drivers/pci/pcie/portdrv.h           |   14
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c       |   18
-rw-r--r--  drivers/pci/pcie/portdrv_core.c      |  379
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c       |   50
-rw-r--r--  drivers/pci/probe.c                  |  210
-rw-r--r--  drivers/pci/quirks.c                 |  221
-rw-r--r--  drivers/pci/remove.c                 |    4
-rw-r--r--  drivers/pci/search.c                 |    2
-rw-r--r--  drivers/pci/setup-bus.c              |    7
-rw-r--r--  drivers/pci/setup-res.c              |   15
-rw-r--r--  drivers/pci/slot.c                   |   18
37 files changed, 2869 insertions, 1647 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2a4501dd2515..fdc864f9cf23 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -59,3 +59,13 @@ config HT_IRQ
 	  This allows native hypertransport devices to use interrupts.
 
 	  If unsure say Y.
+
+config PCI_IOV
+	bool "PCI IOV support"
+	depends on PCI
+	help
+	  I/O Virtualization is a PCI feature supported by some devices
+	  which allows them to create virtual devices which share their
+	  physical resources.
+
+	  If unsure, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d07ce24f6a8..ba6af162fd39 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
 
 obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
 
+obj-$(CONFIG_PCI_IOV) += iov.o
+
 #
 # Some architectures use the generic PCI setup functions
 #
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 52b54f053be0..68f91a252595 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -133,7 +133,7 @@ int pci_bus_add_child(struct pci_bus *bus)
  *
  * Call hotplug for each new devices.
  */
-void pci_bus_add_devices(struct pci_bus *bus)
+void pci_bus_add_devices(const struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 	struct pci_bus *child;
@@ -184,8 +184,10 @@ void pci_enable_bridges(struct pci_bus *bus)
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->subordinate) {
-			retval = pci_enable_device(dev);
-			pci_set_master(dev);
+			if (atomic_read(&dev->enable_cnt) == 0) {
+				retval = pci_enable_device(dev);
+				pci_set_master(dev);
+			}
 			pci_enable_bridges(dev->subordinate);
 		}
 	}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 26c536b51c5a..d313039e2fdf 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
 #include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -42,6 +44,7 @@
 LIST_HEAD(dmar_drhd_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -288,8 +291,9 @@ static int __init dmar_table_detect(void)
 	acpi_status status = AE_OK;
 
 	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-				(struct acpi_table_header **)&dmar_tbl);
+	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl,
+				&dmar_tbl_size);
 
 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
 		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -489,6 +493,7 @@ void __init detect_intel_iommu(void)
 		iommu_detected = 1;
 #endif
 	}
+	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 }
 
@@ -506,6 +511,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		return -ENOMEM;
 
 	iommu->seq_id = iommu_allocated++;
+	sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
 	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
 	if (!iommu->reg) {
@@ -748,6 +754,42 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 }
 
 /*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+	unsigned long flags;
+	u32 sts;
+	cycles_t start_time = get_cycles();
+
+	if (!ecap_qis(iommu->ecap))
+		return;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	if (!(sts & DMA_GSTS_QIES))
+		goto end;
+
+	/*
+	 * Give a chance to HW to complete the pending invalidation requests.
+	 */
+	while ((readl(iommu->reg + DMAR_IQT_REG) !=
+		readl(iommu->reg + DMAR_IQH_REG)) &&
+		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+		cpu_relax();
+
+	iommu->gcmd &= ~DMA_GCMD_QIE;
+
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+		      !(sts & DMA_GSTS_QIES), sts);
+end:
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
@@ -767,20 +809,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	if (iommu->qi)
 		return 0;
 
-	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
 	if (!iommu->qi)
 		return -ENOMEM;
 
 	qi = iommu->qi;
 
-	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
 	if (!qi->desc) {
 		kfree(qi);
 		iommu->qi = 0;
 		return -ENOMEM;
 	}
 
-	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
@@ -809,3 +851,254 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/* iommu interrupt handling. Most stuff are MSI-like. */
+
+enum faulttype {
+	DMA_REMAP,
+	INTR_REMAP,
+	UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
+{
+	"Software",
+	"Present bit in root entry is clear",
+	"Present bit in context entry is clear",
+	"Invalid context entry",
+	"Access beyond MGAW",
+	"PTE Write access is not set",
+	"PTE Read access is not set",
+	"Next page table ptr is invalid",
+	"Root table address invalid",
+	"Context table ptr is invalid",
+	"non-zero reserved fields in RTP",
+	"non-zero reserved fields in CTP",
+	"non-zero reserved fields in PTE",
+};
+
+static const char *intr_remap_fault_reasons[] =
+{
+	"Detected reserved fields in the decoded interrupt-remapped request",
+	"Interrupt index exceeded the interrupt-remapping table size",
+	"Present field in the IRTE entry is clear",
+	"Error accessing interrupt-remapping table pointed by IRTA_REG",
+	"Detected reserved fields in the IRTE entry",
+	"Blocked a compatibility format interrupt request",
+	"Blocked an interrupt request due to source-id verification failure",
+};
+
+#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+{
+	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
+				     ARRAY_SIZE(intr_remap_fault_reasons))) {
+		*fault_type = INTR_REMAP;
+		return intr_remap_fault_reasons[fault_reason - 0x20];
+	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+		*fault_type = DMA_REMAP;
+		return dma_remap_fault_reasons[fault_reason];
+	} else {
+		*fault_type = UNKNOWN;
+		return "Unknown";
+	}
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	/* unmask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(0, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+	unsigned long flag;
+	struct intel_iommu *iommu = get_irq_data(irq);
+
+	/* mask it */
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+	/* Read a reg to force flush the post write */
+	readl(iommu->reg + DMAR_FECTL_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+	struct intel_iommu *iommu = get_irq_data(irq);
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+		u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+	const char *reason;
+	int fault_type;
+
+	reason = dmar_get_fault_reason(fault_reason, &fault_type);
+
+	if (fault_type == INTR_REMAP)
+		printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+		       "fault index %llx\n"
+			"INTR-REMAP:[fault reason %02d] %s\n",
+			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+			PCI_FUNC(source_id & 0xFF), addr >> 48,
+			fault_reason, reason);
+	else
+		printk(KERN_ERR
+		       "DMAR:[%s] Request device [%02x:%02x.%d] "
+		       "fault addr %llx \n"
+		       "DMAR:[fault reason %02d] %s\n",
+		       (type ? "DMA Read" : "DMA Write"),
+		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+	return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+	struct intel_iommu *iommu = dev_id;
+	int reg, fault_index;
+	u32 fault_status;
+	unsigned long flag;
+
+	spin_lock_irqsave(&iommu->register_lock, flag);
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	if (fault_status)
+		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+		       fault_status);
+
+	/* TBD: ignore advanced fault log currently */
+	if (!(fault_status & DMA_FSTS_PPF))
+		goto clear_rest;
+
+	fault_index = dma_fsts_fault_record_index(fault_status);
+	reg = cap_fault_reg_offset(iommu->cap);
+	while (1) {
+		u8 fault_reason;
+		u16 source_id;
+		u64 guest_addr;
+		int type;
+		u32 data;
+
+		/* highest 32 bits */
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 12);
+		if (!(data & DMA_FRCD_F))
+			break;
+
+		fault_reason = dma_frcd_fault_reason(data);
+		type = dma_frcd_type(data);
+
+		data = readl(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN + 8);
+		source_id = dma_frcd_source_id(data);
+
+		guest_addr = dmar_readq(iommu->reg + reg +
+				fault_index * PRIMARY_FAULT_REG_LEN);
+		guest_addr = dma_frcd_page_addr(guest_addr);
+		/* clear the fault */
+		writel(DMA_FRCD_F, iommu->reg + reg +
+			fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+		spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+		dmar_fault_do_one(iommu, type, fault_reason,
+				source_id, guest_addr);
+
+		fault_index++;
+		if (fault_index > cap_num_fault_regs(iommu->cap))
+			fault_index = 0;
+		spin_lock_irqsave(&iommu->register_lock, flag);
+	}
+clear_rest:
+	/* clear all the other faults */
+	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+	int irq, ret;
+
+	/*
+	 * Check if the fault interrupt is already initialized.
+	 */
+	if (iommu->irq)
+		return 0;
+
+	irq = create_irq();
+	if (!irq) {
+		printk(KERN_ERR "IOMMU: no free vectors\n");
+		return -EINVAL;
+	}
+
+	set_irq_data(irq, iommu);
+	iommu->irq = irq;
+
+	ret = arch_setup_dmar_msi(irq);
+	if (ret) {
+		set_irq_data(irq, NULL);
+		iommu->irq = 0;
+		destroy_irq(irq);
+		return 0;
+	}
+
+	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+	if (ret)
+		printk(KERN_ERR "IOMMU: can't request irq\n");
+	return ret;
+}
+
+int __init enable_drhd_fault_handling(void)
+{
+	struct dmar_drhd_unit *drhd;
+
+	/*
+	 * Enable fault control interrupt.
+	 */
+	for_each_drhd_unit(drhd) {
+		int ret;
+		struct intel_iommu *iommu = drhd->iommu;
+		ret = dmar_set_interrupt(iommu);
+
+		if (ret) {
+			printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
+			       " interrupt, ret %d\n",
+			       (unsigned long long)drhd->reg_base_addr, ret);
+			return -1;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 1c1141801060..fbc63d5e459f 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,9 +30,8 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/acpi.h>
 #include <linux/pci-acpi.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
 
 #define MY_NAME "acpi_pcihp"
 
@@ -333,19 +332,14 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 {
 	acpi_status status = AE_NOT_FOUND;
 	acpi_handle handle, phandle;
-	struct pci_bus *pbus = bus;
-	struct pci_dev *pdev;
-
-	do {
-		pdev = pbus->self;
-		if (!pdev) {
-			handle = acpi_get_pci_rootbridge_handle(
-				pci_domain_nr(pbus), pbus->number);
-			break;
-		}
-		handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
-		pbus = pbus->parent;
-	} while (!handle);
+	struct pci_bus *pbus;
+
+	handle = NULL;
+	for (pbus = bus; pbus; pbus = pbus->parent) {
+		handle = acpi_pci_get_bridge_handle(pbus);
+		if (handle)
+			break;
+	}
 
 	/*
 	 * _HPP settings apply to all child buses, until another _HPP is
@@ -378,12 +372,10 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
  *
  * Attempt to take hotplug control from firmware.
  */
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 {
 	acpi_status status;
 	acpi_handle chandle, handle;
-	struct pci_dev *pdev = dev;
-	struct pci_bus *parent;
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
@@ -408,33 +400,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
 		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
 		dbg("Trying to get hotplug control for %s\n",
 		    (char *)string.pointer);
-		status = pci_osc_control_set(handle, flags);
+		status = acpi_pci_osc_control_set(handle, flags);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
 		kfree(string.pointer);
 		string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
 	}
 
-	pdev = dev;
-	handle = DEVICE_ACPI_HANDLE(&dev->dev);
-	while (!handle) {
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!handle) {
 		/*
 		 * This hotplug controller was not listed in the ACPI name
 		 * space at all. Try to get acpi handle of parent pci bus.
 		 */
-		if (!pdev || !pdev->bus->parent)
-			break;
-		parent = pdev->bus->parent;
-		dbg("Could not find %s in acpi namespace, trying parent\n",
-		    pci_name(pdev));
-		if (!parent->self)
-			/* Parent must be a host bridge */
-			handle = acpi_get_pci_rootbridge_handle(
-					pci_domain_nr(parent),
-					parent->number);
-		else
-			handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
-		pdev = parent->self;
+		struct pci_bus *pbus;
+		for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
+			handle = acpi_pci_get_bridge_handle(pbus);
+			if (handle)
+				break;
+		}
 	}
 
 	while (handle) {
@@ -453,13 +437,13 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
 	}
 
 	dbg("Cannot get control of hotplug hardware for pci %s\n",
-	    pci_name(dev));
+	    pci_name(pdev));
 
 	kfree(string.pointer);
 	return -ENODEV;
 got_one:
-	dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev),
-	    (char *)string.pointer);
+	dbg("Gained control for hotplug HW for pci %s (%s)\n",
+	    pci_name(pdev), (char *)string.pointer);
 	kfree(string.pointer);
 	return 0;
 }
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index a13abf55d784..8450f4a6568a 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -225,7 +225,8 @@ void cpqhp_shutdown_debugfs(void)
 
 void cpqhp_create_debugfs_files(struct controller *ctrl)
 {
-	ctrl->dentry = debugfs_create_file(ctrl->pci_dev->dev.bus_id, S_IRUGO, root, ctrl, &debug_ops);
+	ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
+					   S_IRUGO, root, ctrl, &debug_ops);
 }
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index d8649e127298..6151389fd903 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -1,395 +1,163 @@
-/*
- * Fake PCI Hot Plug Controller Driver
+/* Works like the fakephp driver used to, except a little better.
  *
- * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2003 IBM Corp.
- * Copyright (C) 2003 Rolf Eike Beer <eike-kernel@sf-tec.de>
+ * - It's possible to remove devices with subordinate busses.
+ * - New PCI devices that appear via any method, not just a fakephp triggered
+ *   rescan, will be noticed.
+ * - Devices that are removed via any method, not just a fakephp triggered
+ *   removal, will also be noticed.
  *
- * Based on ideas and code from:
- *	Vladimir Kondratiev <vladimir.kondratiev@intel.com>
- *	Rolf Eike Beer <eike-kernel@sf-tec.de>
+ * Uses nothing from the pci-hotplug subsystem.
  *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
- * Send feedback to <greg@kroah.com>
  */
 
-/*
- *
- * This driver will "emulate" removing PCI devices from the system. If
- * the "power" file is written to with "0" then the specified PCI device
- * will be completely removed from the kernel.
- *
- * WARNING, this does NOT turn off the power to the PCI device. This is
- * a "logical" removal, not a physical or electrical removal.
- *
- * Use this module at your own risk, you have been warned!
- *
- * Enabling PCI devices is left as an exercise for the reader...
- *
- */
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
 #include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
 #include "../pci.h"
 
-#if !defined(MODULE)
-	#define MY_NAME "fakephp"
-#else
-	#define MY_NAME THIS_MODULE->name
-#endif
-
-#define dbg(format, arg...) \
-	do { \
-		if (debug) \
-			printk(KERN_DEBUG "%s: " format, \
-				MY_NAME , ## arg); \
-	} while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
-#define DRIVER_DESC "Fake PCI Hot Plug Controller Driver"
-
-struct dummy_slot {
-	struct list_head node;
-	struct hotplug_slot *slot;
-	struct pci_dev *dev;
-	struct work_struct remove_work;
-	unsigned long removed;
+struct legacy_slot {
+	struct kobject kobj;
+	struct pci_dev *dev;
+	struct list_head list;
 };
 
-static int debug;
-static int dup_slots;
-static LIST_HEAD(slot_list);
-static struct workqueue_struct *dummyphp_wq;
-
-static void pci_rescan_worker(struct work_struct *work);
-static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
-
-static int enable_slot (struct hotplug_slot *slot);
-static int disable_slot (struct hotplug_slot *slot);
+static LIST_HEAD(legacy_list);
 
-static struct hotplug_slot_ops dummy_hotplug_slot_ops = {
-	.owner			= THIS_MODULE,
-	.enable_slot		= enable_slot,
-	.disable_slot		= disable_slot,
-};
-
-static void dummy_release(struct hotplug_slot *slot)
+static ssize_t legacy_show(struct kobject *kobj, struct attribute *attr,
+			   char *buf)
 {
-	struct dummy_slot *dslot = slot->private;
-
-	list_del(&dslot->node);
-	kfree(dslot->slot->info);
-	kfree(dslot->slot);
-	pci_dev_put(dslot->dev);
-	kfree(dslot);
+	struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
+	strcpy(buf, "1\n");
+	return 2;
 }
 
-#define SLOT_NAME_SIZE 8
-
-static int add_slot(struct pci_dev *dev)
+static void remove_callback(void *data)
 {
-	struct dummy_slot *dslot;
-	struct hotplug_slot *slot;
-	char name[SLOT_NAME_SIZE];
-	int retval = -ENOMEM;
-	static int count = 1;
-
-	slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
-	if (!slot)
-		goto error;
-
-	slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
-	if (!slot->info)
-		goto error_slot;
-
-	slot->info->power_status = 1;
-	slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
-	slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
-
-	dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
-	if (!dslot)
-		goto error_info;
-
-	if (dup_slots)
-		snprintf(name, SLOT_NAME_SIZE, "fake");
-	else
-		snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
-	dbg("slot->name = %s\n", name);
-	slot->ops = &dummy_hotplug_slot_ops;
-	slot->release = &dummy_release;
-	slot->private = dslot;
-
-	retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
-	if (retval) {
-		err("pci_hp_register failed with error %d\n", retval);
-		goto error_dslot;
-	}
-
-	dbg("slot->name = %s\n", hotplug_slot_name(slot));
-	dslot->slot = slot;
-	dslot->dev = pci_dev_get(dev);
-	list_add (&dslot->node, &slot_list);
-	return retval;
-
-error_dslot:
-	kfree(dslot);
-error_info:
-	kfree(slot->info);
-error_slot:
-	kfree(slot);
-error:
-	return retval;
+	pci_remove_bus_device((struct pci_dev *)data);
 }
 
-static int __init pci_scan_buses(void)
+static ssize_t legacy_store(struct kobject *kobj, struct attribute *attr,
+			    const char *buf, size_t len)
 {
-	struct pci_dev *dev = NULL;
-	int lastslot = 0;
+	struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
+	unsigned long val;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		if (PCI_FUNC(dev->devfn) > 0 &&
-		    lastslot == PCI_SLOT(dev->devfn))
-			continue;
-		lastslot = PCI_SLOT(dev->devfn);
-		add_slot(dev);
-	}
+	if (strict_strtoul(buf, 0, &val) < 0)
+		return -EINVAL;
 
-	return 0;
+	if (val)
+		pci_rescan_bus(slot->dev->bus);
+	else
+		sysfs_schedule_callback(&slot->dev->dev.kobj, remove_callback,
+					slot->dev, THIS_MODULE);
+	return len;
 }
 
-static void remove_slot(struct dummy_slot *dslot)
-{
-	int retval;
-
-	dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
-	retval = pci_hp_deregister(dslot->slot);
-	if (retval)
-		err("Problem unregistering a slot %s\n",
-			hotplug_slot_name(dslot->slot));
-}
+static struct attribute *legacy_attrs[] = {
+	&(struct attribute){ .name = "power", .mode = 0644 },
+	NULL,
+};
 
-/* called from the single-threaded workqueue handler to remove a slot */
-static void remove_slot_worker(struct work_struct *work)
+static void legacy_release(struct kobject *kobj)
 {
-	struct dummy_slot *dslot =
-		container_of(work, struct dummy_slot, remove_work);
-	remove_slot(dslot);
-}
+	struct legacy_slot *slot = container_of(kobj, typeof(*slot), kobj);
 
-/**
- * pci_rescan_slot - Rescan slot
- * @temp: Device template. Should be set: bus and devfn.
- *
- * Tries hard not to re-enable already existing devices;
- * also handles scanning of subfunctions.
- */
-static int pci_rescan_slot(struct pci_dev *temp)
-{
-	struct pci_bus *bus = temp->bus;
-	struct pci_dev *dev;
-	int func;
-	u8 hdr_type;
-	int count = 0;
-
-	if (!pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type)) {
-		temp->hdr_type = hdr_type & 0x7f;
-		if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
-			pci_dev_put(dev);
-		else {
-			dev = pci_scan_single_device(bus, temp->devfn);
-			if (dev) {
-				dbg("New device on %s function %x:%x\n",
-					bus->name, temp->devfn >> 3,
-					temp->devfn & 7);
-				count++;
-			}
-		}
-		/* multifunction device? */
-		if (!(hdr_type & 0x80))
-			return count;
-
-		/* continue scanning for other functions */
-		for (func = 1, temp->devfn++; func < 8; func++, temp->devfn++) {
-			if (pci_read_config_byte(temp, PCI_HEADER_TYPE, &hdr_type))
-				continue;
-			temp->hdr_type = hdr_type & 0x7f;
-
-			if ((dev = pci_get_slot(bus, temp->devfn)) != NULL)
-				pci_dev_put(dev);
-			else {
-				dev = pci_scan_single_device(bus, temp->devfn);
-				if (dev) {
-					dbg("New device on %s function %x:%x\n",
-						bus->name, temp->devfn >> 3,
-						temp->devfn & 7);
-					count++;
-				}
-			}
-		}
-	}
-
-	return count;
+	pci_dev_put(slot->dev);
+	kfree(slot);
 }
 
+static struct kobj_type legacy_ktype = {
+	.sysfs_ops = &(struct sysfs_ops){
+		.store = legacy_store, .show = legacy_show
+	},
+	.release = &legacy_release,
+	.default_attrs = legacy_attrs,
+};
 
-/**
- * pci_rescan_bus - Rescan PCI bus
- * @bus: the PCI bus to rescan
- *
- * Call pci_rescan_slot for each possible function of the bus.
- */
-static void pci_rescan_bus(const struct pci_bus *bus)
+static int legacy_add_slot(struct pci_dev *pdev)
 {
-	unsigned int devfn;
-	struct pci_dev *dev;
-	int retval;
-	int found = 0;
-	dev = alloc_pci_dev();
-	if (!dev)
-		return;
+	struct legacy_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 
-	dev->bus = (struct pci_bus*)bus;
-	dev->sysdata = bus->sysdata;
-	for (devfn = 0; devfn < 0x100; devfn += 8) {
-		dev->devfn = devfn;
-		found += pci_rescan_slot(dev);
-	}
-
-	if (found) {
-		pci_bus_assign_resources(bus);
-		list_for_each_entry(dev, &bus->devices, bus_list) {
-			/* Skip already-added devices */
-			if (dev->is_added)
-				continue;
-			retval = pci_bus_add_device(dev);
-			if (retval)
-				dev_err(&dev->dev,
-					"Error adding device, continuing\n");
-			else
-				add_slot(dev);
-		}
-		pci_bus_add_devices(bus);
-	}
-	kfree(dev);
-}
+	if (!slot)
+		return -ENOMEM;
 
-/* recursively scan all buses */
-static void pci_rescan_buses(const struct list_head *list)
-{
-	const struct list_head *l;
-	list_for_each(l,list) {
-		const struct pci_bus *b = pci_bus_b(l);
-		pci_rescan_bus(b);
-		pci_rescan_buses(&b->children);
+	if (kobject_init_and_add(&slot->kobj, &legacy_ktype,
+				 &pci_slots_kset->kobj, "%s",
+				 dev_name(&pdev->dev))) {
+		dev_warn(&pdev->dev, "Failed to created legacy fake slot\n");
+		return -EINVAL;
 	}
-}
+	slot->dev = pci_dev_get(pdev);
 
-/* initiate rescan of all pci buses */
-static inline void pci_rescan(void) {
-	pci_rescan_buses(&pci_root_buses);
-}
-
-/* called from the single-threaded workqueue handler to rescan all pci buses */
-static void pci_rescan_worker(struct work_struct *work)
-{
-	pci_rescan();
-}
+	list_add(&slot->list, &legacy_list);
 
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
-	/* mis-use enable_slot for rescanning of the pci bus */
-	cancel_work_sync(&pci_rescan_work);
-	queue_work(dummyphp_wq, &pci_rescan_work);
 	return 0;
 }
 
-static int disable_slot(struct hotplug_slot *slot)
+static int legacy_notify(struct notifier_block *nb,
+			 unsigned long action, void *data)
 {
-	struct dummy_slot *dslot;
-	struct pci_dev *dev;
-	int func;
-
-	if (!slot)
-		return -ENODEV;
-	dslot = slot->private;
-
-	dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
+	struct pci_dev *pdev = to_pci_dev(data);
 
-	for (func = 7; func >= 0; func--) {
-		dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
-		if (!dev)
-			continue;
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		legacy_add_slot(pdev);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		struct legacy_slot *slot;
 
-		if (test_and_set_bit(0, &dslot->removed)) {
-			dbg("Slot already scheduled for removal\n");
-			pci_dev_put(dev);
-			return -ENODEV;
-		}
+		list_for_each_entry(slot, &legacy_list, list)
+			if (slot->dev == pdev)
+				goto found;
 
-		/* remove the device from the pci core */
-		pci_remove_bus_device(dev);
-
-		/* queue work item to blow away this sysfs entry and other
-		 * parts.
-		 */
-		INIT_WORK(&dslot->remove_work, remove_slot_worker);
-		queue_work(dummyphp_wq, &dslot->remove_work);
-
-		pci_dev_put(dev);
+		dev_warn(&pdev->dev, "Missing legacy fake slot?");
+		return -ENODEV;
+found:
+		kobject_del(&slot->kobj);
+		list_del(&slot->list);
+		kobject_put(&slot->kobj);
 	}
+
 	return 0;
 }
 
-static void cleanup_slots (void)
-{
-	struct list_head *tmp;
-	struct list_head *next;
-	struct dummy_slot *dslot;
-
-	destroy_workqueue(dummyphp_wq);
-	list_for_each_safe (tmp, next, &slot_list) {
-		dslot = list_entry (tmp, struct dummy_slot, node);
-		remove_slot(dslot);
-	}
-
-}
+static struct notifier_block legacy_notifier = {
+	.notifier_call = legacy_notify
+};
 
-static int __init dummyphp_init(void)
+static int __init init_legacy(void)
 {
-	info(DRIVER_DESC "\n");
+	struct pci_dev *pdev = NULL;
 
-	dummyphp_wq = create_singlethread_workqueue(MY_NAME);
-	if (!dummyphp_wq)
-		return -ENOMEM;
+	/* Add existing devices */
+	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
+		legacy_add_slot(pdev);
 
-	return pci_scan_buses();
+	/* Be alerted of any new ones */
+	bus_register_notifier(&pci_bus_type, &legacy_notifier);
+	return 0;
 }
+module_init(init_legacy);
 
-
-static void __exit dummyphp_exit(void)
+static void __exit remove_legacy(void)
 {
-	cleanup_slots();
+	struct legacy_slot *slot, *tmp;
+
+	bus_unregister_notifier(&pci_bus_type, &legacy_notifier);
+
+	list_for_each_entry_safe(slot, tmp, &legacy_list, list) {
+		list_del(&slot->list);
+		kobject_del(&slot->kobj);
+		kobject_put(&slot->kobj);
+	}
 }
+module_exit(remove_legacy);
 
-module_init(dummyphp_init);
-module_exit(dummyphp_exit);
 
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Trent Piepho <xyzzy@speakeasy.org>");
+MODULE_DESCRIPTION("Legacy version of the fakephp interface");
 MODULE_LICENSE("GPL");
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 39ae37589fda..0a368547e633 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,10 +46,10 @@ extern int pciehp_force;
 extern struct workqueue_struct *pciehp_wq;
 
 #define dbg(format, arg...) \
-	do { \
-		if (pciehp_debug) \
-			printk("%s: " format, MY_NAME , ## arg); \
-	} while (0)
+do { \
+	if (pciehp_debug) \
+		printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
 #define err(format, arg...) \
 	printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) \
@@ -60,7 +60,7 @@ extern struct workqueue_struct *pciehp_wq;
 #define ctrl_dbg(ctrl, format, arg...) \
 	do { \
 		if (pciehp_debug) \
-			dev_printk(, &ctrl->pcie->device, \
+			dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
 				   format, ## arg); \
 	} while (0)
 #define ctrl_err(ctrl, format, arg...) \
@@ -108,10 +108,11 @@ struct controller {
 	u32 slot_cap;
 	u8 cap_base;
 	struct timer_list poll_timer;
-	int cmd_busy;
+	unsigned int cmd_busy:1;
 	unsigned int no_cmd_complete:1;
 	unsigned int link_active_reporting:1;
 	unsigned int notification_enabled:1;
+	unsigned int power_fault_detected;
 };
 
 #define INT_BUTTON_IGNORE 0
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 438d795f9fe3..96048010e7d9 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -67,37 +67,27 @@ static int __init parse_detect_mode(void)
 	return PCIEHP_DETECT_DEFAULT;
 }
 
-static struct pcie_port_service_id __initdata port_pci_ids[] = {
-	{
-		.vendor = PCI_ANY_ID,
-		.device = PCI_ANY_ID,
-		.port_type = PCIE_ANY_PORT,
-		.service_type = PCIE_PORT_SERVICE_HP,
-		.driver_data = 0,
-	}, { /* end: all zeroes */ }
-};
-
 static int __initdata dup_slot_id;
 static int __initdata acpi_slot_detected;
 static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
 
 /* Dummy driver for dumplicate name detection */
-static int __init dummy_probe(struct pcie_device *dev,
-			      const struct pcie_port_service_id *id)
+static int __init dummy_probe(struct pcie_device *dev)
 {
 	int pos;
 	u32 slot_cap;
 	struct slot *slot, *tmp;
 	struct pci_dev *pdev = dev->port;
 	struct pci_bus *pbus = pdev->subordinate;
-	if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
-		return -ENOMEM;
 	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
 	if (pciehp_get_hp_hw_control_from_firmware(pdev))
 		return -ENODEV;
 	if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
 		return -ENODEV;
 	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+	if (!slot)
+		return -ENOMEM;
 	slot->number = slot_cap >> 19;
 	list_for_each_entry(tmp, &dummy_slots, slot_list) {
 		if (tmp->number == slot->number)
@@ -111,7 +101,8 @@ static int __init dummy_probe(struct pcie_device *dev,
 
 static struct pcie_port_service_driver __initdata dummy_driver = {
 	.name		= "pciehp_dummy",
-	.id_table	= port_pci_ids,
+	.port_type	= PCIE_ANY_PORT,
+	.service	= PCIE_PORT_SERVICE_HP,
 	.probe		= dummy_probe,
 };
 
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 681e3912b821..fb254b2454de 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -401,7 +401,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
 	return 0;
 }
 
-static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_id *id)
+static int pciehp_probe(struct pcie_device *dev)
 {
 	int rc;
 	struct controller *ctrl;
@@ -475,7 +475,7 @@ static void pciehp_remove (struct pcie_device *dev)
 }
 
 #ifdef CONFIG_PM
-static int pciehp_suspend (struct pcie_device *dev, pm_message_t state)
+static int pciehp_suspend (struct pcie_device *dev)
 {
 	dev_info(&dev->device, "%s ENTRY\n", __func__);
 	return 0;
@@ -503,20 +503,12 @@ static int pciehp_resume (struct pcie_device *dev)
 	}
 	return 0;
 }
-#endif
+#endif /* PM */
 
-static struct pcie_port_service_id port_pci_ids[] = { {
-	.vendor		= PCI_ANY_ID,
-	.device		= PCI_ANY_ID,
-	.port_type	= PCIE_ANY_PORT,
-	.service_type	= PCIE_PORT_SERVICE_HP,
-	.driver_data	= 0,
-	}, { /* end: all zeroes */ }
-};
 
 static struct pcie_port_service_driver hpdriver_portdrv = {
 	.name		= PCIE_MODULE_NAME,
-	.id_table	= &port_pci_ids[0],
+	.port_type	= PCIE_ANY_PORT,
+	.service	= PCIE_PORT_SERVICE_HP,
 
 	.probe		= pciehp_probe,
 	.remove		= pciehp_remove,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a16c6897bb9..07bd32151146 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -548,23 +548,21 @@ static int hpc_power_on_slot(struct slot * slot)
 
 	slot_cmd = POWER_ON;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	/* Enable detection that we turned off at slot power-off time */
 	if (!pciehp_poll_mode) {
-		slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
-		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
+		/* Enable power fault detection turned off at power off time */
+		slot_cmd |= PCI_EXP_SLTCTL_PFDE;
+		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
-
 	if (retval) {
 		ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
-		return -1;
+		return retval;
 	}
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
 		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 
+	ctrl->power_fault_detected = 0;
 	return retval;
 }
 
@@ -621,18 +619,10 @@ static int hpc_power_off_slot(struct slot * slot)
 
 	slot_cmd = POWER_OFF;
 	cmd_mask = PCI_EXP_SLTCTL_PCC;
-	/*
-	 * If we get MRL or presence detect interrupts now, the isr
-	 * will notice the sticky power-fault bit too and issue power
-	 * indicator change commands. This will lead to an endless loop
-	 * of command completions, since the power-fault bit remains on
-	 * till the slot is powered on again.
-	 */
 	if (!pciehp_poll_mode) {
-		slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			      PCI_EXP_SLTCTL_PDCE);
-		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
-			     PCI_EXP_SLTCTL_PDCE);
+		/* Disable power fault detection */
+		slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
+		cmd_mask |= PCI_EXP_SLTCTL_PFDE;
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -672,10 +662,11 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 	detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
 		     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
 		     PCI_EXP_SLTSTA_CC);
+	detected &= ~intr_loc;
 	intr_loc |= detected;
 	if (!intr_loc)
 		return IRQ_NONE;
-	if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) {
+	if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, intr_loc)) {
 		ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
 			 __func__);
 		return IRQ_NONE;
@@ -709,9 +700,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
 		pciehp_handle_presence_change(p_slot);
 
 	/* Check Power Fault Detected */
-	if (intr_loc & PCI_EXP_SLTSTA_PFD)
+	if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+		ctrl->power_fault_detected = 1;
 		pciehp_handle_power_fault(p_slot);
-
+	}
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 6aba0b6cf2e0..974e924ca96d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -48,10 +48,10 @@ extern int shpchp_debug;
 extern struct workqueue_struct *shpchp_wq;
 
 #define dbg(format, arg...) \
-	do { \
-		if (shpchp_debug) \
-			printk("%s: " format, MY_NAME , ## arg); \
-	} while (0)
+do { \
+	if (shpchp_debug) \
+		printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
 #define err(format, arg...) \
 	printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) \
@@ -62,7 +62,7 @@ extern struct workqueue_struct *shpchp_wq;
 #define ctrl_dbg(ctrl, format, arg...) \
 	do { \
 		if (shpchp_debug) \
-			dev_printk(, &ctrl->pci_dev->dev, \
+			dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
 				   format, ## arg); \
 	} while (0)
 #define ctrl_err(ctrl, format, arg...) \
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 138f161becc0..aa315e52529b 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -137,7 +137,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
 				   busnr))
 			break;
 	}
-	if (busnr >= end) {
+	if (busnr > end) {
 		ctrl_err(ctrl,
 			 "No free bus for hot-added bridge\n");
 		pci_dev_put(dev);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..23e56a564e05 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -164,7 +164,8 @@ static inline void context_clear_entry(struct context_entry *context)
  * 1: writable
  * 2-6: reserved
  * 7: super page
- * 8-11: available
+ * 8-10: available
+ * 11: snoop behavior
  * 12-63: Host physcial address
  */
 struct dma_pte {
@@ -186,6 +187,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
 	pte->val |= DMA_PTE_WRITE;
 }
 
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+	pte->val |= DMA_PTE_SNP;
+}
+
 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 {
 	pte->val = (pte->val & ~3) | (prot & 3);
@@ -231,6 +237,7 @@ struct dmar_domain {
 	int		flags;		/* flags to find out type of domain */
 
 	int		iommu_coherency;/* indicate coherency of iommu access */
+	int		iommu_snooping; /* indicate snooping control feature*/
 	int		iommu_count;	/* reference count of iommu */
 	spinlock_t	iommu_lock;	/* protect iommu set in domain */
 	u64		max_addr;	/* maximum mapped address */
@@ -421,7 +428,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	return g_iommus[iommu_id];
 }
 
-/* "Coherency" capability may be different across iommus */
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	int i;
@@ -438,6 +444,29 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 	}
 }
 
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+	int i;
+
+	domain->iommu_snooping = 1;
+
+	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	for (; i < g_num_of_iommus; ) {
+		if (!ecap_sc_support(g_iommus[i]->ecap)) {
+			domain->iommu_snooping = 0;
+			break;
+		}
+		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+	}
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+	domain_update_iommu_coherency(domain);
+	domain_update_iommu_snooping(domain);
+}
+
 static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
@@ -689,15 +718,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
 	int addr_width = agaw_to_width(domain->agaw);
+	int npages;
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
 	start = PAGE_ALIGN(start);
 	end &= PAGE_MASK;
+	npages = (end - start) / VTD_PAGE_SIZE;
 
 	/* we don't need lock here, nobody else touches the iova range */
-	while (start < end) {
+	while (npages--) {
 		dma_pte_clear_one(domain, start);
 		start += VTD_PAGE_SIZE;
 	}
@@ -1004,194 +1035,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1004 return 0; 1035 return 0;
1005} 1036}
1006 1037
1007/* iommu interrupt handling. Most stuff are MSI-like. */
1008
1009static const char *fault_reason_strings[] =
1010{
1011 "Software",
1012 "Present bit in root entry is clear",
1013 "Present bit in context entry is clear",
1014 "Invalid context entry",
1015 "Access beyond MGAW",
1016 "PTE Write access is not set",
1017 "PTE Read access is not set",
1018 "Next page table ptr is invalid",
1019 "Root table address invalid",
1020 "Context table ptr is invalid",
1021 "non-zero reserved fields in RTP",
1022 "non-zero reserved fields in CTP",
1023 "non-zero reserved fields in PTE",
1024};
1025#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1026
1027const char *dmar_get_fault_reason(u8 fault_reason)
1028{
1029 if (fault_reason > MAX_FAULT_REASON_IDX)
1030 return "Unknown";
1031 else
1032 return fault_reason_strings[fault_reason];
1033}
1034
1035void dmar_msi_unmask(unsigned int irq)
1036{
1037 struct intel_iommu *iommu = get_irq_data(irq);
1038 unsigned long flag;
1039
1040 /* unmask it */
1041 spin_lock_irqsave(&iommu->register_lock, flag);
1042 writel(0, iommu->reg + DMAR_FECTL_REG);
1043 /* Read a reg to force flush the post write */
1044 readl(iommu->reg + DMAR_FECTL_REG);
1045 spin_unlock_irqrestore(&iommu->register_lock, flag);
1046}
1047
1048void dmar_msi_mask(unsigned int irq)
1049{
1050 unsigned long flag;
1051 struct intel_iommu *iommu = get_irq_data(irq);
1052
1053 /* mask it */
1054 spin_lock_irqsave(&iommu->register_lock, flag);
1055 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1056 /* Read a reg to force flush the post write */
1057 readl(iommu->reg + DMAR_FECTL_REG);
1058 spin_unlock_irqrestore(&iommu->register_lock, flag);
1059}
1060
1061void dmar_msi_write(int irq, struct msi_msg *msg)
1062{
1063 struct intel_iommu *iommu = get_irq_data(irq);
1064 unsigned long flag;
1065
1066 spin_lock_irqsave(&iommu->register_lock, flag);
1067 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1068 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1069 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1070 spin_unlock_irqrestore(&iommu->register_lock, flag);
1071}
1072
1073void dmar_msi_read(int irq, struct msi_msg *msg)
1074{
1075 struct intel_iommu *iommu = get_irq_data(irq);
1076 unsigned long flag;
1077
1078 spin_lock_irqsave(&iommu->register_lock, flag);
1079 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1080 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1081 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1082 spin_unlock_irqrestore(&iommu->register_lock, flag);
1083}
1084
1085static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
1086 u8 fault_reason, u16 source_id, unsigned long long addr)
1087{
1088 const char *reason;
1089
1090 reason = dmar_get_fault_reason(fault_reason);
1091
1092 printk(KERN_ERR
1093 "DMAR:[%s] Request device [%02x:%02x.%d] "
1094 "fault addr %llx \n"
1095 "DMAR:[fault reason %02d] %s\n",
1096 (type ? "DMA Read" : "DMA Write"),
1097 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1098 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1099 return 0;
1100}
1101
1102#define PRIMARY_FAULT_REG_LEN (16)
1103static irqreturn_t iommu_page_fault(int irq, void *dev_id)
1104{
1105 struct intel_iommu *iommu = dev_id;
1106 int reg, fault_index;
1107 u32 fault_status;
1108 unsigned long flag;
1109
1110 spin_lock_irqsave(&iommu->register_lock, flag);
1111 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1112
1113 /* TBD: ignore advanced fault log currently */
1114 if (!(fault_status & DMA_FSTS_PPF))
1115 goto clear_overflow;
1116
1117 fault_index = dma_fsts_fault_record_index(fault_status);
1118 reg = cap_fault_reg_offset(iommu->cap);
1119 while (1) {
1120 u8 fault_reason;
1121 u16 source_id;
1122 u64 guest_addr;
1123 int type;
1124 u32 data;
1125
1126 /* highest 32 bits */
1127 data = readl(iommu->reg + reg +
1128 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1129 if (!(data & DMA_FRCD_F))
1130 break;
1131
1132 fault_reason = dma_frcd_fault_reason(data);
1133 type = dma_frcd_type(data);
1134
1135 data = readl(iommu->reg + reg +
1136 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1137 source_id = dma_frcd_source_id(data);
1138
1139 guest_addr = dmar_readq(iommu->reg + reg +
1140 fault_index * PRIMARY_FAULT_REG_LEN);
1141 guest_addr = dma_frcd_page_addr(guest_addr);
1142 /* clear the fault */
1143 writel(DMA_FRCD_F, iommu->reg + reg +
1144 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1145
1146 spin_unlock_irqrestore(&iommu->register_lock, flag);
1147
1148 iommu_page_fault_do_one(iommu, type, fault_reason,
1149 source_id, guest_addr);
1150
1151 fault_index++;
1152 if (fault_index > cap_num_fault_regs(iommu->cap))
1153 fault_index = 0;
1154 spin_lock_irqsave(&iommu->register_lock, flag);
1155 }
1156clear_overflow:
1157 /* clear primary fault overflow */
1158 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1159 if (fault_status & DMA_FSTS_PFO)
1160 writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
1161
1162 spin_unlock_irqrestore(&iommu->register_lock, flag);
1163 return IRQ_HANDLED;
1164}
1165
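Each primary fault record is 16 bytes (PRIMARY_FAULT_REG_LEN): the faulting address in the low 64 bits, the source ID in the dword at offset 8, and the fault/reason/type bits in the dword at offset 12, which is why the loop reads +12 first to test DMA_FRCD_F. A small model of the offset arithmetic, where frcd_base stands for reg, the value returned by cap_fault_reg_offset() (the 0x200 base is an assumption for the example):

#include <stdio.h>

#define PRIMARY_FAULT_REG_LEN 16	/* one 128-bit fault recording register */

static unsigned long frcd_high_dword(unsigned long frcd_base, int index)
{
	return frcd_base + (unsigned long)index * PRIMARY_FAULT_REG_LEN + 12;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("record %d: high dword at 0x%lx\n", i, frcd_high_dword(0x200, i));
	return 0;
}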
1166int dmar_set_interrupt(struct intel_iommu *iommu)
1167{
1168 int irq, ret;
1169
1170 irq = create_irq();
1171 if (!irq) {
1172 printk(KERN_ERR "IOMMU: no free vectors\n");
1173 return -EINVAL;
1174 }
1175
1176 set_irq_data(irq, iommu);
1177 iommu->irq = irq;
1178
1179 ret = arch_setup_dmar_msi(irq);
1180 if (ret) {
1181 set_irq_data(irq, NULL);
1182 iommu->irq = 0;
1183 destroy_irq(irq);
1184 return 0;
1185 }
1186
1187 /* Force fault register is cleared */
1188 iommu_page_fault(irq, iommu);
1189
1190 ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
1191 if (ret)
1192 printk(KERN_ERR "IOMMU: can't request irq\n");
1193 return ret;
1194}
1195 1038

1196static int iommu_init_domains(struct intel_iommu *iommu) 1039static int iommu_init_domains(struct intel_iommu *iommu)
1197{ 1040{
@@ -1429,6 +1272,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1429 else 1272 else
1430 domain->iommu_coherency = 0; 1273 domain->iommu_coherency = 0;
1431 1274
1275 if (ecap_sc_support(iommu->ecap))
1276 domain->iommu_snooping = 1;
1277 else
1278 domain->iommu_snooping = 0;
1279
1432 domain->iommu_count = 1; 1280 domain->iommu_count = 1;
1433 1281
1434 /* always allocate the top pgd */ 1282 /* always allocate the top pgd */
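iommu_snooping is seeded here from the snoop-control bit in the IOMMU's extended capability register; like iommu_coherency, it can only hold for the whole domain if every IOMMU the domain spans supports it, which is why the helper below is renamed to the more general domain_update_iommu_cap(). A sketch of that AND-style aggregation (an assumption about the helper's semantics, not its code):

static int domain_snooping(const int *iommu_has_snoop_ctl, int niommus)
{
	for (int i = 0; i < niommus; i++)
		if (!iommu_has_snoop_ctl[i])
			return 0;	/* one non-snooping IOMMU disables it for the domain */
	return 1;
}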
@@ -1557,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1557 spin_lock_irqsave(&domain->iommu_lock, flags); 1405 spin_lock_irqsave(&domain->iommu_lock, flags);
1558 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { 1406 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1559 domain->iommu_count++; 1407 domain->iommu_count++;
1560 domain_update_iommu_coherency(domain); 1408 domain_update_iommu_cap(domain);
1561 } 1409 }
1562 spin_unlock_irqrestore(&domain->iommu_lock, flags); 1410 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1563 return 0; 1411 return 0;
@@ -1657,6 +1505,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1657 BUG_ON(dma_pte_addr(pte)); 1505 BUG_ON(dma_pte_addr(pte));
1658 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); 1506 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1659 dma_set_pte_prot(pte, prot); 1507 dma_set_pte_prot(pte, prot);
1508 if (prot & DMA_PTE_SNP)
1509 dma_set_pte_snp(pte);
1660 domain_flush_cache(domain, pte, sizeof(*pte)); 1510 domain_flush_cache(domain, pte, sizeof(*pte));
1661 start_pfn++; 1511 start_pfn++;
1662 index++; 1512 index++;
@@ -1970,7 +1820,7 @@ static inline void iommu_prepare_isa(void)
1970 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 1820 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1971 1821
1972 if (ret) 1822 if (ret)
1973 printk("IOMMU: Failed to create 0-64M identity map, " 1823 printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
1974 "floppy might not work\n"); 1824 "floppy might not work\n");
1975 1825
1976} 1826}
@@ -1987,7 +1837,7 @@ static int __init init_dmars(void)
1987 struct dmar_rmrr_unit *rmrr; 1837 struct dmar_rmrr_unit *rmrr;
1988 struct pci_dev *pdev; 1838 struct pci_dev *pdev;
1989 struct intel_iommu *iommu; 1839 struct intel_iommu *iommu;
1990 int i, ret, unit = 0; 1840 int i, ret;
1991 1841
1992 /* 1842 /*
1993 * for each drhd 1843 * for each drhd
@@ -2043,11 +1893,40 @@ static int __init init_dmars(void)
2043 } 1893 }
2044 } 1894 }
2045 1895
1896 /*
1897 * Start from a sane IOMMU hardware state.
1898 */
2046 for_each_drhd_unit(drhd) { 1899 for_each_drhd_unit(drhd) {
2047 if (drhd->ignored) 1900 if (drhd->ignored)
2048 continue; 1901 continue;
2049 1902
2050 iommu = drhd->iommu; 1903 iommu = drhd->iommu;
1904
1905 /*
1906 * If the queued invalidation is already initialized by us
1907 * (for example, while enabling interrupt-remapping) then
1908 * things are already rolling from a sane state.
1909 */
1910 if (iommu->qi)
1911 continue;
1912
1913 /*
1914 * Clear any previous faults.
1915 */
1916 dmar_fault(-1, iommu);
1917 /*
1918 * Disable queued invalidation if supported and already enabled
1919 * before OS handover.
1920 */
1921 dmar_disable_qi(iommu);
1922 }
1923
1924 for_each_drhd_unit(drhd) {
1925 if (drhd->ignored)
1926 continue;
1927
1928 iommu = drhd->iommu;
1929
2051 if (dmar_enable_qi(iommu)) { 1930 if (dmar_enable_qi(iommu)) {
2052 /* 1931 /*
2053 * Queued Invalidate not enabled, use Register Based 1932 * Queued Invalidate not enabled, use Register Based
@@ -2109,7 +1988,6 @@ static int __init init_dmars(void)
2109 if (drhd->ignored) 1988 if (drhd->ignored)
2110 continue; 1989 continue;
2111 iommu = drhd->iommu; 1990 iommu = drhd->iommu;
2112 sprintf (iommu->name, "dmar%d", unit++);
2113 1991
2114 iommu_flush_write_buffer(iommu); 1992 iommu_flush_write_buffer(iommu);
2115 1993
@@ -2279,16 +2157,18 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2279error: 2157error:
2280 if (iova) 2158 if (iova)
2281 __free_iova(&domain->iovad, iova); 2159 __free_iova(&domain->iovad, iova);
2282 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", 2160 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2283 pci_name(pdev), size, (unsigned long long)paddr, dir); 2161 pci_name(pdev), size, (unsigned long long)paddr, dir);
2284 return 0; 2162 return 0;
2285} 2163}
2286 2164
2287dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, 2165static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2288 size_t size, int dir) 2166 unsigned long offset, size_t size,
2167 enum dma_data_direction dir,
2168 struct dma_attrs *attrs)
2289{ 2169{
2290 return __intel_map_single(hwdev, paddr, size, dir, 2170 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2291 to_pci_dev(hwdev)->dma_mask); 2171 dir, to_pci_dev(dev)->dma_mask);
2292} 2172}
2293 2173
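intel_map_single() disappears as an entry point: struct page plus an in-page offset is now the primitive, and the physical address is recovered as page_to_phys(page) + offset. A user-space model of the page/offset split this conversion relies on (4 KiB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uintptr_t addr = 0x12345678;
	uintptr_t page = addr & ~(PAGE_SIZE - 1);	/* page_to_phys(page) analogue */
	uintptr_t off  = addr &  (PAGE_SIZE - 1);	/* offset within the page */
	printf("0x%lx = 0x%lx + 0x%lx\n",
	       (unsigned long)addr, (unsigned long)page, (unsigned long)off);
	return 0;
}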
2294static void flush_unmaps(void) 2174static void flush_unmaps(void)
@@ -2352,8 +2232,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2352 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 2232 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2353} 2233}
2354 2234
2355void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, 2235static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2356 int dir) 2236 size_t size, enum dma_data_direction dir,
2237 struct dma_attrs *attrs)
2357{ 2238{
2358 struct pci_dev *pdev = to_pci_dev(dev); 2239 struct pci_dev *pdev = to_pci_dev(dev);
2359 struct dmar_domain *domain; 2240 struct dmar_domain *domain;
@@ -2375,7 +2256,7 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2375 start_addr = iova->pfn_lo << PAGE_SHIFT; 2256 start_addr = iova->pfn_lo << PAGE_SHIFT;
2376 size = aligned_size((u64)dev_addr, size); 2257 size = aligned_size((u64)dev_addr, size);
2377 2258
2378 pr_debug("Device %s unmapping: %lx@%llx\n", 2259 pr_debug("Device %s unmapping: %zx@%llx\n",
2379 pci_name(pdev), size, (unsigned long long)start_addr); 2260 pci_name(pdev), size, (unsigned long long)start_addr);
2380 2261
2381 /* clear the whole page */ 2262 /* clear the whole page */
@@ -2397,8 +2278,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2397 } 2278 }
2398} 2279}
2399 2280
2400void *intel_alloc_coherent(struct device *hwdev, size_t size, 2281static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2401 dma_addr_t *dma_handle, gfp_t flags) 2282 int dir)
2283{
2284 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2285}
2286
2287static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2288 dma_addr_t *dma_handle, gfp_t flags)
2402{ 2289{
2403 void *vaddr; 2290 void *vaddr;
2404 int order; 2291 int order;
@@ -2421,8 +2308,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size,
2421 return NULL; 2308 return NULL;
2422} 2309}
2423 2310
2424void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, 2311static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2425 dma_addr_t dma_handle) 2312 dma_addr_t dma_handle)
2426{ 2313{
2427 int order; 2314 int order;
2428 2315
@@ -2433,10 +2320,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2433 free_pages((unsigned long)vaddr, order); 2320 free_pages((unsigned long)vaddr, order);
2434} 2321}
2435 2322
2436#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) 2323static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2437 2324 int nelems, enum dma_data_direction dir,
2438void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 2325 struct dma_attrs *attrs)
2439 int nelems, int dir)
2440{ 2326{
2441 int i; 2327 int i;
2442 struct pci_dev *pdev = to_pci_dev(hwdev); 2328 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2444,7 +2330,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2444 unsigned long start_addr; 2330 unsigned long start_addr;
2445 struct iova *iova; 2331 struct iova *iova;
2446 size_t size = 0; 2332 size_t size = 0;
2447 void *addr; 2333 phys_addr_t addr;
2448 struct scatterlist *sg; 2334 struct scatterlist *sg;
2449 struct intel_iommu *iommu; 2335 struct intel_iommu *iommu;
2450 2336
@@ -2460,7 +2346,7 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2460 if (!iova) 2346 if (!iova)
2461 return; 2347 return;
2462 for_each_sg(sglist, sg, nelems, i) { 2348 for_each_sg(sglist, sg, nelems, i) {
2463 addr = SG_ENT_VIRT_ADDRESS(sg); 2349 addr = page_to_phys(sg_page(sg)) + sg->offset;
2464 size += aligned_size((u64)addr, sg->length); 2350 size += aligned_size((u64)addr, sg->length);
2465 } 2351 }
2466 2352
@@ -2487,16 +2373,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2487 2373
2488 for_each_sg(sglist, sg, nelems, i) { 2374 for_each_sg(sglist, sg, nelems, i) {
2489 BUG_ON(!sg_page(sg)); 2375 BUG_ON(!sg_page(sg));
2490 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)); 2376 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2491 sg->dma_length = sg->length; 2377 sg->dma_length = sg->length;
2492 } 2378 }
2493 return nelems; 2379 return nelems;
2494} 2380}
2495 2381
2496int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, 2382static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2497 int dir) 2383 enum dma_data_direction dir, struct dma_attrs *attrs)
2498{ 2384{
2499 void *addr; 2385 phys_addr_t addr;
2500 int i; 2386 int i;
2501 struct pci_dev *pdev = to_pci_dev(hwdev); 2387 struct pci_dev *pdev = to_pci_dev(hwdev);
2502 struct dmar_domain *domain; 2388 struct dmar_domain *domain;
@@ -2520,8 +2406,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2520 iommu = domain_get_iommu(domain); 2406 iommu = domain_get_iommu(domain);
2521 2407
2522 for_each_sg(sglist, sg, nelems, i) { 2408 for_each_sg(sglist, sg, nelems, i) {
2523 addr = SG_ENT_VIRT_ADDRESS(sg); 2409 addr = page_to_phys(sg_page(sg)) + sg->offset;
2524 addr = (void *)virt_to_phys(addr);
2525 size += aligned_size((u64)addr, sg->length); 2410 size += aligned_size((u64)addr, sg->length);
2526 } 2411 }
2527 2412
@@ -2544,8 +2429,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2544 start_addr = iova->pfn_lo << PAGE_SHIFT; 2429 start_addr = iova->pfn_lo << PAGE_SHIFT;
2545 offset = 0; 2430 offset = 0;
2546 for_each_sg(sglist, sg, nelems, i) { 2431 for_each_sg(sglist, sg, nelems, i) {
2547 addr = SG_ENT_VIRT_ADDRESS(sg); 2432 addr = page_to_phys(sg_page(sg)) + sg->offset;
2548 addr = (void *)virt_to_phys(addr);
2549 size = aligned_size((u64)addr, sg->length); 2433 size = aligned_size((u64)addr, sg->length);
2550 ret = domain_page_mapping(domain, start_addr + offset, 2434 ret = domain_page_mapping(domain, start_addr + offset,
2551 ((u64)addr) & PAGE_MASK, 2435 ((u64)addr) & PAGE_MASK,
@@ -2574,13 +2458,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2574 return nelems; 2458 return nelems;
2575} 2459}
2576 2460
2577static struct dma_mapping_ops intel_dma_ops = { 2461static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2462{
2463 return !dma_addr;
2464}
2465
2466struct dma_map_ops intel_dma_ops = {
2578 .alloc_coherent = intel_alloc_coherent, 2467 .alloc_coherent = intel_alloc_coherent,
2579 .free_coherent = intel_free_coherent, 2468 .free_coherent = intel_free_coherent,
2580 .map_single = intel_map_single,
2581 .unmap_single = intel_unmap_single,
2582 .map_sg = intel_map_sg, 2469 .map_sg = intel_map_sg,
2583 .unmap_sg = intel_unmap_sg, 2470 .unmap_sg = intel_unmap_sg,
2471 .map_page = intel_map_page,
2472 .unmap_page = intel_unmap_page,
2473 .mapping_error = intel_mapping_error,
2584}; 2474};
2585 2475
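The switch from struct dma_mapping_ops to the generic struct dma_map_ops drops the map_single/unmap_single methods in favor of map_page/unmap_page and adds mapping_error. Since __intel_map_single() returns 0 on failure, the error test is simply !dma_addr, and callers reach it through the generic wrapper, roughly like this (a hedged sketch of the calling convention, not code from this patch):

dma_addr_t dma = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))	/* ends up in intel_mapping_error() */
	return -ENOMEM;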
2586static inline int iommu_domain_cache_init(void) 2476static inline int iommu_domain_cache_init(void)
@@ -2772,6 +2662,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
2772 return 0; 2662 return 0;
2773} 2663}
2774 2664
2665static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2666 struct pci_dev *pdev)
2667{
2668 struct pci_dev *tmp, *parent;
2669
2670 if (!iommu || !pdev)
2671 return;
2672
2673 /* dependent device detach */
2674 tmp = pci_find_upstream_pcie_bridge(pdev);
2675 /* Secondary interface's bus number and devfn 0 */
2676 if (tmp) {
2677 parent = pdev->bus->self;
2678 while (parent != tmp) {
2679 iommu_detach_dev(iommu, parent->bus->number,
2680 parent->devfn);
2681 parent = parent->bus->self;
2682 }
2683 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2684 iommu_detach_dev(iommu,
2685 tmp->subordinate->number, 0);
2686 else /* this is a legacy PCI bridge */
2687 iommu_detach_dev(iommu,
2688 tmp->bus->number, tmp->devfn);
2689 }
2690}
2691
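The new helper walks every bridge sitting between the device and its upstream PCIe-to-PCI bridge, detaching each from the context tables, then detaches the top bridge itself: by its secondary bus number and devfn 0 if it is a PCIe-to-PCI bridge, or by its own bus/devfn if it is a legacy PCI bridge. A stripped-down model of the parent walk (user-space types, illustrative only):

struct bus { int number; struct dev *self; /* bridge that owns this bus */ };
struct dev { struct bus *bus; int devfn; };

static void detach_up_to(struct dev *pdev, struct dev *top,
			 void (*detach)(int bus, int devfn))
{
	for (struct dev *p = pdev->bus->self; p != top; p = p->bus->self)
		detach(p->bus->number, p->devfn);
}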
2775static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, 2692static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2776 struct pci_dev *pdev) 2693 struct pci_dev *pdev)
2777{ 2694{
@@ -2797,6 +2714,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2797 spin_unlock_irqrestore(&device_domain_lock, flags); 2714 spin_unlock_irqrestore(&device_domain_lock, flags);
2798 2715
2799 iommu_detach_dev(iommu, info->bus, info->devfn); 2716 iommu_detach_dev(iommu, info->bus, info->devfn);
2717 iommu_detach_dependent_devices(iommu, pdev);
2800 free_devinfo_mem(info); 2718 free_devinfo_mem(info);
2801 2719
2802 spin_lock_irqsave(&device_domain_lock, flags); 2720 spin_lock_irqsave(&device_domain_lock, flags);
@@ -2820,7 +2738,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2820 spin_lock_irqsave(&domain->iommu_lock, tmp_flags); 2738 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2821 clear_bit(iommu->seq_id, &domain->iommu_bmp); 2739 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2822 domain->iommu_count--; 2740 domain->iommu_count--;
2823 domain_update_iommu_coherency(domain); 2741 domain_update_iommu_cap(domain);
2824 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 2742 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2825 } 2743 }
2826 2744
@@ -2846,15 +2764,16 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2846 2764
2847 iommu = device_to_iommu(info->bus, info->devfn); 2765 iommu = device_to_iommu(info->bus, info->devfn);
2848 iommu_detach_dev(iommu, info->bus, info->devfn); 2766 iommu_detach_dev(iommu, info->bus, info->devfn);
2767 iommu_detach_dependent_devices(iommu, info->dev);
2849 2768
2850 /* clear this iommu in iommu_bmp, update iommu count 2769 /* clear this iommu in iommu_bmp, update iommu count
2851 * and coherency 2770 * and capabilities
2852 */ 2771 */
2853 spin_lock_irqsave(&domain->iommu_lock, flags2); 2772 spin_lock_irqsave(&domain->iommu_lock, flags2);
2854 if (test_and_clear_bit(iommu->seq_id, 2773 if (test_and_clear_bit(iommu->seq_id,
2855 &domain->iommu_bmp)) { 2774 &domain->iommu_bmp)) {
2856 domain->iommu_count--; 2775 domain->iommu_count--;
2857 domain_update_iommu_coherency(domain); 2776 domain_update_iommu_cap(domain);
2858 } 2777 }
2859 spin_unlock_irqrestore(&domain->iommu_lock, flags2); 2778 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2860 2779
@@ -3077,6 +2996,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
3077 prot |= DMA_PTE_READ; 2996 prot |= DMA_PTE_READ;
3078 if (iommu_prot & IOMMU_WRITE) 2997 if (iommu_prot & IOMMU_WRITE)
3079 prot |= DMA_PTE_WRITE; 2998 prot |= DMA_PTE_WRITE;
2999 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3000 prot |= DMA_PTE_SNP;
3080 3001
3081 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); 3002 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3082 if (dmar_domain->max_addr < max_addr) { 3003 if (dmar_domain->max_addr < max_addr) {
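IOMMU_CACHE from the generic API is honored only when the domain can actually enforce snooping; otherwise the bit is silently dropped rather than failing the map. A self-contained model of the translation (the flag values are illustrative, chosen to mirror the kernel's, but treat them as assumptions):

#define IOMMU_READ   1
#define IOMMU_WRITE  2
#define IOMMU_CACHE  4

#define DMA_PTE_READ  1
#define DMA_PTE_WRITE 2
#define DMA_PTE_SNP   (1u << 11)	/* illustrative bit position */

static unsigned int vtd_prot(unsigned int iommu_prot, int iommu_snooping)
{
	unsigned int prot = 0;
	if (iommu_prot & IOMMU_READ)  prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE) prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && iommu_snooping)
		prot |= DMA_PTE_SNP;	/* only if every IOMMU in the domain snoops */
	return prot;
}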
@@ -3130,6 +3051,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3130 return phys; 3051 return phys;
3131} 3052}
3132 3053
3054static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3055 unsigned long cap)
3056{
3057 struct dmar_domain *dmar_domain = domain->priv;
3058
3059 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3060 return dmar_domain->iommu_snooping;
3061
3062 return 0;
3063}
3064
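domain_has_cap lets generic-API users ask whether DMA into the domain snoops CPU caches before they relax cache-attribute control. A hedged usage sketch (the caller and the policy are assumptions, not part of this patch):

if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
	/* DMA is coherent; e.g. a hypervisor may give the guest more
	 * freedom over memory types without risking stale cache lines */
	relax_cache_attribute_control();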
3133static struct iommu_ops intel_iommu_ops = { 3065static struct iommu_ops intel_iommu_ops = {
3134 .domain_init = intel_iommu_domain_init, 3066 .domain_init = intel_iommu_domain_init,
3135 .domain_destroy = intel_iommu_domain_destroy, 3067 .domain_destroy = intel_iommu_domain_destroy,
@@ -3138,6 +3070,7 @@ static struct iommu_ops intel_iommu_ops = {
3138 .map = intel_iommu_map_range, 3070 .map = intel_iommu_map_range,
3139 .unmap = intel_iommu_unmap_range, 3071 .unmap = intel_iommu_unmap_range,
3140 .iova_to_phys = intel_iommu_iova_to_phys, 3072 .iova_to_phys = intel_iommu_iova_to_phys,
3073 .domain_has_cap = intel_iommu_domain_has_cap,
3141}; 3074};
3142 3075
3143static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) 3076static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 45effc5726c0..b041a409f4a7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -6,6 +6,7 @@
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <asm/io_apic.h> 7#include <asm/io_apic.h>
8#include <asm/smp.h> 8#include <asm/smp.h>
9#include <asm/cpu.h>
9#include <linux/intel-iommu.h> 10#include <linux/intel-iommu.h>
10#include "intr_remapping.h" 11#include "intr_remapping.h"
11 12
@@ -20,7 +21,7 @@ struct irq_2_iommu {
20 u8 irte_mask; 21 u8 irte_mask;
21}; 22};
22 23
23#ifdef CONFIG_SPARSE_IRQ 24#ifdef CONFIG_GENERIC_HARDIRQS
24static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) 25static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
25{ 26{
26 struct irq_2_iommu *iommu; 27 struct irq_2_iommu *iommu;
@@ -116,21 +117,22 @@ int get_irte(int irq, struct irte *entry)
116{ 117{
117 int index; 118 int index;
118 struct irq_2_iommu *irq_iommu; 119 struct irq_2_iommu *irq_iommu;
120 unsigned long flags;
119 121
120 if (!entry) 122 if (!entry)
121 return -1; 123 return -1;
122 124
123 spin_lock(&irq_2_ir_lock); 125 spin_lock_irqsave(&irq_2_ir_lock, flags);
124 irq_iommu = valid_irq_2_iommu(irq); 126 irq_iommu = valid_irq_2_iommu(irq);
125 if (!irq_iommu) { 127 if (!irq_iommu) {
126 spin_unlock(&irq_2_ir_lock); 128 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
127 return -1; 129 return -1;
128 } 130 }
129 131
130 index = irq_iommu->irte_index + irq_iommu->sub_handle; 132 index = irq_iommu->irte_index + irq_iommu->sub_handle;
131 *entry = *(irq_iommu->iommu->ir_table->base + index); 133 *entry = *(irq_iommu->iommu->ir_table->base + index);
132 134
133 spin_unlock(&irq_2_ir_lock); 135 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
134 return 0; 136 return 0;
135} 137}
136 138
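Every irq_2_ir_lock acquisition in this file gains the irqsave variant. The lock is now reachable from interrupt context, so a plain spin_lock() holder interrupted on the same CPU could deadlock against itself; saving and restoring the IRQ state also keeps these functions safe to call with interrupts already disabled. The converted pattern (kernel-style sketch):

unsigned long flags;

spin_lock_irqsave(&irq_2_ir_lock, flags);	/* masks local IRQs, remembers prior state */
/* ... look up or modify the irq -> IRTE mapping ... */
spin_unlock_irqrestore(&irq_2_ir_lock, flags);	/* restores exactly the saved state */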
@@ -140,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
140 struct irq_2_iommu *irq_iommu; 142 struct irq_2_iommu *irq_iommu;
141 u16 index, start_index; 143 u16 index, start_index;
142 unsigned int mask = 0; 144 unsigned int mask = 0;
145 unsigned long flags;
143 int i; 146 int i;
144 147
145 if (!count) 148 if (!count)
@@ -169,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
169 return -1; 172 return -1;
170 } 173 }
171 174
172 spin_lock(&irq_2_ir_lock); 175 spin_lock_irqsave(&irq_2_ir_lock, flags);
173 do { 176 do {
174 for (i = index; i < index + count; i++) 177 for (i = index; i < index + count; i++)
175 if (table->base[i].present) 178 if (table->base[i].present)
@@ -181,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
181 index = (index + count) % INTR_REMAP_TABLE_ENTRIES; 184 index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
182 185
183 if (index == start_index) { 186 if (index == start_index) {
184 spin_unlock(&irq_2_ir_lock); 187 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
185 printk(KERN_ERR "can't allocate an IRTE\n"); 188 printk(KERN_ERR "can't allocate an IRTE\n");
186 return -1; 189 return -1;
187 } 190 }
@@ -192,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
192 195
193 irq_iommu = irq_2_iommu_alloc(irq); 196 irq_iommu = irq_2_iommu_alloc(irq);
194 if (!irq_iommu) { 197 if (!irq_iommu) {
195 spin_unlock(&irq_2_ir_lock); 198 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
196 printk(KERN_ERR "can't allocate irq_2_iommu\n"); 199 printk(KERN_ERR "can't allocate irq_2_iommu\n");
197 return -1; 200 return -1;
198 } 201 }
@@ -202,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
202 irq_iommu->sub_handle = 0; 205 irq_iommu->sub_handle = 0;
203 irq_iommu->irte_mask = mask; 206 irq_iommu->irte_mask = mask;
204 207
205 spin_unlock(&irq_2_ir_lock); 208 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
206 209
207 return index; 210 return index;
208} 211}
@@ -222,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
222{ 225{
223 int index; 226 int index;
224 struct irq_2_iommu *irq_iommu; 227 struct irq_2_iommu *irq_iommu;
228 unsigned long flags;
225 229
226 spin_lock(&irq_2_ir_lock); 230 spin_lock_irqsave(&irq_2_ir_lock, flags);
227 irq_iommu = valid_irq_2_iommu(irq); 231 irq_iommu = valid_irq_2_iommu(irq);
228 if (!irq_iommu) { 232 if (!irq_iommu) {
229 spin_unlock(&irq_2_ir_lock); 233 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
230 return -1; 234 return -1;
231 } 235 }
232 236
233 *sub_handle = irq_iommu->sub_handle; 237 *sub_handle = irq_iommu->sub_handle;
234 index = irq_iommu->irte_index; 238 index = irq_iommu->irte_index;
235 spin_unlock(&irq_2_ir_lock); 239 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
236 return index; 240 return index;
237} 241}
238 242
239int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 243int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
240{ 244{
241 struct irq_2_iommu *irq_iommu; 245 struct irq_2_iommu *irq_iommu;
246 unsigned long flags;
242 247
243 spin_lock(&irq_2_ir_lock); 248 spin_lock_irqsave(&irq_2_ir_lock, flags);
244 249
245 irq_iommu = irq_2_iommu_alloc(irq); 250 irq_iommu = irq_2_iommu_alloc(irq);
246 251
247 if (!irq_iommu) { 252 if (!irq_iommu) {
248 spin_unlock(&irq_2_ir_lock); 253 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
249 printk(KERN_ERR "can't allocate irq_2_iommu\n"); 254 printk(KERN_ERR "can't allocate irq_2_iommu\n");
250 return -1; 255 return -1;
251 } 256 }
@@ -255,7 +260,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
255 irq_iommu->sub_handle = subhandle; 260 irq_iommu->sub_handle = subhandle;
256 irq_iommu->irte_mask = 0; 261 irq_iommu->irte_mask = 0;
257 262
258 spin_unlock(&irq_2_ir_lock); 263 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
259 264
260 return 0; 265 return 0;
261} 266}
@@ -263,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
263int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) 268int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
264{ 269{
265 struct irq_2_iommu *irq_iommu; 270 struct irq_2_iommu *irq_iommu;
271 unsigned long flags;
266 272
267 spin_lock(&irq_2_ir_lock); 273 spin_lock_irqsave(&irq_2_ir_lock, flags);
268 irq_iommu = valid_irq_2_iommu(irq); 274 irq_iommu = valid_irq_2_iommu(irq);
269 if (!irq_iommu) { 275 if (!irq_iommu) {
270 spin_unlock(&irq_2_ir_lock); 276 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
271 return -1; 277 return -1;
272 } 278 }
273 279
@@ -276,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
276 irq_iommu->sub_handle = 0; 282 irq_iommu->sub_handle = 0;
277 irq_2_iommu(irq)->irte_mask = 0; 283 irq_2_iommu(irq)->irte_mask = 0;
278 284
279 spin_unlock(&irq_2_ir_lock); 285 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
280 286
281 return 0; 287 return 0;
282} 288}
@@ -288,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified)
288 struct irte *irte; 294 struct irte *irte;
289 struct intel_iommu *iommu; 295 struct intel_iommu *iommu;
290 struct irq_2_iommu *irq_iommu; 296 struct irq_2_iommu *irq_iommu;
297 unsigned long flags;
291 298
292 spin_lock(&irq_2_ir_lock); 299 spin_lock_irqsave(&irq_2_ir_lock, flags);
293 irq_iommu = valid_irq_2_iommu(irq); 300 irq_iommu = valid_irq_2_iommu(irq);
294 if (!irq_iommu) { 301 if (!irq_iommu) {
295 spin_unlock(&irq_2_ir_lock); 302 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
296 return -1; 303 return -1;
297 } 304 }
298 305
@@ -301,11 +308,11 @@ int modify_irte(int irq, struct irte *irte_modified)
301 index = irq_iommu->irte_index + irq_iommu->sub_handle; 308 index = irq_iommu->irte_index + irq_iommu->sub_handle;
302 irte = &iommu->ir_table->base[index]; 309 irte = &iommu->ir_table->base[index];
303 310
304 set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); 311 set_64bit((unsigned long *)irte, irte_modified->low);
305 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 312 __iommu_flush_cache(iommu, irte, sizeof(*irte));
306 313
307 rc = qi_flush_iec(iommu, index, 0); 314 rc = qi_flush_iec(iommu, index, 0);
308 spin_unlock(&irq_2_ir_lock); 315 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
309 316
310 return rc; 317 return rc;
311} 318}
@@ -316,11 +323,12 @@ int flush_irte(int irq)
316 int index; 323 int index;
317 struct intel_iommu *iommu; 324 struct intel_iommu *iommu;
318 struct irq_2_iommu *irq_iommu; 325 struct irq_2_iommu *irq_iommu;
326 unsigned long flags;
319 327
320 spin_lock(&irq_2_ir_lock); 328 spin_lock_irqsave(&irq_2_ir_lock, flags);
321 irq_iommu = valid_irq_2_iommu(irq); 329 irq_iommu = valid_irq_2_iommu(irq);
322 if (!irq_iommu) { 330 if (!irq_iommu) {
323 spin_unlock(&irq_2_ir_lock); 331 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
324 return -1; 332 return -1;
325 } 333 }
326 334
@@ -329,7 +337,7 @@ int flush_irte(int irq)
329 index = irq_iommu->irte_index + irq_iommu->sub_handle; 337 index = irq_iommu->irte_index + irq_iommu->sub_handle;
330 338
331 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 339 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
332 spin_unlock(&irq_2_ir_lock); 340 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
333 341
334 return rc; 342 return rc;
335} 343}
@@ -362,11 +370,12 @@ int free_irte(int irq)
362 struct irte *irte; 370 struct irte *irte;
363 struct intel_iommu *iommu; 371 struct intel_iommu *iommu;
364 struct irq_2_iommu *irq_iommu; 372 struct irq_2_iommu *irq_iommu;
373 unsigned long flags;
365 374
366 spin_lock(&irq_2_ir_lock); 375 spin_lock_irqsave(&irq_2_ir_lock, flags);
367 irq_iommu = valid_irq_2_iommu(irq); 376 irq_iommu = valid_irq_2_iommu(irq);
368 if (!irq_iommu) { 377 if (!irq_iommu) {
369 spin_unlock(&irq_2_ir_lock); 378 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
370 return -1; 379 return -1;
371 } 380 }
372 381
@@ -377,7 +386,7 @@ int free_irte(int irq)
377 386
378 if (!irq_iommu->sub_handle) { 387 if (!irq_iommu->sub_handle) {
379 for (i = 0; i < (1 << irq_iommu->irte_mask); i++) 388 for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
380 set_64bit((unsigned long *)irte, 0); 389 set_64bit((unsigned long *)(irte + i), 0);
381 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 390 rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
382 } 391 }
383 392
@@ -386,7 +395,7 @@ int free_irte(int irq)
386 irq_iommu->sub_handle = 0; 395 irq_iommu->sub_handle = 0;
387 irq_iommu->irte_mask = 0; 396 irq_iommu->irte_mask = 0;
388 397
389 spin_unlock(&irq_2_ir_lock); 398 spin_unlock_irqrestore(&irq_2_ir_lock, flags);
390 399
391 return rc; 400 return rc;
392} 401}
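The one-character change in free_irte() is a real fix, not churn: the old loop passed the same base pointer on every pass, zeroing entry 0 of the block 1 << irte_mask times while entries 1..n-1 kept stale, still-present IRTEs. Indexing with (irte + i) clears each entry of a multi-vector allocation exactly once:

/* before: every iteration hit the same entry */
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
	set_64bit((unsigned long *)irte, 0);

/* after: each of the 1 << irte_mask entries is cleared */
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
	set_64bit((unsigned long *)(irte + i), 0);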
@@ -438,12 +447,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
438 struct page *pages; 447 struct page *pages;
439 448
440 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), 449 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
441 GFP_KERNEL); 450 GFP_ATOMIC);
442 451
443 if (!iommu->ir_table) 452 if (!iommu->ir_table)
444 return -ENOMEM; 453 return -ENOMEM;
445 454
446 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); 455 pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
447 456
448 if (!pages) { 457 if (!pages) {
449 printk(KERN_ERR "failed to allocate pages of order %d\n", 458 printk(KERN_ERR "failed to allocate pages of order %d\n",
@@ -458,11 +467,55 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
458 return 0; 467 return 0;
459} 468}
460 469
470/*
471 * Disable Interrupt Remapping.
472 */
473static void disable_intr_remapping(struct intel_iommu *iommu)
474{
475 unsigned long flags;
476 u32 sts;
477
478 if (!ecap_ir_support(iommu->ecap))
479 return;
480
481 spin_lock_irqsave(&iommu->register_lock, flags);
482
483 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
484 if (!(sts & DMA_GSTS_IRES))
485 goto end;
486
487 iommu->gcmd &= ~DMA_GCMD_IRE;
488 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
489
490 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
491 readl, !(sts & DMA_GSTS_IRES), sts);
492
493end:
494 spin_unlock_irqrestore(&iommu->register_lock, flags);
495}
496
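disable_intr_remapping(), together with the loop added to enable_intr_remapping() below, implements a quiesce-before-reprogram rule: clear stale faults, turn off any interrupt remapping and queued invalidation that firmware or a crashed kernel (kexec/kdump) left enabled, and only then rebuild the tables. The wait after clearing DMA_GCMD_IRE follows the usual VT-d poll-until-status-clears idiom, roughly as below (the real IOMMU_WAIT_OP macro also enforces a timeout, omitted here):

u32 sts;
do {
	sts = readl(iommu->reg + DMAR_GSTS_REG);
} while (sts & DMA_GSTS_IRES);	/* hardware clears IRES once remapping is off */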
461int __init enable_intr_remapping(int eim) 497int __init enable_intr_remapping(int eim)
462{ 498{
463 struct dmar_drhd_unit *drhd; 499 struct dmar_drhd_unit *drhd;
464 int setup = 0; 500 int setup = 0;
465 501
502 for_each_drhd_unit(drhd) {
503 struct intel_iommu *iommu = drhd->iommu;
504
505 /*
506 * Clear previous faults.
507 */
508 dmar_fault(-1, iommu);
509
510 /*
511 * Disable intr remapping and queued invalidation, if already
512 * enabled prior to OS handover.
513 */
514 disable_intr_remapping(iommu);
515
516 dmar_disable_qi(iommu);
517 }
518
466 /* 519 /*
467 * check for the Interrupt-remapping support 520 * check for the Interrupt-remapping support
468 */ 521 */
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 000000000000..7227efc760db
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,680 @@
1/*
2 * drivers/pci/iov.c
3 *
4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
5 *
6 * PCI Express I/O Virtualization (IOV) support.
7 * Single Root IOV 1.0
8 */
9
10#include <linux/pci.h>
11#include <linux/mutex.h>
12#include <linux/string.h>
13#include <linux/delay.h>
14#include "pci.h"
15
16#define VIRTFN_ID_LEN 16
17
18static inline u8 virtfn_bus(struct pci_dev *dev, int id)
19{
20 return dev->bus->number + ((dev->devfn + dev->sriov->offset +
21 dev->sriov->stride * id) >> 8);
22}
23
24static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
25{
26 return (dev->devfn + dev->sriov->offset +
27 dev->sriov->stride * id) & 0xff;
28}
29
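virtfn_bus() and virtfn_devfn() implement the SR-IOV routing-ID rule: VF n lives at the PF's 16-bit routing ID plus First VF Offset plus n * VF Stride, with the upper 8 bits selecting the bus and the lower 8 the devfn. A runnable model with made-up offset/stride values:

#include <stdio.h>

static unsigned int vf_rid(unsigned int pf_bus, unsigned int pf_devfn,
			   unsigned int offset, unsigned int stride, int id)
{
	return (pf_bus << 8) + pf_devfn + offset + stride * id;
}

int main(void)
{
	unsigned int rid = vf_rid(0x03, 0x00, 0x80, 0x02, 5);	/* illustrative values */
	printf("VF5 -> bus %02x, devfn %02x\n", rid >> 8, rid & 0xff);
	return 0;
}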
30static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
31{
32 int rc;
33 struct pci_bus *child;
34
35 if (bus->number == busnr)
36 return bus;
37
38 child = pci_find_bus(pci_domain_nr(bus), busnr);
39 if (child)
40 return child;
41
42 child = pci_add_new_bus(bus, NULL, busnr);
43 if (!child)
44 return NULL;
45
46 child->subordinate = busnr;
47 child->dev.parent = bus->bridge;
48 rc = pci_bus_add_child(child);
49 if (rc) {
50 pci_remove_bus(child);
51 return NULL;
52 }
53
54 return child;
55}
56
57static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
58{
59 struct pci_bus *child;
60
61 if (bus->number == busnr)
62 return;
63
64 child = pci_find_bus(pci_domain_nr(bus), busnr);
65 BUG_ON(!child);
66
67 if (list_empty(&child->devices))
68 pci_remove_bus(child);
69}
70
71static int virtfn_add(struct pci_dev *dev, int id, int reset)
72{
73 int i;
74 int rc;
75 u64 size;
76 char buf[VIRTFN_ID_LEN];
77 struct pci_dev *virtfn;
78 struct resource *res;
79 struct pci_sriov *iov = dev->sriov;
80
81 virtfn = alloc_pci_dev();
82 if (!virtfn)
83 return -ENOMEM;
84
85 mutex_lock(&iov->dev->sriov->lock);
86 virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
87 if (!virtfn->bus) {
88 kfree(virtfn);
89 mutex_unlock(&iov->dev->sriov->lock);
90 return -ENOMEM;
91 }
92 virtfn->devfn = virtfn_devfn(dev, id);
93 virtfn->vendor = dev->vendor;
94 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
95 pci_setup_device(virtfn);
96 virtfn->dev.parent = dev->dev.parent;
97
98 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
99 res = dev->resource + PCI_IOV_RESOURCES + i;
100 if (!res->parent)
101 continue;
102 virtfn->resource[i].name = pci_name(virtfn);
103 virtfn->resource[i].flags = res->flags;
104 size = resource_size(res);
105 do_div(size, iov->total);
106 virtfn->resource[i].start = res->start + size * id;
107 virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
108 rc = request_resource(res, &virtfn->resource[i]);
109 BUG_ON(rc);
110 }
111
112 if (reset)
113 pci_execute_reset_function(virtfn);
114
115 pci_device_add(virtfn, virtfn->bus);
116 mutex_unlock(&iov->dev->sriov->lock);
117
118 virtfn->physfn = pci_dev_get(dev);
119 virtfn->is_virtfn = 1;
120
121 rc = pci_bus_add_device(virtfn);
122 if (rc)
123 goto failed1;
124 sprintf(buf, "virtfn%u", id);
125 rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
126 if (rc)
127 goto failed1;
128 rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
129 if (rc)
130 goto failed2;
131
132 kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
133
134 return 0;
135
136failed2:
137 sysfs_remove_link(&dev->dev.kobj, buf);
138failed1:
139 pci_dev_put(dev);
140 mutex_lock(&iov->dev->sriov->lock);
141 pci_remove_bus_device(virtfn);
142 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
143 mutex_unlock(&iov->dev->sriov->lock);
144
145 return rc;
146}
147
148static void virtfn_remove(struct pci_dev *dev, int id, int reset)
149{
150 char buf[VIRTFN_ID_LEN];
151 struct pci_bus *bus;
152 struct pci_dev *virtfn;
153 struct pci_sriov *iov = dev->sriov;
154
155 bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
156 if (!bus)
157 return;
158
159 virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
160 if (!virtfn)
161 return;
162
163 pci_dev_put(virtfn);
164
165 if (reset) {
166 device_release_driver(&virtfn->dev);
167 pci_execute_reset_function(virtfn);
168 }
169
170 sprintf(buf, "virtfn%u", id);
171 sysfs_remove_link(&dev->dev.kobj, buf);
172 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
173
174 mutex_lock(&iov->dev->sriov->lock);
175 pci_remove_bus_device(virtfn);
176 virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
177 mutex_unlock(&iov->dev->sriov->lock);
178
179 pci_dev_put(dev);
180}
181
182static int sriov_migration(struct pci_dev *dev)
183{
184 u16 status;
185 struct pci_sriov *iov = dev->sriov;
186
187 if (!iov->nr_virtfn)
188 return 0;
189
190 if (!(iov->cap & PCI_SRIOV_CAP_VFM))
191 return 0;
192
193 pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
194 if (!(status & PCI_SRIOV_STATUS_VFM))
195 return 0;
196
197 schedule_work(&iov->mtask);
198
199 return 1;
200}
201
202static void sriov_migration_task(struct work_struct *work)
203{
204 int i;
205 u8 state;
206 u16 status;
207 struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
208
209 for (i = iov->initial; i < iov->nr_virtfn; i++) {
210 state = readb(iov->mstate + i);
211 if (state == PCI_SRIOV_VFM_MI) {
212 writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
213 state = readb(iov->mstate + i);
214 if (state == PCI_SRIOV_VFM_AV)
215 virtfn_add(iov->self, i, 1);
216 } else if (state == PCI_SRIOV_VFM_MO) {
217 virtfn_remove(iov->self, i, 1);
218 writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
219 state = readb(iov->mstate + i);
220 if (state == PCI_SRIOV_VFM_AV)
221 virtfn_add(iov->self, i, 0);
222 }
223 }
224
225 pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
226 status &= ~PCI_SRIOV_STATUS_VFM;
227 pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
228}
229
230static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
231{
232 int bir;
233 u32 table;
234 resource_size_t pa;
235 struct pci_sriov *iov = dev->sriov;
236
237 if (nr_virtfn <= iov->initial)
238 return 0;
239
240 pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
241 bir = PCI_SRIOV_VFM_BIR(table);
242 if (bir > PCI_STD_RESOURCE_END)
243 return -EIO;
244
245 table = PCI_SRIOV_VFM_OFFSET(table);
246 if (table + nr_virtfn > pci_resource_len(dev, bir))
247 return -EIO;
248
249 pa = pci_resource_start(dev, bir) + table;
250 iov->mstate = ioremap(pa, nr_virtfn);
251 if (!iov->mstate)
252 return -ENOMEM;
253
254 INIT_WORK(&iov->mtask, sriov_migration_task);
255
256 iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
257 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
258
259 return 0;
260}
261
262static void sriov_disable_migration(struct pci_dev *dev)
263{
264 struct pci_sriov *iov = dev->sriov;
265
266 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
267 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
268
269 cancel_work_sync(&iov->mtask);
270 iounmap(iov->mstate);
271}
272
273static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
274{
275 int rc;
276 int i, j;
277 int nres;
278 u16 offset, stride, initial;
279 struct resource *res;
280 struct pci_dev *pdev;
281 struct pci_sriov *iov = dev->sriov;
282
283 if (!nr_virtfn)
284 return 0;
285
286 if (iov->nr_virtfn)
287 return -EINVAL;
288
289 pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
290 if (initial > iov->total ||
291 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
292 return -EIO;
293
294 if (nr_virtfn < 0 || nr_virtfn > iov->total ||
295 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
296 return -EINVAL;
297
298 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
299 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
300 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
301 if (!offset || (nr_virtfn > 1 && !stride))
302 return -EIO;
303
304 nres = 0;
305 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
306 res = dev->resource + PCI_IOV_RESOURCES + i;
307 if (res->parent)
308 nres++;
309 }
310 if (nres != iov->nres) {
311 dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
312 return -ENOMEM;
313 }
314
315 iov->offset = offset;
316 iov->stride = stride;
317
318 if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
319 dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
320 return -ENOMEM;
321 }
322
323 if (iov->link != dev->devfn) {
324 pdev = pci_get_slot(dev->bus, iov->link);
325 if (!pdev)
326 return -ENODEV;
327
328 pci_dev_put(pdev);
329
330 if (!pdev->is_physfn)
331 return -ENODEV;
332
333 rc = sysfs_create_link(&dev->dev.kobj,
334 &pdev->dev.kobj, "dep_link");
335 if (rc)
336 return rc;
337 }
338
339 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
340 pci_block_user_cfg_access(dev);
341 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
342 msleep(100);
343 pci_unblock_user_cfg_access(dev);
344
345 iov->initial = initial;
346 if (nr_virtfn < initial)
347 initial = nr_virtfn;
348
349 for (i = 0; i < initial; i++) {
350 rc = virtfn_add(dev, i, 0);
351 if (rc)
352 goto failed;
353 }
354
355 if (iov->cap & PCI_SRIOV_CAP_VFM) {
356 rc = sriov_enable_migration(dev, nr_virtfn);
357 if (rc)
358 goto failed;
359 }
360
361 kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
362 iov->nr_virtfn = nr_virtfn;
363
364 return 0;
365
366failed:
367 for (j = 0; j < i; j++)
368 virtfn_remove(dev, j, 0);
369
370 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
371 pci_block_user_cfg_access(dev);
372 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
373 ssleep(1);
374 pci_unblock_user_cfg_access(dev);
375
376 if (iov->link != dev->devfn)
377 sysfs_remove_link(&dev->dev.kobj, "dep_link");
378
379 return rc;
380}
381

382static void sriov_disable(struct pci_dev *dev)
383{
384 int i;
385 struct pci_sriov *iov = dev->sriov;
386
387 if (!iov->nr_virtfn)
388 return;
389
390 if (iov->cap & PCI_SRIOV_CAP_VFM)
391 sriov_disable_migration(dev);
392
393 for (i = 0; i < iov->nr_virtfn; i++)
394 virtfn_remove(dev, i, 0);
395
396 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
397 pci_block_user_cfg_access(dev);
398 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
399 ssleep(1);
400 pci_unblock_user_cfg_access(dev);
401
402 if (iov->link != dev->devfn)
403 sysfs_remove_link(&dev->dev.kobj, "dep_link");
404
405 iov->nr_virtfn = 0;
406}
407
408static int sriov_init(struct pci_dev *dev, int pos)
409{
410 int i;
411 int rc;
412 int nres;
413 u32 pgsz;
414 u16 ctrl, total, offset, stride;
415 struct pci_sriov *iov;
416 struct resource *res;
417 struct pci_dev *pdev;
418
419 if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
420 dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
421 return -ENODEV;
422
423 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
424 if (ctrl & PCI_SRIOV_CTRL_VFE) {
425 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
426 ssleep(1);
427 }
428
429 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
430 if (!total)
431 return 0;
432
433 ctrl = 0;
434 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
435 if (pdev->is_physfn)
436 goto found;
437
438 pdev = NULL;
439 if (pci_ari_enabled(dev->bus))
440 ctrl |= PCI_SRIOV_CTRL_ARI;
441
442found:
443 pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
444 pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
445 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
446 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
447 if (!offset || (total > 1 && !stride))
448 return -EIO;
449
450 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
451 i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
452 pgsz &= ~((1 << i) - 1);
453 if (!pgsz)
454 return -EIO;
455
456 pgsz &= ~(pgsz - 1);
457 pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
458
459 nres = 0;
460 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
461 res = dev->resource + PCI_IOV_RESOURCES + i;
462 i += __pci_read_base(dev, pci_bar_unknown, res,
463 pos + PCI_SRIOV_BAR + i * 4);
464 if (!res->flags)
465 continue;
466 if (resource_size(res) & (PAGE_SIZE - 1)) {
467 rc = -EIO;
468 goto failed;
469 }
470 res->end = res->start + resource_size(res) * total - 1;
471 nres++;
472 }
473
474 iov = kzalloc(sizeof(*iov), GFP_KERNEL);
475 if (!iov) {
476 rc = -ENOMEM;
477 goto failed;
478 }
479
480 iov->pos = pos;
481 iov->nres = nres;
482 iov->ctrl = ctrl;
483 iov->total = total;
484 iov->offset = offset;
485 iov->stride = stride;
486 iov->pgsz = pgsz;
487 iov->self = dev;
488 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
489 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
490
491 if (pdev)
492 iov->dev = pci_dev_get(pdev);
493 else {
494 iov->dev = dev;
495 mutex_init(&iov->lock);
496 }
497
498 dev->sriov = iov;
499 dev->is_physfn = 1;
500
501 return 0;
502
503failed:
504 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
505 res = dev->resource + PCI_IOV_RESOURCES + i;
506 res->flags = 0;
507 }
508
509 return rc;
510}
511
512static void sriov_release(struct pci_dev *dev)
513{
514 BUG_ON(dev->sriov->nr_virtfn);
515
516 if (dev == dev->sriov->dev)
517 mutex_destroy(&dev->sriov->lock);
518 else
519 pci_dev_put(dev->sriov->dev);
520
521 kfree(dev->sriov);
522 dev->sriov = NULL;
523}
524
525static void sriov_restore_state(struct pci_dev *dev)
526{
527 int i;
528 u16 ctrl;
529 struct pci_sriov *iov = dev->sriov;
530
531 pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
532 if (ctrl & PCI_SRIOV_CTRL_VFE)
533 return;
534
535 for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
536 pci_update_resource(dev, i);
537
538 pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
539 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
540 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
541 if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
542 msleep(100);
543}
544
545/**
546 * pci_iov_init - initialize the IOV capability
547 * @dev: the PCI device
548 *
549 * Returns 0 on success, or negative on failure.
550 */
551int pci_iov_init(struct pci_dev *dev)
552{
553 int pos;
554
555 if (!dev->is_pcie)
556 return -ENODEV;
557
558 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
559 if (pos)
560 return sriov_init(dev, pos);
561
562 return -ENODEV;
563}
564
565/**
566 * pci_iov_release - release resources used by the IOV capability
567 * @dev: the PCI device
568 */
569void pci_iov_release(struct pci_dev *dev)
570{
571 if (dev->is_physfn)
572 sriov_release(dev);
573}
574
575/**
576 * pci_iov_resource_bar - get position of the SR-IOV BAR
577 * @dev: the PCI device
578 * @resno: the resource number
579 * @type: the BAR type to be filled in
580 *
581 * Returns position of the BAR encapsulated in the SR-IOV capability.
582 */
583int pci_iov_resource_bar(struct pci_dev *dev, int resno,
584 enum pci_bar_type *type)
585{
586 if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
587 return 0;
588
589 BUG_ON(!dev->is_physfn);
590
591 *type = pci_bar_unknown;
592
593 return dev->sriov->pos + PCI_SRIOV_BAR +
594 4 * (resno - PCI_IOV_RESOURCES);
595}
596
597/**
598 * pci_restore_iov_state - restore the state of the IOV capability
599 * @dev: the PCI device
600 */
601void pci_restore_iov_state(struct pci_dev *dev)
602{
603 if (dev->is_physfn)
604 sriov_restore_state(dev);
605}
606
607/**
608 * pci_iov_bus_range - find bus range used by Virtual Function
609 * @bus: the PCI bus
610 *
611 * Returns the max number of buses (excluding the current one) used by Virtual
612 * Functions.
613 */
614int pci_iov_bus_range(struct pci_bus *bus)
615{
616 int max = 0;
617 u8 busnr;
618 struct pci_dev *dev;
619
620 list_for_each_entry(dev, &bus->devices, bus_list) {
621 if (!dev->is_physfn)
622 continue;
623 busnr = virtfn_bus(dev, dev->sriov->total - 1);
624 if (busnr > max)
625 max = busnr;
626 }
627
628 return max ? max - bus->number : 0;
629}
630
631/**
632 * pci_enable_sriov - enable the SR-IOV capability
633 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
634 *
635 * Returns 0 on success, or negative on failure.
636 */
637int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
638{
639 might_sleep();
640
641 if (!dev->is_physfn)
642 return -ENODEV;
643
644 return sriov_enable(dev, nr_virtfn);
645}
646EXPORT_SYMBOL_GPL(pci_enable_sriov);
647
648/**
649 * pci_disable_sriov - disable the SR-IOV capability
650 * @dev: the PCI device
651 */
652void pci_disable_sriov(struct pci_dev *dev)
653{
654 might_sleep();
655
656 if (!dev->is_physfn)
657 return;
658
659 sriov_disable(dev);
660}
661EXPORT_SYMBOL_GPL(pci_disable_sriov);
662
663/**
664 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
665 * @dev: the PCI device
666 *
667 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
668 *
669 * The Physical Function driver is responsible for registering an IRQ
670 * handler using the VF Migration Interrupt Message Number, and for calling
671 * this function when the hardware generates the interrupt.
672 */
673irqreturn_t pci_sriov_migration(struct pci_dev *dev)
674{
675 if (!dev->is_physfn)
676 return IRQ_NONE;
677
678 return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
679}
680EXPORT_SYMBOL_GPL(pci_sriov_migration);
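The exported surface of iov.c is deliberately small: a PF driver enables VFs once its device is configured, disables them before teardown, and, if the device supports VF migration, forwards its VFM interrupt to pci_sriov_migration(). A hedged usage sketch (the driver functions and the VF count are assumptions, not from this file):

static int my_pf_driver_enable(struct pci_dev *dev)
{
	int rc = pci_enable_sriov(dev, 8);	/* ask for 8 VFs; may sleep */
	if (rc)
		dev_err(&dev->dev, "SR-IOV enable failed: %d\n", rc);
	return rc;
}

static irqreturn_t my_pf_vfm_handler(int irq, void *data)
{
	return pci_sriov_migration(data);	/* data is the PF's struct pci_dev */
}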
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index baba2eb5367d..6f2e6295e773 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -27,48 +27,53 @@ static int pci_msi_enable = 1;
27 27
28/* Arch hooks */ 28/* Arch hooks */
29 29
30int __attribute__ ((weak)) 30#ifndef arch_msi_check_device
31arch_msi_check_device(struct pci_dev *dev, int nvec, int type) 31int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
32{ 32{
33 return 0; 33 return 0;
34} 34}
35#endif
35 36
36int __attribute__ ((weak)) 37#ifndef arch_setup_msi_irqs
37arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) 38int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
38{
39 return 0;
40}
41
42int __attribute__ ((weak))
43arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
44{ 39{
45 struct msi_desc *entry; 40 struct msi_desc *entry;
46 int ret; 41 int ret;
47 42
43 /*
44 * If an architecture wants to support multiple MSI, it needs to
45 * override arch_setup_msi_irqs()
46 */
47 if (type == PCI_CAP_ID_MSI && nvec > 1)
48 return 1;
49
48 list_for_each_entry(entry, &dev->msi_list, list) { 50 list_for_each_entry(entry, &dev->msi_list, list) {
49 ret = arch_setup_msi_irq(dev, entry); 51 ret = arch_setup_msi_irq(dev, entry);
50 if (ret) 52 if (ret < 0)
51 return ret; 53 return ret;
54 if (ret > 0)
55 return -ENOSPC;
52 } 56 }
53 57
54 return 0; 58 return 0;
55} 59}
60#endif
56 61
57void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) 62#ifndef arch_teardown_msi_irqs
58{ 63void arch_teardown_msi_irqs(struct pci_dev *dev)
59 return;
60}
61
62void __attribute__ ((weak))
63arch_teardown_msi_irqs(struct pci_dev *dev)
64{ 64{
65 struct msi_desc *entry; 65 struct msi_desc *entry;
66 66
67 list_for_each_entry(entry, &dev->msi_list, list) { 67 list_for_each_entry(entry, &dev->msi_list, list) {
68 if (entry->irq != 0) 68 int i, nvec;
69 arch_teardown_msi_irq(entry->irq); 69 if (entry->irq == 0)
70 continue;
71 nvec = 1 << entry->msi_attrib.multiple;
72 for (i = 0; i < nvec; i++)
73 arch_teardown_msi_irq(entry->irq + i);
70 } 74 }
71} 75}
76#endif
72 77
73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) 78static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 79{
@@ -111,27 +116,14 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
111 return (1 << (1 << x)) - 1; 116 return (1 << (1 << x)) - 1;
112} 117}
113 118
114static void msix_flush_writes(struct irq_desc *desc) 119static inline __attribute_const__ u32 msi_capable_mask(u16 control)
115{ 120{
116 struct msi_desc *entry; 121 return msi_mask((control >> 1) & 7);
122}
117 123
118 entry = get_irq_desc_msi(desc); 124static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
119 BUG_ON(!entry || !entry->dev); 125{
120 switch (entry->msi_attrib.type) { 126 return msi_mask((control >> 4) & 7);
121 case PCI_CAP_ID_MSI:
122 /* nothing to do */
123 break;
124 case PCI_CAP_ID_MSIX:
125 {
126 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
127 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
128 readl(entry->mask_base + offset);
129 break;
130 }
131 default:
132 BUG();
133 break;
134 }
135} 127}
136 128
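msi_mask() and the two new helpers decode the log2-encoded vector counts in the MSI Message Control register: bits 3:1 hold the multiple-message-capable count and bits 6:4 the enabled count, each as log2(nvec), so a field value x means 1 << x vectors and a per-vector mask of (1 << (1 << x)) - 1. A runnable check of the arithmetic:

#include <stdio.h>

static unsigned int msi_mask(unsigned int x)
{
	return (1u << (1u << x)) - 1;
}

int main(void)
{
	/* x == 5 (32 vectors) would overflow a 32-bit shift here */
	for (unsigned int x = 0; x <= 4; x++)
		printf("field %u -> %2u vectors, mask 0x%08x\n",
		       x, 1u << x, msi_mask(x));
	return 0;
}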
137/* 129/*
@@ -143,49 +135,71 @@ static void msix_flush_writes(struct irq_desc *desc)
143 * Returns 1 if it succeeded in masking the interrupt and 0 if the device 135 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
144 * doesn't support MSI masking. 136 * doesn't support MSI masking.
145 */ 137 */
146static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) 138static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
147{ 139{
148 struct msi_desc *entry; 140 u32 mask_bits = desc->masked;
149 141
150 entry = get_irq_desc_msi(desc); 142 if (!desc->msi_attrib.maskbit)
151 BUG_ON(!entry || !entry->dev); 143 return;
152 switch (entry->msi_attrib.type) { 144
153 case PCI_CAP_ID_MSI: 145 mask_bits &= ~mask;
154 if (entry->msi_attrib.maskbit) { 146 mask_bits |= flag;
155 int pos; 147 pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
156 u32 mask_bits; 148 desc->masked = mask_bits;
157 149}
158 pos = (long)entry->mask_base; 150
159 pci_read_config_dword(entry->dev, pos, &mask_bits); 151/*
160 mask_bits &= ~(mask); 152 * This internal function does not flush PCI writes to the device.
161 mask_bits |= flag & mask; 153 * All users must ensure that they read from the device before either
162 pci_write_config_dword(entry->dev, pos, mask_bits); 154 * assuming that the device state is up to date, or returning out of this
163 } else { 155 * file. This saves a few milliseconds when initialising devices with lots
164 return 0; 156 * of MSI-X interrupts.
165 } 157 */
166 break; 158static void msix_mask_irq(struct msi_desc *desc, u32 flag)
167 case PCI_CAP_ID_MSIX: 159{
168 { 160 u32 mask_bits = desc->masked;
169 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 161 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
170 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; 162 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
171 writel(flag, entry->mask_base + offset); 163 mask_bits &= ~1;
172 readl(entry->mask_base + offset); 164 mask_bits |= flag;
173 break; 165 writel(mask_bits, desc->mask_base + offset);
174 } 166 desc->masked = mask_bits;
175 default: 167}
176 BUG(); 168
177 break; 169static void msi_set_mask_bit(unsigned irq, u32 flag)
170{
171 struct msi_desc *desc = get_irq_msi(irq);
172
173 if (desc->msi_attrib.is_msix) {
174 msix_mask_irq(desc, flag);
175 readl(desc->mask_base); /* Flush write to device */
176 } else {
177 unsigned offset = irq - desc->dev->irq;
178 msi_mask_irq(desc, 1 << offset, flag << offset);
178 } 179 }
179 entry->msi_attrib.masked = !!flag; 180}
180 return 1; 181
182void mask_msi_irq(unsigned int irq)
183{
184 msi_set_mask_bit(irq, 1);
185}
186
187void unmask_msi_irq(unsigned int irq)
188{
189 msi_set_mask_bit(irq, 0);
181} 190}
182 191
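With multi-message MSI a device owns a block of consecutive IRQ numbers, so msi_set_mask_bit() recovers the vector being masked as irq - desc->dev->irq and shifts both the mask and the flag to that bit; MSI-X instead keeps one mask bit per table entry. A comment-level sketch of the selection (mirrors the code above; flag is 0 or 1):

unsigned int offset = irq - base_irq;			/* which vector in the block */
msi_mask_irq(desc, 1u << offset, flag << offset);	/* touch only that vector's bit */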
183void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 192void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
184{ 193{
185 struct msi_desc *entry = get_irq_desc_msi(desc); 194 struct msi_desc *entry = get_irq_desc_msi(desc);
186 switch(entry->msi_attrib.type) { 195 if (entry->msi_attrib.is_msix) {
187 case PCI_CAP_ID_MSI: 196 void __iomem *base = entry->mask_base +
188 { 197 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
198
199 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
200 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
201 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
202 } else {
189 struct pci_dev *dev = entry->dev; 203 struct pci_dev *dev = entry->dev;
190 int pos = entry->msi_attrib.pos; 204 int pos = entry->msi_attrib.pos;
191 u16 data; 205 u16 data;
@@ -201,21 +215,6 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
201 pci_read_config_word(dev, msi_data_reg(pos, 0), &data); 215 pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
202 } 216 }
203 msg->data = data; 217 msg->data = data;
204 break;
205 }
206 case PCI_CAP_ID_MSIX:
207 {
208 void __iomem *base;
209 base = entry->mask_base +
210 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
211
212 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
213 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
214 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
215 break;
216 }
217 default:
218 BUG();
219 } 218 }
220} 219}
221 220
@@ -229,11 +228,25 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
229void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 228void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
230{ 229{
231 struct msi_desc *entry = get_irq_desc_msi(desc); 230 struct msi_desc *entry = get_irq_desc_msi(desc);
232 switch (entry->msi_attrib.type) { 231 if (entry->msi_attrib.is_msix) {
233 case PCI_CAP_ID_MSI: 232 void __iomem *base;
234 { 233 base = entry->mask_base +
234 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
235
236 writel(msg->address_lo,
237 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
238 writel(msg->address_hi,
239 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
240 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
241 } else {
235 struct pci_dev *dev = entry->dev; 242 struct pci_dev *dev = entry->dev;
236 int pos = entry->msi_attrib.pos; 243 int pos = entry->msi_attrib.pos;
244 u16 msgctl;
245
246 pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
247 msgctl &= ~PCI_MSI_FLAGS_QSIZE;
248 msgctl |= entry->msi_attrib.multiple << 4;
249 pci_write_config_word(dev, msi_control_reg(pos), msgctl);
237 250
238 pci_write_config_dword(dev, msi_lower_address_reg(pos), 251 pci_write_config_dword(dev, msi_lower_address_reg(pos),
239 msg->address_lo); 252 msg->address_lo);
@@ -246,23 +259,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
246 pci_write_config_word(dev, msi_data_reg(pos, 0), 259 pci_write_config_word(dev, msi_data_reg(pos, 0),
247 msg->data); 260 msg->data);
248 } 261 }
249 break;
250 }
251 case PCI_CAP_ID_MSIX:
252 {
253 void __iomem *base;
254 base = entry->mask_base +
255 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
256
257 writel(msg->address_lo,
258 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
259 writel(msg->address_hi,
260 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
261 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
262 break;
263 }
264 default:
265 BUG();
266 } 262 }
267 entry->msg = *msg; 263 entry->msg = *msg;
268} 264}
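The msgctl update in the MSI branch is what enables multiple-message MSI: msi_attrib.multiple stores log2 of the allocated vector count (msi_free_irqs() below recovers it as nvec = 1 << multiple), and shifting it left by 4 lands it in the Multiple Message Enable field, PCI_MSI_FLAGS_QSIZE (bits 6:4). A worked example for a four-vector allocation:

u16 msgctl;
/* granted nvec = 4 vectors, so multiple = log2(4) = 2 */
pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;		/* clear bits 6:4 (0x70) */
msgctl |= 2 << 4;			/* enable 2^2 = 4 messages */
pci_write_config_word(dev, msi_control_reg(pos), msgctl);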
@@ -274,37 +270,18 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
274 write_msi_msg_desc(desc, msg); 270 write_msi_msg_desc(desc, msg);
275} 271}
276 272
277void mask_msi_irq(unsigned int irq)
278{
279 struct irq_desc *desc = irq_to_desc(irq);
280
281 msi_set_mask_bits(desc, 1, 1);
282 msix_flush_writes(desc);
283}
284
285void unmask_msi_irq(unsigned int irq)
286{
287 struct irq_desc *desc = irq_to_desc(irq);
288
289 msi_set_mask_bits(desc, 1, 0);
290 msix_flush_writes(desc);
291}
292
293static int msi_free_irqs(struct pci_dev* dev); 273static int msi_free_irqs(struct pci_dev* dev);
294 274
295static struct msi_desc* alloc_msi_entry(void) 275static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
296{ 276{
297 struct msi_desc *entry; 277 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
298 278 if (!desc)
299 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
300 if (!entry)
301 return NULL; 279 return NULL;
302 280
303 INIT_LIST_HEAD(&entry->list); 281 INIT_LIST_HEAD(&desc->list);
304 entry->irq = 0; 282 desc->dev = dev;
305 entry->dev = NULL;
306 283
307 return entry; 284 return desc;
308} 285}
309 286
310static void pci_intx_for_msi(struct pci_dev *dev, int enable) 287static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -328,15 +305,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
328 pci_intx_for_msi(dev, 0); 305 pci_intx_for_msi(dev, 0);
329 msi_set_enable(dev, 0); 306 msi_set_enable(dev, 0);
330 write_msi_msg(dev->irq, &entry->msg); 307 write_msi_msg(dev->irq, &entry->msg);
331 if (entry->msi_attrib.maskbit) {
332 struct irq_desc *desc = irq_to_desc(dev->irq);
333 msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
334 entry->msi_attrib.masked);
335 }
336 308
337 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 309 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
310 msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
338 control &= ~PCI_MSI_FLAGS_QSIZE; 311 control &= ~PCI_MSI_FLAGS_QSIZE;
339 control |= PCI_MSI_FLAGS_ENABLE; 312 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
340 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 313 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
341} 314}
342 315
@@ -354,9 +327,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
354 msix_set_enable(dev, 0); 327 msix_set_enable(dev, 0);
355 328
356 list_for_each_entry(entry, &dev->msi_list, list) { 329 list_for_each_entry(entry, &dev->msi_list, list) {
357 struct irq_desc *desc = irq_to_desc(entry->irq);
358 write_msi_msg(entry->irq, &entry->msg); 330 write_msi_msg(entry->irq, &entry->msg);
359 msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); 331 msix_mask_irq(entry, entry->masked);
360 } 332 }
361 333
362 BUG_ON(list_empty(&dev->msi_list)); 334 BUG_ON(list_empty(&dev->msi_list));
@@ -378,52 +350,48 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
378/** 350/**
379 * msi_capability_init - configure device's MSI capability structure 351 * msi_capability_init - configure device's MSI capability structure
380 * @dev: pointer to the pci_dev data structure of MSI device function 352 * @dev: pointer to the pci_dev data structure of MSI device function
353 * @nvec: number of interrupts to allocate
381 * 354 *
382 * Setup the MSI capability structure of device function with a single 355 * Setup the MSI capability structure of the device with the requested
383 * MSI irq, regardless of device function is capable of handling 356 * number of interrupts. A return value of zero indicates the successful
384 * multiple messages. A return of zero indicates the successful setup 357 * setup of an entry with the new MSI irq. A negative return value indicates
385 * of an entry zero with the new MSI irq or non-zero for otherwise. 358 * an error, and a positive return value indicates the number of interrupts
386 **/ 359 * which could have been allocated.
387static int msi_capability_init(struct pci_dev *dev) 360 */
361static int msi_capability_init(struct pci_dev *dev, int nvec)
388{ 362{
389 struct msi_desc *entry; 363 struct msi_desc *entry;
390 int pos, ret; 364 int pos, ret;
391 u16 control; 365 u16 control;
366 unsigned mask;
392 367
393 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ 368 msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
394 369
395 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 370 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
396 pci_read_config_word(dev, msi_control_reg(pos), &control); 371 pci_read_config_word(dev, msi_control_reg(pos), &control);
397 /* MSI Entry Initialization */ 372 /* MSI Entry Initialization */
398 entry = alloc_msi_entry(); 373 entry = alloc_msi_entry(dev);
399 if (!entry) 374 if (!entry)
400 return -ENOMEM; 375 return -ENOMEM;
401 376
402 entry->msi_attrib.type = PCI_CAP_ID_MSI; 377 entry->msi_attrib.is_msix = 0;
403 entry->msi_attrib.is_64 = is_64bit_address(control); 378 entry->msi_attrib.is_64 = is_64bit_address(control);
404 entry->msi_attrib.entry_nr = 0; 379 entry->msi_attrib.entry_nr = 0;
405 entry->msi_attrib.maskbit = is_mask_bit_support(control); 380 entry->msi_attrib.maskbit = is_mask_bit_support(control);
406 entry->msi_attrib.masked = 1;
407 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 381 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
408 entry->msi_attrib.pos = pos; 382 entry->msi_attrib.pos = pos;
409 entry->dev = dev; 383
410 if (entry->msi_attrib.maskbit) { 384 entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
411 unsigned int base, maskbits, temp; 385 /* All MSIs are unmasked by default, Mask them all */
412 386 if (entry->msi_attrib.maskbit)
413 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); 387 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
414 entry->mask_base = (void __iomem *)(long)base; 388 mask = msi_capable_mask(control);
415 389 msi_mask_irq(entry, mask, mask);
416 /* All MSIs are unmasked by default, Mask them all */ 390
417 pci_read_config_dword(dev, base, &maskbits);
418 temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
419 maskbits |= temp;
420 pci_write_config_dword(dev, base, maskbits);
421 entry->msi_attrib.maskbits_mask = temp;
422 }
423 list_add_tail(&entry->list, &dev->msi_list); 391 list_add_tail(&entry->list, &dev->msi_list);
424 392
425 /* Configure MSI capability structure */ 393 /* Configure MSI capability structure */
426 ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); 394 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
427 if (ret) { 395 if (ret) {
428 msi_free_irqs(dev); 396 msi_free_irqs(dev);
429 return ret; 397 return ret;
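msi_capable_mask() itself is outside this hunk; from its callers it must return one mask bit per vector the device can send (1, 2, 4 ... 32, per the Multiple Message Capable field, consistent with the maxvec computation in pci_enable_msi_block() below). Presumably it is something along these lines — a sketch, not necessarily the exact helper:

static u32 msi_mask(unsigned x)			/* sketch */
{
	if (x >= 5)				/* don't shift by >= 32 */
		return 0xffffffff;
	return (1 << (1 << x)) - 1;		/* 2^x vectors -> 2^x mask bits */
}
#define msi_capable_mask(control) msi_mask(((control) & PCI_MSI_FLAGS_QMASK) >> 1)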
@@ -476,26 +444,28 @@ static int msix_capability_init(struct pci_dev *dev,
476 444
477 /* MSI-X Table Initialization */ 445 /* MSI-X Table Initialization */
478 for (i = 0; i < nvec; i++) { 446 for (i = 0; i < nvec; i++) {
479 entry = alloc_msi_entry(); 447 entry = alloc_msi_entry(dev);
480 if (!entry) 448 if (!entry)
481 break; 449 break;
482 450
483 j = entries[i].entry; 451 j = entries[i].entry;
484 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 452 entry->msi_attrib.is_msix = 1;
485 entry->msi_attrib.is_64 = 1; 453 entry->msi_attrib.is_64 = 1;
486 entry->msi_attrib.entry_nr = j; 454 entry->msi_attrib.entry_nr = j;
487 entry->msi_attrib.maskbit = 1;
488 entry->msi_attrib.masked = 1;
489 entry->msi_attrib.default_irq = dev->irq; 455 entry->msi_attrib.default_irq = dev->irq;
490 entry->msi_attrib.pos = pos; 456 entry->msi_attrib.pos = pos;
491 entry->dev = dev;
492 entry->mask_base = base; 457 entry->mask_base = base;
458 entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
459 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
460 msix_mask_irq(entry, 1);
493 461
494 list_add_tail(&entry->list, &dev->msi_list); 462 list_add_tail(&entry->list, &dev->msi_list);
495 } 463 }
496 464
497 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 465 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
498 if (ret) { 466 if (ret < 0) {
467 /* If we had some success report the number of irqs
468 * we succeeded in setting up. */
499 int avail = 0; 469 int avail = 0;
500 list_for_each_entry(entry, &dev->msi_list, list) { 470 list_for_each_entry(entry, &dev->msi_list, list) {
501 if (entry->irq != 0) { 471 if (entry->irq != 0) {
@@ -503,14 +473,13 @@ static int msix_capability_init(struct pci_dev *dev,
503 } 473 }
504 } 474 }
505 475
506 msi_free_irqs(dev); 476 if (avail != 0)
477 ret = avail;
478 }
507 479
508 /* If we had some success report the number of irqs 480 if (ret) {
509 * we succeeded in setting up. 481 msi_free_irqs(dev);
510 */ 482 return ret;
511 if (avail == 0)
512 avail = ret;
513 return avail;
514 } 483 }
515 484
516 i = 0; 485 i = 0;
@@ -575,39 +544,54 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
575} 544}
576 545
577/** 546/**
578 * pci_enable_msi - configure device's MSI capability structure 547 * pci_enable_msi_block - configure device's MSI capability structure
579 * @dev: pointer to the pci_dev data structure of MSI device function 548 * @dev: device to configure
549 * @nvec: number of interrupts to configure
580 * 550 *
581 * Setup the MSI capability structure of device function with 551 * Allocate IRQs for a device with the MSI capability.
582 * a single MSI irq upon its software driver call to request for 552 * This function returns a negative errno if an error occurs. If it
583 * MSI mode enabled on its hardware device function. A return of zero 553 * is unable to allocate the number of interrupts requested, it returns
584 * indicates the successful setup of an entry zero with the new MSI 554 * the number of interrupts it might be able to allocate. If it successfully
585 * irq or non-zero for otherwise. 555 * allocates at least the number of interrupts requested, it returns 0 and
586 **/ 556 * updates the @dev's irq member to the lowest new interrupt number; the
587int pci_enable_msi(struct pci_dev* dev) 557 * other interrupt numbers allocated to this device are consecutive.
558 */
559int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
588{ 560{
589 int status; 561 int status, pos, maxvec;
562 u16 msgctl;
590 563
591 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); 564 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
565 if (!pos)
566 return -EINVAL;
567 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
568 maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
569 if (nvec > maxvec)
570 return maxvec;
571
572 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
592 if (status) 573 if (status)
593 return status; 574 return status;
594 575
595 WARN_ON(!!dev->msi_enabled); 576 WARN_ON(!!dev->msi_enabled);
596 577
597 /* Check whether driver already requested for MSI-X irqs */ 578 /* Check whether driver already requested MSI-X irqs */
598 if (dev->msix_enabled) { 579 if (dev->msix_enabled) {
599 dev_info(&dev->dev, "can't enable MSI " 580 dev_info(&dev->dev, "can't enable MSI "
600 "(MSI-X already enabled)\n"); 581 "(MSI-X already enabled)\n");
601 return -EINVAL; 582 return -EINVAL;
602 } 583 }
603 status = msi_capability_init(dev); 584
585 status = msi_capability_init(dev, nvec);
604 return status; 586 return status;
605} 587}
606EXPORT_SYMBOL(pci_enable_msi); 588EXPORT_SYMBOL(pci_enable_msi_block);
607 589
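Given the contract documented above — 0 on success, a positive value suggesting a smaller allocation, a negative errno on hard failure — callers are expected to retry with the returned count. A usage sketch (the function name and the initial request of 8 are illustrative):

static int sketch_enable_irqs(struct pci_dev *pdev)	/* illustrative */
{
	int rc, nvec = 8;			/* example request */

	while ((rc = pci_enable_msi_block(pdev, nvec)) > 0)
		nvec = rc;		/* retry with what can be granted */
	if (rc < 0)
		return rc;		/* give up, or fall back to INTx */
	/* success: pdev->irq .. pdev->irq + nvec - 1 are consecutive */
	return nvec;
}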
608void pci_msi_shutdown(struct pci_dev* dev) 590void pci_msi_shutdown(struct pci_dev *dev)
609{ 591{
610 struct msi_desc *entry; 592 struct msi_desc *desc;
593 u32 mask;
594 u16 ctrl;
611 595
612 if (!pci_msi_enable || !dev || !dev->msi_enabled) 596 if (!pci_msi_enable || !dev || !dev->msi_enabled)
613 return; 597 return;
@@ -617,19 +601,15 @@ void pci_msi_shutdown(struct pci_dev* dev)
617 dev->msi_enabled = 0; 601 dev->msi_enabled = 0;
618 602
619 BUG_ON(list_empty(&dev->msi_list)); 603 BUG_ON(list_empty(&dev->msi_list));
620 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 604 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
621 /* Return the the pci reset with msi irqs unmasked */ 605 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
622 if (entry->msi_attrib.maskbit) { 606 mask = msi_capable_mask(ctrl);
623 u32 mask = entry->msi_attrib.maskbits_mask; 607 msi_mask_irq(desc, mask, ~mask);
624 struct irq_desc *desc = irq_to_desc(dev->irq);
625 msi_set_mask_bits(desc, mask, ~mask);
626 }
627 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
628 return;
629 608
630 /* Restore dev->irq to its default pin-assertion irq */ 609 /* Restore dev->irq to its default pin-assertion irq */
631 dev->irq = entry->msi_attrib.default_irq; 610 dev->irq = desc->msi_attrib.default_irq;
632} 611}
612
633void pci_disable_msi(struct pci_dev* dev) 613void pci_disable_msi(struct pci_dev* dev)
634{ 614{
635 struct msi_desc *entry; 615 struct msi_desc *entry;
@@ -640,7 +620,7 @@ void pci_disable_msi(struct pci_dev* dev)
640 pci_msi_shutdown(dev); 620 pci_msi_shutdown(dev);
641 621
642 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 622 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
643 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) 623 if (entry->msi_attrib.is_msix)
644 return; 624 return;
645 625
646 msi_free_irqs(dev); 626 msi_free_irqs(dev);
@@ -652,14 +632,18 @@ static int msi_free_irqs(struct pci_dev* dev)
652 struct msi_desc *entry, *tmp; 632 struct msi_desc *entry, *tmp;
653 633
654 list_for_each_entry(entry, &dev->msi_list, list) { 634 list_for_each_entry(entry, &dev->msi_list, list) {
655 if (entry->irq) 635 int i, nvec;
656 BUG_ON(irq_has_action(entry->irq)); 636 if (!entry->irq)
637 continue;
638 nvec = 1 << entry->msi_attrib.multiple;
639 for (i = 0; i < nvec; i++)
640 BUG_ON(irq_has_action(entry->irq + i));
657 } 641 }
658 642
659 arch_teardown_msi_irqs(dev); 643 arch_teardown_msi_irqs(dev);
660 644
661 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 645 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
662 if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { 646 if (entry->msi_attrib.is_msix) {
663 writel(1, entry->mask_base + entry->msi_attrib.entry_nr 647 writel(1, entry->mask_base + entry->msi_attrib.entry_nr
664 * PCI_MSIX_ENTRY_SIZE 648 * PCI_MSIX_ENTRY_SIZE
665 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); 649 + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
@@ -675,6 +659,23 @@ static int msi_free_irqs(struct pci_dev* dev)
675} 659}
676 660
677/** 661/**
662 * pci_msix_table_size - return the number of device's MSI-X table entries
663 * @dev: pointer to the pci_dev data structure of MSI-X device function
664 */
665int pci_msix_table_size(struct pci_dev *dev)
666{
667 int pos;
668 u16 control;
669
670 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
671 if (!pos)
672 return 0;
673
674 pci_read_config_word(dev, msi_control_reg(pos), &control);
675 return multi_msix_capable(control);
676}
677
678/**
678 * pci_enable_msix - configure device's MSI-X capability structure 679 * pci_enable_msix - configure device's MSI-X capability structure
679 * @dev: pointer to the pci_dev data structure of MSI-X device function 680 * @dev: pointer to the pci_dev data structure of MSI-X device function
680 * @entries: pointer to an array of MSI-X entries 681 * @entries: pointer to an array of MSI-X entries
@@ -691,9 +692,8 @@ static int msi_free_irqs(struct pci_dev* dev)
691 **/ 692 **/
692int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 693int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
693{ 694{
694 int status, pos, nr_entries; 695 int status, nr_entries;
695 int i, j; 696 int i, j;
696 u16 control;
697 697
698 if (!entries) 698 if (!entries)
699 return -EINVAL; 699 return -EINVAL;
@@ -702,9 +702,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
702 if (status) 702 if (status)
703 return status; 703 return status;
704 704
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 705 nr_entries = pci_msix_table_size(dev);
706 pci_read_config_word(dev, msi_control_reg(pos), &control);
707 nr_entries = multi_msix_capable(control);
708 if (nvec > nr_entries) 706 if (nvec > nr_entries)
709 return -EINVAL; 707 return -EINVAL;
710 708
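pci_enable_msix() keeps the matching convention (a positive return reports how many vectors could have been set up), and the new pci_msix_table_size() lets a driver clamp its request before asking. A combined sketch (the cap of 8 entries is illustrative):

static int sketch_enable_msix(struct pci_dev *pdev)	/* illustrative */
{
	struct msix_entry entries[8];
	int i, rc, nvec = pci_msix_table_size(pdev);

	if (nvec == 0)
		return -ENODEV;		/* no MSI-X capability */
	if (nvec > 8)
		nvec = 8;
	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	rc = pci_enable_msix(pdev, entries, nvec);
	if (rc > 0)			/* fewer vectors available: retry */
		rc = pci_enable_msix(pdev, entries, rc);
	return rc;	/* 0: entries[i].vector now holds each irq */
}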
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 3898f5237144..71f4df2ef654 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -20,14 +20,8 @@
20#define msi_mask_bits_reg(base, is64bit) \ 20#define msi_mask_bits_reg(base, is64bit) \
21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) 21 ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE 22#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
23#define multi_msi_capable(control) \
24 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
25#define multi_msi_enable(control, num) \
26 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
27#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 23#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
28#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 24#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
29#define msi_enable(control, num) multi_msi_enable(control, num); \
30 control |= PCI_MSI_FLAGS_ENABLE
31 25
32#define msix_table_offset_reg(base) (base + 0x04) 26#define msix_table_offset_reg(base) (base + 0x04)
33#define msix_pba_offset_reg(base) (base + 0x08) 27#define msix_pba_offset_reg(base) (base + 0x08)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 368ca72dffbc..ea15b0537457 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,221 +18,6 @@
18#include <linux/pci-acpi.h> 18#include <linux/pci-acpi.h>
19#include "pci.h" 19#include "pci.h"
20 20
21struct acpi_osc_data {
22 acpi_handle handle;
23 u32 support_set;
24 u32 control_set;
25 u32 control_query;
26 int is_queried;
27 struct list_head sibiling;
28};
29static LIST_HEAD(acpi_osc_data_list);
30
31struct acpi_osc_args {
32 u32 capbuf[3];
33};
34
35static DEFINE_MUTEX(pci_acpi_lock);
36
37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
38{
39 struct acpi_osc_data *data;
40
41 list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
42 if (data->handle == handle)
43 return data;
44 }
45 data = kzalloc(sizeof(*data), GFP_KERNEL);
46 if (!data)
47 return NULL;
48 INIT_LIST_HEAD(&data->sibiling);
49 data->handle = handle;
50 list_add_tail(&data->sibiling, &acpi_osc_data_list);
51 return data;
52}
53
54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
56
57static acpi_status acpi_run_osc(acpi_handle handle,
58 struct acpi_osc_args *osc_args, u32 *retval)
59{
60 acpi_status status;
61 struct acpi_object_list input;
62 union acpi_object in_params[4];
63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
64 union acpi_object *out_obj;
65 u32 errors, flags = osc_args->capbuf[OSC_QUERY_TYPE];
66
67 /* Setting up input parameters */
68 input.count = 4;
69 input.pointer = in_params;
70 in_params[0].type = ACPI_TYPE_BUFFER;
71 in_params[0].buffer.length = 16;
72 in_params[0].buffer.pointer = OSC_UUID;
73 in_params[1].type = ACPI_TYPE_INTEGER;
74 in_params[1].integer.value = 1;
75 in_params[2].type = ACPI_TYPE_INTEGER;
76 in_params[2].integer.value = 3;
77 in_params[3].type = ACPI_TYPE_BUFFER;
78 in_params[3].buffer.length = 12;
79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
80
81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
82 if (ACPI_FAILURE(status))
83 return status;
84
85 if (!output.length)
86 return AE_NULL_OBJECT;
87
88 out_obj = output.pointer;
89 if (out_obj->type != ACPI_TYPE_BUFFER) {
90 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
91 status = AE_TYPE;
92 goto out_kfree;
93 }
94 /* Need to ignore the bit0 in result code */
95 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
96 if (errors) {
97 if (errors & OSC_REQUEST_ERROR)
98 printk(KERN_DEBUG "_OSC request fails\n");
99 if (errors & OSC_INVALID_UUID_ERROR)
100 printk(KERN_DEBUG "_OSC invalid UUID\n");
101 if (errors & OSC_INVALID_REVISION_ERROR)
102 printk(KERN_DEBUG "_OSC invalid revision\n");
103 if (errors & OSC_CAPABILITIES_MASK_ERROR) {
104 if (flags & OSC_QUERY_ENABLE)
105 goto out_success;
106 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
107 status = AE_SUPPORT;
108 goto out_kfree;
109 }
110 status = AE_ERROR;
111 goto out_kfree;
112 }
113out_success:
114 *retval = *((u32 *)(out_obj->buffer.pointer + 8));
115 status = AE_OK;
116
117out_kfree:
118 kfree(output.pointer);
119 return status;
120}
121
122static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
123{
124 acpi_status status;
125 u32 support_set, result;
126 struct acpi_osc_args osc_args;
127
128 /* do _OSC query for all possible controls */
129 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
130 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
131 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
132 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
133
134 status = acpi_run_osc(osc_data->handle, &osc_args, &result);
135 if (ACPI_SUCCESS(status)) {
136 osc_data->support_set = support_set;
137 osc_data->control_query = result;
138 osc_data->is_queried = 1;
139 }
140
141 return status;
142}
143
144/*
145 * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
146 * @flags: Bitmask of flags to support
147 *
148 * See the ACPI spec for the definition of the flags
149 */
150int pci_acpi_osc_support(acpi_handle handle, u32 flags)
151{
152 acpi_status status;
153 acpi_handle tmp;
154 struct acpi_osc_data *osc_data;
155 int rc = 0;
156
157 status = acpi_get_handle(handle, "_OSC", &tmp);
158 if (ACPI_FAILURE(status))
159 return -ENOTTY;
160
161 mutex_lock(&pci_acpi_lock);
162 osc_data = acpi_get_osc_data(handle);
163 if (!osc_data) {
164 printk(KERN_ERR "acpi osc data array is full\n");
165 rc = -ENOMEM;
166 goto out;
167 }
168
169 __acpi_query_osc(flags, osc_data);
170out:
171 mutex_unlock(&pci_acpi_lock);
172 return rc;
173}
174
175/**
176 * pci_osc_control_set - commit requested control to Firmware
177 * @handle: acpi_handle for the target ACPI object
178 * @flags: driver's requested control bits
179 *
180 * Attempt to take control from Firmware on requested control bits.
181 **/
182acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
183{
184 acpi_status status;
185 u32 control_req, control_set, result;
186 acpi_handle tmp;
187 struct acpi_osc_data *osc_data;
188 struct acpi_osc_args osc_args;
189
190 status = acpi_get_handle(handle, "_OSC", &tmp);
191 if (ACPI_FAILURE(status))
192 return status;
193
194 mutex_lock(&pci_acpi_lock);
195 osc_data = acpi_get_osc_data(handle);
196 if (!osc_data) {
197 printk(KERN_ERR "acpi osc data array is full\n");
198 status = AE_ERROR;
199 goto out;
200 }
201
202 control_req = (flags & OSC_CONTROL_MASKS);
203 if (!control_req) {
204 status = AE_TYPE;
205 goto out;
206 }
207
208 /* No need to evaluate _OSC if the control was already granted. */
209 if ((osc_data->control_set & control_req) == control_req)
210 goto out;
211
212 if (!osc_data->is_queried) {
213 status = __acpi_query_osc(osc_data->support_set, osc_data);
214 if (ACPI_FAILURE(status))
215 goto out;
216 }
217
218 if ((osc_data->control_query & control_req) != control_req) {
219 status = AE_SUPPORT;
220 goto out;
221 }
222
223 control_set = osc_data->control_set | control_req;
224 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
225 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
226 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
227 status = acpi_run_osc(handle, &osc_args, &result);
228 if (ACPI_SUCCESS(status))
229 osc_data->control_set = result;
230out:
231 mutex_unlock(&pci_acpi_lock);
232 return status;
233}
234EXPORT_SYMBOL(pci_osc_control_set);
235
236/* 21/*
237 * _SxD returns the D-state with the highest power 22 * _SxD returns the D-state with the highest power
238 * (lowest D-state number) supported in the S-state "x". 23 * (lowest D-state number) supported in the S-state "x".
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 93eac1423585..c0cbbb5a245e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -99,6 +99,52 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
99} 99}
100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 100static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
101 101
102/**
103 * store_remove_id - remove a PCI device ID from this driver
104 * @driver: target device driver
105 * @buf: buffer for scanning device ID data
106 * @count: input size
107 *
108 * Removes a dynamic pci device ID from this driver.
109 */
110static ssize_t
111store_remove_id(struct device_driver *driver, const char *buf, size_t count)
112{
113 struct pci_dynid *dynid, *n;
114 struct pci_driver *pdrv = to_pci_driver(driver);
115 __u32 vendor, device, subvendor = PCI_ANY_ID,
116 subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
117 int fields = 0;
118 int retval = -ENODEV;
119
120 fields = sscanf(buf, "%x %x %x %x %x %x",
121 &vendor, &device, &subvendor, &subdevice,
122 &class, &class_mask);
123 if (fields < 2)
124 return -EINVAL;
125
126 spin_lock(&pdrv->dynids.lock);
127 list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
128 struct pci_device_id *id = &dynid->id;
129 if ((id->vendor == vendor) &&
130 (id->device == device) &&
131 (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
132 (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
133 !((id->class ^ class) & class_mask)) {
134 list_del(&dynid->node);
135 kfree(dynid);
136 retval = 0;
137 break;
138 }
139 }
140 spin_unlock(&pdrv->dynids.lock);
141
142 if (retval)
143 return retval;
144 return count;
145}
146static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
147
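From userspace this mirrors new_id: the same space-separated hex fields, with vendor and device required and the rest optional. Writing, for example, the illustrative pair "8086 10f5" to /sys/bus/pci/drivers/<driver>/remove_id deletes a previously added dynamic ID, and the write fails with -ENODEV when no dynamic entry matches.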
102static void 148static void
103pci_free_dynids(struct pci_driver *drv) 149pci_free_dynids(struct pci_driver *drv)
104{ 150{
@@ -125,6 +171,20 @@ static void pci_remove_newid_file(struct pci_driver *drv)
125{ 171{
126 driver_remove_file(&drv->driver, &driver_attr_new_id); 172 driver_remove_file(&drv->driver, &driver_attr_new_id);
127} 173}
174
175static int
176pci_create_removeid_file(struct pci_driver *drv)
177{
178 int error = 0;
179 if (drv->probe != NULL)
180 error = driver_create_file(&drv->driver,&driver_attr_remove_id);
181 return error;
182}
183
184static void pci_remove_removeid_file(struct pci_driver *drv)
185{
186 driver_remove_file(&drv->driver, &driver_attr_remove_id);
187}
128#else /* !CONFIG_HOTPLUG */ 188#else /* !CONFIG_HOTPLUG */
129static inline void pci_free_dynids(struct pci_driver *drv) {} 189static inline void pci_free_dynids(struct pci_driver *drv) {}
130static inline int pci_create_newid_file(struct pci_driver *drv) 190static inline int pci_create_newid_file(struct pci_driver *drv)
@@ -132,6 +192,11 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
132 return 0; 192 return 0;
133} 193}
134static inline void pci_remove_newid_file(struct pci_driver *drv) {} 194static inline void pci_remove_newid_file(struct pci_driver *drv) {}
195static inline int pci_create_removeid_file(struct pci_driver *drv)
196{
197 return 0;
198}
199static inline void pci_remove_removeid_file(struct pci_driver *drv) {}
135#endif 200#endif
136 201
137/** 202/**
@@ -352,53 +417,60 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
352{ 417{
353 struct pci_dev * pci_dev = to_pci_dev(dev); 418 struct pci_dev * pci_dev = to_pci_dev(dev);
354 struct pci_driver * drv = pci_dev->driver; 419 struct pci_driver * drv = pci_dev->driver;
355 int i = 0; 420
421 pci_dev->state_saved = false;
356 422
357 if (drv && drv->suspend) { 423 if (drv && drv->suspend) {
358 pci_power_t prev = pci_dev->current_state; 424 pci_power_t prev = pci_dev->current_state;
425 int error;
359 426
360 pci_dev->state_saved = false; 427 error = drv->suspend(pci_dev, state);
361 428 suspend_report_result(drv->suspend, error);
362 i = drv->suspend(pci_dev, state); 429 if (error)
363 suspend_report_result(drv->suspend, i); 430 return error;
364 if (i)
365 return i;
366
367 if (pci_dev->state_saved)
368 goto Fixup;
369 431
370 if (pci_dev->current_state != PCI_D0 432 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
371 && pci_dev->current_state != PCI_UNKNOWN) { 433 && pci_dev->current_state != PCI_UNKNOWN) {
372 WARN_ONCE(pci_dev->current_state != prev, 434 WARN_ONCE(pci_dev->current_state != prev,
373 "PCI PM: Device state not saved by %pF\n", 435 "PCI PM: Device state not saved by %pF\n",
374 drv->suspend); 436 drv->suspend);
375 goto Fixup;
376 } 437 }
377 } 438 }
378 439
379 pci_save_state(pci_dev);
380 /*
381 * This is for compatibility with existing code with legacy PM support.
382 */
383 pci_pm_set_unknown_state(pci_dev);
384
385 Fixup:
386 pci_fixup_device(pci_fixup_suspend, pci_dev); 440 pci_fixup_device(pci_fixup_suspend, pci_dev);
387 441
388 return i; 442 return 0;
389} 443}
390 444
391static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) 445static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
392{ 446{
393 struct pci_dev * pci_dev = to_pci_dev(dev); 447 struct pci_dev * pci_dev = to_pci_dev(dev);
394 struct pci_driver * drv = pci_dev->driver; 448 struct pci_driver * drv = pci_dev->driver;
395 int i = 0;
396 449
397 if (drv && drv->suspend_late) { 450 if (drv && drv->suspend_late) {
398 i = drv->suspend_late(pci_dev, state); 451 pci_power_t prev = pci_dev->current_state;
399 suspend_report_result(drv->suspend_late, i); 452 int error;
453
454 error = drv->suspend_late(pci_dev, state);
455 suspend_report_result(drv->suspend_late, error);
456 if (error)
457 return error;
458
459 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
460 && pci_dev->current_state != PCI_UNKNOWN) {
461 WARN_ONCE(pci_dev->current_state != prev,
462 "PCI PM: Device state not saved by %pF\n",
463 drv->suspend_late);
464 return 0;
465 }
400 } 466 }
401 return i; 467
468 if (!pci_dev->state_saved)
469 pci_save_state(pci_dev);
470
471 pci_pm_set_unknown_state(pci_dev);
472
473 return 0;
402} 474}
403 475
404static int pci_legacy_resume_early(struct device *dev) 476static int pci_legacy_resume_early(struct device *dev)
@@ -423,6 +495,23 @@ static int pci_legacy_resume(struct device *dev)
423 495
424/* Auxiliary functions used by the new power management framework */ 496/* Auxiliary functions used by the new power management framework */
425 497
498/**
499 * pci_restore_standard_config - restore standard config registers of PCI device
500 * @pci_dev: PCI device to handle
501 */
502static int pci_restore_standard_config(struct pci_dev *pci_dev)
503{
504 pci_update_current_state(pci_dev, PCI_UNKNOWN);
505
506 if (pci_dev->current_state != PCI_D0) {
507 int error = pci_set_power_state(pci_dev, PCI_D0);
508 if (error)
509 return error;
510 }
511
512 return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0;
513}
514
426static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 515static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
427{ 516{
428 pci_restore_standard_config(pci_dev); 517 pci_restore_standard_config(pci_dev);
@@ -443,7 +532,6 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
443 /* Disable non-bridge devices without PM support */ 532 /* Disable non-bridge devices without PM support */
444 if (!pci_is_bridge(pci_dev)) 533 if (!pci_is_bridge(pci_dev))
445 pci_disable_enabled_device(pci_dev); 534 pci_disable_enabled_device(pci_dev);
446 pci_save_state(pci_dev);
447} 535}
448 536
449static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) 537static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
@@ -493,13 +581,13 @@ static int pci_pm_suspend(struct device *dev)
493 if (pci_has_legacy_pm_support(pci_dev)) 581 if (pci_has_legacy_pm_support(pci_dev))
494 return pci_legacy_suspend(dev, PMSG_SUSPEND); 582 return pci_legacy_suspend(dev, PMSG_SUSPEND);
495 583
584 pci_dev->state_saved = false;
585
496 if (!pm) { 586 if (!pm) {
497 pci_pm_default_suspend(pci_dev); 587 pci_pm_default_suspend(pci_dev);
498 goto Fixup; 588 goto Fixup;
499 } 589 }
500 590
501 pci_dev->state_saved = false;
502
503 if (pm->suspend) { 591 if (pm->suspend) {
504 pci_power_t prev = pci_dev->current_state; 592 pci_power_t prev = pci_dev->current_state;
505 int error; 593 int error;
@@ -509,24 +597,14 @@ static int pci_pm_suspend(struct device *dev)
509 if (error) 597 if (error)
510 return error; 598 return error;
511 599
512 if (pci_dev->state_saved) 600 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
513 goto Fixup;
514
515 if (pci_dev->current_state != PCI_D0
516 && pci_dev->current_state != PCI_UNKNOWN) { 601 && pci_dev->current_state != PCI_UNKNOWN) {
517 WARN_ONCE(pci_dev->current_state != prev, 602 WARN_ONCE(pci_dev->current_state != prev,
518 "PCI PM: State of device not saved by %pF\n", 603 "PCI PM: State of device not saved by %pF\n",
519 pm->suspend); 604 pm->suspend);
520 goto Fixup;
521 } 605 }
522 } 606 }
523 607
524 if (!pci_dev->state_saved) {
525 pci_save_state(pci_dev);
526 if (!pci_is_bridge(pci_dev))
527 pci_prepare_to_sleep(pci_dev);
528 }
529
530 Fixup: 608 Fixup:
531 pci_fixup_device(pci_fixup_suspend, pci_dev); 609 pci_fixup_device(pci_fixup_suspend, pci_dev);
532 610
@@ -536,21 +614,43 @@ static int pci_pm_suspend(struct device *dev)
536static int pci_pm_suspend_noirq(struct device *dev) 614static int pci_pm_suspend_noirq(struct device *dev)
537{ 615{
538 struct pci_dev *pci_dev = to_pci_dev(dev); 616 struct pci_dev *pci_dev = to_pci_dev(dev);
539 struct device_driver *drv = dev->driver; 617 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
540 int error = 0;
541 618
542 if (pci_has_legacy_pm_support(pci_dev)) 619 if (pci_has_legacy_pm_support(pci_dev))
543 return pci_legacy_suspend_late(dev, PMSG_SUSPEND); 620 return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
544 621
545 if (drv && drv->pm && drv->pm->suspend_noirq) { 622 if (!pm) {
546 error = drv->pm->suspend_noirq(dev); 623 pci_save_state(pci_dev);
547 suspend_report_result(drv->pm->suspend_noirq, error); 624 return 0;
548 } 625 }
549 626
550 if (!error) 627 if (pm->suspend_noirq) {
551 pci_pm_set_unknown_state(pci_dev); 628 pci_power_t prev = pci_dev->current_state;
629 int error;
552 630
553 return error; 631 error = pm->suspend_noirq(dev);
632 suspend_report_result(pm->suspend_noirq, error);
633 if (error)
634 return error;
635
636 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
637 && pci_dev->current_state != PCI_UNKNOWN) {
638 WARN_ONCE(pci_dev->current_state != prev,
639 "PCI PM: State of device not saved by %pF\n",
640 pm->suspend_noirq);
641 return 0;
642 }
643 }
644
645 if (!pci_dev->state_saved) {
646 pci_save_state(pci_dev);
647 if (!pci_is_bridge(pci_dev))
648 pci_prepare_to_sleep(pci_dev);
649 }
650
651 pci_pm_set_unknown_state(pci_dev);
652
653 return 0;
554} 654}
555 655
556static int pci_pm_resume_noirq(struct device *dev) 656static int pci_pm_resume_noirq(struct device *dev)
@@ -617,13 +717,13 @@ static int pci_pm_freeze(struct device *dev)
617 if (pci_has_legacy_pm_support(pci_dev)) 717 if (pci_has_legacy_pm_support(pci_dev))
618 return pci_legacy_suspend(dev, PMSG_FREEZE); 718 return pci_legacy_suspend(dev, PMSG_FREEZE);
619 719
720 pci_dev->state_saved = false;
721
620 if (!pm) { 722 if (!pm) {
621 pci_pm_default_suspend(pci_dev); 723 pci_pm_default_suspend(pci_dev);
622 return 0; 724 return 0;
623 } 725 }
624 726
625 pci_dev->state_saved = false;
626
627 if (pm->freeze) { 727 if (pm->freeze) {
628 int error; 728 int error;
629 729
@@ -633,9 +733,6 @@ static int pci_pm_freeze(struct device *dev)
633 return error; 733 return error;
634 } 734 }
635 735
636 if (!pci_dev->state_saved)
637 pci_save_state(pci_dev);
638
639 return 0; 736 return 0;
640} 737}
641 738
@@ -643,20 +740,25 @@ static int pci_pm_freeze_noirq(struct device *dev)
643{ 740{
644 struct pci_dev *pci_dev = to_pci_dev(dev); 741 struct pci_dev *pci_dev = to_pci_dev(dev);
645 struct device_driver *drv = dev->driver; 742 struct device_driver *drv = dev->driver;
646 int error = 0;
647 743
648 if (pci_has_legacy_pm_support(pci_dev)) 744 if (pci_has_legacy_pm_support(pci_dev))
649 return pci_legacy_suspend_late(dev, PMSG_FREEZE); 745 return pci_legacy_suspend_late(dev, PMSG_FREEZE);
650 746
651 if (drv && drv->pm && drv->pm->freeze_noirq) { 747 if (drv && drv->pm && drv->pm->freeze_noirq) {
748 int error;
749
652 error = drv->pm->freeze_noirq(dev); 750 error = drv->pm->freeze_noirq(dev);
653 suspend_report_result(drv->pm->freeze_noirq, error); 751 suspend_report_result(drv->pm->freeze_noirq, error);
752 if (error)
753 return error;
654 } 754 }
655 755
656 if (!error) 756 if (!pci_dev->state_saved)
657 pci_pm_set_unknown_state(pci_dev); 757 pci_save_state(pci_dev);
658 758
659 return error; 759 pci_pm_set_unknown_state(pci_dev);
760
761 return 0;
660} 762}
661 763
662static int pci_pm_thaw_noirq(struct device *dev) 764static int pci_pm_thaw_noirq(struct device *dev)
@@ -699,46 +801,56 @@ static int pci_pm_poweroff(struct device *dev)
699{ 801{
700 struct pci_dev *pci_dev = to_pci_dev(dev); 802 struct pci_dev *pci_dev = to_pci_dev(dev);
701 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 803 struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
702 int error = 0;
703 804
704 if (pci_has_legacy_pm_support(pci_dev)) 805 if (pci_has_legacy_pm_support(pci_dev))
705 return pci_legacy_suspend(dev, PMSG_HIBERNATE); 806 return pci_legacy_suspend(dev, PMSG_HIBERNATE);
706 807
808 pci_dev->state_saved = false;
809
707 if (!pm) { 810 if (!pm) {
708 pci_pm_default_suspend(pci_dev); 811 pci_pm_default_suspend(pci_dev);
709 goto Fixup; 812 goto Fixup;
710 } 813 }
711 814
712 pci_dev->state_saved = false;
713
714 if (pm->poweroff) { 815 if (pm->poweroff) {
816 int error;
817
715 error = pm->poweroff(dev); 818 error = pm->poweroff(dev);
716 suspend_report_result(pm->poweroff, error); 819 suspend_report_result(pm->poweroff, error);
820 if (error)
821 return error;
717 } 822 }
718 823
719 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
720 pci_prepare_to_sleep(pci_dev);
721
722 Fixup: 824 Fixup:
723 pci_fixup_device(pci_fixup_suspend, pci_dev); 825 pci_fixup_device(pci_fixup_suspend, pci_dev);
724 826
725 return error; 827 return 0;
726} 828}
727 829
728static int pci_pm_poweroff_noirq(struct device *dev) 830static int pci_pm_poweroff_noirq(struct device *dev)
729{ 831{
832 struct pci_dev *pci_dev = to_pci_dev(dev);
730 struct device_driver *drv = dev->driver; 833 struct device_driver *drv = dev->driver;
731 int error = 0;
732 834
733 if (pci_has_legacy_pm_support(to_pci_dev(dev))) 835 if (pci_has_legacy_pm_support(to_pci_dev(dev)))
734 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); 836 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
735 837
736 if (drv && drv->pm && drv->pm->poweroff_noirq) { 838 if (!drv || !drv->pm)
839 return 0;
840
841 if (drv->pm->poweroff_noirq) {
842 int error;
843
737 error = drv->pm->poweroff_noirq(dev); 844 error = drv->pm->poweroff_noirq(dev);
738 suspend_report_result(drv->pm->poweroff_noirq, error); 845 suspend_report_result(drv->pm->poweroff_noirq, error);
846 if (error)
847 return error;
739 } 848 }
740 849
741 return error; 850 if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
851 pci_prepare_to_sleep(pci_dev);
852
853 return 0;
742} 854}
743 855
744static int pci_pm_restore_noirq(struct device *dev) 856static int pci_pm_restore_noirq(struct device *dev)
@@ -852,13 +964,23 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
852 /* register with core */ 964 /* register with core */
853 error = driver_register(&drv->driver); 965 error = driver_register(&drv->driver);
854 if (error) 966 if (error)
855 return error; 967 goto out;
856 968
857 error = pci_create_newid_file(drv); 969 error = pci_create_newid_file(drv);
858 if (error) 970 if (error)
859 driver_unregister(&drv->driver); 971 goto out_newid;
860 972
973 error = pci_create_removeid_file(drv);
974 if (error)
975 goto out_removeid;
976out:
861 return error; 977 return error;
978
979out_removeid:
980 pci_remove_newid_file(drv);
981out_newid:
982 driver_unregister(&drv->driver);
983 goto out;
862} 984}
863 985
864/** 986/**
@@ -874,6 +996,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
874void 996void
875pci_unregister_driver(struct pci_driver *drv) 997pci_unregister_driver(struct pci_driver *drv)
876{ 998{
999 pci_remove_removeid_file(drv);
877 pci_remove_newid_file(drv); 1000 pci_remove_newid_file(drv);
878 driver_unregister(&drv->driver); 1001 driver_unregister(&drv->driver);
879 pci_free_dynids(drv); 1002 pci_free_dynids(drv);
@@ -973,6 +1096,7 @@ struct bus_type pci_bus_type = {
973 .remove = pci_device_remove, 1096 .remove = pci_device_remove,
974 .shutdown = pci_device_shutdown, 1097 .shutdown = pci_device_shutdown,
975 .dev_attrs = pci_dev_attrs, 1098 .dev_attrs = pci_dev_attrs,
1099 .bus_attrs = pci_bus_attrs,
976 .pm = PCI_PM_OPS_PTR, 1100 .pm = PCI_PM_OPS_PTR,
977}; 1101};
978 1102
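The thread running through these suspend changes is one contract around state_saved: the core clears it at the start of every suspend path, the driver callback may call pci_save_state() itself, and only if it did not does the core save state in the noirq phase (and, for non-bridges, pick a wake-capable sleep state via pci_prepare_to_sleep()). A conforming dev_pm_ops suspend hook might look like this sketch (sketch_quiesce is a hypothetical device-specific step):

static void sketch_quiesce(struct pci_dev *pdev)	/* hypothetical */
{
	/* device-specific: stop DMA, silence device interrupts */
}

static int sketch_suspend(struct device *dev)		/* illustrative */
{
	struct pci_dev *pdev = to_pci_dev(dev);

	sketch_quiesce(pdev);

	/* Optional: saving state here sets pdev->state_saved, so the
	 * core skips its own pci_save_state()/pci_prepare_to_sleep()
	 * in pci_pm_suspend_noirq(). */
	pci_save_state(pdev);
	return 0;
}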
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index dfc4e0ddf241..e9a8706a6401 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -219,6 +219,83 @@ msi_bus_store(struct device *dev, struct device_attribute *attr,
219 return count; 219 return count;
220} 220}
221 221
222#ifdef CONFIG_HOTPLUG
223static DEFINE_MUTEX(pci_remove_rescan_mutex);
224static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
225 size_t count)
226{
227 unsigned long val;
228 struct pci_bus *b = NULL;
229
230 if (strict_strtoul(buf, 0, &val) < 0)
231 return -EINVAL;
232
233 if (val) {
234 mutex_lock(&pci_remove_rescan_mutex);
235 while ((b = pci_find_next_bus(b)) != NULL)
236 pci_rescan_bus(b);
237 mutex_unlock(&pci_remove_rescan_mutex);
238 }
239 return count;
240}
241
242struct bus_attribute pci_bus_attrs[] = {
243 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
244 __ATTR_NULL
245};
246
247static ssize_t
248dev_rescan_store(struct device *dev, struct device_attribute *attr,
249 const char *buf, size_t count)
250{
251 unsigned long val;
252 struct pci_dev *pdev = to_pci_dev(dev);
253
254 if (strict_strtoul(buf, 0, &val) < 0)
255 return -EINVAL;
256
257 if (val) {
258 mutex_lock(&pci_remove_rescan_mutex);
259 pci_rescan_bus(pdev->bus);
260 mutex_unlock(&pci_remove_rescan_mutex);
261 }
262 return count;
263}
264
265static void remove_callback(struct device *dev)
266{
267 struct pci_dev *pdev = to_pci_dev(dev);
268
269 mutex_lock(&pci_remove_rescan_mutex);
270 pci_remove_bus_device(pdev);
271 mutex_unlock(&pci_remove_rescan_mutex);
272}
273
274static ssize_t
275remove_store(struct device *dev, struct device_attribute *dummy,
276 const char *buf, size_t count)
277{
278 int ret = 0;
279 unsigned long val;
280 struct pci_dev *pdev = to_pci_dev(dev);
281
282 if (strict_strtoul(buf, 0, &val) < 0)
283 return -EINVAL;
284
285 if (pci_is_root_bus(pdev->bus))
286 return -EBUSY;
287
288 /* An attribute cannot be unregistered by one of its own methods,
289 * so we have to use this roundabout approach.
290 */
291 if (val)
292 ret = device_schedule_callback(dev, remove_callback);
293 if (ret)
294 count = ret;
295 return count;
296}
297#endif
298
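In use: writing 1 to /sys/bus/pci/rescan rescans every bus, writing 1 to a device's rescan attribute rescans only that device's bus, and writing 1 to its remove attribute tears the device down through the scheduled callback (devices sitting directly on a root bus refuse with -EBUSY, as above). The shared pci_remove_rescan_mutex keeps a concurrent remove and rescan from racing.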
222struct device_attribute pci_dev_attrs[] = { 299struct device_attribute pci_dev_attrs[] = {
223 __ATTR_RO(resource), 300 __ATTR_RO(resource),
224 __ATTR_RO(vendor), 301 __ATTR_RO(vendor),
@@ -237,10 +314,25 @@ struct device_attribute pci_dev_attrs[] = {
237 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), 314 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
238 broken_parity_status_show,broken_parity_status_store), 315 broken_parity_status_show,broken_parity_status_store),
239 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store), 316 __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
317#ifdef CONFIG_HOTPLUG
318 __ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
319 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
320#endif
240 __ATTR_NULL, 321 __ATTR_NULL,
241}; 322};
242 323
243static ssize_t 324static ssize_t
325boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
326{
327 struct pci_dev *pdev = to_pci_dev(dev);
328
329 return sprintf(buf, "%u\n",
330 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
331 IORESOURCE_ROM_SHADOW));
332}
333struct device_attribute vga_attr = __ATTR_RO(boot_vga);
334
335static ssize_t
244pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr, 336pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
245 char *buf, loff_t off, size_t count) 337 char *buf, loff_t off, size_t count)
246{ 338{
@@ -493,6 +585,19 @@ pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
493} 585}
494 586
495/** 587/**
588 * pci_adjust_legacy_attr - adjustment of legacy file attributes
589 * @b: bus to create files under
590 * @mmap_type: I/O port or memory
591 *
592 * Stub implementation. Can be overridden by arch if necessary.
593 */
594void __weak
595pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
596{
597 return;
598}
599
600/**
496 * pci_create_legacy_files - create legacy I/O port and memory files 601 * pci_create_legacy_files - create legacy I/O port and memory files
497 * @b: bus to create files under 602 * @b: bus to create files under
498 * 603 *
@@ -518,6 +623,7 @@ void pci_create_legacy_files(struct pci_bus *b)
518 b->legacy_io->read = pci_read_legacy_io; 623 b->legacy_io->read = pci_read_legacy_io;
519 b->legacy_io->write = pci_write_legacy_io; 624 b->legacy_io->write = pci_write_legacy_io;
520 b->legacy_io->mmap = pci_mmap_legacy_io; 625 b->legacy_io->mmap = pci_mmap_legacy_io;
626 pci_adjust_legacy_attr(b, pci_mmap_io);
521 error = device_create_bin_file(&b->dev, b->legacy_io); 627 error = device_create_bin_file(&b->dev, b->legacy_io);
522 if (error) 628 if (error)
523 goto legacy_io_err; 629 goto legacy_io_err;
@@ -528,6 +634,7 @@ void pci_create_legacy_files(struct pci_bus *b)
528 b->legacy_mem->size = 1024*1024; 634 b->legacy_mem->size = 1024*1024;
529 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 635 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
530 b->legacy_mem->mmap = pci_mmap_legacy_mem; 636 b->legacy_mem->mmap = pci_mmap_legacy_mem;
637 pci_adjust_legacy_attr(b, pci_mmap_mem);
531 error = device_create_bin_file(&b->dev, b->legacy_mem); 638 error = device_create_bin_file(&b->dev, b->legacy_mem);
532 if (error) 639 if (error)
533 goto legacy_mem_err; 640 goto legacy_mem_err;
@@ -719,8 +826,8 @@ static int pci_create_resource_files(struct pci_dev *pdev)
719 return 0; 826 return 0;
720} 827}
721#else /* !HAVE_PCI_MMAP */ 828#else /* !HAVE_PCI_MMAP */
722static inline int pci_create_resource_files(struct pci_dev *dev) { return 0; } 829int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
723static inline void pci_remove_resource_files(struct pci_dev *dev) { return; } 830void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
724#endif /* HAVE_PCI_MMAP */ 831#endif /* HAVE_PCI_MMAP */
725 832
726/** 833/**
@@ -884,18 +991,27 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
884 pdev->rom_attr = attr; 991 pdev->rom_attr = attr;
885 } 992 }
886 993
994 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
995 retval = device_create_file(&pdev->dev, &vga_attr);
996 if (retval)
997 goto err_rom_file;
998 }
999
887 /* add platform-specific attributes */ 1000 /* add platform-specific attributes */
888 retval = pcibios_add_platform_entries(pdev); 1001 retval = pcibios_add_platform_entries(pdev);
889 if (retval) 1002 if (retval)
890 goto err_rom_file; 1003 goto err_vga_file;
891 1004
892 /* add sysfs entries for various capabilities */ 1005 /* add sysfs entries for various capabilities */
893 retval = pci_create_capabilities_sysfs(pdev); 1006 retval = pci_create_capabilities_sysfs(pdev);
894 if (retval) 1007 if (retval)
895 goto err_rom_file; 1008 goto err_vga_file;
896 1009
897 return 0; 1010 return 0;
898 1011
1012err_vga_file:
1013 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
1014 device_remove_file(&pdev->dev, &vga_attr);
899err_rom_file: 1015err_rom_file:
900 if (rom_size) { 1016 if (rom_size) {
901 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); 1017 sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
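boot_vga reports whether firmware shadowed this adapter's ROM (IORESOURCE_ROM_SHADOW on the ROM resource), giving userspace a plain 0/1 flag for identifying the boot display rather than guessing from bus order.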
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d6120007af4..445fb6f7ea3f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -20,6 +20,8 @@
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include <linux/device.h>
24#include <asm/setup.h>
23#include "pci.h" 25#include "pci.h"
24 26
25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 27unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
@@ -426,7 +428,6 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
426 * given PCI device 428 * given PCI device
427 * @dev: PCI device to handle. 429 * @dev: PCI device to handle.
428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 430 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
429 * @wait: If 'true', wait for the device to change its power state
430 * 431 *
431 * RETURN VALUE: 432 * RETURN VALUE:
432 * -EINVAL if the requested state is invalid. 433 * -EINVAL if the requested state is invalid.
@@ -435,12 +436,15 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
435 * 0 if device already is in the requested state. 436 * 0 if device already is in the requested state.
436 * 0 if device's power state has been successfully changed. 437 * 0 if device's power state has been successfully changed.
437 */ 438 */
438static int 439static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
439pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
440{ 440{
441 u16 pmcsr; 441 u16 pmcsr;
442 bool need_restore = false; 442 bool need_restore = false;
443 443
444 /* Check if we're already there */
445 if (dev->current_state == state)
446 return 0;
447
444 if (!dev->pm_cap) 448 if (!dev->pm_cap)
445 return -EIO; 449 return -EIO;
446 450
@@ -451,10 +455,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
451 * Can enter D0 from any state, but if we can only go deeper 455 * Can enter D0 from any state, but if we can only go deeper
452 * to sleep if we're already in a low power state 456 * to sleep if we're already in a low power state
453 */ 457 */
454 if (dev->current_state == state) { 458 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
455 /* we're already there */
456 return 0;
457 } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
458 && dev->current_state > state) { 459 && dev->current_state > state) {
459 dev_err(&dev->dev, "invalid power transition " 460 dev_err(&dev->dev, "invalid power transition "
460 "(from state %d to %d)\n", dev->current_state, state); 461 "(from state %d to %d)\n", dev->current_state, state);
@@ -481,10 +482,8 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
481 break; 482 break;
482 case PCI_UNKNOWN: /* Boot-up */ 483 case PCI_UNKNOWN: /* Boot-up */
483 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 484 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
484 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) { 485 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
485 need_restore = true; 486 need_restore = true;
486 wait = true;
487 }
488 /* Fall-through: force to D0 */ 487 /* Fall-through: force to D0 */
489 default: 488 default:
490 pmcsr = 0; 489 pmcsr = 0;
@@ -494,9 +493,6 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
494 /* enter specified state */ 493 /* enter specified state */
495 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 494 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
496 495
497 if (!wait)
498 return 0;
499
500 /* Mandatory power management transition delays */ 496 /* Mandatory power management transition delays */
501 /* see PCI PM 1.1 5.6.1 table 18 */ 497 /* see PCI PM 1.1 5.6.1 table 18 */
502 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 498 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
@@ -521,7 +517,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
521 if (need_restore) 517 if (need_restore)
522 pci_restore_bars(dev); 518 pci_restore_bars(dev);
523 519
524 if (wait && dev->bus->self) 520 if (dev->bus->self)
525 pcie_aspm_pm_state_change(dev->bus->self); 521 pcie_aspm_pm_state_change(dev->bus->self);
526 522
527 return 0; 523 return 0;
@@ -546,11 +542,58 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
546} 542}
547 543
548/** 544/**
545 * pci_platform_power_transition - Use platform to change device power state
546 * @dev: PCI device to handle.
547 * @state: State to put the device into.
548 */
549static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
550{
551 int error;
552
553 if (platform_pci_power_manageable(dev)) {
554 error = platform_pci_set_power_state(dev, state);
555 if (!error)
556 pci_update_current_state(dev, state);
557 } else {
558 error = -ENODEV;
559 /* Fall back to PCI_D0 if native PM is not supported */
560 pci_update_current_state(dev, PCI_D0);
561 }
562
563 return error;
564}
565
566/**
567 * __pci_start_power_transition - Start power transition of a PCI device
568 * @dev: PCI device to handle.
569 * @state: State to put the device into.
570 */
571static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
572{
573 if (state == PCI_D0)
574 pci_platform_power_transition(dev, PCI_D0);
575}
576
577/**
578 * __pci_complete_power_transition - Complete power transition of a PCI device
579 * @dev: PCI device to handle.
580 * @state: State to put the device into.
581 *
582 * This function should not be called directly by device drivers.
583 */
584int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
585{
586 return state > PCI_D0 ?
587 pci_platform_power_transition(dev, state) : -EINVAL;
588}
589EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
590
591/**
549 * pci_set_power_state - Set the power state of a PCI device 592 * pci_set_power_state - Set the power state of a PCI device
550 * @dev: PCI device to handle. 593 * @dev: PCI device to handle.
551 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 594 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
552 * 595 *
553 * Transition a device to a new power state, using the platform formware and/or 596 * Transition a device to a new power state, using the platform firmware and/or
554 * the device's PCI PM registers. 597 * the device's PCI PM registers.
555 * 598 *
556 * RETURN VALUE: 599 * RETURN VALUE:
@@ -577,30 +620,21 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
577 */ 620 */
578 return 0; 621 return 0;
579 622
580 if (state == PCI_D0 && platform_pci_power_manageable(dev)) { 623 /* Check if we're already there */
581 /* 624 if (dev->current_state == state)
582 * Allow the platform to change the state, for example via ACPI 625 return 0;
583 * _PR0, _PS0 and some such, but do not trust it. 626
584 */ 627 __pci_start_power_transition(dev, state);
585 int ret = platform_pci_set_power_state(dev, PCI_D0); 628
586 if (!ret)
587 pci_update_current_state(dev, PCI_D0);
588 }
589 /* This device is quirked not to be put into D3, so 629 /* This device is quirked not to be put into D3, so
590 don't put it in D3 */ 630 don't put it in D3 */
591 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 631 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
592 return 0; 632 return 0;
593 633
594 error = pci_raw_set_power_state(dev, state, true); 634 error = pci_raw_set_power_state(dev, state);
595 635
596 if (state > PCI_D0 && platform_pci_power_manageable(dev)) { 636 if (!__pci_complete_power_transition(dev, state))
597 /* Allow the platform to finalize the transition */ 637 error = 0;
598 int ret = platform_pci_set_power_state(dev, state);
599 if (!ret) {
600 pci_update_current_state(dev, state);
601 error = 0;
602 }
603 }
604 638
605 return error; 639 return error;
606} 640}
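For context, a minimal sketch of how a driver's legacy suspend hook would exercise this path (illustrative only: the foo_* names are hypothetical and error handling is trimmed):

	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
	{
		pci_save_state(pdev);
		pci_disable_device(pdev);
		/* the core picks platform firmware and/or native PCI PM, as above */
		return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
	}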
@@ -645,6 +679,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
645 679
646EXPORT_SYMBOL(pci_choose_state); 680EXPORT_SYMBOL(pci_choose_state);
647 681
682#define PCI_EXP_SAVE_REGS 7
683
648static int pci_save_pcie_state(struct pci_dev *dev) 684static int pci_save_pcie_state(struct pci_dev *dev)
649{ 685{
650 int pos, i = 0; 686 int pos, i = 0;
@@ -657,7 +693,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
657 693
658 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 694 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
659 if (!save_state) { 695 if (!save_state) {
660 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 696 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
661 return -ENOMEM; 697 return -ENOMEM;
662 } 698 }
663 cap = (u16 *)&save_state->data[0]; 699 cap = (u16 *)&save_state->data[0];
@@ -666,6 +702,9 @@ static int pci_save_pcie_state(struct pci_dev *dev)
666 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 702 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
667 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 703 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
668 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 704 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
705 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
706 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
707 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
669 708
670 return 0; 709 return 0;
671} 710}
@@ -686,6 +725,9 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
686 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 725 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
687 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 726 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
688 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 727 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
728 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
729 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
730 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
689} 731}
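PCI_EXP_SAVE_REGS is 7 because the save/restore pair now covers seven 16-bit PCI Express control registers; the initial PCI_EXP_DEVCTL read sits just above this hunk. In save order:

	PCI_EXP_DEVCTL, PCI_EXP_LNKCTL, PCI_EXP_SLTCTL, PCI_EXP_RTCTL,
	PCI_EXP_DEVCTL2, PCI_EXP_LNKCTL2, PCI_EXP_SLTCTL2

hence the matching PCI_EXP_SAVE_REGS * sizeof(u16) buffer size in pci_allocate_cap_save_buffers() below.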
690 732
691 733
@@ -700,7 +742,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
700 742
701 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 743 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
702 if (!save_state) { 744 if (!save_state) {
703 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 745 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
704 return -ENOMEM; 746 return -ENOMEM;
705 } 747 }
706 748
@@ -773,6 +815,7 @@ pci_restore_state(struct pci_dev *dev)
773 } 815 }
774 pci_restore_pcix_state(dev); 816 pci_restore_pcix_state(dev);
775 pci_restore_msi_state(dev); 817 pci_restore_msi_state(dev);
818 pci_restore_iov_state(dev);
776 819
777 return 0; 820 return 0;
778} 821}
@@ -1231,7 +1274,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1231 if (target_state == PCI_POWER_ERROR) 1274 if (target_state == PCI_POWER_ERROR)
1232 return -EIO; 1275 return -EIO;
1233 1276
1234 pci_enable_wake(dev, target_state, true); 1277 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1235 1278
1236 error = pci_set_power_state(dev, target_state); 1279 error = pci_set_power_state(dev, target_state);
1237 1280
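pci_prepare_to_sleep() now arms wake-up only when userspace has actually allowed it for the device, instead of unconditionally. That permission is controlled through the standard sysfs wakeup attribute, e.g. (the 0000:00:19.0 address is just an illustration):

	echo enabled > /sys/bus/pci/devices/0000:00:19.0/power/wakeup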
@@ -1369,7 +1412,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1369{ 1412{
1370 int error; 1413 int error;
1371 1414
1372 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1415 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1416 PCI_EXP_SAVE_REGS * sizeof(u16));
1373 if (error) 1417 if (error)
1374 dev_err(&dev->dev, 1418 dev_err(&dev->dev,
1375 "unable to preallocate PCI Express save buffer\n"); 1419 "unable to preallocate PCI Express save buffer\n");
@@ -1381,50 +1425,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1381} 1425}
1382 1426
1383/** 1427/**
1384 * pci_restore_standard_config - restore standard config registers of PCI device
1385 * @dev: PCI device to handle
1386 *
1387 * This function assumes that the device's configuration space is accessible.
1388 * If the device needs to be powered up, the function will wait for it to
1389 * change the state.
1390 */
1391int pci_restore_standard_config(struct pci_dev *dev)
1392{
1393 pci_power_t prev_state;
1394 int error;
1395
1396 pci_update_current_state(dev, PCI_D0);
1397
1398 prev_state = dev->current_state;
1399 if (prev_state == PCI_D0)
1400 goto Restore;
1401
1402 error = pci_raw_set_power_state(dev, PCI_D0, false);
1403 if (error)
1404 return error;
1405
1406 /*
1407 * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
1408 * we've made this assumption forever and it appears to be universally
1409 * satisfied.
1410 */
1411 switch(prev_state) {
1412 case PCI_D3cold:
1413 case PCI_D3hot:
1414 mdelay(pci_pm_d3_delay);
1415 break;
1416 case PCI_D2:
1417 udelay(PCI_PM_D2_DELAY);
1418 break;
1419 }
1420
1421 pci_update_current_state(dev, PCI_D0);
1422
1423 Restore:
1424 return dev->state_saved ? pci_restore_state(dev) : 0;
1425}
1426
1427/**
1428 * pci_enable_ari - enable ARI forwarding if hardware supports it 1428 * pci_enable_ari - enable ARI forwarding if hardware supports it
1429 * @dev: the PCI device 1429 * @dev: the PCI device
1430 */ 1430 */
@@ -1484,7 +1484,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1484 if (!pin) 1484 if (!pin)
1485 return -1; 1485 return -1;
1486 1486
1487 while (dev->bus->self) { 1487 while (dev->bus->parent) {
1488 pin = pci_swizzle_interrupt_pin(dev, pin); 1488 pin = pci_swizzle_interrupt_pin(dev, pin);
1489 dev = dev->bus->self; 1489 dev = dev->bus->self;
1490 } 1490 }
@@ -1504,7 +1504,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1504{ 1504{
1505 u8 pin = *pinp; 1505 u8 pin = *pinp;
1506 1506
1507 while (dev->bus->self) { 1507 while (dev->bus->parent) {
1508 pin = pci_swizzle_interrupt_pin(dev, pin); 1508 pin = pci_swizzle_interrupt_pin(dev, pin);
1509 dev = dev->bus->self; 1509 dev = dev->bus->self;
1510 } 1510 }
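Both loops now walk up until the root bus (which has no parent) instead of relying on bus->self, so they also terminate correctly on root buses whose bridge device is not represented. The per-bridge step is the classic IA-PC swizzle, which pci_swizzle_interrupt_pin() implements as (sketch):

	pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;

so, for example, a device in slot 2 raising INTA (pin 1) appears as INTC (pin 3) on the upstream side of its bridge.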
@@ -2028,18 +2028,24 @@ static int __pcie_flr(struct pci_dev *dev, int probe)
2028 pci_block_user_cfg_access(dev); 2028 pci_block_user_cfg_access(dev);
2029 2029
2030 /* Wait for Transaction Pending bit clean */ 2030 /* Wait for Transaction Pending bit clean */
2031 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2032 if (!(status & PCI_EXP_DEVSTA_TRPND))
2033 goto transaction_done;
2034
2031 msleep(100); 2035 msleep(100);
2032 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2036 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2033 if (status & PCI_EXP_DEVSTA_TRPND) { 2037 if (!(status & PCI_EXP_DEVSTA_TRPND))
2034 dev_info(&dev->dev, "Busy after 100ms while trying to reset; " 2038 goto transaction_done;
2039
2040 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
2035 "sleeping for 1 second\n"); 2041 "sleeping for 1 second\n");
2036 ssleep(1); 2042 ssleep(1);
2037 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); 2043 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
2038 if (status & PCI_EXP_DEVSTA_TRPND) 2044 if (status & PCI_EXP_DEVSTA_TRPND)
2039 dev_info(&dev->dev, "Still busy after 1s; " 2045 dev_info(&dev->dev, "Still busy after 1s; "
2040 "proceeding with reset anyway\n"); 2046 "proceeding with reset anyway\n");
2041 }
2042 2047
2048transaction_done:
2043 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL, 2049 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
2044 PCI_EXP_DEVCTL_BCR_FLR); 2050 PCI_EXP_DEVCTL_BCR_FLR);
2045 mdelay(100); 2051 mdelay(100);
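Condensed, the wait logic the two FLR variants now share looks like this hypothetical helper (a sketch only; the code stays open-coded above, and __pci_af_flr below does the same with byte-wide reads):

	/* Return true once the Transaction Pending bit has cleared. */
	static bool wait_transaction_idle(struct pci_dev *dev, int pos, u16 mask)
	{
		u16 status;

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return true;	/* idle on entry: no delay at all */
		msleep(100);
		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return true;
		ssleep(1);
		pci_read_config_word(dev, pos, &status);
		return !(status & mask);	/* reset proceeds either way */
	}

The practical gain is the new up-front check: a device that is already idle no longer pays the unconditional 100 ms sleep.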
@@ -2066,18 +2072,24 @@ static int __pci_af_flr(struct pci_dev *dev, int probe)
2066 pci_block_user_cfg_access(dev); 2072 pci_block_user_cfg_access(dev);
2067 2073
2068 /* Wait for Transaction Pending bit clean */ 2074 /* Wait for Transaction Pending bit clean */
2075 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2076 if (!(status & PCI_AF_STATUS_TP))
2077 goto transaction_done;
2078
2069 msleep(100); 2079 msleep(100);
2070 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 2080 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2071 if (status & PCI_AF_STATUS_TP) { 2081 if (!(status & PCI_AF_STATUS_TP))
2072 dev_info(&dev->dev, "Busy after 100ms while trying to" 2082 goto transaction_done;
2073 " reset; sleeping for 1 second\n"); 2083
2074 ssleep(1); 2084 dev_info(&dev->dev, "Busy after 100ms while trying to"
2075 pci_read_config_byte(dev, 2085 " reset; sleeping for 1 second\n");
2076 cappos + PCI_AF_STATUS, &status); 2086 ssleep(1);
2077 if (status & PCI_AF_STATUS_TP) 2087 pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
2078 dev_info(&dev->dev, "Still busy after 1s; " 2088 if (status & PCI_AF_STATUS_TP)
2079 "proceeding with reset anyway\n"); 2089 dev_info(&dev->dev, "Still busy after 1s; "
2080 } 2090 "proceeding with reset anyway\n");
2091
2092transaction_done:
2081 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 2093 pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2082 mdelay(100); 2094 mdelay(100);
2083 2095
@@ -2346,18 +2358,140 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2346 */ 2358 */
2347int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2359int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2348{ 2360{
2361 int reg;
2362
2349 if (resno < PCI_ROM_RESOURCE) { 2363 if (resno < PCI_ROM_RESOURCE) {
2350 *type = pci_bar_unknown; 2364 *type = pci_bar_unknown;
2351 return PCI_BASE_ADDRESS_0 + 4 * resno; 2365 return PCI_BASE_ADDRESS_0 + 4 * resno;
2352 } else if (resno == PCI_ROM_RESOURCE) { 2366 } else if (resno == PCI_ROM_RESOURCE) {
2353 *type = pci_bar_mem32; 2367 *type = pci_bar_mem32;
2354 return dev->rom_base_reg; 2368 return dev->rom_base_reg;
2369 } else if (resno < PCI_BRIDGE_RESOURCES) {
2370 /* device specific resource */
2371 reg = pci_iov_resource_bar(dev, resno, type);
2372 if (reg)
2373 return reg;
2355 } 2374 }
2356 2375
2357 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2376 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
2358 return 0; 2377 return 0;
2359} 2378}
2360 2379
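In summary, the register offsets pci_resource_bar() can now return (the SR-IOV case assumes the pci_iov_resource_bar() implementation from the new iov.c, which is not visible in this hunk):

	resno 0..5                -> PCI_BASE_ADDRESS_0 + 4 * resno   (standard BARs)
	resno == PCI_ROM_RESOURCE -> dev->rom_base_reg                (expansion ROM)
	SR-IOV resource range     -> VF BAR registers in the SR-IOV capability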
2380#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2381static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2382spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
2383
2384/**
2385 * pci_specified_resource_alignment - get resource alignment specified by user.
2386 * @dev: the PCI device to get the alignment for
2387 *
2388 * RETURNS: Resource alignment if it is specified.
2389 * Zero if it is not specified.
2390 */
2391resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
2392{
2393 int seg, bus, slot, func, align_order, count;
2394 resource_size_t align = 0;
2395 char *p;
2396
2397 spin_lock(&resource_alignment_lock);
2398 p = resource_alignment_param;
2399 while (*p) {
2400 count = 0;
2401 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
2402 p[count] == '@') {
2403 p += count + 1;
2404 } else {
2405 align_order = -1;
2406 }
2407 if (sscanf(p, "%x:%x:%x.%x%n",
2408 &seg, &bus, &slot, &func, &count) != 4) {
2409 seg = 0;
2410 if (sscanf(p, "%x:%x.%x%n",
2411 &bus, &slot, &func, &count) != 3) {
2412 /* Invalid format */
2413 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
2414 p);
2415 break;
2416 }
2417 }
2418 p += count;
2419 if (seg == pci_domain_nr(dev->bus) &&
2420 bus == dev->bus->number &&
2421 slot == PCI_SLOT(dev->devfn) &&
2422 func == PCI_FUNC(dev->devfn)) {
2423 if (align_order == -1) {
2424 align = PAGE_SIZE;
2425 } else {
2426 align = 1 << align_order;
2427 }
2428 /* Found */
2429 break;
2430 }
2431 if (*p != ';' && *p != ',') {
2432 /* End of param or invalid format */
2433 break;
2434 }
2435 p++;
2436 }
2437 spin_unlock(&resource_alignment_lock);
2438 return align;
2439}
2440
2441/**
2442 * pci_is_reassigndev - check if specified PCI is target device to reassign
2443 * @dev: the PCI device to check
2444 *
2445 * RETURNS: non-zero if the PCI device is a target device to reassign,
2446 * zero otherwise.
2447 */
2448int pci_is_reassigndev(struct pci_dev *dev)
2449{
2450 return (pci_specified_resource_alignment(dev) != 0);
2451}
2452
2453ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
2454{
2455 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
2456 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
2457 spin_lock(&resource_alignment_lock);
2458 strncpy(resource_alignment_param, buf, count);
2459 resource_alignment_param[count] = '\0';
2460 spin_unlock(&resource_alignment_lock);
2461 return count;
2462}
2463
2464ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
2465{
2466 size_t count;
2467 spin_lock(&resource_alignment_lock);
2468 count = snprintf(buf, size, "%s", resource_alignment_param);
2469 spin_unlock(&resource_alignment_lock);
2470 return count;
2471}
2472
2473static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
2474{
2475 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
2476}
2477
2478static ssize_t pci_resource_alignment_store(struct bus_type *bus,
2479 const char *buf, size_t count)
2480{
2481 return pci_set_resource_alignment_param(buf, count);
2482}
2483
2484BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
2485 pci_resource_alignment_store);
2486
2487static int __init pci_resource_alignment_sysfs_init(void)
2488{
2489 return bus_create_file(&pci_bus_type,
2490 &bus_attr_resource_alignment);
2491}
2492
2493late_initcall(pci_resource_alignment_sysfs_init);
2494
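Per the parser above, the accepted format is [<order>@][<domain>:]<bus>:<slot>.<func>, with multiple entries separated by ';' or ','. For example, booting with

	pci=resource_alignment=20@0000:00:1f.0

requests 2^20-byte (1 MiB) alignment for the resources of device 0000:00:1f.0; omitting the "20@" order prefix falls back to PAGE_SIZE. The same string can be read or replaced at run time through the new /sys/bus/pci/resource_alignment attribute.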
2361static void __devinit pci_no_domains(void) 2495static void __devinit pci_no_domains(void)
2362{ 2496{
2363#ifdef CONFIG_PCI_DOMAINS 2497#ifdef CONFIG_PCI_DOMAINS
@@ -2406,6 +2540,9 @@ static int __init pci_setup(char *str)
2406 pci_cardbus_io_size = memparse(str + 9, &str); 2540 pci_cardbus_io_size = memparse(str + 9, &str);
2407 } else if (!strncmp(str, "cbmemsize=", 10)) { 2541 } else if (!strncmp(str, "cbmemsize=", 10)) {
2408 pci_cardbus_mem_size = memparse(str + 10, &str); 2542 pci_cardbus_mem_size = memparse(str + 10, &str);
2543 } else if (!strncmp(str, "resource_alignment=", 19)) {
2544 pci_set_resource_alignment_param(str + 19,
2545 strlen(str + 19));
2409 } else { 2546 } else {
2410 printk(KERN_ERR "PCI: Unknown option `%s'\n", 2547 printk(KERN_ERR "PCI: Unknown option `%s'\n",
2411 str); 2548 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 07c0aa5275e6..d03f6b99f292 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,8 @@
1#ifndef DRIVERS_PCI_H 1#ifndef DRIVERS_PCI_H
2#define DRIVERS_PCI_H 2#define DRIVERS_PCI_H
3 3
4#include <linux/workqueue.h>
5
4#define PCI_CFG_SPACE_SIZE 256 6#define PCI_CFG_SPACE_SIZE 256
5#define PCI_CFG_SPACE_EXP_SIZE 4096 7#define PCI_CFG_SPACE_EXP_SIZE 4096
6 8
@@ -49,7 +51,6 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
49extern void pci_pm_init(struct pci_dev *dev); 51extern void pci_pm_init(struct pci_dev *dev);
50extern void platform_pci_wakeup_init(struct pci_dev *dev); 52extern void platform_pci_wakeup_init(struct pci_dev *dev);
51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 53extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
52extern int pci_restore_standard_config(struct pci_dev *dev);
53 54
54static inline bool pci_is_bridge(struct pci_dev *pci_dev) 55static inline bool pci_is_bridge(struct pci_dev *pci_dev)
55{ 56{
@@ -136,6 +137,12 @@ extern int pcie_mch_quirk;
136extern struct device_attribute pci_dev_attrs[]; 137extern struct device_attribute pci_dev_attrs[];
137extern struct device_attribute dev_attr_cpuaffinity; 138extern struct device_attribute dev_attr_cpuaffinity;
138extern struct device_attribute dev_attr_cpulistaffinity; 139extern struct device_attribute dev_attr_cpulistaffinity;
140#ifdef CONFIG_HOTPLUG
141extern struct bus_attribute pci_bus_attrs[];
142#else
143#define pci_bus_attrs NULL
144#endif
145
139 146
140/** 147/**
141 * pci_match_one_device - Tell if a PCI device structure has a matching 148 * pci_match_one_device - Tell if a PCI device structure has a matching
@@ -178,6 +185,7 @@ enum pci_bar_type {
178 pci_bar_mem64, /* A 64-bit memory BAR */ 185 pci_bar_mem64, /* A 64-bit memory BAR */
179}; 186};
180 187
188extern int pci_setup_device(struct pci_dev *dev);
181extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 189extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
182 struct resource *res, unsigned int reg); 190 struct resource *res, unsigned int reg);
183extern int pci_resource_bar(struct pci_dev *dev, int resno, 191extern int pci_resource_bar(struct pci_dev *dev, int resno,
@@ -195,4 +203,60 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
195 return bus->self && bus->self->ari_enabled; 203 return bus->self && bus->self->ari_enabled;
196} 204}
197 205
206#ifdef CONFIG_PCI_QUIRKS
207extern int pci_is_reassigndev(struct pci_dev *dev);
208resource_size_t pci_specified_resource_alignment(struct pci_dev *dev);
209extern void pci_disable_bridge_window(struct pci_dev *dev);
210#endif
211
212/* Single Root I/O Virtualization */
213struct pci_sriov {
214 int pos; /* capability position */
215 int nres; /* number of resources */
216 u32 cap; /* SR-IOV Capabilities */
217 u16 ctrl; /* SR-IOV Control */
218 u16 total; /* total VFs associated with the PF */
219 u16 initial; /* initial VFs associated with the PF */
220 u16 nr_virtfn; /* number of VFs available */
221 u16 offset; /* first VF Routing ID offset */
222 u16 stride; /* following VF stride */
223 u32 pgsz; /* page size for BAR alignment */
224 u8 link; /* Function Dependency Link */
225 struct pci_dev *dev; /* lowest numbered PF */
226 struct pci_dev *self; /* this PF */
227 struct mutex lock; /* lock for VF bus */
228 struct work_struct mtask; /* VF Migration task */
229 u8 __iomem *mstate; /* VF Migration State Array */
230};
231
232#ifdef CONFIG_PCI_IOV
233extern int pci_iov_init(struct pci_dev *dev);
234extern void pci_iov_release(struct pci_dev *dev);
235extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
236 enum pci_bar_type *type);
237extern void pci_restore_iov_state(struct pci_dev *dev);
238extern int pci_iov_bus_range(struct pci_bus *bus);
239#else
240static inline int pci_iov_init(struct pci_dev *dev)
241{
242 return -ENODEV;
243}
244static inline void pci_iov_release(struct pci_dev *dev)
245{
246}
248static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno,
249 enum pci_bar_type *type)
250{
251 return 0;
252}
253static inline void pci_restore_iov_state(struct pci_dev *dev)
254{
255}
256static inline int pci_iov_bus_range(struct pci_bus *bus)
257{
258 return 0;
259}
260#endif /* CONFIG_PCI_IOV */
261
198#endif /* DRIVERS_PCI_H */ 262#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index e390707661dd..32ade5af927e 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,30 +38,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
38MODULE_DESCRIPTION(DRIVER_DESC); 38MODULE_DESCRIPTION(DRIVER_DESC);
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static int __devinit aer_probe (struct pcie_device *dev, 41static int __devinit aer_probe (struct pcie_device *dev);
42 const struct pcie_port_service_id *id );
43static void aer_remove(struct pcie_device *dev); 42static void aer_remove(struct pcie_device *dev);
44static int aer_suspend(struct pcie_device *dev, pm_message_t state)
45{return 0;}
46static int aer_resume(struct pcie_device *dev) {return 0;}
47static pci_ers_result_t aer_error_detected(struct pci_dev *dev, 43static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
48 enum pci_channel_state error); 44 enum pci_channel_state error);
49static void aer_error_resume(struct pci_dev *dev); 45static void aer_error_resume(struct pci_dev *dev);
50static pci_ers_result_t aer_root_reset(struct pci_dev *dev); 46static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
51 47
52/*
53 * PCI Express bus's AER Root service driver data structure
54 */
55static struct pcie_port_service_id aer_id[] = {
56 {
57 .vendor = PCI_ANY_ID,
58 .device = PCI_ANY_ID,
59 .port_type = PCIE_RC_PORT,
60 .service_type = PCIE_PORT_SERVICE_AER,
61 },
62 { /* end: all zeroes */ }
63};
64
65static struct pci_error_handlers aer_error_handlers = { 48static struct pci_error_handlers aer_error_handlers = {
66 .error_detected = aer_error_detected, 49 .error_detected = aer_error_detected,
67 .resume = aer_error_resume, 50 .resume = aer_error_resume,
@@ -69,14 +52,12 @@ static struct pci_error_handlers aer_error_handlers = {
69 52
70static struct pcie_port_service_driver aerdriver = { 53static struct pcie_port_service_driver aerdriver = {
71 .name = "aer", 54 .name = "aer",
72 .id_table = &aer_id[0], 55 .port_type = PCIE_ANY_PORT,
56 .service = PCIE_PORT_SERVICE_AER,
73 57
74 .probe = aer_probe, 58 .probe = aer_probe,
75 .remove = aer_remove, 59 .remove = aer_remove,
76 60
77 .suspend = aer_suspend,
78 .resume = aer_resume,
79
80 .err_handler = &aer_error_handlers, 61 .err_handler = &aer_error_handlers,
81 62
82 .reset_link = aer_root_reset, 63 .reset_link = aer_root_reset,
@@ -207,8 +188,7 @@ static void aer_remove(struct pcie_device *dev)
207 * 188 *
208 * Invoked when PCI Express bus loads AER service driver. 189 * Invoked when PCI Express bus loads AER service driver.
209 **/ 190 **/
210static int __devinit aer_probe (struct pcie_device *dev, 191static int __devinit aer_probe (struct pcie_device *dev)
211 const struct pcie_port_service_id *id )
212{ 192{
213 int status; 193 int status;
214 struct aer_rpc *rpc; 194 struct aer_rpc *rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index ebce26c37049..8edb2f300e8f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
38 38
39 handle = acpi_find_root_bridge_handle(pdev); 39 handle = acpi_find_root_bridge_handle(pdev);
40 if (handle) { 40 if (handle) {
41 status = pci_osc_control_set(handle, 41 status = acpi_pci_osc_control_set(handle,
42 OSC_PCI_EXPRESS_AER_CONTROL | 42 OSC_PCI_EXPRESS_AER_CONTROL |
43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
44 } 44 }
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 382575007382..307452f30035 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -351,21 +351,21 @@ static int find_aer_service_iter(struct device *device, void *data)
351{ 351{
352 struct device_driver *driver; 352 struct device_driver *driver;
353 struct pcie_port_service_driver *service_driver; 353 struct pcie_port_service_driver *service_driver;
354 struct pcie_device *pcie_dev;
355 struct find_aer_service_data *result; 354 struct find_aer_service_data *result;
356 355
357 result = (struct find_aer_service_data *) data; 356 result = (struct find_aer_service_data *) data;
358 357
359 if (device->bus == &pcie_port_bus_type) { 358 if (device->bus == &pcie_port_bus_type) {
360 pcie_dev = to_pcie_device(device); 359 struct pcie_port_data *port_data;
361 if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT) 360
361 port_data = pci_get_drvdata(to_pcie_device(device)->port);
362 if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
362 result->is_downstream = 1; 363 result->is_downstream = 1;
363 364
364 driver = device->driver; 365 driver = device->driver;
365 if (driver) { 366 if (driver) {
366 service_driver = to_service_driver(driver); 367 service_driver = to_service_driver(driver);
367 if (service_driver->id_table->service_type == 368 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
368 PCIE_PORT_SERVICE_AER) {
369 result->aer_driver = service_driver; 369 result->aer_driver = service_driver;
370 return 1; 370 return 1;
371 } 371 }
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2529f3f2ea5a..17ad53868f9f 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,19 +25,21 @@
25#define PCIE_CAPABILITIES_REG 0x2 25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14 26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4 27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
29/*
30 * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
31 * table entries used by port services must not exceed 31
32 */
33#define PCIE_PORT_MAX_MSIX_ENTRIES 32
28 34
29#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 35#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
30 36
31struct pcie_port_device_ext {
32 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
33};
34
35extern struct bus_type pcie_port_bus_type; 37extern struct bus_type pcie_port_bus_type;
36extern int pcie_port_device_probe(struct pci_dev *dev); 38extern int pcie_port_device_probe(struct pci_dev *dev);
37extern int pcie_port_device_register(struct pci_dev *dev); 39extern int pcie_port_device_register(struct pci_dev *dev);
38#ifdef CONFIG_PM 40#ifdef CONFIG_PM
39extern int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state); 41extern int pcie_port_device_suspend(struct device *dev);
40extern int pcie_port_device_resume(struct pci_dev *dev); 42extern int pcie_port_device_resume(struct device *dev);
41#endif 43#endif
42extern void pcie_port_device_remove(struct pci_dev *dev); 44extern void pcie_port_device_remove(struct pci_dev *dev);
43extern int __must_check pcie_port_bus_register(void); 45extern int __must_check pcie_port_bus_register(void);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index eec89b767f9f..ef3a4eeaebb4 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,20 +26,22 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) 26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
27{ 27{
28 struct pcie_device *pciedev; 28 struct pcie_device *pciedev;
29 struct pcie_port_data *port_data;
29 struct pcie_port_service_driver *driver; 30 struct pcie_port_service_driver *driver;
30 31
31 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) 32 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
32 return 0; 33 return 0;
33 34
34 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
35 driver = to_service_driver(drv); 36 driver = to_service_driver(drv);
36 if ( (driver->id_table->vendor != PCI_ANY_ID && 37
37 driver->id_table->vendor != pciedev->id.vendor) || 38 if (driver->service != pciedev->service)
38 (driver->id_table->device != PCI_ANY_ID && 39 return 0;
39 driver->id_table->device != pciedev->id.device) || 40
40 (driver->id_table->port_type != PCIE_ANY_PORT && 41 port_data = pci_get_drvdata(pciedev->port);
41 driver->id_table->port_type != pciedev->id.port_type) || 42
42 driver->id_table->service_type != pciedev->id.service_type ) 43 if (driver->port_type != PCIE_ANY_PORT
44 && driver->port_type != port_data->port_type)
43 return 0; 45 return 0;
44 46
45 return 1; 47 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 8b3f8c18032f..e39982503863 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -15,10 +15,9 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17 17
18#include "../pci.h"
18#include "portdrv.h" 19#include "portdrv.h"
19 20
20extern int pcie_mch_quirk; /* MSI-quirk Indicator */
21
22/** 21/**
23 * release_pcie_device - free PCI Express port service device structure 22 * release_pcie_device - free PCI Express port service device structure
24 * @dev: Port service device to release 23 * @dev: Port service device to release
@@ -31,26 +30,150 @@ static void release_pcie_device(struct device *dev)
31 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
32} 31}
33 32
34static int is_msi_quirked(struct pci_dev *dev) 33/**
34 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
35 * @entries: Array of MSI-X entries
36 * @new_entry: Index of the entry to add to the array
37 * @nr_entries: Number of entries already in the array
38 *
39 * Return value: Position of the added entry in the array
40 */
41static int pcie_port_msix_add_entry(
42 struct msix_entry *entries, int new_entry, int nr_entries)
35{ 43{
36 int port_type, quirk = 0; 44 int j;
45
46 for (j = 0; j < nr_entries; j++)
47 if (entries[j].entry == new_entry)
48 return j;
49
50 entries[j].entry = new_entry;
51 return j;
52}
53
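A quick worked example of the dedup behaviour (the values are made up): suppose PME/Hot-Plug already claimed MSI-X table entry 5.

	struct msix_entry entries[PCIE_PORT_MAX_MSIX_ENTRIES] = { { .entry = 5 } };
	int i, nvec = 1;

	i = pcie_port_msix_add_entry(entries, 5, nvec);	/* i == 0: entry reused   */
	i = pcie_port_msix_add_entry(entries, 7, nvec);	/* i == 1: entry appended */
	if (i == nvec)
		nvec++;					/* nvec is now 2          */

This is how AER ends up sharing a vector with PME/Hot-Plug whenever the hardware reports the same Interrupt Message Number for both.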
54/**
55 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
56 * @dev: PCI Express port to handle
57 * @vectors: Array of interrupt vectors to populate
58 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
59 *
60 * Return value: 0 on success, error code on failure
61 */
62static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
63{
64 struct msix_entry *msix_entries;
65 int idx[PCIE_PORT_DEVICE_MAXSERVICES];
66 int nr_entries, status, pos, i, nvec;
37 u16 reg16; 67 u16 reg16;
68 u32 reg32;
38 69
39 pci_read_config_word(dev, 70 nr_entries = pci_msix_table_size(dev);
40 pci_find_capability(dev, PCI_CAP_ID_EXP) + 71 if (!nr_entries)
41 PCIE_CAPABILITIES_REG, &reg16); 72 return -EINVAL;
42 port_type = (reg16 >> 4) & PORT_TYPE_MASK; 73 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
43 switch(port_type) { 74 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
44 case PCIE_RC_PORT: 75
45 if (pcie_mch_quirk == 1) 76 msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
46 quirk = 1; 77 if (!msix_entries)
47 break; 78 return -ENOMEM;
48 case PCIE_SW_UPSTREAM_PORT: 79
49 case PCIE_SW_DOWNSTREAM_PORT: 80 /*
50 default: 81 * Allocate as many entries as the port wants, so that we can check
51 break; 82 * which of them will be useful. Moreover, if nr_entries is correctly
83 * equal to the number of entries this port actually uses, we'll happily
84 * go through without any tricks.
85 */
86 for (i = 0; i < nr_entries; i++)
87 msix_entries[i].entry = i;
88
89 status = pci_enable_msix(dev, msix_entries, nr_entries);
90 if (status)
91 goto Exit;
92
93 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
94 idx[i] = -1;
95 status = -EIO;
96 nvec = 0;
97
98 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
99 int entry;
100
101 /*
102 * The code below follows the PCI Express Base Specification 2.0
103 * stating in Section 6.1.6 that "PME and Hot-Plug Event
104 * interrupts (when both are implemented) always share the same
105 * MSI or MSI-X vector, as indicated by the Interrupt Message
106 * Number field in the PCI Express Capabilities register", where
107 * according to Section 7.8.2 of the specification "For MSI-X,
108 * the value in this field indicates which MSI-X Table entry is
109 * used to generate the interrupt message."
110 */
111 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
112 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
113 entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
114 if (entry >= nr_entries)
115 goto Error;
116
117 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
118 if (i == nvec)
119 nvec++;
120
121 idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
122 idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
123 }
124
125 if (mask & PCIE_PORT_SERVICE_AER) {
126 int entry;
127
128 /*
129 * The code below follows Section 7.10.10 of the PCI Express
130 * Base Specification 2.0 stating that bits 31-27 of the Root
131 * Error Status Register contain a value indicating which of the
132 * MSI/MSI-X vectors assigned to the port is going to be used
133 * for AER, where "For MSI-X, the value in this register
134 * indicates which MSI-X Table entry is used to generate the
135 * interrupt message."
136 */
137 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
138 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
139 entry = reg32 >> 27;
140 if (entry >= nr_entries)
141 goto Error;
142
143 i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
144 if (i == nvec)
145 nvec++;
146
147 idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
52 } 148 }
53 return quirk; 149
150 /*
151 * If nvec is equal to the allocated number of entries, we can just use
152 * what we have. Otherwise, the port has some extra entries not for the
153 * services we know and we need to work around that.
154 */
155 if (nvec == nr_entries) {
156 status = 0;
157 } else {
158 /* Drop the temporary MSI-X setup */
159 pci_disable_msix(dev);
160
161 /* Now allocate the MSI-X vectors for real */
162 status = pci_enable_msix(dev, msix_entries, nvec);
163 if (status)
164 goto Exit;
165 }
166
167 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
168 vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
169
170 Exit:
171 kfree(msix_entries);
172 return status;
173
174 Error:
175 pci_disable_msix(dev);
176 goto Exit;
54} 177}
55 178
56/** 179/**
@@ -64,47 +187,32 @@ static int is_msi_quirked(struct pci_dev *dev)
64 */ 187 */
65static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
66{ 189{
67 int i, pos, nvec, status = -EINVAL; 190 struct pcie_port_data *port_data = pci_get_drvdata(dev);
68 int interrupt_mode = PCIE_PORT_INTx_MODE; 191 int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
192 int i;
69 193
70 /* Set INTx as default */
71 for (i = 0, nvec = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
72 if (mask & (1 << i))
73 nvec++;
74 vectors[i] = dev->irq;
75 }
76
77 /* Check MSI quirk */ 194 /* Check MSI quirk */
78 if (is_msi_quirked(dev)) 195 if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
79 return interrupt_mode; 196 goto Fallback;
80 197
81 /* Select MSI-X over MSI if supported */ 198 /* Try to use MSI-X if supported */
82 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 199 if (!pcie_port_enable_msix(dev, vectors, mask))
83 if (pos) { 200 return PCIE_PORT_MSIX_MODE;
84 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 201
85 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 202 /* We're not going to use MSI-X, so try MSI and fall back to INTx */
86 status = pci_enable_msix(dev, msix_entries, nvec); 203 if (!pci_enable_msi(dev))
87 if (!status) { 204 interrupt_mode = PCIE_PORT_MSI_MODE;
88 int j = 0; 205
89 206 Fallback:
90 interrupt_mode = PCIE_PORT_MSIX_MODE; 207 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
91 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 208 interrupt_mode = PCIE_PORT_INTx_MODE;
92 if (mask & (1 << i)) 209
93 vectors[i] = msix_entries[j++].vector; 210 irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
94 } 211 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
95 } 212 vectors[i] = irq;
96 } 213
97 if (status) { 214 vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
98 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 215
99 if (pos) {
100 status = pci_enable_msi(dev);
101 if (!status) {
102 interrupt_mode = PCIE_PORT_MSI_MODE;
103 for (i = 0;i < PCIE_PORT_DEVICE_MAXSERVICES;i++)
104 vectors[i] = dev->irq;
105 }
106 }
107 }
108 return interrupt_mode; 216 return interrupt_mode;
109} 217}
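The resulting precedence, in sketch form:

	MSI-X setup succeeds      -> PCIE_PORT_MSIX_MODE, per-service vectors[]
	pci_enable_msi() succeeds -> PCIE_PORT_MSI_MODE,  all services share dev->irq
	dev->pin is wired up      -> PCIE_PORT_INTx_MODE, all services share dev->irq
	otherwise                 -> PCIE_PORT_NO_IRQ,    every vector set to -1

VC is forced to -1 unconditionally because the Virtual Channel service never generates interrupts; that is also why pcie_port_device_register() below can keep a VC-only port alive even in PCIE_PORT_NO_IRQ mode.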
110 218
@@ -132,13 +240,11 @@ static int get_port_device_capability(struct pci_dev *dev)
132 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32); 240 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
133 if (reg32 & SLOT_HP_CAPABLE_MASK) 241 if (reg32 & SLOT_HP_CAPABLE_MASK)
134 services |= PCIE_PORT_SERVICE_HP; 242 services |= PCIE_PORT_SERVICE_HP;
135 } 243 }
136 /* PME Capable - root port capability */ 244 /* AER capable */
137 if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT)
138 services |= PCIE_PORT_SERVICE_PME;
139
140 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 245 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
141 services |= PCIE_PORT_SERVICE_AER; 246 services |= PCIE_PORT_SERVICE_AER;
247 /* VC support */
142 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 248 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
143 services |= PCIE_PORT_SERVICE_VC; 249 services |= PCIE_PORT_SERVICE_VC;
144 250
@@ -152,20 +258,17 @@ static int get_port_device_capability(struct pci_dev *dev)
152 * @port_type: Type of the port 258 * @port_type: Type of the port
153 * @service_type: Type of service to associate with the service device 259 * @service_type: Type of service to associate with the service device
154 * @irq: Interrupt vector to associate with the service device 260 * @irq: Interrupt vector to associate with the service device
155 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
156 */ 261 */
157static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 262static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
158 int port_type, int service_type, int irq, int irq_mode) 263 int service_type, int irq)
159{ 264{
265 struct pcie_port_data *port_data = pci_get_drvdata(parent);
160 struct device *device; 266 struct device *device;
267 int port_type = port_data->port_type;
161 268
162 dev->port = parent; 269 dev->port = parent;
163 dev->interrupt_mode = irq_mode;
164 dev->irq = irq; 270 dev->irq = irq;
165 dev->id.vendor = parent->vendor; 271 dev->service = service_type;
166 dev->id.device = parent->device;
167 dev->id.port_type = port_type;
168 dev->id.service_type = (1 << service_type);
169 272
170 /* Initialize generic device interface */ 273 /* Initialize generic device interface */
171 device = &dev->device; 274 device = &dev->device;
@@ -185,10 +288,9 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
185 * @port_type: Type of the port 288 * @port_type: Type of the port
186 * @service_type: Type of service to associate with the service device 289 * @service_type: Type of service to associate with the service device
187 * @irq: Interrupt vector to associate with the service device 290 * @irq: Interrupt vector to associate with the service device
188 * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
189 */ 291 */
190static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, 292static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
191 int port_type, int service_type, int irq, int irq_mode) 293 int service_type, int irq)
192{ 294{
193 struct pcie_device *device; 295 struct pcie_device *device;
194 296
@@ -196,7 +298,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
196 if (!device) 298 if (!device)
197 return NULL; 299 return NULL;
198 300
199 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 301 pcie_device_init(parent, device, service_type, irq);
200 return device; 302 return device;
201} 303}
202 304
@@ -230,63 +332,90 @@ int pcie_port_device_probe(struct pci_dev *dev)
230 */ 332 */
231int pcie_port_device_register(struct pci_dev *dev) 333int pcie_port_device_register(struct pci_dev *dev)
232{ 334{
233 struct pcie_port_device_ext *p_ext; 335 struct pcie_port_data *port_data;
234 int status, type, capabilities, irq_mode, i; 336 int status, capabilities, irq_mode, i, nr_serv;
235 int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; 337 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
236 u16 reg16; 338 u16 reg16;
237 339
238 /* Allocate port device extension */ 340 port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
239 if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL))) 341 if (!port_data)
240 return -ENOMEM; 342 return -ENOMEM;
241 343 pci_set_drvdata(dev, port_data);
242 pci_set_drvdata(dev, p_ext);
243 344
244 /* Get port type */ 345 /* Get port type */
245 pci_read_config_word(dev, 346 pci_read_config_word(dev,
246 pci_find_capability(dev, PCI_CAP_ID_EXP) + 347 pci_find_capability(dev, PCI_CAP_ID_EXP) +
247 PCIE_CAPABILITIES_REG, &reg16); 348 PCIE_CAPABILITIES_REG, &reg16);
248 type = (reg16 >> 4) & PORT_TYPE_MASK; 349 port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
249 350
250 /* Now get port services */
251 capabilities = get_port_device_capability(dev); 351 capabilities = get_port_device_capability(dev);
352 /* Root ports are capable of generating PME too */
353 if (port_data->port_type == PCIE_RC_PORT)
354 capabilities |= PCIE_PORT_SERVICE_PME;
355
252 irq_mode = assign_interrupt_mode(dev, vectors, capabilities); 356 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
253 p_ext->interrupt_mode = irq_mode; 357 if (irq_mode == PCIE_PORT_NO_IRQ) {
358 /*
359 * Don't use service devices that require interrupts if there is
360 * no way to generate them.
361 */
362 if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
363 status = -ENODEV;
364 goto Error;
365 }
366 capabilities = PCIE_PORT_SERVICE_VC;
367 }
368 port_data->port_irq_mode = irq_mode;
369
370 status = pci_enable_device(dev);
371 if (status)
372 goto Error;
373 pci_set_master(dev);
254 374
255 /* Allocate child services if any */ 375 /* Allocate child services if any */
256 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 376 for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
257 struct pcie_device *child; 377 struct pcie_device *child;
378 int service = 1 << i;
379
380 if (!(capabilities & service))
381 continue;
258 382
259 if (capabilities & (1 << i)) { 383 child = alloc_pcie_device(dev, service, vectors[i]);
260 child = alloc_pcie_device( 384 if (!child)
261 dev, /* parent */ 385 continue;
262 type, /* port type */ 386
263 i, /* service type */ 387 status = device_register(&child->device);
264 vectors[i], /* irq */ 388 if (status) {
265 irq_mode /* interrupt mode */); 389 kfree(child);
266 if (child) { 390 continue;
267 status = device_register(&child->device);
268 if (status) {
269 kfree(child);
270 continue;
271 }
272 get_device(&child->device);
273 }
274 } 391 }
392
393 get_device(&child->device);
394 nr_serv++;
395 }
396 if (!nr_serv) {
397 pci_disable_device(dev);
398 status = -ENODEV;
399 goto Error;
275 } 400 }
401
276 return 0; 402 return 0;
403
404 Error:
405 kfree(port_data);
406 return status;
277} 407}
278 408
279#ifdef CONFIG_PM 409#ifdef CONFIG_PM
280static int suspend_iter(struct device *dev, void *data) 410static int suspend_iter(struct device *dev, void *data)
281{ 411{
282 struct pcie_port_service_driver *service_driver; 412 struct pcie_port_service_driver *service_driver;
283 pm_message_t state = * (pm_message_t *) data;
284 413
285 if ((dev->bus == &pcie_port_bus_type) && 414 if ((dev->bus == &pcie_port_bus_type) &&
286 (dev->driver)) { 415 (dev->driver)) {
287 service_driver = to_service_driver(dev->driver); 416 service_driver = to_service_driver(dev->driver);
288 if (service_driver->suspend) 417 if (service_driver->suspend)
289 service_driver->suspend(to_pcie_device(dev), state); 418 service_driver->suspend(to_pcie_device(dev));
290 } 419 }
291 return 0; 420 return 0;
292} 421}
@@ -294,11 +423,10 @@ static int suspend_iter(struct device *dev, void *data)
294/** 423/**
295 * pcie_port_device_suspend - suspend port services associated with a PCIe port 424 * pcie_port_device_suspend - suspend port services associated with a PCIe port
296 * @dev: PCI Express port to handle 425 * @dev: PCI Express port to handle
297 * @state: Representation of system power management transition in progress
298 */ 426 */
299int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) 427int pcie_port_device_suspend(struct device *dev)
300{ 428{
301 return device_for_each_child(&dev->dev, &state, suspend_iter); 429 return device_for_each_child(dev, NULL, suspend_iter);
302} 430}
303 431
304static int resume_iter(struct device *dev, void *data) 432static int resume_iter(struct device *dev, void *data)
@@ -318,24 +446,17 @@ static int resume_iter(struct device *dev, void *data)
318 * pcie_port_device_resume - resume port services associated with a PCIe port 446 * pcie_port_device_resume - resume port services associated with a PCIe port
319 * @dev: PCI Express port to handle 447 * @dev: PCI Express port to handle
320 */ 448 */
321int pcie_port_device_resume(struct pci_dev *dev) 449int pcie_port_device_resume(struct device *dev)
322{ 450{
323 return device_for_each_child(&dev->dev, NULL, resume_iter); 451 return device_for_each_child(dev, NULL, resume_iter);
324} 452}
325#endif 453#endif /* PM */
326 454
327static int remove_iter(struct device *dev, void *data) 455static int remove_iter(struct device *dev, void *data)
328{ 456{
329 struct pcie_port_service_driver *service_driver;
330
331 if (dev->bus == &pcie_port_bus_type) { 457 if (dev->bus == &pcie_port_bus_type) {
332 if (dev->driver) { 458 put_device(dev);
333 service_driver = to_service_driver(dev->driver); 459 device_unregister(dev);
334 if (service_driver->remove)
335 service_driver->remove(to_pcie_device(dev));
336 }
337 *(unsigned long*)data = (unsigned long)dev;
338 return 1;
339 } 460 }
340 return 0; 461 return 0;
341} 462}
@@ -349,25 +470,21 @@ static int remove_iter(struct device *dev, void *data)
349 */ 470 */
350void pcie_port_device_remove(struct pci_dev *dev) 471void pcie_port_device_remove(struct pci_dev *dev)
351{ 472{
352 struct device *device; 473 struct pcie_port_data *port_data = pci_get_drvdata(dev);
353 unsigned long device_addr;
354 int interrupt_mode = PCIE_PORT_INTx_MODE;
355 int status;
356 474
357 do { 475 device_for_each_child(&dev->dev, NULL, remove_iter);
358 status = device_for_each_child(&dev->dev, &device_addr, remove_iter); 476 pci_disable_device(dev);
359 if (status) { 477
360 device = (struct device*)device_addr; 478 switch (port_data->port_irq_mode) {
361 interrupt_mode = (to_pcie_device(device))->interrupt_mode; 479 case PCIE_PORT_MSIX_MODE:
362 put_device(device);
363 device_unregister(device);
364 }
365 } while (status);
366 /* Switch to INTx by default if MSI enabled */
367 if (interrupt_mode == PCIE_PORT_MSIX_MODE)
368 pci_disable_msix(dev); 480 pci_disable_msix(dev);
369 else if (interrupt_mode == PCIE_PORT_MSI_MODE) 481 break;
482 case PCIE_PORT_MSI_MODE:
370 pci_disable_msi(dev); 483 pci_disable_msi(dev);
484 break;
485 }
486
487 kfree(port_data);
371} 488}
372 489
373/** 490/**
@@ -392,7 +509,7 @@ static int pcie_port_probe_service(struct device *dev)
392 return -ENODEV; 509 return -ENODEV;
393 510
394 pciedev = to_pcie_device(dev); 511 pciedev = to_pcie_device(dev);
395 status = driver->probe(pciedev, driver->id_table); 512 status = driver->probe(pciedev);
396 if (!status) { 513 if (!status) {
397 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 514 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
398 driver->name); 515 driver->name);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 5ea566e20b37..b924e2463f85 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -32,11 +32,6 @@ MODULE_LICENSE("GPL");
32/* global data */ 32/* global data */
33static const char device_name[] = "pcieport-driver"; 33static const char device_name[] = "pcieport-driver";
34 34
35static int pcie_portdrv_save_config(struct pci_dev *dev)
36{
37 return pci_save_state(dev);
38}
39
40static int pcie_portdrv_restore_config(struct pci_dev *dev) 35static int pcie_portdrv_restore_config(struct pci_dev *dev)
41{ 36{
42 int retval; 37 int retval;
@@ -49,21 +44,21 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
49} 44}
50 45
51#ifdef CONFIG_PM 46#ifdef CONFIG_PM
52static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) 47static struct dev_pm_ops pcie_portdrv_pm_ops = {
53{ 48 .suspend = pcie_port_device_suspend,
54 return pcie_port_device_suspend(dev, state); 49 .resume = pcie_port_device_resume,
50 .freeze = pcie_port_device_suspend,
51 .thaw = pcie_port_device_resume,
52 .poweroff = pcie_port_device_suspend,
53 .restore = pcie_port_device_resume,
54};
55 55
56} 56#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
57 57
58static int pcie_portdrv_resume(struct pci_dev *dev) 58#else /* !PM */
59{ 59
60 pci_set_master(dev); 60#define PCIE_PORTDRV_PM_OPS NULL
61 return pcie_port_device_resume(dev); 61#endif /* !PM */
62}
63#else
64#define pcie_portdrv_suspend NULL
65#define pcie_portdrv_resume NULL
66#endif
67 62
68/* 63/*
69 * pcie_portdrv_probe - Probe PCI-Express port devices 64 * pcie_portdrv_probe - Probe PCI-Express port devices
@@ -82,20 +77,15 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
82 if (status) 77 if (status)
83 return status; 78 return status;
84 79
85 if (pci_enable_device(dev) < 0)
86 return -ENODEV;
87
88 pci_set_master(dev);
89 if (!dev->irq && dev->pin) { 80 if (!dev->irq && dev->pin) {
90 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 81 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
91 "check vendor BIOS\n", dev->vendor, dev->device); 82 "check vendor BIOS\n", dev->vendor, dev->device);
92 } 83 }
93 if (pcie_port_device_register(dev)) { 84 status = pcie_port_device_register(dev);
94 pci_disable_device(dev); 85 if (status)
95 return -ENOMEM; 86 return status;
96 }
97 87
98 pcie_portdrv_save_config(dev); 88 pci_save_state(dev);
99 89
100 return 0; 90 return 0;
101} 91}
@@ -104,7 +94,6 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
104{ 94{
105 pcie_port_device_remove(dev); 95 pcie_port_device_remove(dev);
106 pci_disable_device(dev); 96 pci_disable_device(dev);
107 kfree(pci_get_drvdata(dev));
108} 97}
109 98
110static int error_detected_iter(struct device *device, void *data) 99static int error_detected_iter(struct device *device, void *data)
@@ -278,10 +267,9 @@ static struct pci_driver pcie_portdriver = {
278 .probe = pcie_portdrv_probe, 267 .probe = pcie_portdrv_probe,
279 .remove = pcie_portdrv_remove, 268 .remove = pcie_portdrv_remove,
280 269
281 .suspend = pcie_portdrv_suspend,
282 .resume = pcie_portdrv_resume,
283
284 .err_handler = &pcie_portdrv_err_handler, 270 .err_handler = &pcie_portdrv_err_handler,
271
272 .driver.pm = PCIE_PORTDRV_PM_OPS,
285}; 273};
286 274
287static int __init pcie_portdrv_init(void) 275static int __init pcie_portdrv_init(void)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 55ec44a27e89..e2f3dd098cfa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -287,7 +287,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
287 struct resource *res; 287 struct resource *res;
288 int i; 288 int i;
289 289
290 if (!dev) /* It's a host bus, nothing to read */ 290 if (!child->parent) /* It's a host bus, nothing to read */
291 return; 291 return;
292 292
293 if (dev->transparent) { 293 if (dev->transparent) {
@@ -511,21 +511,21 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 511
512 /* 512 /*
513 * If we already got to this bus through a different bridge, 513 * If we already got to this bus through a different bridge,
514 * ignore it. This can happen with the i450NX chipset. 514 * don't re-add it. This can happen with the i450NX chipset.
515 *
516 * However, we continue to descend down the hierarchy and
517 * scan remaining child buses.
515 */ 518 */
516 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 519 child = pci_find_bus(pci_domain_nr(bus), busnr);
517 dev_info(&dev->dev, "bus %04x:%02x already known\n", 520 if (!child) {
518 pci_domain_nr(bus), busnr); 521 child = pci_add_new_bus(bus, dev, busnr);
519 goto out; 522 if (!child)
523 goto out;
524 child->primary = buses & 0xFF;
525 child->subordinate = (buses >> 16) & 0xFF;
526 child->bridge_ctl = bctl;
520 } 527 }
521 528
522 child = pci_add_new_bus(bus, dev, busnr);
523 if (!child)
524 goto out;
525 child->primary = buses & 0xFF;
526 child->subordinate = (buses >> 16) & 0xFF;
527 child->bridge_ctl = bctl;
528
529 cmax = pci_scan_child_bus(child); 529 cmax = pci_scan_child_bus(child);
530 if (cmax > max) 530 if (cmax > max)
531 max = cmax; 531 max = cmax;
@@ -674,6 +674,19 @@ static void pci_read_irq(struct pci_dev *dev)
674 dev->irq = irq; 674 dev->irq = irq;
675} 675}
676 676
677static void set_pcie_port_type(struct pci_dev *pdev)
678{
679 int pos;
680 u16 reg16;
681
682 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
683 if (!pos)
684 return;
685 pdev->is_pcie = 1;
686 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
687 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
688}
689
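Hoisting set_pcie_port_type() lets pci_setup_device() record the port type before anything else consumes the struct. Callers can then test it directly, e.g. (a sketch; PCI_EXP_TYPE_ROOT_PORT is the standard encoding from pci_regs.h):

	if (pdev->is_pcie && pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
		/* root-port specific handling */;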
677#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 690#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
678 691
679/** 692/**
@@ -683,12 +696,33 @@ static void pci_read_irq(struct pci_dev *dev)
683 * Initialize the device structure with information about the device's 696 * Initialize the device structure with information about the device's
684 * vendor, class, memory and IO-space addresses, IRQ lines etc. 697 * vendor, class, memory and IO-space addresses, IRQ lines etc.
685 * Called at initialisation of the PCI subsystem and by CardBus services. 698 * Called at initialisation of the PCI subsystem and by CardBus services.
686 * Returns 0 on success and -1 if unknown type of device (not normal, bridge 699 * Returns 0 on success and negative if unknown type of device (not normal,
687 * or CardBus). 700 * bridge or CardBus).
688 */ 701 */
689static int pci_setup_device(struct pci_dev * dev) 702int pci_setup_device(struct pci_dev *dev)
690{ 703{
691 u32 class; 704 u32 class;
705 u8 hdr_type;
706 struct pci_slot *slot;
707
708 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
709 return -EIO;
710
711 dev->sysdata = dev->bus->sysdata;
712 dev->dev.parent = dev->bus->bridge;
713 dev->dev.bus = &pci_bus_type;
714 dev->hdr_type = hdr_type & 0x7f;
715 dev->multifunction = !!(hdr_type & 0x80);
716 dev->error_state = pci_channel_io_normal;
717 set_pcie_port_type(dev);
718
719 list_for_each_entry(slot, &dev->bus->slots, list)
720 if (PCI_SLOT(dev->devfn) == slot->number)
721 dev->slot = slot;
722
723 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
724 set this higher, assuming the system even supports it. */
725 dev->dma_mask = 0xffffffff;
692 726
693 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 727 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
694 dev->bus->number, PCI_SLOT(dev->devfn), 728 dev->bus->number, PCI_SLOT(dev->devfn),
@@ -703,12 +737,14 @@ static int pci_setup_device(struct pci_dev * dev)
703 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", 737 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
704 dev->vendor, dev->device, class, dev->hdr_type); 738 dev->vendor, dev->device, class, dev->hdr_type);
705 739
740 /* need to have dev->class ready */
741 dev->cfg_size = pci_cfg_space_size(dev);
742
706 /* "Unknown power state" */ 743 /* "Unknown power state" */
707 dev->current_state = PCI_UNKNOWN; 744 dev->current_state = PCI_UNKNOWN;
708 745
709 /* Early fixups, before probing the BARs */ 746 /* Early fixups, before probing the BARs */
710 pci_fixup_device(pci_fixup_early, dev); 747 pci_fixup_device(pci_fixup_early, dev);
711 class = dev->class >> 8;
712 748
713 switch (dev->hdr_type) { /* header type */ 749 switch (dev->hdr_type) { /* header type */
714 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 750 case PCI_HEADER_TYPE_NORMAL: /* standard header */
@@ -770,7 +806,7 @@ static int pci_setup_device(struct pci_dev * dev)
770 default: /* unknown header */ 806 default: /* unknown header */
771 dev_err(&dev->dev, "unknown header type %02x, " 807 dev_err(&dev->dev, "unknown header type %02x, "
772 "ignoring device\n", dev->hdr_type); 808 "ignoring device\n", dev->hdr_type);
773 return -1; 809 return -EIO;
774 810
775 bad: 811 bad:
776 dev_err(&dev->dev, "ignoring class %02x (doesn't match header " 812 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
@@ -785,6 +821,7 @@ static int pci_setup_device(struct pci_dev * dev)
785static void pci_release_capabilities(struct pci_dev *dev) 821static void pci_release_capabilities(struct pci_dev *dev)
786{ 822{
787 pci_vpd_release(dev); 823 pci_vpd_release(dev);
824 pci_iov_release(dev);
788} 825}
789 826
790/** 827/**
@@ -803,19 +840,6 @@ static void pci_release_dev(struct device *dev)
803 kfree(pci_dev); 840 kfree(pci_dev);
804} 841}
805 842
806static void set_pcie_port_type(struct pci_dev *pdev)
807{
808 int pos;
809 u16 reg16;
810
811 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
812 if (!pos)
813 return;
814 pdev->is_pcie = 1;
815 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
816 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
817}
818
819/** 843/**
820 * pci_cfg_space_size - get the configuration space size of the PCI device. 844 * pci_cfg_space_size - get the configuration space size of the PCI device.
821 * @dev: PCI device 845 * @dev: PCI device
@@ -847,6 +871,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
847{ 871{
848 int pos; 872 int pos;
849 u32 status; 873 u32 status;
874 u16 class;
875
876 class = dev->class >> 8;
877 if (class == PCI_CLASS_BRIDGE_HOST)
878 return pci_cfg_space_size_ext(dev);
850 879
851 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 880 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
852 if (!pos) { 881 if (!pos) {
@@ -891,9 +920,7 @@ EXPORT_SYMBOL(alloc_pci_dev);
891static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) 920static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
892{ 921{
893 struct pci_dev *dev; 922 struct pci_dev *dev;
894 struct pci_slot *slot;
895 u32 l; 923 u32 l;
896 u8 hdr_type;
897 int delay = 1; 924 int delay = 1;
898 925
899 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 926 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
@@ -920,34 +947,16 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
920 } 947 }
921 } 948 }
922 949
923 if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
924 return NULL;
925
926 dev = alloc_pci_dev(); 950 dev = alloc_pci_dev();
927 if (!dev) 951 if (!dev)
928 return NULL; 952 return NULL;
929 953
930 dev->bus = bus; 954 dev->bus = bus;
931 dev->sysdata = bus->sysdata;
932 dev->dev.parent = bus->bridge;
933 dev->dev.bus = &pci_bus_type;
934 dev->devfn = devfn; 955 dev->devfn = devfn;
935 dev->hdr_type = hdr_type & 0x7f;
936 dev->multifunction = !!(hdr_type & 0x80);
937 dev->vendor = l & 0xffff; 956 dev->vendor = l & 0xffff;
938 dev->device = (l >> 16) & 0xffff; 957 dev->device = (l >> 16) & 0xffff;
939 dev->cfg_size = pci_cfg_space_size(dev);
940 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev);
942
943 list_for_each_entry(slot, &bus->slots, list)
944 if (PCI_SLOT(devfn) == slot->number)
945 dev->slot = slot;
946 958
947 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) 959 if (pci_setup_device(dev)) {
948 set this higher, assuming the system even supports it. */
949 dev->dma_mask = 0xffffffff;
950 if (pci_setup_device(dev) < 0) {
951 kfree(dev); 960 kfree(dev);
952 return NULL; 961 return NULL;
953 } 962 }
@@ -972,6 +981,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
972 981
973 /* Alternative Routing-ID Forwarding */ 982 /* Alternative Routing-ID Forwarding */
974 pci_enable_ari(dev); 983 pci_enable_ari(dev);
984
985 /* Single Root I/O Virtualization */
986 pci_iov_init(dev);
975} 987}
976 988
977void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 989void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1006,6 +1018,12 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1006{ 1018{
1007 struct pci_dev *dev; 1019 struct pci_dev *dev;
1008 1020
1021 dev = pci_get_slot(bus, devfn);
1022 if (dev) {
1023 pci_dev_put(dev);
1024 return dev;
1025 }
1026
1009 dev = pci_scan_device(bus, devfn); 1027 dev = pci_scan_device(bus, devfn);
1010 if (!dev) 1028 if (!dev)
1011 return NULL; 1029 return NULL;
@@ -1024,35 +1042,27 @@ EXPORT_SYMBOL(pci_scan_single_device);
1024 * Scan a PCI slot on the specified PCI bus for devices, adding 1042 * Scan a PCI slot on the specified PCI bus for devices, adding
1025 * discovered devices to the @bus->devices list. New devices 1043 * discovered devices to the @bus->devices list. New devices
1026 * will not have is_added set. 1044 * will not have is_added set.
1045 *
1046 * Returns the number of new devices found.
1027 */ 1047 */
1028int pci_scan_slot(struct pci_bus *bus, int devfn) 1048int pci_scan_slot(struct pci_bus *bus, int devfn)
1029{ 1049{
1030 int func, nr = 0; 1050 int fn, nr = 0;
1031 int scan_all_fns; 1051 struct pci_dev *dev;
1032
1033 scan_all_fns = pcibios_scan_all_fns(bus, devfn);
1034
1035 for (func = 0; func < 8; func++, devfn++) {
1036 struct pci_dev *dev;
1037
1038 dev = pci_scan_single_device(bus, devfn);
1039 if (dev) {
1040 nr++;
1041 1052
1042 /* 1053 dev = pci_scan_single_device(bus, devfn);
1043 * If this is a single function device, 1054 if (dev && !dev->is_added) /* new device? */
1044 * don't scan past the first function. 1055 nr++;
1045 */ 1056
1046 if (!dev->multifunction) { 1057 if ((dev && dev->multifunction) ||
1047 if (func > 0) { 1058 (!dev && pcibios_scan_all_fns(bus, devfn))) {
1048 dev->multifunction = 1; 1059 for (fn = 1; fn < 8; fn++) {
1049 } else { 1060 dev = pci_scan_single_device(bus, devfn + fn);
1050 break; 1061 if (dev) {
1051 } 1062 if (!dev->is_added)
1063 nr++;
1064 dev->multifunction = 1;
1052 } 1065 }
1053 } else {
1054 if (func == 0 && !scan_all_fns)
1055 break;
1056 } 1066 }
1057 } 1067 }
1058 1068
@@ -1074,12 +1084,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1074 for (devfn = 0; devfn < 0x100; devfn += 8) 1084 for (devfn = 0; devfn < 0x100; devfn += 8)
1075 pci_scan_slot(bus, devfn); 1085 pci_scan_slot(bus, devfn);
1076 1086
1087 /* Reserve buses for SR-IOV capability. */
1088 max += pci_iov_bus_range(bus);
1089
1077 /* 1090 /*
1078 * After performing arch-dependent fixup of the bus, look behind 1091 * After performing arch-dependent fixup of the bus, look behind
1079 * all PCI-to-PCI bridges on this bus. 1092 * all PCI-to-PCI bridges on this bus.
1080 */ 1093 */
1081 pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1094 if (!bus->is_added) {
1082 pcibios_fixup_bus(bus); 1095 pr_debug("PCI: Fixups for bus %04x:%02x\n",
1096 pci_domain_nr(bus), bus->number);
1097 pcibios_fixup_bus(bus);
1098 if (pci_is_root_bus(bus))
1099 bus->is_added = 1;
1100 }
1101
1083 for (pass=0; pass < 2; pass++) 1102 for (pass=0; pass < 2; pass++)
1084 list_for_each_entry(dev, &bus->devices, bus_list) { 1103 list_for_each_entry(dev, &bus->devices, bus_list) {
1085 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 1104 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1114,7 +1133,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
1114 if (!b) 1133 if (!b)
1115 return NULL; 1134 return NULL;
1116 1135
1117 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 1136 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1118 if (!dev){ 1137 if (!dev){
1119 kfree(b); 1138 kfree(b);
1120 return NULL; 1139 return NULL;
@@ -1133,7 +1152,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
1133 list_add_tail(&b->node, &pci_root_buses); 1152 list_add_tail(&b->node, &pci_root_buses);
1134 up_write(&pci_bus_sem); 1153 up_write(&pci_bus_sem);
1135 1154
1136 memset(dev, 0, sizeof(*dev));
1137 dev->parent = parent; 1155 dev->parent = parent;
1138 dev->release = pci_release_bus_bridge_dev; 1156 dev->release = pci_release_bus_bridge_dev;
1139 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); 1157 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
@@ -1193,6 +1211,38 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1193EXPORT_SYMBOL(pci_scan_bus_parented); 1211EXPORT_SYMBOL(pci_scan_bus_parented);
1194 1212
1195#ifdef CONFIG_HOTPLUG 1213#ifdef CONFIG_HOTPLUG
1214/**
1215 * pci_rescan_bus - scan a PCI bus for devices.
1216 * @bus: PCI bus to scan
1217 *
 1218 * Scan a PCI bus and its child buses for new devices, add them,
 1219 * and enable them.
 1220 *
 1221 * Returns the highest subordinate bus number discovered.
1222 */
1223unsigned int __devinit pci_rescan_bus(struct pci_bus *bus)
1224{
1225 unsigned int max;
1226 struct pci_dev *dev;
1227
1228 max = pci_scan_child_bus(bus);
1229
1230 down_read(&pci_bus_sem);
1231 list_for_each_entry(dev, &bus->devices, bus_list)
1232 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1233 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1234 if (dev->subordinate)
1235 pci_bus_size_bridges(dev->subordinate);
1236 up_read(&pci_bus_sem);
1237
1238 pci_bus_assign_resources(bus);
1239 pci_enable_bridges(bus);
1240 pci_bus_add_devices(bus);
1241
1242 return max;
1243}
1244EXPORT_SYMBOL_GPL(pci_rescan_bus);
1245
1196EXPORT_SYMBOL(pci_add_new_bus); 1246EXPORT_SYMBOL(pci_add_new_bus);
1197EXPORT_SYMBOL(pci_scan_slot); 1247EXPORT_SYMBOL(pci_scan_slot);
1198EXPORT_SYMBOL(pci_scan_bridge); 1248EXPORT_SYMBOL(pci_scan_bridge);
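The pci_rescan_bus() export above gives hotplug code a single call that scans, sizes bridges, assigns resources, enables bridges, and adds devices. A minimal sketch of a caller, assuming the simplest policy of rescanning every known bus; example_rescan_all() is an illustrative name, not part of this patch:

#include <linux/pci.h>

/* Illustrative only: walk every known bus and rescan it for
 * hot-added devices. Only pci_find_next_bus() and pci_rescan_bus()
 * are real interfaces here. */
static void example_rescan_all(void)
{
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		pci_rescan_bus(bus);
}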
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 92b9efe9bcaf..9b2f0d96900d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/pci-aspm.h> 26#include <linux/pci-aspm.h>
27#include <linux/ioport.h>
27#include "pci.h" 28#include "pci.h"
28 29
29int isa_dma_bridge_buggy; 30int isa_dma_bridge_buggy;
@@ -34,6 +35,65 @@ int pcie_mch_quirk;
34EXPORT_SYMBOL(pcie_mch_quirk); 35EXPORT_SYMBOL(pcie_mch_quirk);
35 36
36#ifdef CONFIG_PCI_QUIRKS 37#ifdef CONFIG_PCI_QUIRKS
38/*
 39 * This quirk disables the device and releases the resources of any
 40 * device specified by the kernel boot parameter 'pci=resource_alignment='.
 41 * It also rounds the resource size up to the specified alignment.
 42 * Later on, the kernel will assign page-aligned memory resources back
 43 * to that device.
44 */
45static void __devinit quirk_resource_alignment(struct pci_dev *dev)
46{
47 int i;
48 struct resource *r;
49 resource_size_t align, size;
50
51 if (!pci_is_reassigndev(dev))
52 return;
53
54 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
55 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
56 dev_warn(&dev->dev,
57 "Can't reassign resources to host bridge.\n");
58 return;
59 }
60
 61 dev_info(&dev->dev, "Disabling device and releasing resources.\n");
62 pci_disable_device(dev);
63
64 align = pci_specified_resource_alignment(dev);
 65 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
66 r = &dev->resource[i];
67 if (!(r->flags & IORESOURCE_MEM))
68 continue;
69 size = resource_size(r);
70 if (size < align) {
71 size = align;
72 dev_info(&dev->dev,
73 "Rounding up size of resource #%d to %#llx.\n",
74 i, (unsigned long long)size);
75 }
76 r->end = size - 1;
77 r->start = 0;
78 }
 79 /* Need to disable the bridge's resource windows so that
 80 * the kernel can reassign new resource windows to it
 81 * later on.
82 */
83 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
84 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
85 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
86 r = &dev->resource[i];
87 if (!(r->flags & IORESOURCE_MEM))
88 continue;
89 r->end = resource_size(r) - 1;
90 r->start = 0;
91 }
92 pci_disable_bridge_window(dev);
93 }
94}
95DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_resource_alignment);
96
37/* The Mellanox Tavor device gives false positive parity errors 97/* The Mellanox Tavor device gives false positive parity errors
38 * Mark this device with a broken_parity_status, to allow 98 * Mark this device with a broken_parity_status, to allow
39 * PCI scanning code to "skip" this now blacklisted device. 99 * PCI scanning code to "skip" this now blacklisted device.
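quirk_resource_alignment() above acts on devices named by the new 'pci=resource_alignment=' boot parameter; its companion parser, pci_specified_resource_alignment(), lives in pci.c and is not shown in this hunk. The parameter takes the form [<order of align>@][<domain>:]<bus>:<slot>.<func>, where the order is the log2 of the requested alignment and, when omitted, the alignment defaults to page size. A hypothetical example, with a made-up device address:

	pci=resource_alignment=12@0000:00:1c.0

This would request 4 KiB (2^12) alignment for the memory BARs of device 0000:00:1c.0; the quirk then zeroes and rounds up those BARs so the kernel reassigns them with the requested alignment.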
@@ -1126,10 +1186,15 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1126 * its on-board VGA controller */ 1186 * its on-board VGA controller */
1127 asus_hides_smbus = 1; 1187 asus_hides_smbus = 1;
1128 } 1188 }
1129 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) 1189 else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1130 switch(dev->subsystem_device) { 1190 switch(dev->subsystem_device) {
1131 case 0x00b8: /* Compaq Evo D510 CMT */ 1191 case 0x00b8: /* Compaq Evo D510 CMT */
1132 case 0x00b9: /* Compaq Evo D510 SFF */ 1192 case 0x00b9: /* Compaq Evo D510 SFF */
 1193 /* The motherboard doesn't have host bridge
 1194 * subvendor/subdevice IDs, and the on-board VGA
 1195 * controller is disabled if an AGP card is
 1196 * inserted, so we check USB UHCI
 1197 * Controller #1 instead. */
1133 asus_hides_smbus = 1; 1198 asus_hides_smbus = 1;
1134 } 1199 }
1135 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) 1200 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
@@ -1154,7 +1219,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1155 1220
1156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); 1222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
1158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); 1223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1159 1224
1160static void asus_hides_smbus_lpc(struct pci_dev *dev) 1225static void asus_hides_smbus_lpc(struct pci_dev *dev)
@@ -1664,9 +1729,13 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1664 * of parallel ports and <S> is the number of serial ports. 1729 * of parallel ports and <S> is the number of serial ports.
1665 */ 1730 */
1666 switch (dev->device) { 1731 switch (dev->device) {
1732 case PCI_DEVICE_ID_NETMOS_9835:
1733 /* Well, this rule doesn't hold for the following 9835 device */
1734 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
1735 dev->subsystem_device == 0x0299)
1736 return;
1667 case PCI_DEVICE_ID_NETMOS_9735: 1737 case PCI_DEVICE_ID_NETMOS_9735:
1668 case PCI_DEVICE_ID_NETMOS_9745: 1738 case PCI_DEVICE_ID_NETMOS_9745:
1669 case PCI_DEVICE_ID_NETMOS_9835:
1670 case PCI_DEVICE_ID_NETMOS_9845: 1739 case PCI_DEVICE_ID_NETMOS_9845:
1671 case PCI_DEVICE_ID_NETMOS_9855: 1740 case PCI_DEVICE_ID_NETMOS_9855:
1672 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && 1741 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
@@ -2078,6 +2147,92 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2078 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2147 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2079 nvenet_msi_disable); 2148 nvenet_msi_disable);
2080 2149
2150static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2151{
2152 int pos, ttl = 48;
2153 int found = 0;
2154
 2155 /* check whether the HT MSI mapping cap is present or enabled on this device */
2156 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2157 while (pos && ttl--) {
2158 u8 flags;
2159
2160 if (found < 1)
2161 found = 1;
2162 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2163 &flags) == 0) {
2164 if (flags & HT_MSI_FLAGS_ENABLE) {
2165 if (found < 2) {
2166 found = 2;
2167 break;
2168 }
2169 }
2170 }
2171 pos = pci_find_next_ht_capability(dev, pos,
2172 HT_CAPTYPE_MSI_MAPPING);
2173 }
2174
2175 return found;
2176}
2177
2178static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
2179{
2180 struct pci_dev *dev;
2181 int pos;
2182 int i, dev_no;
2183 int found = 0;
2184
2185 dev_no = host_bridge->devfn >> 3;
2186 for (i = dev_no + 1; i < 0x20; i++) {
2187 dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2188 if (!dev)
2189 continue;
2190
 2191 /* found the next host bridge? */
2192 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2193 if (pos != 0) {
2194 pci_dev_put(dev);
2195 break;
2196 }
2197
2198 if (ht_check_msi_mapping(dev)) {
2199 found = 1;
2200 pci_dev_put(dev);
2201 break;
2202 }
2203 pci_dev_put(dev);
2204 }
2205
2206 return found;
2207}
2208
 2209#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control 0 */
 2210#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control 1 */
2211
2212static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
2213{
2214 int pos, ctrl_off;
2215 int end = 0;
2216 u16 flags, ctrl;
2217
2218 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2219
2220 if (!pos)
2221 goto out;
2222
2223 pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2224
2225 ctrl_off = ((flags >> 10) & 1) ?
2226 PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2227 pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2228
2229 if (ctrl & (1 << 6))
2230 end = 1;
2231
2232out:
2233 return end;
2234}
2235
2081static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev) 2236static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2082{ 2237{
2083 struct pci_dev *host_bridge; 2238 struct pci_dev *host_bridge;
@@ -2102,6 +2257,11 @@ static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
2102 if (!found) 2257 if (!found)
2103 return; 2258 return;
2104 2259
 2260 /* don't enable directly on a host bridge that ends the HT chain and has a leaf device */
2261 if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2262 host_bridge_with_leaf(host_bridge))
2263 goto out;
2264
 2105 /* the root bridge already did that */ 2265 /* the root bridge already did that */
2106 if (msi_ht_cap_enabled(host_bridge)) 2266 if (msi_ht_cap_enabled(host_bridge))
2107 goto out; 2267 goto out;
@@ -2132,44 +2292,12 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
2132 } 2292 }
2133} 2293}
2134 2294
2135static int __devinit ht_check_msi_mapping(struct pci_dev *dev) 2295static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2136{
2137 int pos, ttl = 48;
2138 int found = 0;
2139
2140 /* check if there is HT MSI cap or enabled on this device */
2141 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2142 while (pos && ttl--) {
2143 u8 flags;
2144
2145 if (found < 1)
2146 found = 1;
2147 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2148 &flags) == 0) {
2149 if (flags & HT_MSI_FLAGS_ENABLE) {
2150 if (found < 2) {
2151 found = 2;
2152 break;
2153 }
2154 }
2155 }
2156 pos = pci_find_next_ht_capability(dev, pos,
2157 HT_CAPTYPE_MSI_MAPPING);
2158 }
2159
2160 return found;
2161}
2162
2163static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2164{ 2296{
2165 struct pci_dev *host_bridge; 2297 struct pci_dev *host_bridge;
2166 int pos; 2298 int pos;
2167 int found; 2299 int found;
2168 2300
2169 /* Enabling HT MSI mapping on this device breaks MCP51 */
2170 if (dev->device == 0x270)
2171 return;
2172
 2173 /* check whether the HT MSI mapping cap is present or enabled on this device */ 2301 /* check whether the HT MSI mapping cap is present or enabled on this device */
2174 found = ht_check_msi_mapping(dev); 2302 found = ht_check_msi_mapping(dev);
2175 2303
@@ -2193,7 +2321,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2193 /* Host bridge is to HT */ 2321 /* Host bridge is to HT */
2194 if (found == 1) { 2322 if (found == 1) {
2195 /* it is not enabled, try to enable it */ 2323 /* it is not enabled, try to enable it */
2196 nv_ht_enable_msi_mapping(dev); 2324 if (all)
2325 ht_enable_msi_mapping(dev);
2326 else
2327 nv_ht_enable_msi_mapping(dev);
2197 } 2328 }
2198 return; 2329 return;
2199 } 2330 }
@@ -2205,8 +2336,20 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
2205 /* Host bridge is not to HT, disable HT MSI mapping on this device */ 2336 /* Host bridge is not to HT, disable HT MSI mapping on this device */
2206 ht_disable_msi_mapping(dev); 2337 ht_disable_msi_mapping(dev);
2207} 2338}
2208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2339
2209DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 2340static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2341{
2342 return __nv_msi_ht_cap_quirk(dev, 1);
2343}
2344
2345static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2346{
2347 return __nv_msi_ht_cap_quirk(dev, 0);
2348}
2349
2350DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2351
2352DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2210 2353
2211static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 2354static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
2212{ 2355{
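The nv_msi_ht_cap_quirk_leaf/_all split above keeps the per-vendor policy in the fixup tables rather than inside the helper. For orientation, a final-stage quirk is declared with the same DECLARE_PCI_FIXUP_FINAL pattern; the hook below is a hypothetical skeleton, and the PCI_ANY_ID match is a placeholder, not part of this patch:

/* Hypothetical skeleton: runs once per matching device after the
 * device has been probed. */
static void __devinit example_final_quirk(struct pci_dev *dev)
{
	dev_info(&dev->dev, "example final fixup ran\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, example_final_quirk);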
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 042e08924421..86503c14ce7e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -71,6 +71,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
71 down_write(&pci_bus_sem); 71 down_write(&pci_bus_sem);
72 list_del(&pci_bus->node); 72 list_del(&pci_bus->node);
73 up_write(&pci_bus_sem); 73 up_write(&pci_bus_sem);
74 if (!pci_bus->is_added)
75 return;
76
74 pci_remove_legacy_files(pci_bus); 77 pci_remove_legacy_files(pci_bus);
75 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); 78 device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
76 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); 79 device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
@@ -92,6 +95,7 @@ EXPORT_SYMBOL(pci_remove_bus);
92 */ 95 */
93void pci_remove_bus_device(struct pci_dev *dev) 96void pci_remove_bus_device(struct pci_dev *dev)
94{ 97{
98 pci_stop_bus_device(dev);
95 if (dev->subordinate) { 99 if (dev->subordinate) {
96 struct pci_bus *b = dev->subordinate; 100 struct pci_bus *b = dev->subordinate;
97 101
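With pci_stop_bus_device() now called up front, pci_remove_bus_device() detaches drivers and tears down sysfs for the whole subtree before any resources are released. A sketch of the caller side, assuming the reference came from pci_get_slot(); example_hot_remove() is an illustrative name:

/* Illustrative only: remove one device, dropping the reference that
 * pci_get_slot() took. */
static void example_hot_remove(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);

	if (!dev)
		return;
	pci_remove_bus_device(dev);	/* stop phase runs first now */
	pci_dev_put(dev);
}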
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5af8bd538149..710d4ea69568 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
29 if (pdev->is_pcie) 29 if (pdev->is_pcie)
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (!pdev->bus->self) 32 if (!pdev->bus->parent)
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
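The search.c hunk above ends the upstream walk on !pdev->bus->parent rather than !pdev->bus->self: a root bus is the one with no parent, while a bus can lack a self bridge without being a root. The pci_is_root_bus() helper used earlier in probe.c encodes the same test; paraphrased here from include/linux/pci.h:

/* A root bus has no parent bus; bus->self may be NULL for other
 * reasons (e.g. virtual buses), so it is not a reliable root test. */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}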
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 704608945780..334285a8e237 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
32 struct pci_dev *dev; 32 struct pci_dev *dev;
33 struct resource *res; 33 struct resource *res;
@@ -144,6 +144,9 @@ static void pci_setup_bridge(struct pci_bus *bus)
144 struct pci_bus_region region; 144 struct pci_bus_region region;
145 u32 l, bu, lu, io_upper16; 145 u32 l, bu, lu, io_upper16;
146 146
147 if (!pci_is_root_bus(bus) && bus->is_added)
148 return;
149
147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", 150 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number); 151 pci_domain_nr(bus), bus->number);
149 152
@@ -495,7 +498,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
495} 498}
496EXPORT_SYMBOL(pci_bus_size_bridges); 499EXPORT_SYMBOL(pci_bus_size_bridges);
497 500
498void __ref pci_bus_assign_resources(struct pci_bus *bus) 501void __ref pci_bus_assign_resources(const struct pci_bus *bus)
499{ 502{
500 struct pci_bus *b; 503 struct pci_bus *b;
501 struct pci_dev *dev; 504 struct pci_dev *dev;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 32e8d88a4619..3039fcb86afc 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -120,6 +120,21 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
120 return err; 120 return err;
121} 121}
122 122
123#ifdef CONFIG_PCI_QUIRKS
124void pci_disable_bridge_window(struct pci_dev *dev)
125{
126 dev_dbg(&dev->dev, "Disabling bridge window.\n");
127
128 /* MMIO Base/Limit */
129 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
130
131 /* Prefetchable MMIO Base/Limit */
132 pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
133 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
134 pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
135}
136#endif /* CONFIG_PCI_QUIRKS */
137
123int pci_assign_resource(struct pci_dev *dev, int resno) 138int pci_assign_resource(struct pci_dev *dev, int resno)
124{ 139{
125 struct pci_bus *bus = dev->bus; 140 struct pci_bus *bus = dev->bus;
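The magic values in pci_disable_bridge_window() above program an inverted window: the dword at PCI_MEMORY_BASE covers the 16-bit Memory Base (low half) and Memory Limit (high half), whose top twelve bits supply address bits 31:20. Writing 0x0000fff0 therefore yields base 0xfff00000 and limit 0x000fffff, and a base above the limit means the bridge forwards nothing; the prefetchable window gets the same treatment, with PCI_PREF_BASE_UPPER32 set to 0xffffffff to push the 64-bit base above any limit. A standalone, userspace-style illustration of the arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Decode the 0x0000fff0 value written above: the base lands above
 * the limit, so the window decodes no addresses. */
int main(void)
{
	uint32_t v = 0x0000fff0;
	uint32_t base  = (v & 0xfff0) << 16;                      /* 0xfff00000 */
	uint32_t limit = (((v >> 16) & 0xfff0) << 16) | 0xfffff;  /* 0x000fffff */

	printf("base=%#010x limit=%#010x -> window %s\n", base, limit,
	       base > limit ? "disabled" : "active");
	return 0;
}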
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 5a8ccb4f604d..21189447e545 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * drivers/pci/slot.c 2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 */ 6 */
7 7
8#include <linux/kobject.h> 8#include <linux/kobject.h>
@@ -52,8 +52,8 @@ static void pci_slot_release(struct kobject *kobj)
52 struct pci_dev *dev; 52 struct pci_dev *dev;
53 struct pci_slot *slot = to_pci_slot(kobj); 53 struct pci_slot *slot = to_pci_slot(kobj);
54 54
55 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, 55 dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
56 slot->bus->number, slot->number); 56 slot->number, pci_slot_name(slot));
57 57
58 list_for_each_entry(dev, &slot->bus->devices, bus_list) 58 list_for_each_entry(dev, &slot->bus->devices, bus_list)
59 if (PCI_SLOT(dev->devfn) == slot->number) 59 if (PCI_SLOT(dev->devfn) == slot->number)
@@ -248,9 +248,8 @@ placeholder:
248 if (PCI_SLOT(dev->devfn) == slot_nr) 248 if (PCI_SLOT(dev->devfn) == slot_nr)
249 dev->slot = slot; 249 dev->slot = slot;
250 250
251 /* Don't care if debug printk has a -1 for slot_nr */ 251 dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 slot_nr, pci_slot_name(slot));
253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
254 253
255out: 254out:
256 kfree(slot_name); 255 kfree(slot_name);
@@ -299,9 +298,8 @@ EXPORT_SYMBOL_GPL(pci_renumber_slot);
299 */ 298 */
300void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
301{ 300{
302 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
303 atomic_read(&slot->kobj.kref.refcount) - 1, 302 slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
304 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
305 303
306 down_write(&pci_bus_sem); 304 down_write(&pci_bus_sem);
307 kobject_put(&slot->kobj); 305 kobject_put(&slot->kobj);