Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/host/pci-hyperv.c  54
-rw-r--r--  drivers/pci/msi.c              13
-rw-r--r--  drivers/pci/pci-acpi.c          4
-rw-r--r--  drivers/pci/pci-driver.c        5
-rw-r--r--  drivers/pci/pci.c              44
-rw-r--r--  drivers/pci/probe.c            43
-rw-r--r--  drivers/pci/quirks.c           94
7 files changed, 188 insertions, 69 deletions
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 334c9a7b8991..0fe3ea164ee5 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -563,52 +563,6 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 
-
-/*
- * Temporary CPU to vCPU mapping to address transitioning
- * vmbus_cpu_number_to_vp_number() being migrated to
- * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
- * has been picked up in the main line, remove this code here and use
- * the official code.
- */
-static struct hv_tmpcpumap
-{
-	bool initialized;
-	u32 vp_index[NR_CPUS];
-} hv_tmpcpumap;
-
-static void hv_tmpcpumap_init_cpu(void *_unused)
-{
-	int cpu = smp_processor_id();
-	u64 vp_index;
-
-	hv_get_vp_index(vp_index);
-
-	hv_tmpcpumap.vp_index[cpu] = vp_index;
-}
-
-static void hv_tmpcpumap_init(void)
-{
-	if (hv_tmpcpumap.initialized)
-		return;
-
-	memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
-	on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
-	hv_tmpcpumap.initialized = true;
-}
-
-/**
- * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
- *
- * Remove once vmbus_cpu_number_to_vp_number() has been converted to
- * hv_cpu_number_to_vp_number() and replace callers appropriately.
- */
-static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
-{
-	return hv_tmpcpumap.vp_index[cpu];
-}
-
-
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn: The Linux representation of PCI slot
@@ -972,7 +926,7 @@ static void hv_irq_unmask(struct irq_data *data)
 		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
 
 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
-			cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
+			cpu_vmbus = hv_cpu_number_to_vp_number(cpu);
 
 			if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
 				dev_err(&hbus->hdev->device,
@@ -987,7 +941,7 @@ static void hv_irq_unmask(struct irq_data *data)
 	} else {
 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
 			params->int_target.vp_mask |=
-				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+				(1ULL << hv_cpu_number_to_vp_number(cpu));
 		}
 	}
 
@@ -1064,7 +1018,7 @@ static u32 hv_compose_msi_req_v2(
 	 */
 	cpu = cpumask_first_and(affinity, cpu_online_mask);
 	int_pkt->int_desc.processor_array[0] =
-		hv_tmp_cpu_nr_to_vp_nr(cpu);
+		hv_cpu_number_to_vp_number(cpu);
 	int_pkt->int_desc.processor_count = 1;
 
 	return sizeof(*int_pkt);
@@ -2496,8 +2450,6 @@ static int hv_pci_probe(struct hv_device *hdev,
 		return -ENOMEM;
 	hbus->state = hv_pcibus_init;
 
-	hv_tmpcpumap_init();
-
 	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and
	 * other specs. Pull it from the instance ID, to get something
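Note (illustration, not part of the patch): the hunks above drop the driver-local hv_tmp_cpu_nr_to_vp_nr() lookup table in favour of the generic hv_cpu_number_to_vp_number() helper. A minimal sketch of the resulting pattern, collapsing an interrupt's CPU affinity mask into a Hyper-V vCPU bitmask; the function name is hypothetical and it assumes every vCPU number fits into the 64-bit mask, as the vp_mask hunk above does:

/* Sketch only: 'dest' is the interrupt's CPU affinity mask. */
static u64 example_cpumask_to_vpmask(const struct cpumask *dest)
{
	u64 vp_mask = 0;
	int cpu;

	/* Walk only the online CPUs in the affinity mask, as the hunk does. */
	for_each_cpu_and(cpu, dest, cpu_online_mask)
		vp_mask |= 1ULL << hv_cpu_number_to_vp_number(cpu);

	return vp_mask;
}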
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2f0dd02d78b7..496ed9130600 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 	struct msi_desc *entry;
 	u16 control;
 
-	if (affd) {
+	if (affd)
 		masks = irq_create_affinity_masks(nvec, affd);
-		if (!masks)
-			dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
-				nvec);
-	}
+
 
 	/* MSI Entry Initialization */
 	entry = alloc_msi_entry(&dev->dev, nvec, masks);
@@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 	struct msi_desc *entry;
 	int ret, i;
 
-	if (affd) {
+	if (affd)
 		masks = irq_create_affinity_masks(nvec, affd);
-		if (!masks)
-			dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
-				nvec);
-	}
 
 	for (i = 0, curmsk = masks; i < nvec; i++) {
 		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e70c1c7ba1bf..a8da543b3814 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -573,7 +573,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
 {
 	while (bus->parent) {
 		if (acpi_pm_device_can_wakeup(&bus->self->dev))
-			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
+			return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
 
 		bus = bus->parent;
 	}
@@ -581,7 +581,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
 	/* We have reached the root bus. */
 	if (bus->bridge) {
 		if (acpi_pm_device_can_wakeup(bus->bridge))
-			return acpi_pm_set_device_wakeup(bus->bridge, enable);
+			return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
 	}
 	return 0;
 }
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d51e8738f9c2..11bd267fc137 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -647,9 +647,7 @@ static int pci_legacy_resume(struct device *dev)
 static void pci_pm_default_resume(struct pci_dev *pci_dev)
 {
 	pci_fixup_device(pci_fixup_resume, pci_dev);
-
-	if (!pci_has_subordinate(pci_dev))
-		pci_enable_wake(pci_dev, PCI_D0, false);
+	pci_enable_wake(pci_dev, PCI_D0, false);
 }
 
 static void pci_pm_default_suspend(struct pci_dev *pci_dev)
@@ -1307,6 +1305,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
 	drv->driver.bus = &pci_bus_type;
 	drv->driver.owner = owner;
 	drv->driver.mod_name = mod_name;
+	drv->driver.groups = drv->groups;
 
 	spin_lock_init(&drv->dynids.lock);
 	INIT_LIST_HEAD(&drv->dynids.list);
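Note (illustration, not part of the patch): wiring drv->driver.groups to drv->groups lets a PCI driver declare its sysfs attribute groups declaratively instead of creating them by hand in probe(). A minimal sketch, assuming a hypothetical "example" driver and a single read-only driver attribute; the pci_driver initializer is trimmed to the fields relevant here:

static ssize_t foo_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "bar\n");
}
static DRIVER_ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&driver_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static struct pci_driver example_driver = {
	.name	= "example",
	/* Copied to drv->driver.groups by __pci_register_driver() above. */
	.groups	= example_groups,
};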
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2870cd36e3e0..b0002daa50f3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -515,7 +515,7 @@ EXPORT_SYMBOL(pci_find_resource);
  */
 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
 {
-	struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+	struct pci_dev *bridge, *highest_pcie_bridge = dev;
 
 	bridge = pci_upstream_bridge(dev);
 	while (bridge && pci_is_pcie(bridge)) {
@@ -1923,6 +1923,13 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 {
 	int ret = 0;
 
+	/*
+	 * Bridges can only signal wakeup on behalf of subordinate devices,
+	 * but that is set up elsewhere, so skip them.
+	 */
+	if (pci_has_subordinate(dev))
+		return 0;
+
 	/* Don't do the same thing twice in a row for one device. */
 	if (!!enable == !!dev->wakeup_prepared)
 		return 0;
@@ -4293,6 +4300,41 @@ int pci_reset_function(struct pci_dev *dev)
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
 /**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset. It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+	int rc;
+
+	rc = pci_probe_reset_function(dev);
+	if (rc)
+		return rc;
+
+	pci_dev_save_and_disable(dev);
+
+	rc = __pci_reset_function_locked(dev);
+
+	pci_dev_restore(dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
+/**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
  *
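Note (illustration, not part of the patch): the new pci_reset_function_locked() is for callers that already hold the PCI device lock, so taking it again would deadlock. A minimal sketch, assuming a hypothetical example_probe() and relying on the fact that the driver core holds the device lock across ->probe():

/* Sketch only: example_probe() is a made-up driver probe routine. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	/*
	 * The device lock is already held here, so use the _locked variant
	 * rather than pci_reset_function().
	 */
	err = pci_reset_function_locked(pdev);
	if (err)
		return err;

	return 0;
}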
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e03f95bd1b59..ff94b69738a8 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1791,6 +1791,48 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
 	return 0;
 }
 
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has enabled relaxed ordering attribute.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+	u16 v;
+
+	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+	struct pci_dev *root;
+
+	/* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
+	if (dev->is_virtfn)
+		return;
+
+	if (!pcie_relaxed_ordering_enabled(dev))
+		return;
+
+	/*
+	 * For now, we only deal with Relaxed Ordering issues with Root
+	 * Ports. Peer-to-Peer DMA is another can of worms.
+	 */
+	root = pci_find_pcie_root_port(dev);
+	if (!root)
+		return;
+
+	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+					   PCI_EXP_DEVCTL_RELAX_EN);
+		dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+	}
+}
+
 static void pci_configure_device(struct pci_dev *dev)
 {
 	struct hotplug_params hpp;
@@ -1798,6 +1840,7 @@ static void pci_configure_device(struct pci_dev *dev)
 
 	pci_configure_mps(dev);
 	pci_configure_extended_tags(dev, NULL);
+	pci_configure_relaxed_ordering(dev);
 
 	memset(&hpp, 0, sizeof(hpp));
 	ret = pci_get_hp_params(dev, &hpp);
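Note (illustration, not part of the patch): pcie_relaxed_ordering_enabled() is exported so endpoint drivers can decide whether to set the Relaxed Ordering attribute in the TLPs they generate; by the time a driver probes, pci_configure_relaxed_ordering() above has already cleared the DEVCTL bit if the Root Port is quirked. A minimal sketch, assuming a hypothetical adapter structure and field name:

/* Sketch only: the adapter structure and helper are made-up names. */
struct example_adapter {
	bool use_relaxed_ordering;
};

static void example_init_dma(struct example_adapter *adap, struct pci_dev *pdev)
{
	/* Only request RO in our DMA writes if it survived the quirks. */
	adap->use_relaxed_ordering = pcie_relaxed_ordering_enabled(pdev);
}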
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3821c11c9add..a2afb44fad10 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/ktime.h>
 #include <linux/mm.h>
+#include <linux/platform_data/x86/apple.h>
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
@@ -3447,7 +3448,7 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 {
 	acpi_handle bridge, SXIO, SXFP, SXLV;
 
-	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+	if (!x86_apple_machine)
 		return;
 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
 		return;
@@ -3492,7 +3493,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
 	struct pci_dev *sibling = NULL;
 	struct pci_dev *nhi = NULL;
 
-	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+	if (!x86_apple_machine)
 		return;
 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
 		return;
@@ -4016,6 +4017,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
 			      quirk_tw686x_class);
 
 /*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set. Such devices should mark themselves and other
+ * Device Drivers should check before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+	dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
+ * Complex has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010). As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_relaxedordering_disable);
+
+/*
  * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
  * values for the Attribute as were supplied in the header of the
  * corresponding Request, except as explicitly allowed when IDO is used."
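Note (illustration, not part of the patch): DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, class_shift, hook) registers 'hook' to run early in device enumeration for matching devices, which is how the entries above get quirk_relaxedordering_disable() applied to the affected Root Complex ports. A minimal sketch of hooking up one more device, using an entirely made-up vendor/device pair and quirk name:

/* Sketch only: 0xabcd/0x1234 and the quirk name are hypothetical. */
static void quirk_example_disable_ro(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0xabcd, 0x1234, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_example_disable_ro);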
