 drivers/pci/pci.c | 424 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 291 insertions(+), 133 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 447e83472c01..f3ea977a5b1b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -110,7 +110,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus* bus)
 	struct list_head *tmp;
 	unsigned char max, n;
 
-	max = bus->subordinate;
+	max = bus->busn_res.end;
 	list_for_each(tmp, &bus->children) {
 		n = pci_bus_max_busnr(pci_bus_b(tmp));
 		if(n > max)
@@ -136,30 +136,6 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 #endif
 
-#if 0
-/**
- * pci_max_busnr - returns maximum PCI bus number
- *
- * Returns the highest PCI bus number present in the system global list of
- * PCI buses.
- */
-unsigned char __devinit
-pci_max_busnr(void)
-{
-	struct pci_bus *bus = NULL;
-	unsigned char max, n;
-
-	max = 0;
-	while ((bus = pci_find_next_bus(bus)) != NULL) {
-		n = pci_bus_max_busnr(bus);
-		if(n > max)
-			max = n;
-	}
-	return max;
-}
-
-#endif /* 0 */
-
 #define PCI_FIND_CAP_TTL	48
 
 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
@@ -278,6 +254,38 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 }
 
 /**
+ * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
+ * @dev: PCI device to check
+ *
+ * Like pci_pcie_cap() but also checks that the PCIe capability version is
+ * >= 2. Note that v1 capability structures could be sparse in that not
+ * all register fields were required. v2 requires the entire structure to
+ * be present size wise, while still allowing for non-implemented registers
+ * to exist but they must be hardwired to 0.
+ *
+ * Due to the differences in the versions of capability structures, one
+ * must be careful not to try and access non-existant registers that may
+ * exist in early versions - v1 - of Express devices.
+ *
+ * Returns the offset of the PCIe capability structure as long as the
+ * capability version is >= 2; otherwise 0 is returned.
+ */
+static int pci_pcie_cap2(struct pci_dev *dev)
+{
+	u16 flags;
+	int pos;
+
+	pos = pci_pcie_cap(dev);
+	if (pos) {
+		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
+		if ((flags & PCI_EXP_FLAGS_VERS) < 2)
+			pos = 0;
+	}
+
+	return pos;
+}
+
+/**
  * pci_find_ext_capability - Find an extended capability
  * @dev: PCI device to query
  * @cap: capability code
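The helper above is the pattern the rest of this patch builds on: callers that need v2-only registers (DEVCAP2/DEVCTL2, LNKCTL2, SLTCTL2) ask for the capability offset via pci_pcie_cap2() and bail out when it returns 0. A rough out-of-tree sketch of the same version gate, using only exported config accessors (the function name pcie_cap_v2_offset() is illustrative, not part of the patch):

#include <linux/pci.h>

/* Illustrative open-coded equivalent of the static pci_pcie_cap2() above. */
static int pcie_cap_v2_offset(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	u16 flags;

	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
	/* Capability Version field; v1 structures may omit the *2 registers */
	return (flags & PCI_EXP_FLAGS_VERS) >= 2 ? pos : 0;
}
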
@@ -329,49 +337,6 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
 }
 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 
-/**
- * pci_bus_find_ext_capability - find an extended capability
- * @bus: the PCI bus to query
- * @devfn: PCI device to query
- * @cap: capability code
- *
- * Like pci_find_ext_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
- *
- * Returns the address of the requested capability structure within the
- * device's PCI configuration space or 0 in case the device does not
- * support it.
- */
-int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
-				int cap)
-{
-	u32 header;
-	int ttl;
-	int pos = PCI_CFG_SPACE_SIZE;
-
-	/* minimum 8 bytes per capability */
-	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
-	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
-		return 0;
-	if (header == 0xffffffff || header == 0)
-		return 0;
-
-	while (ttl-- > 0) {
-		if (PCI_EXT_CAP_ID(header) == cap)
-			return pos;
-
-		pos = PCI_EXT_CAP_NEXT(header);
-		if (pos < PCI_CFG_SPACE_SIZE)
-			break;
-
-		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
-			break;
-	}
-
-	return 0;
-}
-
 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 {
 	int rc, ttl = PCI_FIND_CAP_TTL;
@@ -622,7 +587,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 		dev_info(&dev->dev, "Refused to change power state, "
 			"currently in D%d\n", dev->current_state);
 
-	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+	/*
+	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 	 * from D3hot to D0 _may_ perform an internal reset, thereby
 	 * going to "D0 Uninitialized" rather than "D0 Initialized".
@@ -654,6 +620,16 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 	if (dev->pm_cap) {
 		u16 pmcsr;
 
+		/*
+		 * Configuration space is not accessible for device in
+		 * D3cold, so just keep or set D3cold for safety
+		 */
+		if (dev->current_state == PCI_D3cold)
+			return;
+		if (state == PCI_D3cold) {
+			dev->current_state = PCI_D3cold;
+			return;
+		}
 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	} else {
@@ -662,6 +638,19 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 }
 
 /**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+	if (platform_pci_power_manageable(dev))
+		platform_pci_set_power_state(dev, PCI_D0);
+
+	pci_raw_set_power_state(dev, PCI_D0);
+	pci_update_current_state(dev, PCI_D0);
+}
+
+/**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
  * @state: State to put the device into.
@@ -694,8 +683,50 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
  */
 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 {
-	if (state == PCI_D0)
+	if (state == PCI_D0) {
 		pci_platform_power_transition(dev, PCI_D0);
+		/*
+		 * Mandatory power management transition delays, see
+		 * PCI Express Base Specification Revision 2.0 Section
+		 * 6.6.1: Conventional Reset. Do not delay for
+		 * devices powered on/off by corresponding bridge,
+		 * because have already delayed for the bridge.
+		 */
+		if (dev->runtime_d3cold) {
+			msleep(dev->d3cold_delay);
+			/*
+			 * When powering on a bridge from D3cold, the
+			 * whole hierarchy may be powered on into
+			 * D0uninitialized state, resume them to give
+			 * them a chance to suspend again
+			 */
+			pci_wakeup_bus(dev->subordinate);
+		}
+	}
+}
+
+/**
+ * __pci_dev_set_current_state - Set current state of a PCI device
+ * @dev: Device to handle
+ * @data: pointer to state to be set
+ */
+static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
+{
+	pci_power_t state = *(pci_power_t *)data;
+
+	dev->current_state = state;
+	return 0;
+}
+
+/**
+ * __pci_bus_set_current_state - Walk given bus and set current state of devices
+ * @bus: Top bus of the subtree to walk.
+ * @state: state to be set
+ */
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+{
+	if (bus)
+		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
 }
 
 /**
@@ -707,8 +738,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
  */
 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 {
-	return state >= PCI_D0 ?
-			pci_platform_power_transition(dev, state) : -EINVAL;
+	int ret;
+
+	if (state <= PCI_D0)
+		return -EINVAL;
+	ret = pci_platform_power_transition(dev, state);
+	/* Power off the bridge may power off the whole hierarchy */
+	if (!ret && state == PCI_D3cold)
+		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 
@@ -732,8 +770,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	int error;
 
 	/* bound the state we're entering */
-	if (state > PCI_D3hot)
-		state = PCI_D3hot;
+	if (state > PCI_D3cold)
+		state = PCI_D3cold;
 	else if (state < PCI_D0)
 		state = PCI_D0;
 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
@@ -744,14 +782,23 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	 */
 		return 0;
 
+	/* Check if we're already there */
+	if (dev->current_state == state)
+		return 0;
+
 	__pci_start_power_transition(dev, state);
 
 	/* This device is quirked not to be put into D3, so
 	   don't put it in D3 */
-	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
+	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 		return 0;
 
-	error = pci_raw_set_power_state(dev, state);
+	/*
+	 * To put device in D3cold, we put device into D3hot in native
+	 * way, then put device into D3cold with platform ops
+	 */
+	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
+					PCI_D3hot : state);
 
 	if (!__pci_complete_power_transition(dev, state))
 		error = 0;
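With the upper bound raised to PCI_D3cold, a caller can now ask for D3cold directly; per the comment above, the function first reaches D3hot natively through the PM capability and then lets the platform (ACPI, typically) remove power. A minimal driver-side sketch, assuming a device whose platform actually supports D3cold (example_suspend() is an invented name, not part of the patch):

#include <linux/pci.h>

/* Sketch: request the deepest state; pci_set_power_state() clamps the
 * argument and splits the transition into native D3hot + platform D3cold. */
static int example_suspend(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, PCI_D3cold);
}
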
@@ -822,12 +869,6 @@ EXPORT_SYMBOL(pci_choose_state);
 	((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 	 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 	  type == PCI_EXP_TYPE_RC_EC))
-#define pcie_cap_has_devctl2(type, flags)	\
-	((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_lnkctl2(type, flags)	\
-	((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_sltctl2(type, flags)	\
-	((flags & PCI_EXP_FLAGS_VERS) > 1)
 
 static struct pci_cap_saved_state *pci_find_saved_cap(
 	struct pci_dev *pci_dev, char cap)
@@ -870,13 +911,14 @@ static int pci_save_pcie_state(struct pci_dev *dev)
 		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
-	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
-		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
-	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
-		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
-	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
-		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 
+	pos = pci_pcie_cap2(dev);
+	if (!pos)
+		return 0;
+
+	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
+	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
+	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 	return 0;
 }
 
@@ -903,12 +945,14 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
 		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
-	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
-		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
-	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
-		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
-	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
-		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
+
+	pos = pci_pcie_cap2(dev);
+	if (!pos)
+		return;
+
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
 }
 
 
@@ -1349,7 +1393,7 @@ void pcim_pin_device(struct pci_dev *pdev)
  * is the default implementation. Architecture implementations can
  * override this.
  */
-void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
+void __weak pcibios_disable_device (struct pci_dev *dev) {}
 
 static void do_pci_disable_device(struct pci_dev *dev)
 {
@@ -1413,8 +1457,8 @@ pci_disable_device(struct pci_dev *dev)
  * Sets the PCIe reset state for the device. This is the default
  * implementation. Architecture implementations can override this.
  */
-int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
-					enum pcie_reset_state state)
+int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
+					enum pcie_reset_state state)
 {
 	return -EINVAL;
 }
@@ -1498,6 +1542,28 @@ void pci_pme_wakeup_bus(struct pci_bus *bus)
 }
 
 /**
+ * pci_wakeup - Wake up a PCI device
+ * @dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+	pci_wakeup_event(pci_dev);
+	pm_request_resume(&pci_dev->dev);
+	return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_wakeup_bus(struct pci_bus *bus)
+{
+	if (bus)
+		pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
  * pci_pme_capable - check the capability of PCI device to generate PME#
  * @dev: PCI device to handle.
  * @state: PCI state from which device will issue PME#.
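pci_wakeup_bus() and the earlier __pci_bus_set_current_state() both lean on pci_walk_bus(), which applies a callback to every device below a bus and stops early on a non-zero return. The shape of such a callback, shown with a made-up counting example rather than the wakeup logic itself:

#include <linux/pci.h>

/* Hypothetical callback: count devices under a bus; returning non-zero
 * would abort the walk early. */
static int count_one(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

static int count_devices_below(struct pci_bus *bus)
{
	int n = 0;

	if (bus)
		pci_walk_bus(bus, count_one, &n);
	return n;
}
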
@@ -1518,6 +1584,16 @@ static void pci_pme_list_scan(struct work_struct *work)
 	if (!list_empty(&pci_pme_list)) {
 		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
 			if (pme_dev->dev->pme_poll) {
+				struct pci_dev *bridge;
+
+				bridge = pme_dev->dev->bus->self;
+				/*
+				 * If bridge is in low power state, the
+				 * configuration space of subordinate devices
+				 * may be not accessible
+				 */
+				if (bridge && bridge->current_state != PCI_D0)
+					continue;
 				pci_pme_wakeup(pme_dev->dev, NULL);
 			} else {
 				list_del(&pme_dev->list);
@@ -1744,6 +1820,10 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
+	/* D3cold during system suspend/hibernate is not supported */
+	if (target_state > PCI_D3hot)
+		target_state = PCI_D3hot;
+
 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
 	error = pci_set_power_state(dev, target_state);
@@ -1781,12 +1861,16 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
+	dev->runtime_d3cold = target_state == PCI_D3cold;
+
 	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
 
 	error = pci_set_power_state(dev, target_state);
 
-	if (error)
+	if (error) {
 		__pci_enable_wake(dev, target_state, true, false);
+		dev->runtime_d3cold = false;
+	}
 
 	return error;
 }
@@ -1856,6 +1940,7 @@ void pci_pm_init(struct pci_dev *dev)
 
 	dev->pm_cap = pm;
 	dev->d3_delay = PCI_PM_D3_WAIT;
+	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
 
 	dev->d1_support = false;
 	dev->d2_support = false;
@@ -1983,7 +2068,7 @@ void pci_enable_ari(struct pci_dev *dev)
 {
 	int pos;
 	u32 cap;
-	u16 flags, ctrl;
+	u16 ctrl;
 	struct pci_dev *bridge;
 
 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
@@ -1994,18 +2079,14 @@ void pci_enable_ari(struct pci_dev *dev)
 		return;
 
 	bridge = dev->bus->self;
-	if (!bridge || !pci_is_pcie(bridge))
+	if (!bridge)
 		return;
 
-	pos = pci_pcie_cap(bridge);
+	/* ARI is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(bridge);
 	if (!pos)
 		return;
 
-	/* ARI is a PCIe v2 feature */
-	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
-	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
-		return;
-
 	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
 		return;
@@ -2018,7 +2099,7 @@ void pci_enable_ari(struct pci_dev *dev)
 }
 
 /**
- * pci_enable_ido - enable ID-based ordering on a device
+ * pci_enable_ido - enable ID-based Ordering on a device
  * @dev: the PCI device
  * @type: which types of IDO to enable
  *
@@ -2031,7 +2112,8 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type)
 	int pos;
 	u16 ctrl;
 
-	pos = pci_pcie_cap(dev);
+	/* ID-based Ordering is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return;
 
@@ -2054,10 +2136,8 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type)
 	int pos;
 	u16 ctrl;
 
-	if (!pci_is_pcie(dev))
-		return;
-
-	pos = pci_pcie_cap(dev);
+	/* ID-based Ordering is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return;
 
@@ -2096,10 +2176,8 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
 	u16 ctrl;
 	int ret;
 
-	if (!pci_is_pcie(dev))
-		return -ENOTSUPP;
-
-	pos = pci_pcie_cap(dev);
+	/* OBFF is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return -ENOTSUPP;
 
@@ -2108,7 +2186,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
 		return -ENOTSUPP; /* no OBFF support at all */
 
 	/* Make sure the topology supports OBFF as well */
-	if (dev->bus) {
+	if (dev->bus->self) {
 		ret = pci_enable_obff(dev->bus->self, type);
 		if (ret)
 			return ret;
@@ -2149,10 +2227,8 @@ void pci_disable_obff(struct pci_dev *dev)
 	int pos;
 	u16 ctrl;
 
-	if (!pci_is_pcie(dev))
-		return;
-
-	pos = pci_pcie_cap(dev);
+	/* OBFF is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return;
 
@@ -2169,15 +2245,13 @@ EXPORT_SYMBOL(pci_disable_obff);
  * RETURNS:
  * True if @dev supports latency tolerance reporting, false otherwise.
  */
-bool pci_ltr_supported(struct pci_dev *dev)
+static bool pci_ltr_supported(struct pci_dev *dev)
 {
 	int pos;
 	u32 cap;
 
-	if (!pci_is_pcie(dev))
-		return false;
-
-	pos = pci_pcie_cap(dev);
+	/* LTR is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return false;
 
@@ -2185,7 +2259,6 @@ bool pci_ltr_supported(struct pci_dev *dev)
 
 	return cap & PCI_EXP_DEVCAP2_LTR;
 }
-EXPORT_SYMBOL(pci_ltr_supported);
 
 /**
  * pci_enable_ltr - enable latency tolerance reporting
@@ -2206,7 +2279,8 @@ int pci_enable_ltr(struct pci_dev *dev)
 	if (!pci_ltr_supported(dev))
 		return -ENOTSUPP;
 
-	pos = pci_pcie_cap(dev);
+	/* LTR is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return -ENOTSUPP;
 
@@ -2215,7 +2289,7 @@ int pci_enable_ltr(struct pci_dev *dev)
 		return -EINVAL;
 
 	/* Enable upstream ports first */
-	if (dev->bus) {
+	if (dev->bus->self) {
 		ret = pci_enable_ltr(dev->bus->self);
 		if (ret)
 			return ret;
@@ -2241,7 +2315,8 @@ void pci_disable_ltr(struct pci_dev *dev)
 	if (!pci_ltr_supported(dev))
 		return;
 
-	pos = pci_pcie_cap(dev);
+	/* LTR is a PCIe cap v2 feature */
+	pos = pci_pcie_cap2(dev);
 	if (!pos)
 		return;
 
@@ -2360,6 +2435,75 @@ void pci_enable_acs(struct pci_dev *dev)
 }
 
 /**
+ * pci_acs_enabled - test ACS against required flags for a given device
+ * @pdev: device to test
+ * @acs_flags: required PCI ACS flags
+ *
+ * Return true if the device supports the provided flags. Automatically
+ * filters out flags that are not implemented on multifunction devices.
+ */
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+	int pos, ret;
+	u16 ctrl;
+
+	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+	if (ret >= 0)
+		return ret > 0;
+
+	if (!pci_is_pcie(pdev))
+		return false;
+
+	/* Filter out flags not applicable to multifunction */
+	if (pdev->multifunction)
+		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
+			      PCI_ACS_EC | PCI_ACS_DT);
+
+	if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||
+	    pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+	    pdev->multifunction) {
+		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+		if (!pos)
+			return false;
+
+		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+		if ((ctrl & acs_flags) != acs_flags)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
+ * @start: starting downstream device
+ * @end: ending upstream device or NULL to search to the root bus
+ * @acs_flags: required flags
+ *
+ * Walk up a device tree from start to end testing PCI ACS support. If
+ * any step along the way does not support the required flags, return false.
+ */
+bool pci_acs_path_enabled(struct pci_dev *start,
+			  struct pci_dev *end, u16 acs_flags)
+{
+	struct pci_dev *pdev, *parent = start;
+
+	do {
+		pdev = parent;
+
+		if (!pci_acs_enabled(pdev, acs_flags))
+			return false;
+
+		if (pci_is_root_bus(pdev->bus))
+			return (end == NULL);
+
+		parent = pdev->bus->self;
+	} while (pdev != end);
+
+	return true;
+}
+
+/**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD)
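These two helpers are the kind of test device-assignment code (IOMMU grouping, VFIO-style userspace drivers) can build on to decide whether peer-to-peer traffic could bypass the IOMMU. A hedged sketch of such a check; the caller name and the exact flag policy below are illustrative, not something this patch defines:

#include <linux/pci.h>

/* Illustrative isolation test: require source validation, request redirect,
 * completion redirect and upstream forwarding on every bridge between the
 * device and the root bus. */
static bool example_device_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	return pci_acs_path_enabled(pdev, NULL, flags);
}
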
@@ -2666,6 +2810,18 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
 }
 
 /**
+ * pcibios_setup - process "pci=" kernel boot arguments
+ * @str: string used to pass in "pci=" kernel boot arguments
+ *
+ * Process kernel boot arguments. This is the default implementation.
+ * Architecture specific implementations can override this as necessary.
+ */
+char * __weak __init pcibios_setup(char *str)
+{
+	return str;
+}
+
+/**
  * pcibios_set_master - enable PCI bus-mastering for device dev
  * @dev: the PCI device to enable
  *
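Because the default is __weak, an architecture supplies its own parser simply by defining a non-weak pcibios_setup(). Returning the string unchanged hands the option back to the generic "pci=" handling; an implementation that consumes an option typically returns NULL so the generic code skips it. A sketch of what an override might look like (the "myarch-quirk" option name is invented for illustration):

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>

/* Hypothetical architecture override of the __weak default above. */
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "myarch-quirk")) {	/* invented option name */
		/* ...act on the option here... */
		return NULL;	/* consumed; generic parsing skips it */
	}
	return str;		/* not ours; let generic "pci=" code see it */
}
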
@@ -2876,6 +3032,9 @@ bool pci_intx_mask_supported(struct pci_dev *dev)
 	bool mask_supported = false;
 	u16 orig, new;
 
+	if (dev->broken_intx_masking)
+		return false;
+
 	pci_cfg_access_lock(dev);
 
 	pci_read_config_word(dev, PCI_COMMAND, &orig);
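The new broken_intx_masking flag is meant to be set from a quirk for devices whose COMMAND/STATUS bits misreport INTx masking, so pci_intx_mask_supported() can refuse up front instead of probing the hardware. A sketch of such a quirk, assuming the companion header change that adds the field; the vendor/device IDs are placeholders and the quirk would normally live in drivers/pci/quirks.c:

#include <linux/pci.h>

/* Hypothetical quirk: mark a device that advertises but does not honour
 * PCI_COMMAND_INTX_DISABLE. IDs below are placeholders. */
static void __devinit quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_broken_intx_masking);
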
@@ -3395,8 +3554,7 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 
 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
 	if (o != v) {
-		if (v > o && dev->bus &&
-		    (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
 			return -EIO;
 
 		cmd &= ~PCI_X_CMD_MAX_READ;
@@ -3851,7 +4009,7 @@ static void __devinit pci_no_domains(void)
  * greater than 0xff). This is the default implementation. Architecture
  * implementations can override this.
  */
-int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
+int __weak pci_ext_cfg_avail(struct pci_dev *dev)
 {
 	return 1;
 }