author		Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 13:12:29 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 19:58:18 -0400
commit		daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree		be913e8e3745bb367d2ba371598f447649102cfc /drivers/pci/pci.c
parent		6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent		fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git
From rdma.git
Jason Gunthorpe says:
====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================
The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git because upcoming patches there depend on it.
* odp_fixes:
RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
RDMA/mlx5: Use ib_umem_start instead of umem.address
RDMA/core: Make invalidate_range a device operation
RDMA/odp: Use kvcalloc for the dma_list and page_list
RDMA/odp: Check for overflow when computing the umem_odp end
RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
RDMA/odp: Split creating a umem_odp from ib_umem_get
RDMA/odp: Make the three ways to create a umem_odp clear
RMDA/odp: Consolidate umem_odp initialization
RDMA/odp: Make it clearer when a umem is an implicit ODP umem
RDMA/odp: Iterate over the whole rbtree directly
RDMA/odp: Use the common interval tree library instead of generic
RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
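
One item above, "RDMA/odp: Use the common interval tree library instead of
generic", moves the ODP umem range tracking onto the kernel's shared interval
tree (include/linux/interval_tree.h). For readers unfamiliar with that
library, its usage pattern looks roughly like the sketch below. This is a
minimal illustration only, not code from the series; the my_range structure
and both helpers are invented names.

/* Hypothetical example: keep ranges in the shared interval tree and walk
 * every node overlapping [start, last]. */
#include <linux/interval_tree.h>
#include <linux/kernel.h>

struct my_range {
	struct interval_tree_node node;	/* node.start/node.last hold the range */
	void *priv;
};

static void my_range_insert(struct rb_root_cached *root, struct my_range *r,
			    unsigned long start, unsigned long last)
{
	r->node.start = start;
	r->node.last = last;		/* inclusive last address, not one-past-end */
	interval_tree_insert(&r->node, root);
}

static void my_range_walk_overlaps(struct rb_root_cached *root,
				   unsigned long start, unsigned long last)
{
	struct interval_tree_node *n;

	for (n = interval_tree_iter_first(root, start, last); n;
	     n = interval_tree_iter_next(n, start, last)) {
		struct my_range *r = container_of(n, struct my_range, node);
		/* invalidate, dump, or otherwise handle r here */
		(void)r;
	}
}

Note that the library keys each node on an inclusive [start, last] pair
rather than a half-open range, which is worth keeping in mind when converting
end-based code.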
Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--	drivers/pci/pci.c	29
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ed5ec1ac27..1b27b5af3d55 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1025,10 +1025,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 	if (state == PCI_D0) {
 		pci_platform_power_transition(dev, PCI_D0);
 		/*
-		 * Mandatory power management transition delays are
-		 * handled in the PCIe portdrv resume hooks.
+		 * Mandatory power management transition delays, see
+		 * PCI Express Base Specification Revision 2.0 Section
+		 * 6.6.1: Conventional Reset. Do not delay for
+		 * devices powered on/off by corresponding bridge,
+		 * because have already delayed for the bridge.
 		 */
 		if (dev->runtime_d3cold) {
+			if (dev->d3cold_delay && !dev->imm_ready)
+				msleep(dev->d3cold_delay);
 			/*
 			 * When powering on a bridge from D3cold, the
 			 * whole hierarchy may be powered on into
@@ -4602,16 +4607,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 
 	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
 }
-
 /**
- * pcie_wait_for_link_delay - Wait until link is active or inactive
+ * pcie_wait_for_link - Wait until link is active or inactive
  * @pdev: Bridge device
  * @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms)
  *
  * Use this to wait till link becomes active or inactive.
  */
-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
 {
 	int timeout = 1000;
 	bool ret;
@@ -4648,25 +4651,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
 		timeout -= 10;
 	}
 	if (active && ret)
-		msleep(delay);
+		msleep(100);
 	else if (ret != active)
 		pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
 			 active ? "set" : "cleared");
 	return ret == active;
 }
 
-/**
- * pcie_wait_for_link - Wait until link is active or inactive
- * @pdev: Bridge device
- * @active: waiting for active or inactive?
- *
- * Use this to wait till link becomes active or inactive.
- */
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
-{
-	return pcie_wait_for_link_delay(pdev, active, 100);
-}
-
 void pci_reset_secondary_bus(struct pci_dev *dev)
 {
 	u16 ctrl;
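
Relative to the hmm.git parent, the merged pci.c drops the
pcie_wait_for_link_delay() variant, folds the fixed 100 ms settle time back
into pcie_wait_for_link(), and restores the dev->d3cold_delay sleep in
__pci_start_power_transition(). The wait helper itself follows a plain
poll-with-timeout-then-settle pattern; the standalone sketch below mirrors
that shape in userspace C for illustration only. check_ready() is a made-up
stand-in for reading the Data Link Layer Link Active bit, and the timings
simply copy the values visible in the diff.

/* Standalone sketch of the poll-then-settle pattern used by
 * pcie_wait_for_link(); not kernel code, purely illustrative. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool check_ready(void)
{
	return true;	/* placeholder for a real hardware status read */
}

static bool wait_for_state(bool active, int settle_ms)
{
	int timeout = 1000;	/* total polling budget, in ms */
	bool ret = false;

	while (timeout > 0) {
		ret = check_ready();
		if (ret == active)
			break;
		usleep(10 * 1000);	/* poll every 10 ms */
		timeout -= 10;
	}

	if (active && ret)
		usleep(settle_ms * 1000);	/* give the link time to settle */
	else if (ret != active)
		fprintf(stderr, "state not %s in 1000 msec\n",
			active ? "set" : "cleared");

	return ret == active;
}

int main(void)
{
	/* wait for "active", then allow a 100 ms settle, as the kernel code does */
	return wait_for_state(true, 100) ? 0 : 1;
}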