path: root/drivers/pci/pci.c
Diffstat (limited to 'drivers/pci/pci.c')
 drivers/pci/pci.c | 252 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 210 insertions(+), 42 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7904d02ffdb9..b01bd5bba8e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,7 +66,8 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;
 
-       msleep(delay);
+       if (delay)
+               msleep(delay);
 }
 
 #ifdef CONFIG_PCI_DOMAINS
@@ -827,7 +828,8 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
         * because have already delayed for the bridge.
         */
        if (dev->runtime_d3cold) {
-               msleep(dev->d3cold_delay);
+               if (dev->d3cold_delay)
+                       msleep(dev->d3cold_delay);
                /*
                 * When powering on a bridge from D3cold, the
                 * whole hierarchy may be powered on into
@@ -1782,8 +1784,8 @@ static void pci_pme_list_scan(struct work_struct *work)
                }
        }
        if (!list_empty(&pci_pme_list))
-               schedule_delayed_work(&pci_pme_work,
-                                     msecs_to_jiffies(PME_TIMEOUT));
+               queue_delayed_work(system_freezable_wq, &pci_pme_work,
+                                  msecs_to_jiffies(PME_TIMEOUT));
        mutex_unlock(&pci_pme_list_mutex);
 }
 
@@ -1848,8 +1850,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
                mutex_lock(&pci_pme_list_mutex);
                list_add(&pme_dev->list, &pci_pme_list);
                if (list_is_singular(&pci_pme_list))
-                       schedule_delayed_work(&pci_pme_work,
-                                             msecs_to_jiffies(PME_TIMEOUT));
+                       queue_delayed_work(system_freezable_wq,
+                                          &pci_pme_work,
+                                          msecs_to_jiffies(PME_TIMEOUT));
                mutex_unlock(&pci_pme_list_mutex);
        } else {
                mutex_lock(&pci_pme_list_mutex);
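
Both PME-polling call sites now queue onto system_freezable_wq instead of the default system workqueue, so the poll is frozen across system suspend rather than racing with it. The general re-arming pattern, shown here outside of pci.c with a made-up handler name, is:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
        /* ... poll the hardware ... */

        /* Re-arm on the freezable workqueue; it stops while tasks are frozen. */
        queue_delayed_work(system_freezable_wq, &example_poll_work,
                           msecs_to_jiffies(1000));
}
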
@@ -3363,7 +3366,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
  * Only architectures that have memory mapped IO functions defined
  * (and the PCI_IOBASE value defined) should call this function.
  */
-int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 {
 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
        unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
@@ -3383,6 +3386,7 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
        return -ENODEV;
 #endif
 }
+EXPORT_SYMBOL(pci_remap_iospace);
 
 /**
  * pci_unmap_iospace - Unmap the memory mapped I/O space
@@ -3400,6 +3404,89 @@ void pci_unmap_iospace(struct resource *res)
        unmap_kernel_range(vaddr, resource_size(res));
 #endif
 }
+EXPORT_SYMBOL(pci_unmap_iospace);
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+                                      resource_size_t offset,
+                                      resource_size_t size)
+{
+       void __iomem **ptr, *addr;
+
+       ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       addr = pci_remap_cfgspace(offset, size);
+       if (addr) {
+               *ptr = addr;
+               devres_add(dev, ptr);
+       } else
+               devres_free(ptr);
+
+       return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps with pci_remap_cfgspace() API that ensures the
+ * proper PCI configuration space memory attributes are guaranteed.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
+ *
+ *     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *     base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ *     if (IS_ERR(base))
+ *             return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+                                          struct resource *res)
+{
+       resource_size_t size;
+       const char *name;
+       void __iomem *dest_ptr;
+
+       BUG_ON(!dev);
+
+       if (!res || resource_type(res) != IORESOURCE_MEM) {
+               dev_err(dev, "invalid resource\n");
+               return IOMEM_ERR_PTR(-EINVAL);
+       }
+
+       size = resource_size(res);
+       name = res->name ?: dev_name(dev);
+
+       if (!devm_request_mem_region(dev, res->start, size, name)) {
+               dev_err(dev, "can't request region for resource %pR\n", res);
+               return IOMEM_ERR_PTR(-EBUSY);
+       }
+
+       dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+       if (!dest_ptr) {
+               dev_err(dev, "ioremap failed for resource %pR\n", res);
+               devm_release_mem_region(dev, res->start, size);
+               dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+       }
+
+       return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
 
 static void __pci_set_master(struct pci_dev *dev, bool enable)
 {
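
The kernel-doc above already gives the intended call sequence for devm_pci_remap_cfg_resource(). For context, a minimal, hypothetical host-controller probe built on it might look like the sketch below (driver and function names are invented for illustration; only devm_pci_remap_cfg_resource() itself comes from this patch):

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

static int example_pcie_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* Config-space window; mapped with config-space memory attributes. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_pci_remap_cfg_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... program the controller through 'base' ... */
        return 0;
}

The mapping and the memory region are both managed, so the driver needs no explicit cleanup in its remove path.
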
@@ -3773,24 +3860,41 @@ static void pci_flr_wait(struct pci_dev *dev)
                 (i - 1) * 100);
 }
 
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_has_flr - check if a device supports function level resets
+ * @dev: device to check
+ *
+ * Returns true if the device advertises support for PCIe function level
+ * resets.
+ */
+static bool pcie_has_flr(struct pci_dev *dev)
 {
        u32 cap;
 
-       pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
-       if (!(cap & PCI_EXP_DEVCAP_FLR))
-               return -ENOTTY;
+       if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+               return false;
 
-       if (probe)
-               return 0;
+       pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+       return cap & PCI_EXP_DEVCAP_FLR;
+}
 
+/**
+ * pcie_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ *
+ * Initiate a function level reset on @dev.  The caller should ensure the
+ * device supports FLR before calling this function, e.g. by using the
+ * pcie_has_flr() helper.
+ */
+void pcie_flr(struct pci_dev *dev)
+{
        if (!pci_wait_for_pending_transaction(dev))
                dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
 
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
        pci_flr_wait(dev);
-       return 0;
 }
+EXPORT_SYMBOL_GPL(pcie_flr);
 
 static int pci_af_flr(struct pci_dev *dev, int probe)
 {
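
With this split, pcie_flr() no longer takes a probe argument and returns void; callers are expected to confirm FLR support first (pcie_has_flr() is static here, so out-of-file users need their own capability check). A sketch of the resulting pattern, mirroring the __pci_dev_reset() change further down (the wrapper name is hypothetical):

/* Hypothetical helper inside drivers/pci/pci.c, following the new convention. */
static int example_try_flr(struct pci_dev *dev, int probe)
{
        if (!pcie_has_flr(dev))
                return -ENOTTY;         /* let the caller try another reset method */

        if (!probe)
                pcie_flr(dev);          /* void; waits for completion via pci_flr_wait() */

        return 0;
}
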
@@ -3801,6 +3905,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
        if (!pos)
                return -ENOTTY;
 
+       if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+               return -ENOTTY;
+
        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;
@@ -3971,9 +4078,12 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
        if (rc != -ENOTTY)
                goto done;
 
-       rc = pcie_flr(dev, probe);
-       if (rc != -ENOTTY)
+       if (pcie_has_flr(dev)) {
+               if (!probe)
+                       pcie_flr(dev);
+               rc = 0;
                goto done;
+       }
 
        rc = pci_af_flr(dev, probe);
        if (rc != -ENOTTY)
@@ -4932,6 +5042,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
 {
        u32 v;
 
+       if (pci_dev_is_disconnected(pdev))
+               return false;
        return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
 }
 EXPORT_SYMBOL_GPL(pci_device_is_present);
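
pci_device_is_present() now returns false early for devices already marked as gone. pci_dev_is_disconnected() is a small helper in drivers/pci/pci.h; to the best of my recollection it reduces to a check of the device's error state, roughly:

/* Paraphrased from drivers/pci/pci.h; see the header for the exact form. */
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
        return dev->error_state == pci_channel_io_perm_failure;
}
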
@@ -4947,6 +5059,11 @@ void pci_ignore_hotplug(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
 
+resource_size_t __weak pcibios_default_alignment(void)
+{
+       return 0;
+}
+
 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
 static DEFINE_SPINLOCK(resource_alignment_lock);
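
Because pcibios_default_alignment() is declared __weak, an architecture can override it to enforce a minimum alignment for every BAR in the system. A hypothetical override (the body is an assumption; only the prototype and the weak default come from this patch) could be as small as:

/* In an architecture's PCI code: request page alignment for all BARs. */
resource_size_t pcibios_default_alignment(void)
{
        return PAGE_SIZE;
}
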
@@ -4954,22 +5071,25 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
 /**
  * pci_specified_resource_alignment - get resource alignment specified by user.
  * @dev: the PCI device to get
+ * @resize: whether or not to change resources' size when reassigning alignment
  *
  * RETURNS: Resource alignment if it is specified.
  *          Zero if it is not specified.
  */
-static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
+                                                        bool *resize)
 {
        int seg, bus, slot, func, align_order, count;
        unsigned short vendor, device, subsystem_vendor, subsystem_device;
-       resource_size_t align = 0;
+       resource_size_t align = pcibios_default_alignment();
        char *p;
 
        spin_lock(&resource_alignment_lock);
        p = resource_alignment_param;
-       if (!*p)
+       if (!*p && !align)
                goto out;
        if (pci_has_flag(PCI_PROBE_ONLY)) {
+               align = 0;
                pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
                goto out;
        }
@@ -4999,6 +5119,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
                            (!device || (device == dev->device)) &&
                            (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
                            (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+                               *resize = true;
                                if (align_order == -1)
                                        align = PAGE_SIZE;
                                else
@@ -5024,6 +5145,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
                                bus == dev->bus->number &&
                                slot == PCI_SLOT(dev->devfn) &&
                                func == PCI_FUNC(dev->devfn)) {
+                               *resize = true;
                                if (align_order == -1)
                                        align = PAGE_SIZE;
                                else
@@ -5043,6 +5165,68 @@ out:
        return align;
 }
 
+static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+                                          resource_size_t align, bool resize)
+{
+       struct resource *r = &dev->resource[bar];
+       resource_size_t size;
+
+       if (!(r->flags & IORESOURCE_MEM))
+               return;
+
+       if (r->flags & IORESOURCE_PCI_FIXED) {
+               dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+                        bar, r, (unsigned long long)align);
+               return;
+       }
+
+       size = resource_size(r);
+       if (size >= align)
+               return;
+
+       /*
+        * Increase the alignment of the resource.  There are two ways we
+        * can do this:
+        *
+        * 1) Increase the size of the resource.  BARs are aligned on their
+        *    size, so when we reallocate space for this resource, we'll
+        *    allocate it with the larger alignment.  This also prevents
+        *    assignment of any other BARs inside the alignment region, so
+        *    if we're requesting page alignment, this means no other BARs
+        *    will share the page.
+        *
+        *    The disadvantage is that this makes the resource larger than
+        *    the hardware BAR, which may break drivers that compute things
+        *    based on the resource size, e.g., to find registers at a
+        *    fixed offset before the end of the BAR.
+        *
+        * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
+        *    set r->start to the desired alignment.  By itself this
+        *    doesn't prevent other BARs being put inside the alignment
+        *    region, but if we realign *every* resource of every device in
+        *    the system, none of them will share an alignment region.
+        *
+        * When the user has requested alignment for only some devices via
+        * the "pci=resource_alignment" argument, "resize" is true and we
+        * use the first method.  Otherwise we assume we're aligning all
+        * devices and we use the second.
+        */
+
+       dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
+                bar, r, (unsigned long long)align);
+
+       if (resize) {
+               r->start = 0;
+               r->end = align - 1;
+       } else {
+               r->flags &= ~IORESOURCE_SIZEALIGN;
+               r->flags |= IORESOURCE_STARTALIGN;
+               r->start = align;
+               r->end = r->start + size - 1;
+       }
+       r->flags |= IORESOURCE_UNSET;
+}
+
 /*
  * This function disables memory decoding and releases memory resources
  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
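
The comment block in pci_request_resource_alignment() above weighs the two realignment strategies. A short worked example may help; the numbers below are illustrative only:

/*
 * Example: a 4 KB memory BAR (size 0x1000) with a requested alignment
 * of 64 KB (align 0x10000):
 *
 *   resize == true:   r->start = 0x0,     r->end = 0xffff
 *                     (the resource grows to 64 KB, so size-based
 *                     alignment yields the requested alignment)
 *
 *   resize == false:  r->start = 0x10000, r->end = 0x10fff
 *                     (size stays 4 KB; IORESOURCE_STARTALIGN is set
 *                     and IORESOURCE_SIZEALIGN is cleared)
 *
 * In both cases IORESOURCE_UNSET is set so the resource is reassigned
 * during the next resource allocation pass.
 */
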
@@ -5054,8 +5238,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
 {
        int i;
        struct resource *r;
-       resource_size_t align, size;
+       resource_size_t align;
        u16 command;
+       bool resize = false;
 
        /*
         * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
@@ -5067,7 +5252,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
                return;
 
        /* check if specified PCI is target device to reassign */
-       align = pci_specified_resource_alignment(dev);
+       align = pci_specified_resource_alignment(dev, &resize);
        if (!align)
                return;
 
@@ -5084,28 +5269,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
        command &= ~PCI_COMMAND_MEMORY;
        pci_write_config_word(dev, PCI_COMMAND, command);
 
-       for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
-               r = &dev->resource[i];
-               if (!(r->flags & IORESOURCE_MEM))
-                       continue;
-               if (r->flags & IORESOURCE_PCI_FIXED) {
-                       dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
-                               i, r);
-                       continue;
-               }
-
-               size = resource_size(r);
-               if (size < align) {
-                       size = align;
-                       dev_info(&dev->dev,
-                               "Rounding up size of resource #%d to %#llx.\n",
-                               i, (unsigned long long)size);
-               }
-               r->flags |= IORESOURCE_UNSET;
-               r->end = size - 1;
-               r->start = 0;
-       }
-       /* Need to disable bridge's resource window,
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+               pci_request_resource_alignment(dev, i, align, resize);
+
+       /*
+        * Need to disable bridge's resource window,
         * to enable the kernel to reassign new resource
         * window later on.
         */