Diffstat (limited to 'drivers/pci/pci.c')
 drivers/pci/pci.c | 183
 1 file changed, 139 insertions(+), 44 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4a7c6864fdf4..f6a4dd10d9b0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * PCI Bus Services, see include/linux/pci.h for further explanation.
  *
@@ -156,7 +157,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
 	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
-		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
+		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
 		return NULL;
 	}
 	return ioremap_nocache(res->start, resource_size(res));
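
(For reference: the pci_warn()/pci_err()/pci_info()/pci_dbg() calls introduced throughout this diff are thin wrappers around the corresponding dev_*() macros. Their definitions in include/linux/pci.h look roughly like the sketch below; the exact formatting there may differ:)

	#define pci_printk(level, pdev, fmt, arg...) \
		dev_printk(level, &(pdev)->dev, fmt, ##arg)

	#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
	#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
	#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
	#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)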
@@ -648,7 +649,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
	 */
 	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 	    && dev->current_state > state) {
-		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
+		pci_err(dev, "invalid power transition (from state %d to %d)\n",
 			dev->current_state, state);
 		return -EINVAL;
 	}
@@ -696,7 +697,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	if (dev->current_state != state && printk_ratelimit())
-		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
+		pci_info(dev, "Refused to change power state, currently in D%d\n",
 			 dev->current_state);
 
 	/*
@@ -970,7 +971,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 	case PM_EVENT_HIBERNATE:
 		return PCI_D3hot;
 	default:
-		dev_info(&dev->dev, "unrecognized suspend event %d\n",
+		pci_info(dev, "unrecognized suspend event %d\n",
 			 state.event);
 		BUG();
 	}
@@ -1013,7 +1014,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 	if (!save_state) {
-		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
+		pci_err(dev, "buffer not found in %s\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -1061,7 +1062,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 	if (!save_state) {
-		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
+		pci_err(dev, "buffer not found in %s\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -1121,7 +1122,7 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 		return;
 
 	for (;;) {
-		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
+		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
 			offset, val, saved_val);
 		pci_write_config_dword(pdev, offset, saved_val);
 		if (retry-- <= 0)
@@ -1358,7 +1359,7 @@ static void pci_enable_bridge(struct pci_dev *dev)
 
 	retval = pci_enable_device(dev);
 	if (retval)
-		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+		pci_err(dev, "Error enabling bridge (%d), continuing\n",
 			retval);
 	pci_set_master(dev);
 }
@@ -1458,6 +1459,7 @@ struct pci_devres {
 	unsigned int pinned:1;
 	unsigned int orig_intx:1;
 	unsigned int restore_intx:1;
+	unsigned int mwi:1;
 	u32 region_mask;
 };
 
@@ -1476,6 +1478,9 @@ static void pcim_release(struct device *gendev, void *res)
 		if (this->region_mask & (1 << i))
 			pci_release_region(dev, i);
 
+	if (this->mwi)
+		pci_clear_mwi(dev);
+
 	if (this->restore_intx)
 		pci_intx(dev, this->orig_intx);
 
@@ -1863,7 +1868,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
 					  GFP_KERNEL);
 			if (!pme_dev) {
-				dev_warn(&dev->dev, "can't enable PME#\n");
+				pci_warn(dev, "can't enable PME#\n");
 				return;
 			}
 			pme_dev->dev = dev;
@@ -1887,7 +1892,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 		}
 	}
 
-	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
+	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
 }
 EXPORT_SYMBOL(pci_pme_active);
 
@@ -2424,7 +2429,7 @@ void pci_pm_init(struct pci_dev *dev)
 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 
 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
-		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
+		pci_err(dev, "unsupported PM cap regs version (%u)\n",
 			pmc & PCI_PM_CAP_VER_MASK);
 		return;
 	}
@@ -2444,15 +2449,14 @@ void pci_pm_init(struct pci_dev *dev)
 			dev->d2_support = true;
 
 		if (dev->d1_support || dev->d2_support)
-			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
+			pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
 				   dev->d1_support ? " D1" : "",
 				   dev->d2_support ? " D2" : "");
 	}
 
 	pmc &= PCI_PM_CAP_PME_MASK;
 	if (pmc) {
-		dev_printk(KERN_DEBUG, &dev->dev,
-			 "PME# supported from%s%s%s%s%s\n",
+		pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -2544,13 +2548,13 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
 
 	res = pci_ea_get_resource(dev, bei, prop);
 	if (!res) {
-		dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
+		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
 		goto out;
 	}
 
 	flags = pci_ea_flags(dev, prop);
 	if (!flags) {
-		dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
+		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
 		goto out;
 	}
 
@@ -2600,13 +2604,12 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
 	}
 
 	if (end < start) {
-		dev_err(&dev->dev, "EA Entry crosses address boundary\n");
+		pci_err(dev, "EA Entry crosses address boundary\n");
 		goto out;
 	}
 
 	if (ent_size != ent_offset - offset) {
-		dev_err(&dev->dev,
-			"EA Entry Size (%d) does not match length read (%d)\n",
+		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
 			ent_size, ent_offset - offset);
 		goto out;
 	}
@@ -2617,16 +2620,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
 	res->flags = flags;
 
 	if (bei <= PCI_EA_BEI_BAR5)
-		dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+		pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
 			   bei, res, prop);
 	else if (bei == PCI_EA_BEI_ROM)
-		dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+		pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
 			   res, prop);
 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
-		dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+		pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
 	else
-		dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+		pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
 			   bei, res, prop);
 
 out:
@@ -2723,13 +2726,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
 					PCI_EXP_SAVE_REGS * sizeof(u16));
 	if (error)
-		dev_err(&dev->dev,
-			"unable to preallocate PCI Express save buffer\n");
+		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
 
 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
 	if (error)
-		dev_err(&dev->dev,
-			"unable to preallocate PCI-X save buffer\n");
+		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
 
 	pci_allocate_vc_save_buffers(dev);
 }
@@ -3066,6 +3067,81 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
 }
 
 /**
+ * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
+ * @dev: the PCI device
+ * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
+ *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
+ *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
+ *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
+ *
+ * Return 0 if all upstream bridges support AtomicOp routing, egress
+ * blocking is disabled on all upstream ports, and the root port supports
+ * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
+ * AtomicOp completion), or negative otherwise.
+ */
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pci_dev *bridge;
+	u32 cap, ctl2;
+
+	if (!pci_is_pcie(dev))
+		return -EINVAL;
+
+	/*
+	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
+	 * AtomicOp requesters. For now, we only support endpoints as
+	 * requesters and root ports as completers. No endpoints as
+	 * completers, and no peer-to-peer.
+	 */
+
+	switch (pci_pcie_type(dev)) {
+	case PCI_EXP_TYPE_ENDPOINT:
+	case PCI_EXP_TYPE_LEG_END:
+	case PCI_EXP_TYPE_RC_END:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	while (bus->parent) {
+		bridge = bus->self;
+
+		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+
+		switch (pci_pcie_type(bridge)) {
+		/* Ensure switch ports support AtomicOp routing */
+		case PCI_EXP_TYPE_UPSTREAM:
+		case PCI_EXP_TYPE_DOWNSTREAM:
+			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+				return -EINVAL;
+			break;
+
+		/* Ensure root port supports all the sizes we care about */
+		case PCI_EXP_TYPE_ROOT_PORT:
+			if ((cap & cap_mask) != cap_mask)
+				return -EINVAL;
+			break;
+		}
+
+		/* Ensure upstream ports don't block AtomicOps on egress */
+		if (!bridge->has_secondary_link) {
+			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
+						   &ctl2);
+			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
+				return -EINVAL;
+		}
+
+		bus = bus->parent;
+	}
+
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
+	return 0;
+}
+EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
+
+/**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
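
(Usage note: a minimal sketch of how an endpoint driver could use the new API at probe time. foo_probe() and its fallback policy are hypothetical; only pci_enable_atomic_ops_to_root() and the PCI_EXP_DEVCAP2_ATOMIC_COMP* masks come from this diff:)

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		/* Request 32- and 64-bit AtomicOp completion at the root port. */
		ret = pci_enable_atomic_ops_to_root(pdev,
						    PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
						    PCI_EXP_DEVCAP2_ATOMIC_COMP64);
		if (ret)
			/* Not fatal here: the device falls back to non-atomic TLPs. */
			pci_info(pdev, "PCIe AtomicOps not available (%d)\n", ret);

		return 0;
	}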
@@ -3198,7 +3274,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
 	return 0;
 
 err_out:
-	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
+	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
 		 &pdev->resource[bar]);
 	return -EBUSY;
 }
@@ -3621,7 +3697,7 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
 	else
 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
 	if (cmd != old_cmd) {
-		dev_dbg(&dev->dev, "%s bus mastering\n",
+		pci_dbg(dev, "%s bus mastering\n",
 			enable ? "enabling" : "disabling");
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
@@ -3722,7 +3798,7 @@ int pci_set_cacheline_size(struct pci_dev *dev)
 	if (cacheline_size == pci_cache_line_size)
 		return 0;
 
-	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
+	pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
 		   pci_cache_line_size << 2);
 
 	return -EINVAL;
@@ -3751,7 +3827,7 @@ int pci_set_mwi(struct pci_dev *dev)
 
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
-		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
+		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
 		cmd |= PCI_COMMAND_INVALIDATE;
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
@@ -3761,6 +3837,27 @@ int pci_set_mwi(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_set_mwi);
 
 /**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+	struct pci_devres *dr;
+
+	dr = find_pci_dr(dev);
+	if (!dr)
+		return -ENOMEM;
+
+	dr->mwi = 1;
+	return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+/**
  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
  * @dev: the PCI device for which MWI is enabled
  *
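
(Usage note: a sketch of the managed call sequence, with a hypothetical bar_probe(). Because pcim_release() above now calls pci_clear_mwi() when the mwi devres flag is set, no explicit cleanup is needed on driver detach:)

	static int bar_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);	/* managed pci_enable_device() */
		if (ret)
			return ret;

		ret = pcim_set_mwi(pdev);	/* MWI undone automatically via devres */
		if (ret)
			return ret;

		pci_set_master(pdev);
		return 0;
	}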
@@ -3947,13 +4044,13 @@ static void pci_flr_wait(struct pci_dev *dev)
 	pci_read_config_dword(dev, PCI_COMMAND, &id);
 	while (id == ~0) {
 		if (delay > timeout) {
-			dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+			pci_warn(dev, "not ready %dms after FLR; giving up\n",
 				 100 + delay - 1);
 			return;
 		}
 
 		if (delay > 1000)
-			dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+			pci_info(dev, "not ready %dms after FLR; waiting\n",
 				 100 + delay - 1);
 
 		msleep(delay);
@@ -3962,7 +4059,7 @@ static void pci_flr_wait(struct pci_dev *dev)
 	}
 
 	if (delay > 1000)
-		dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
+		pci_info(dev, "ready %dms after FLR\n", 100 + delay - 1);
 }
 
 /**
@@ -3994,7 +4091,7 @@ static bool pcie_has_flr(struct pci_dev *dev)
 void pcie_flr(struct pci_dev *dev)
 {
 	if (!pci_wait_for_pending_transaction(dev))
-		dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
+		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
 
 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
 	pci_flr_wait(dev);
@@ -4027,7 +4124,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
 	 */
 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
 				  PCI_AF_STATUS_TP << 8))
-		dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
+		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
 
 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
 	pci_flr_wait(dev);
@@ -5150,12 +5247,12 @@ void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
 		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
 					      sizeof(long), GFP_KERNEL);
 	if (!dev->dma_alias_mask) {
-		dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+		pci_warn(dev, "Unable to allocate DMA alias mask\n");
 		return;
 	}
 
 	set_bit(devfn, dev->dma_alias_mask);
-	dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
 		 PCI_SLOT(devfn), PCI_FUNC(devfn));
 }
 
@@ -5304,7 +5401,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
 		return;
 
 	if (r->flags & IORESOURCE_PCI_FIXED) {
-		dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
 			 bar, r, (unsigned long long)align);
 		return;
 	}
@@ -5341,7 +5438,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
 	 * devices and we use the second.
 	 */
 
-	dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
+	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
 		 bar, r, (unsigned long long)align);
 
 	if (resize) {
@@ -5387,13 +5484,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
 
 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
-		dev_warn(&dev->dev,
-			 "Can't reassign resources to host bridge.\n");
+		pci_warn(dev, "Can't reassign resources to host bridge\n");
 		return;
 	}
 
-	dev_info(&dev->dev,
-		 "Disabling memory decoding and releasing memory resources.\n");
+	pci_info(dev, "Disabling memory decoding and releasing memory resources\n");
 	pci_read_config_word(dev, PCI_COMMAND, &command);
 	command &= ~PCI_COMMAND_MEMORY;
 	pci_write_config_word(dev, PCI_COMMAND, command);