Diffstat (limited to 'drivers/pci/pci.c')
 drivers/pci/pci.c | 545
 1 file changed, 252 insertions(+), 293 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07369f32e8bb..1febe90831b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -431,6 +431,32 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 }
 
 /**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+ * @pos: config space offset of status word
+ * @mask: mask of bit(s) to care about in status word
+ *
+ * Return 1 when mask bit(s) in status word clear, 0 otherwise.
+ */
+int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
+{
+	int i;
+
+	/* Wait for Transaction Pending bit clean */
+	for (i = 0; i < 4; i++) {
+		u16 status;
+		if (i)
+			msleep((1 << (i - 1)) * 100);
+
+		pci_read_config_word(dev, pos, &status);
+		if (!(status & mask))
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
  * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
  * @dev: PCI device to have its BARs restored
  *
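
The helper above factors out an exponential-backoff poll (reads at 0, 100, 200 and 400 ms) that several reset paths previously open-coded against different status registers. As a hedged sketch of a caller inside the PCI core, the snippet below waits on a made-up vendor status bit before forcing a reset; EXAMPLE_STATUS_REG and EXAMPLE_BUSY are illustrative names, not part of this patch:

#define EXAMPLE_STATUS_REG	0x44	/* hypothetical config offset */
#define EXAMPLE_BUSY		0x0001	/* hypothetical busy bit */

static void example_quiesce(struct pci_dev *pdev)
{
	/* Returns 1 as soon as the bit reads clear, 0 after ~700 ms of polling */
	if (!pci_wait_for_pending(pdev, EXAMPLE_STATUS_REG, EXAMPLE_BUSY))
		dev_warn(&pdev->dev, "device still busy, resetting anyway\n");
}
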
@@ -657,6 +683,28 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 }
 
 /**
+ * pci_wakeup - Wake up a PCI device
+ * @pci_dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+	pci_wakeup_event(pci_dev);
+	pm_request_resume(&pci_dev->dev);
+	return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+static void pci_wakeup_bus(struct pci_bus *bus)
+{
+	if (bus)
+		pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
  * __pci_start_power_transition - Start power transition of a PCI device
  * @dev: PCI device to handle.
  * @state: State to put the device into.
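
The wakeup helpers moved here lean on pci_walk_bus(), which applies a callback of type int (*)(struct pci_dev *, void *) to every device in a subtree and stops early if the callback returns non-zero. A minimal sketch of the same pattern, used for an unrelated and purely illustrative purpose (counting devices with a bound driver):

/* Hypothetical pci_walk_bus() callback: count devices with a driver bound. */
static int example_count_bound(struct pci_dev *pdev, void *data)
{
	int *count = data;

	if (pdev->driver)
		(*count)++;
	return 0;	/* non-zero here would stop the walk early */
}

static int example_bound_devices(struct pci_bus *bus)
{
	int count = 0;

	pci_walk_bus(bus, example_count_bound, &count);
	return count;
}
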
@@ -835,18 +883,28 @@ EXPORT_SYMBOL(pci_choose_state);
 #define PCI_EXP_SAVE_REGS	7
 
 
-static struct pci_cap_saved_state *pci_find_saved_cap(
-	struct pci_dev *pci_dev, char cap)
+static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+							u16 cap, bool extended)
 {
 	struct pci_cap_saved_state *tmp;
 
 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
-		if (tmp->cap.cap_nr == cap)
+		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
 			return tmp;
 	}
 	return NULL;
 }
 
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
+{
+	return _pci_find_saved_cap(dev, cap, false);
+}
+
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
+{
+	return _pci_find_saved_cap(dev, cap, true);
+}
+
 static int pci_save_pcie_state(struct pci_dev *dev)
 {
 	int i = 0;
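
With the extended flag, a lookup now distinguishes a conventional capability ID from a PCI Express extended capability ID that happens to share the same numeric value. As an illustrative sketch (not part of this patch) of how the two wrappers might be used by save/restore code in the PCI core:

/* Illustrative: inspect previously saved capability blocks, if any. */
static void example_show_saved(struct pci_dev *dev)
{
	struct pci_cap_saved_state *pcie, *vc;

	pcie = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);		/* conventional */
	vc = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_VC);	/* extended */

	if (pcie)
		dev_info(&dev->dev, "%u bytes of PCIe state saved\n",
			 pcie->cap.size);
	if (vc)
		dev_info(&dev->dev, "%u bytes of VC state saved\n",
			 vc->cap.size);
}
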
@@ -948,6 +1006,8 @@ pci_save_state(struct pci_dev *dev)
 		return i;
 	if ((i = pci_save_pcix_state(dev)) != 0)
 		return i;
+	if ((i = pci_save_vc_state(dev)) != 0)
+		return i;
 	return 0;
 }
 
@@ -1010,6 +1070,7 @@ void pci_restore_state(struct pci_dev *dev)
 	/* PCI Express register must be restored first */
 	pci_restore_pcie_state(dev);
 	pci_restore_ats_state(dev);
+	pci_restore_vc_state(dev);
 
 	pci_restore_config_space(dev);
 
@@ -1071,7 +1132,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state);
  * @dev: PCI device that we're dealing with
  * @state: Saved state returned from pci_store_saved_state()
  */
-int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
+static int pci_load_saved_state(struct pci_dev *dev,
+				struct pci_saved_state *state)
 {
 	struct pci_cap_saved_data *cap;
 
@@ -1087,7 +1149,7 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
 	while (cap->size) {
 		struct pci_cap_saved_state *tmp;
 
-		tmp = pci_find_saved_cap(dev, cap->cap_nr);
+		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
 		if (!tmp || tmp->cap.size != cap->size)
 			return -EINVAL;
 
@@ -1099,7 +1161,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
 	dev->state_saved = true;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(pci_load_saved_state);
 
 /**
  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
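
With pci_load_saved_state() made static, pci_store_saved_state() and pci_load_and_free_saved_state() remain the public pair for snapshotting and reloading config space. An abbreviated, illustrative caller (error handling omitted):

/* Illustrative: snapshot config space, do something disruptive, reload. */
static void example_snapshot_and_restore(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* kmalloc'd copy, may be NULL */

	/* ... something that may clobber config space, e.g. a reset ... */

	pci_load_and_free_saved_state(pdev, &state);	/* frees and clears state */
	pci_restore_state(pdev);
}
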
@@ -1531,27 +1592,6 @@ void pci_pme_wakeup_bus(struct pci_bus *bus)
 	pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
 }
 
-/**
- * pci_wakeup - Wake up a PCI device
- * @pci_dev: Device to handle.
- * @ign: ignored parameter
- */
-static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
-{
-	pci_wakeup_event(pci_dev);
-	pm_request_resume(&pci_dev->dev);
-	return 0;
-}
-
-/**
- * pci_wakeup_bus - Walk given bus and wake up devices on it
- * @bus: Top bus of the subtree to walk.
- */
-void pci_wakeup_bus(struct pci_bus *bus)
-{
-	if (bus)
-		pci_walk_bus(bus, pci_wakeup, NULL);
-}
 
 /**
  * pci_pme_capable - check the capability of PCI device to generate PME#
@@ -1765,7 +1805,7 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
  * If the platform can't manage @dev, return the deepest state from which it
  * can generate wake events, based on any available PME info.
  */
-pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev)
 {
 	pci_power_t target_state = PCI_D3hot;
 
@@ -2021,18 +2061,24 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
 }
 
 /**
- * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
+ * _pci_add_cap_save_buffer - allocate buffer for saving given
+ * capability registers
  * @dev: the PCI device
  * @cap: the capability to allocate the buffer for
+ * @extended: Standard or Extended capability ID
  * @size: requested size of the buffer
  */
-static int pci_add_cap_save_buffer(
-	struct pci_dev *dev, char cap, unsigned int size)
+static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
+				    bool extended, unsigned int size)
 {
 	int pos;
 	struct pci_cap_saved_state *save_state;
 
-	pos = pci_find_capability(dev, cap);
+	if (extended)
+		pos = pci_find_ext_capability(dev, cap);
+	else
+		pos = pci_find_capability(dev, cap);
+
 	if (pos <= 0)
 		return 0;
 
@@ -2041,12 +2087,23 @@ static int pci_add_cap_save_buffer(
 		return -ENOMEM;
 
 	save_state->cap.cap_nr = cap;
+	save_state->cap.cap_extended = extended;
 	save_state->cap.size = size;
 	pci_add_saved_cap(dev, save_state);
 
 	return 0;
 }
 
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
+{
+	return _pci_add_cap_save_buffer(dev, cap, false, size);
+}
+
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
+{
+	return _pci_add_cap_save_buffer(dev, cap, true, size);
+}
+
 /**
  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
  * @dev: the PCI device
@@ -2065,6 +2122,8 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 	if (error)
 		dev_err(&dev->dev,
 			"unable to preallocate PCI-X save buffer\n");
+
+	pci_allocate_vc_save_buffers(dev);
 }
 
 void pci_free_cap_save_buffers(struct pci_dev *dev)
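
pci_allocate_vc_save_buffers() itself is added elsewhere in this series (drivers/pci/vc.c) and is expected to size each Virtual Channel capability and reserve a save buffer for it through the new extended-capability hook. A rough sketch of that pattern, with an illustrative fixed size rather than the real VC-derived one:

/* Sketch only: reserve a save buffer for an extended capability. */
static void example_allocate_vc_buffer(struct pci_dev *dev)
{
	unsigned int len = 4 * sizeof(u32);	/* illustrative size */

	if (pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_VC, len))
		dev_err(&dev->dev, "unable to preallocate VC save buffer\n");
}
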
@@ -2110,242 +2169,6 @@ void pci_configure_ari(struct pci_dev *dev)
 	}
 }
 
-/**
- * pci_enable_ido - enable ID-based Ordering on a device
- * @dev: the PCI device
- * @type: which types of IDO to enable
- *
- * Enable ID-based ordering on @dev. @type can contain the bits
- * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
- * which types of transactions are allowed to be re-ordered.
- */
-void pci_enable_ido(struct pci_dev *dev, unsigned long type)
-{
-	u16 ctrl = 0;
-
-	if (type & PCI_EXP_IDO_REQUEST)
-		ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
-	if (type & PCI_EXP_IDO_COMPLETION)
-		ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
-	if (ctrl)
-		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
-}
-EXPORT_SYMBOL(pci_enable_ido);
-
-/**
- * pci_disable_ido - disable ID-based ordering on a device
- * @dev: the PCI device
- * @type: which types of IDO to disable
- */
-void pci_disable_ido(struct pci_dev *dev, unsigned long type)
-{
-	u16 ctrl = 0;
-
-	if (type & PCI_EXP_IDO_REQUEST)
-		ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
-	if (type & PCI_EXP_IDO_COMPLETION)
-		ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
-	if (ctrl)
-		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
-}
-EXPORT_SYMBOL(pci_disable_ido);
-
-/**
- * pci_enable_obff - enable optimized buffer flush/fill
- * @dev: PCI device
- * @type: type of signaling to use
- *
- * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
- * signaling if possible, falling back to message signaling only if
- * WAKE# isn't supported. @type should indicate whether the PCIe link
- * be brought out of L0s or L1 to send the message. It should be either
- * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
- *
- * If your device can benefit from receiving all messages, even at the
- * power cost of bringing the link back up from a low power state, use
- * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
- * preferred type).
- *
- * RETURNS:
- * Zero on success, appropriate error number on failure.
- */
-int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
-{
-	u32 cap;
-	u16 ctrl;
-	int ret;
-
-	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
-	if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK))
-		return -ENOTSUPP; /* no OBFF support at all */
-
-	/* Make sure the topology supports OBFF as well */
-	if (dev->bus->self) {
-		ret = pci_enable_obff(dev->bus->self, type);
-		if (ret)
-			return ret;
-	}
-
-	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
-	if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE)
-		ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
-	else {
-		switch (type) {
-		case PCI_EXP_OBFF_SIGNAL_L0:
-			if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN))
-				ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN;
-			break;
-		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
-			ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
-			ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN;
-			break;
-		default:
-			WARN(1, "bad OBFF signal type\n");
-			return -ENOTSUPP;
-		}
-	}
-	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
-
-	return 0;
-}
-EXPORT_SYMBOL(pci_enable_obff);
-
-/**
- * pci_disable_obff - disable optimized buffer flush/fill
- * @dev: PCI device
- *
- * Disable OBFF on @dev.
- */
-void pci_disable_obff(struct pci_dev *dev)
-{
-	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
-				   PCI_EXP_DEVCTL2_OBFF_WAKE_EN);
-}
-EXPORT_SYMBOL(pci_disable_obff);
-
-/**
- * pci_ltr_supported - check whether a device supports LTR
- * @dev: PCI device
- *
- * RETURNS:
- * True if @dev supports latency tolerance reporting, false otherwise.
- */
-static bool pci_ltr_supported(struct pci_dev *dev)
-{
-	u32 cap;
-
-	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
-
-	return cap & PCI_EXP_DEVCAP2_LTR;
-}
-
-/**
- * pci_enable_ltr - enable latency tolerance reporting
- * @dev: PCI device
- *
- * Enable LTR on @dev if possible, which means enabling it first on
- * upstream ports.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int pci_enable_ltr(struct pci_dev *dev)
-{
-	int ret;
-
-	/* Only primary function can enable/disable LTR */
-	if (PCI_FUNC(dev->devfn) != 0)
-		return -EINVAL;
-
-	if (!pci_ltr_supported(dev))
-		return -ENOTSUPP;
-
-	/* Enable upstream ports first */
-	if (dev->bus->self) {
-		ret = pci_enable_ltr(dev->bus->self);
-		if (ret)
-			return ret;
-	}
-
-	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
-					PCI_EXP_DEVCTL2_LTR_EN);
-}
-EXPORT_SYMBOL(pci_enable_ltr);
-
-/**
- * pci_disable_ltr - disable latency tolerance reporting
- * @dev: PCI device
- */
-void pci_disable_ltr(struct pci_dev *dev)
-{
-	/* Only primary function can enable/disable LTR */
-	if (PCI_FUNC(dev->devfn) != 0)
-		return;
-
-	if (!pci_ltr_supported(dev))
-		return;
-
-	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
-				   PCI_EXP_DEVCTL2_LTR_EN);
-}
-EXPORT_SYMBOL(pci_disable_ltr);
-
-static int __pci_ltr_scale(int *val)
-{
-	int scale = 0;
-
-	while (*val > 1023) {
-		*val = (*val + 31) / 32;
-		scale++;
-	}
-	return scale;
-}
-
-/**
- * pci_set_ltr - set LTR latency values
- * @dev: PCI device
- * @snoop_lat_ns: snoop latency in nanoseconds
- * @nosnoop_lat_ns: nosnoop latency in nanoseconds
- *
- * Figure out the scale and set the LTR values accordingly.
- */
-int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
-{
-	int pos, ret, snoop_scale, nosnoop_scale;
-	u16 val;
-
-	if (!pci_ltr_supported(dev))
-		return -ENOTSUPP;
-
-	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
-	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
-
-	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
-	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
-		return -EINVAL;
-
-	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
-	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
-		return -EINVAL;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
-	if (!pos)
-		return -ENOTSUPP;
-
-	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
-	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
-	if (ret != 4)
-		return -EIO;
-
-	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
-	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
-	if (ret != 4)
-		return -EIO;
-
-	return 0;
-}
-EXPORT_SYMBOL(pci_set_ltr);
-
 static int pci_acs_enable;
 
 /**
@@ -3138,7 +2961,7 @@ bool pci_check_and_mask_intx(struct pci_dev *dev)
 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
 
 /**
- * pci_check_and_mask_intx - unmask INTx of no interrupt is pending
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
  * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, unmask it if not
@@ -3204,20 +3027,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
  */
 int pci_wait_for_pending_transaction(struct pci_dev *dev)
 {
-	int i;
-	u16 status;
-
-	/* Wait for Transaction Pending bit clean */
-	for (i = 0; i < 4; i++) {
-		if (i)
-			msleep((1 << (i - 1)) * 100);
-
-		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
-		if (!(status & PCI_EXP_DEVSTA_TRPND))
-			return 1;
-	}
+	if (!pci_is_pcie(dev))
+		return 1;
 
-	return 0;
+	return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
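
pci_wait_for_pending_transaction() remains an exported helper, so a driver can drain outstanding requests before poking a device-specific reset. An illustrative caller (the MMIO register and delay below are placeholders, not a real device sequence):

/* Hypothetical driver-side use of the exported helper. */
static void example_device_reset(struct pci_dev *pdev, void __iomem *regs)
{
	if (!pci_wait_for_pending_transaction(pdev))
		dev_warn(&pdev->dev,
			 "transactions still pending, resetting anyway\n");

	writel(0x1, regs + 0x100);	/* made-up reset register */
	msleep(100);			/* made-up settle time */
}
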
@@ -3244,10 +3057,8 @@ static int pcie_flr(struct pci_dev *dev, int probe)
 
 static int pci_af_flr(struct pci_dev *dev, int probe)
 {
-	int i;
 	int pos;
 	u8 cap;
-	u8 status;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
 	if (!pos)
@@ -3261,14 +3072,8 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
 		return 0;
 
 	/* Wait for Transaction Pending bit clean */
-	for (i = 0; i < 4; i++) {
-		if (i)
-			msleep((1 << (i - 1)) * 100);
-
-		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
-		if (!(status & PCI_AF_STATUS_TP))
-			goto clear;
-	}
+	if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
+		goto clear;
 
 	dev_err(&dev->dev, "transaction is not cleared; "
 		"proceeding with reset anyway\n");
@@ -3445,6 +3250,18 @@ static void pci_dev_lock(struct pci_dev *dev)
 	device_lock(&dev->dev);
 }
 
+/* Return 1 on successful lock, 0 on contention */
+static int pci_dev_trylock(struct pci_dev *dev)
+{
+	if (pci_cfg_access_trylock(dev)) {
+		if (device_trylock(&dev->dev))
+			return 1;
+		pci_cfg_access_unlock(dev);
+	}
+
+	return 0;
+}
+
 static void pci_dev_unlock(struct pci_dev *dev)
 {
 	device_unlock(&dev->dev);
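
pci_dev_trylock() takes the same two locks as pci_dev_lock(), the config-space access lock and the device lock, but backs out instead of sleeping, which lets the try-reset paths below fail with -EAGAIN rather than block behind a concurrent reset or remove. A minimal sketch of the pairing inside pci.c:

/* Sketch only: run an operation under the non-blocking lock pair. */
static int example_do_under_trylock(struct pci_dev *dev)
{
	if (!pci_dev_trylock(dev))
		return -EAGAIN;		/* contended, let the caller retry */

	/* ... operate on the quiesced device ... */

	pci_dev_unlock(dev);
	return 0;
}
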
@@ -3588,6 +3405,34 @@ int pci_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/**
+ * pci_try_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Same as above, except return -EAGAIN if unable to lock device.
+ */
+int pci_try_reset_function(struct pci_dev *dev)
+{
+	int rc;
+
+	rc = pci_dev_reset(dev, 1);
+	if (rc)
+		return rc;
+
+	pci_dev_save_and_disable(dev);
+
+	if (pci_dev_trylock(dev)) {
+		rc = __pci_dev_reset(dev, 0);
+		pci_dev_unlock(dev);
+	} else
+		rc = -EAGAIN;
+
+	pci_dev_restore(dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_function);
+
 /* Lock devices from the top of the tree down */
 static void pci_bus_lock(struct pci_bus *bus)
 {
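
Since pci_try_reset_function() is exported, a driver that owns the device (a VFIO-style user, for instance) can attempt a reset from a path that must not block and pass the contention back to its caller. A hypothetical sketch:

/* Hypothetical caller: report contention instead of blocking. */
static int example_reset_request(struct pci_dev *pdev)
{
	int ret = pci_try_reset_function(pdev);

	if (ret == -EAGAIN)
		dev_dbg(&pdev->dev, "reset contended, try again later\n");

	return ret;	/* 0 on success, -EAGAIN on contention, other errno on failure */
}
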
@@ -3612,6 +3457,32 @@ static void pci_bus_unlock(struct pci_bus *bus)
 	}
 }
 
+/* Return 1 on successful lock, 0 on contention */
+static int pci_bus_trylock(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		if (!pci_dev_trylock(dev))
+			goto unlock;
+		if (dev->subordinate) {
+			if (!pci_bus_trylock(dev->subordinate)) {
+				pci_dev_unlock(dev);
+				goto unlock;
+			}
+		}
+	}
+	return 1;
+
+unlock:
+	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+		if (dev->subordinate)
+			pci_bus_unlock(dev->subordinate);
+		pci_dev_unlock(dev);
+	}
+	return 0;
+}
+
 /* Lock devices from the top of the tree down */
 static void pci_slot_lock(struct pci_slot *slot)
 {
@@ -3640,6 +3511,37 @@ static void pci_slot_unlock(struct pci_slot *slot)
 	}
 }
 
+/* Return 1 on successful lock, 0 on contention */
+static int pci_slot_trylock(struct pci_slot *slot)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+		if (!dev->slot || dev->slot != slot)
+			continue;
+		if (!pci_dev_trylock(dev))
+			goto unlock;
+		if (dev->subordinate) {
+			if (!pci_bus_trylock(dev->subordinate)) {
+				pci_dev_unlock(dev);
+				goto unlock;
+			}
+		}
+	}
+	return 1;
+
+unlock:
+	list_for_each_entry_continue_reverse(dev,
+					     &slot->bus->devices, bus_list) {
+		if (!dev->slot || dev->slot != slot)
+			continue;
+		if (dev->subordinate)
+			pci_bus_unlock(dev->subordinate);
+		pci_dev_unlock(dev);
+	}
+	return 0;
+}
+
 /* Save and disable devices from the top of the tree down */
 static void pci_bus_save_and_disable(struct pci_bus *bus)
 {
@@ -3763,6 +3665,35 @@ int pci_reset_slot(struct pci_slot *slot)
 }
 EXPORT_SYMBOL_GPL(pci_reset_slot);
 
+/**
+ * pci_try_reset_slot - Try to reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * Same as above except return -EAGAIN if the slot cannot be locked
+ */
+int pci_try_reset_slot(struct pci_slot *slot)
+{
+	int rc;
+
+	rc = pci_slot_reset(slot, 1);
+	if (rc)
+		return rc;
+
+	pci_slot_save_and_disable(slot);
+
+	if (pci_slot_trylock(slot)) {
+		might_sleep();
+		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+		pci_slot_unlock(slot);
+	} else
+		rc = -EAGAIN;
+
+	pci_slot_restore(slot);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_slot);
+
 static int pci_bus_reset(struct pci_bus *bus, int probe)
 {
 	if (!bus->self)
@@ -3822,6 +3753,35 @@ int pci_reset_bus(struct pci_bus *bus)
 EXPORT_SYMBOL_GPL(pci_reset_bus);
 
 /**
+ * pci_try_reset_bus - Try to reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Same as above except return -EAGAIN if the bus cannot be locked
+ */
+int pci_try_reset_bus(struct pci_bus *bus)
+{
+	int rc;
+
+	rc = pci_bus_reset(bus, 1);
+	if (rc)
+		return rc;
+
+	pci_bus_save_and_disable(bus);
+
+	if (pci_bus_trylock(bus)) {
+		might_sleep();
+		pci_reset_bridge_secondary_bus(bus->self);
+		pci_bus_unlock(bus);
+	} else
+		rc = -EAGAIN;
+
+	pci_bus_restore(bus);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_bus);
+
+/**
  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
  * @dev: PCI device to query
  *
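
A hypothetical consumer of the two new exported interfaces, preferring the narrower slot reset when the device sits in a hotplug slot and falling back to resetting the parent bus otherwise (roughly the policy a pass-through driver might want):

/* Hypothetical: pick the least disruptive try-reset available. */
static int example_try_slot_or_bus_reset(struct pci_dev *pdev)
{
	if (pdev->slot)
		return pci_try_reset_slot(pdev->slot);

	return pci_try_reset_bus(pdev->bus);
}
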
@@ -4450,7 +4410,6 @@ EXPORT_SYMBOL(pci_restore_state);
 EXPORT_SYMBOL(pci_pme_capable);
 EXPORT_SYMBOL(pci_pme_active);
 EXPORT_SYMBOL(pci_wake_from_d3);
-EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
 EXPORT_SYMBOL(pci_back_from_sleep);
 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);