Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--	drivers/pci/pci.c	339
1 file changed, 265 insertions, 74 deletions
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0bc27e059019..1531f3a49879 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,8 +19,8 @@
 #include <linux/pci-aspm.h>
 #include <linux/pm_wakeup.h>
 #include <linux/interrupt.h>
-#include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include <linux/device.h>
+#include <linux/pm_runtime.h>
 #include <asm/setup.h>
 #include "pci.h"
 
@@ -29,7 +29,23 @@ const char *pci_power_names[] = {
 };
 EXPORT_SYMBOL_GPL(pci_power_names);
 
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+	unsigned int delay = dev->d3_delay;
+
+	if (delay < pci_pm_d3_delay)
+		delay = pci_pm_d3_delay;
+
+	msleep(delay);
+}
 
 #ifdef CONFIG_PCI_DOMAINS
 int pci_domains_supported = 1;
@@ -287,6 +303,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
 }
 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 
+/**
+ * pci_bus_find_ext_capability - find an extended capability
+ * @bus: the PCI bus to query
+ * @devfn: PCI device to query
+ * @cap: capability code
+ *
+ * Like pci_find_ext_capability() but works for pci devices that do not have a
+ * pci_dev structure set up yet.
+ *
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it.
+ */
+int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
+				int cap)
+{
+	u32 header;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
+		return 0;
+	if (header == 0xffffffff || header == 0)
+		return 0;
+
+	while (ttl-- > 0) {
+		if (PCI_EXT_CAP_ID(header) == cap)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (pos < PCI_CFG_SPACE_SIZE)
+			break;
+
+		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
+			break;
+	}
+
+	return 0;
+}
+
 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 {
 	int rc, ttl = PCI_FIND_CAP_TTL;
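For illustration only, not part of the patch: early platform code that has only a bus/devfn pair could probe a function's extended capabilities with the new helper roughly as follows. The function name, the message, and the choice of the AER capability ID are examples, not anything this commit adds.

/* Illustrative sketch: look for the Advanced Error Reporting extended
 * capability before the device's struct pci_dev has been created. */
static void __init example_check_aer(struct pci_bus *bus, unsigned int devfn)
{
	int pos = pci_bus_find_ext_capability(bus, devfn, PCI_EXT_CAP_ID_ERR);

	if (pos)
		printk(KERN_INFO "pci %02x.%d: AER capability at %#x\n",
		       PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
}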
@@ -370,10 +429,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 {
 	const struct pci_bus *bus = dev->bus;
 	int i;
-	struct resource *best = NULL;
+	struct resource *best = NULL, *r;
 
-	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *r = bus->resource[i];
+	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
 		if (res->start && !(res->start >= r->start && res->end <= r->end))
@@ -447,6 +505,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 }
 
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return pci_platform_pm ?
+			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
 /**
  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
  *			     given PCI device
@@ -522,7 +586,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
-		msleep(pci_pm_d3_delay);
+		pci_dev_d3_sleep(dev);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 		udelay(PCI_PM_D2_DELAY);
 
@@ -1153,11 +1217,11 @@ pci_disable_device(struct pci_dev *dev)
 
 /**
  * pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
  *
  *
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
  * implementation. Architecture implementations can override this.
  */
 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1232,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
 
 /**
  * pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
 *
 *
@@ -1180,6 +1244,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
 }
 
 /**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set).  Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+	int pmcsr_pos;
+	u16 pmcsr;
+	bool ret = false;
+
+	if (!dev->pm_cap)
+		return false;
+
+	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+		return false;
+
+	/* Clear PME status. */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS;
+	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+		/* Disable PME to avoid interrupt flood. */
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+		ret = true;
+	}
+
+	pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+	return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @ign: Ignored.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+{
+	if (pci_check_pme_status(dev))
+		pm_request_resume(&dev->dev);
+	return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+	if (bus)
+		pci_walk_bus(bus, pci_pme_wakeup, NULL);
+}
+
+/**
  * pci_pme_capable - check the capability of PCI device to generate PME#
  * @dev: PCI device to handle.
  * @state: PCI state from which device will issue PME#.
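As an editor's aside (hypothetical, not taken from the patch): a root-port PME interrupt handler could combine the two exported helpers roughly like this, clearing the port's own PME status and then waking the devices behind it.

/* Hypothetical sketch of a PME interrupt handler built on the new helpers. */
static irqreturn_t example_pme_irq(int irq, void *data)
{
	struct pci_dev *port = data;

	if (pci_check_pme_status(port))
		pm_request_resume(&port->dev);

	/* Devices below the port may have signalled PME as well. */
	pci_pme_wakeup_bus(port->subordinate);

	return IRQ_HANDLED;
}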
@@ -1220,9 +1344,10 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 }
 
 /**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
  * @dev: PCI device affected
  * @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
  * @enable: True to enable event generation; false to disable
  *
  * This enables the device as a wakeup event source, or disables it.
@@ -1238,11 +1363,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
  * Error code depending on the platform is returned if both the platform and
  * the native mechanism fail to enable the generation of wake-up events
  */
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+		      bool runtime, bool enable)
 {
 	int ret = 0;
 
-	if (enable && !device_may_wakeup(&dev->dev))
+	if (enable && !runtime && !device_may_wakeup(&dev->dev))
 		return -EINVAL;
 
 	/* Don't do the same thing twice in a row for one device. */
@@ -1262,19 +1388,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 			pci_pme_active(dev, true);
 		else
 			ret = 1;
-		error = platform_pci_sleep_wake(dev, true);
+		error = runtime ? platform_pci_run_wake(dev, true) :
+					platform_pci_sleep_wake(dev, true);
 		if (ret)
 			ret = error;
 		if (!ret)
 			dev->wakeup_prepared = true;
 	} else {
-		platform_pci_sleep_wake(dev, false);
+		if (runtime)
+			platform_pci_run_wake(dev, false);
+		else
+			platform_pci_sleep_wake(dev, false);
 		pci_pme_active(dev, false);
 		dev->wakeup_prepared = false;
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL(__pci_enable_wake);
 
 /**
  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
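Editor's note: EXPORT_SYMBOL(pci_enable_wake) is dropped further down, so existing callers presumably keep working through a thin wrapper along these lines (a sketch, not text from this patch):

/* Sketch of a compatibility wrapper: system-sleep callers keep the old
 * three-argument form, with the new 'runtime' flag hard-wired to false. */
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
				  bool enable)
{
	return __pci_enable_wake(dev, state, false, enable);
}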
@@ -1384,6 +1515,66 @@ int pci_back_from_sleep(struct pci_dev *dev)
 }
 
 /**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+	pci_power_t target_state = pci_target_state(dev);
+	int error;
+
+	if (target_state == PCI_POWER_ERROR)
+		return -EIO;
+
+	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		__pci_enable_wake(dev, target_state, true, false);
+
+	return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is capable of generating wake-up events
+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+
+	if (device_run_wake(&dev->dev))
+		return true;
+
+	if (!dev->pme_support)
+		return false;
+
+	while (bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (device_run_wake(&bridge->dev))
+			return true;
+
+		bus = bus->parent;
+	}
+
+	/* We have reached the root bus. */
+	if (bus->bridge)
+		return device_run_wake(bus->bridge);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
+/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
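For context, a hypothetical sketch (not from this file) of how a runtime-suspend path could tie the two new helpers together, refusing to suspend a device that has no way to signal a run-time wake-up:

/* Hypothetical runtime-suspend step built on the new helpers. */
static int example_runtime_suspend(struct pci_dev *dev)
{
	if (!pci_dev_run_wake(dev))
		return -EBUSY;	/* nothing can wake us up, stay in D0 */

	pci_save_state(dev);
	return pci_finish_runtime_suspend(dev);	/* arm wake-up, enter low power */
}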
@@ -1392,7 +1583,10 @@ void pci_pm_init(struct pci_dev *dev)
 	int pm;
 	u16 pmc;
 
+	pm_runtime_forbid(&dev->dev);
+	device_enable_async_suspend(&dev->dev);
 	dev->wakeup_prepared = false;
+
 	dev->pm_cap = 0;
 
 	/* find PCI PM capability in list */
@@ -1409,6 +1603,7 @@ void pci_pm_init(struct pci_dev *dev)
 	}
 
 	dev->pm_cap = pm;
+	dev->d3_delay = PCI_PM_D3_WAIT;
 
 	dev->d1_support = false;
 	dev->d2_support = false;
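The new per-device d3_delay field gives quirks a knob for hardware that needs longer than the default 10 ms (PCI_PM_D3_WAIT) after D3hot; an illustrative fixup with made-up vendor/device IDs and delay might look like:

/* Illustrative quirk: a hypothetical device that needs extra D3hot settle time. */
static void quirk_slow_d3(struct pci_dev *dev)
{
	dev->d3_delay = 120;	/* ms, instead of the default PCI_PM_D3_WAIT */
}
DECLARE_PCI_FIXUP_ENABLE(0x1234, 0x5678, quirk_slow_d3);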
@@ -2103,35 +2298,6 @@ void pci_msi_off(struct pci_dev *dev)
 	}
 }
 
-#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
-/*
- * These can be overridden by arch-specific implementations
- */
-int
-pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dma_mask = mask;
-	dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
-
-	return 0;
-}
-
-int
-pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dev.coherent_dma_mask = mask;
-	dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
-
-	return 0;
-}
-#endif
-
 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
 {
@@ -2247,12 +2413,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D3hot;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D0;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	return 0;
 }
@@ -2293,9 +2459,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
 	if (!probe) {
 		pci_block_user_cfg_access(dev);
 		/* block PM suspend, driver probe, etc. */
-		down(&dev->dev.sem);
+		device_lock(&dev->dev);
 	}
 
+	rc = pci_dev_specific_reset(dev, probe);
+	if (rc != -ENOTTY)
+		goto done;
+
 	rc = pcie_flr(dev, probe);
 	if (rc != -ENOTTY)
 		goto done;
@@ -2311,7 +2481,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
 	rc = pci_parent_bus_reset(dev, probe);
 done:
 	if (!probe) {
-		up(&dev->dev.sem);
+		device_unlock(&dev->dev);
 		pci_unblock_user_cfg_access(dev);
 	}
 
@@ -2406,18 +2576,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
  */
 int pcix_get_max_mmrbc(struct pci_dev *dev)
 {
-	int err, cap;
+	int cap;
 	u32 stat;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
 		return -EINVAL;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
-	if (err)
+	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
 		return -EINVAL;
 
-	return (stat & PCI_X_STATUS_MAX_READ) >> 12;
+	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
 }
 EXPORT_SYMBOL(pcix_get_max_mmrbc);
 
@@ -2430,18 +2599,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
  */
 int pcix_get_mmrbc(struct pci_dev *dev)
 {
-	int ret, cap;
-	u32 cmd;
+	int cap;
+	u16 cmd;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
 		return -EINVAL;
 
-	ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
-	if (!ret)
-		ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+		return -EINVAL;
 
-	return ret;
+	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
 }
 EXPORT_SYMBOL(pcix_get_mmrbc);
 
@@ -2456,28 +2624,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
  */
 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 {
-	int cap, err = -EINVAL;
-	u32 stat, cmd, v, o;
+	int cap;
+	u32 stat, v, o;
+	u16 cmd;
 
 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
-		goto out;
+		return -EINVAL;
 
 	v = ffs(mmrbc) - 10;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
-		goto out;
+		return -EINVAL;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
-	if (err)
-		goto out;
+	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+		return -EINVAL;
 
 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
 		return -E2BIG;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
-	if (err)
-		goto out;
+	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+		return -EINVAL;
 
 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
 	if (o != v) {
@@ -2487,10 +2654,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 
 		cmd &= ~PCI_X_CMD_MAX_READ;
 		cmd |= v << 2;
-		err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
+		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
+			return -EIO;
 	}
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(pcix_set_mmrbc);
 
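With the corrected return values both getters now report the size in bytes (512 to 4096), so a driver can clamp its PCI-X maximum memory read byte count in the obvious way; a sketch with an arbitrary 2 KB target:

/* Sketch: cap the PCI-X maximum memory read byte count at 2048 bytes. */
static void example_tune_mmrbc(struct pci_dev *dev)
{
	int max = pcix_get_max_mmrbc(dev);	/* bytes, or negative on error */

	if (max > 0 && pcix_get_mmrbc(dev) > 2048)
		pcix_set_mmrbc(dev, min(max, 2048));
}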
@@ -2600,6 +2767,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
 	return 0;
 }
 
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+	arch_set_vga_state = func;	/* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+		      unsigned int command_bits, bool change_bridge)
+{
+	if (arch_set_vga_state)
+		return arch_set_vga_state(dev, decode, command_bits,
+						change_bridge);
+	return 0;
+}
+
 /**
  * pci_set_vga_state - set VGA decode state on device and parents if requested
  * @dev: the PCI device
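An architecture that needs chipset-specific programming for VGA routing would register its hook once at boot; a hedged sketch with made-up arch-side names:

/* Hypothetical arch-side registration of a VGA decode hook. */
static int my_arch_set_vga_state(struct pci_dev *pdev, bool decode,
				 unsigned int command_bits, bool change_bridge)
{
	/* program chipset-specific VGA routing here */
	return 0;
}

static int __init my_arch_pci_init(void)
{
	pci_register_set_vga_state(my_arch_set_vga_state);
	return 0;
}
arch_initcall(my_arch_pci_init);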
@@ -2613,9 +2797,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 	struct pci_bus *bus;
 	struct pci_dev *bridge;
 	u16 cmd;
+	int rc;
 
 	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
 
+	/* ARCH specific VGA enables */
+	rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+	if (rc)
+		return rc;
+
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	if (decode == true)
 		cmd |= command_bits;
@@ -2779,6 +2969,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
 	return 1;
 }
 
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
 static int __init pci_setup(char *str)
 {
 	while (str) {
@@ -2840,8 +3035,6 @@ EXPORT_SYMBOL(pci_set_mwi);
 EXPORT_SYMBOL(pci_try_set_mwi);
 EXPORT_SYMBOL(pci_clear_mwi);
 EXPORT_SYMBOL_GPL(pci_intx);
-EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_set_consistent_dma_mask);
 EXPORT_SYMBOL(pci_assign_resource);
 EXPORT_SYMBOL(pci_find_parent_resource);
 EXPORT_SYMBOL(pci_select_bars);
@@ -2851,10 +3044,8 @@ EXPORT_SYMBOL(pci_save_state);
 EXPORT_SYMBOL(pci_restore_state);
 EXPORT_SYMBOL(pci_pme_capable);
 EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_enable_wake);
 EXPORT_SYMBOL(pci_wake_from_d3);
 EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
 EXPORT_SYMBOL(pci_back_from_sleep);
 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-