diff options
author | Michal Marek <mmarek@suse.cz> | 2011-03-09 10:15:44 -0500 |
---|---|---|
committer | Michal Marek <mmarek@suse.cz> | 2011-03-09 10:15:44 -0500 |
commit | 2d8ad8719591fa803b0d589ed057fa46f49b7155 (patch) | |
tree | 4ae051577dad1161c91dafbf4207bb10a9dc91bb /drivers/pci/pci.c | |
parent | 9b4ce7bce5f30712fd926ab4599a803314a07719 (diff) | |
parent | c56eb8fb6dccb83d9fe62fd4dc00c834de9bc470 (diff) |
Merge commit 'v2.6.38-rc1' into kbuild/packaging
Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r-- | drivers/pci/pci.c | 423 |
1 files changed, 337 insertions, 86 deletions
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 315fea47e784..b714d787bddd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/pm.h> | 14 | #include <linux/pm.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
@@ -19,8 +20,8 @@ | |||
19 | #include <linux/pci-aspm.h> | 20 | #include <linux/pci-aspm.h> |
20 | #include <linux/pm_wakeup.h> | 21 | #include <linux/pm_wakeup.h> |
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
22 | #include <asm/dma.h> /* isa_dma_bridge_buggy */ | ||
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/pm_runtime.h> | ||
24 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
25 | #include "pci.h" | 26 | #include "pci.h" |
26 | 27 | ||
@@ -29,8 +30,27 @@ const char *pci_power_names[] = { | |||
29 | }; | 30 | }; |
30 | EXPORT_SYMBOL_GPL(pci_power_names); | 31 | EXPORT_SYMBOL_GPL(pci_power_names); |
31 | 32 | ||
33 | int isa_dma_bridge_buggy; | ||
34 | EXPORT_SYMBOL(isa_dma_bridge_buggy); | ||
35 | |||
36 | int pci_pci_problems; | ||
37 | EXPORT_SYMBOL(pci_pci_problems); | ||
38 | |||
32 | unsigned int pci_pm_d3_delay; | 39 | unsigned int pci_pm_d3_delay; |
33 | 40 | ||
41 | static void pci_pme_list_scan(struct work_struct *work); | ||
42 | |||
43 | static LIST_HEAD(pci_pme_list); | ||
44 | static DEFINE_MUTEX(pci_pme_list_mutex); | ||
45 | static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); | ||
46 | |||
47 | struct pci_pme_device { | ||
48 | struct list_head list; | ||
49 | struct pci_dev *dev; | ||
50 | }; | ||
51 | |||
52 | #define PME_TIMEOUT 1000 /* How long between PME checks */ | ||
53 | |||
34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | 54 | static void pci_dev_d3_sleep(struct pci_dev *dev) |
35 | { | 55 | { |
36 | unsigned int delay = dev->d3_delay; | 56 | unsigned int delay = dev->d3_delay; |
@@ -297,6 +317,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) | |||
297 | } | 317 | } |
298 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); | 318 | EXPORT_SYMBOL_GPL(pci_find_ext_capability); |
299 | 319 | ||
320 | /** | ||
321 | * pci_bus_find_ext_capability - find an extended capability | ||
322 | * @bus: the PCI bus to query | ||
323 | * @devfn: PCI device to query | ||
324 | * @cap: capability code | ||
325 | * | ||
326 | * Like pci_find_ext_capability() but works for pci devices that do not have a | ||
327 | * pci_dev structure set up yet. | ||
328 | * | ||
329 | * Returns the address of the requested capability structure within the | ||
330 | * device's PCI configuration space or 0 in case the device does not | ||
331 | * support it. | ||
332 | */ | ||
333 | int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn, | ||
334 | int cap) | ||
335 | { | ||
336 | u32 header; | ||
337 | int ttl; | ||
338 | int pos = PCI_CFG_SPACE_SIZE; | ||
339 | |||
340 | /* minimum 8 bytes per capability */ | ||
341 | ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; | ||
342 | |||
343 | if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) | ||
344 | return 0; | ||
345 | if (header == 0xffffffff || header == 0) | ||
346 | return 0; | ||
347 | |||
348 | while (ttl-- > 0) { | ||
349 | if (PCI_EXT_CAP_ID(header) == cap) | ||
350 | return pos; | ||
351 | |||
352 | pos = PCI_EXT_CAP_NEXT(header); | ||
353 | if (pos < PCI_CFG_SPACE_SIZE) | ||
354 | break; | ||
355 | |||
356 | if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) | ||
357 | break; | ||
358 | } | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
300 | static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) | 363 | static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) |
301 | { | 364 | { |
302 | int rc, ttl = PCI_FIND_CAP_TTL; | 365 | int rc, ttl = PCI_FIND_CAP_TTL; |
@@ -380,10 +443,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) | |||
380 | { | 443 | { |
381 | const struct pci_bus *bus = dev->bus; | 444 | const struct pci_bus *bus = dev->bus; |
382 | int i; | 445 | int i; |
383 | struct resource *best = NULL; | 446 | struct resource *best = NULL, *r; |
384 | 447 | ||
385 | for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 448 | pci_bus_for_each_resource(bus, r, i) { |
386 | struct resource *r = bus->resource[i]; | ||
387 | if (!r) | 449 | if (!r) |
388 | continue; | 450 | continue; |
389 | if (res->start && !(res->start >= r->start && res->end <= r->end)) | 451 | if (res->start && !(res->start >= r->start && res->end <= r->end)) |
@@ -457,6 +519,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
457 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; | 519 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; |
458 | } | 520 | } |
459 | 521 | ||
522 | static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | ||
523 | { | ||
524 | return pci_platform_pm ? | ||
525 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | ||
526 | } | ||
527 | |||
460 | /** | 528 | /** |
461 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 529 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
462 | * given PCI device | 530 | * given PCI device |
@@ -624,7 +692,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
624 | */ | 692 | */ |
625 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) | 693 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
626 | { | 694 | { |
627 | return state > PCI_D0 ? | 695 | return state >= PCI_D0 ? |
628 | pci_platform_power_transition(dev, state) : -EINVAL; | 696 | pci_platform_power_transition(dev, state) : -EINVAL; |
629 | } | 697 | } |
630 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); | 698 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
@@ -661,10 +729,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
661 | */ | 729 | */ |
662 | return 0; | 730 | return 0; |
663 | 731 | ||
664 | /* Check if we're already there */ | ||
665 | if (dev->current_state == state) | ||
666 | return 0; | ||
667 | |||
668 | __pci_start_power_transition(dev, state); | 732 | __pci_start_power_transition(dev, state); |
669 | 733 | ||
670 | /* This device is quirked not to be put into D3, so | 734 | /* This device is quirked not to be put into D3, so |
@@ -873,14 +937,13 @@ pci_save_state(struct pci_dev *dev) | |||
873 | * pci_restore_state - Restore the saved state of a PCI device | 937 | * pci_restore_state - Restore the saved state of a PCI device |
874 | * @dev: - PCI device that we're dealing with | 938 | * @dev: - PCI device that we're dealing with |
875 | */ | 939 | */ |
876 | int | 940 | void pci_restore_state(struct pci_dev *dev) |
877 | pci_restore_state(struct pci_dev *dev) | ||
878 | { | 941 | { |
879 | int i; | 942 | int i; |
880 | u32 val; | 943 | u32 val; |
881 | 944 | ||
882 | if (!dev->state_saved) | 945 | if (!dev->state_saved) |
883 | return 0; | 946 | return; |
884 | 947 | ||
885 | /* PCI Express register must be restored first */ | 948 | /* PCI Express register must be restored first */ |
886 | pci_restore_pcie_state(dev); | 949 | pci_restore_pcie_state(dev); |
@@ -904,8 +967,6 @@ pci_restore_state(struct pci_dev *dev) | |||
904 | pci_restore_iov_state(dev); | 967 | pci_restore_iov_state(dev); |
905 | 968 | ||
906 | dev->state_saved = false; | 969 | dev->state_saved = false; |
907 | |||
908 | return 0; | ||
909 | } | 970 | } |
910 | 971 | ||
911 | static int do_pci_enable_device(struct pci_dev *dev, int bars) | 972 | static int do_pci_enable_device(struct pci_dev *dev, int bars) |
@@ -943,6 +1004,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, | |||
943 | int err; | 1004 | int err; |
944 | int i, bars = 0; | 1005 | int i, bars = 0; |
945 | 1006 | ||
1007 | /* | ||
1008 | * Power state could be unknown at this point, either due to a fresh | ||
1009 | * boot or a device removal call. So get the current power state | ||
1010 | * so that things like MSI message writing will behave as expected | ||
1011 | * (e.g. if the device really is in D0 at enable time). | ||
1012 | */ | ||
1013 | if (dev->pm_cap) { | ||
1014 | u16 pmcsr; | ||
1015 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
1016 | dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); | ||
1017 | } | ||
1018 | |||
946 | if (atomic_add_return(1, &dev->enable_cnt) > 1) | 1019 | if (atomic_add_return(1, &dev->enable_cnt) > 1) |
947 | return 0; /* already enabled */ | 1020 | return 0; /* already enabled */ |
948 | 1021 | ||
@@ -1142,7 +1215,7 @@ void pci_disable_enabled_device(struct pci_dev *dev) | |||
1142 | * anymore. This only involves disabling PCI bus-mastering, if active. | 1215 | * anymore. This only involves disabling PCI bus-mastering, if active. |
1143 | * | 1216 | * |
1144 | * Note we don't actually disable the device until all callers of | 1217 | * Note we don't actually disable the device until all callers of |
1145 | * pci_device_enable() have called pci_device_disable(). | 1218 | * pci_enable_device() have called pci_disable_device(). |
1146 | */ | 1219 | */ |
1147 | void | 1220 | void |
1148 | pci_disable_device(struct pci_dev *dev) | 1221 | pci_disable_device(struct pci_dev *dev) |
@@ -1190,6 +1263,68 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | |||
1190 | } | 1263 | } |
1191 | 1264 | ||
1192 | /** | 1265 | /** |
1266 | * pci_check_pme_status - Check if given device has generated PME. | ||
1267 | * @dev: Device to check. | ||
1268 | * | ||
1269 | * Check the PME status of the device and if set, clear it and clear PME enable | ||
1270 | * (if set). Return 'true' if PME status and PME enable were both set or | ||
1271 | * 'false' otherwise. | ||
1272 | */ | ||
1273 | bool pci_check_pme_status(struct pci_dev *dev) | ||
1274 | { | ||
1275 | int pmcsr_pos; | ||
1276 | u16 pmcsr; | ||
1277 | bool ret = false; | ||
1278 | |||
1279 | if (!dev->pm_cap) | ||
1280 | return false; | ||
1281 | |||
1282 | pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; | ||
1283 | pci_read_config_word(dev, pmcsr_pos, &pmcsr); | ||
1284 | if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) | ||
1285 | return false; | ||
1286 | |||
1287 | /* Clear PME status. */ | ||
1288 | pmcsr |= PCI_PM_CTRL_PME_STATUS; | ||
1289 | if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { | ||
1290 | /* Disable PME to avoid interrupt flood. */ | ||
1291 | pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; | ||
1292 | ret = true; | ||
1293 | } | ||
1294 | |||
1295 | pci_write_config_word(dev, pmcsr_pos, pmcsr); | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1300 | /** | ||
1301 | * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. | ||
1302 | * @dev: Device to handle. | ||
1303 | * @ign: Ignored. | ||
1304 | * | ||
1305 | * Check if @dev has generated PME and queue a resume request for it in that | ||
1306 | * case. | ||
1307 | */ | ||
1308 | static int pci_pme_wakeup(struct pci_dev *dev, void *ign) | ||
1309 | { | ||
1310 | if (pci_check_pme_status(dev)) { | ||
1311 | pci_wakeup_event(dev); | ||
1312 | pm_request_resume(&dev->dev); | ||
1313 | } | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | /** | ||
1318 | * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. | ||
1319 | * @bus: Top bus of the subtree to walk. | ||
1320 | */ | ||
1321 | void pci_pme_wakeup_bus(struct pci_bus *bus) | ||
1322 | { | ||
1323 | if (bus) | ||
1324 | pci_walk_bus(bus, pci_pme_wakeup, NULL); | ||
1325 | } | ||
1326 | |||
1327 | /** | ||
1193 | * pci_pme_capable - check the capability of PCI device to generate PME# | 1328 | * pci_pme_capable - check the capability of PCI device to generate PME# |
1194 | * @dev: PCI device to handle. | 1329 | * @dev: PCI device to handle. |
1195 | * @state: PCI state from which device will issue PME#. | 1330 | * @state: PCI state from which device will issue PME#. |
@@ -1202,6 +1337,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) | |||
1202 | return !!(dev->pme_support & (1 << state)); | 1337 | return !!(dev->pme_support & (1 << state)); |
1203 | } | 1338 | } |
1204 | 1339 | ||
1340 | static void pci_pme_list_scan(struct work_struct *work) | ||
1341 | { | ||
1342 | struct pci_pme_device *pme_dev; | ||
1343 | |||
1344 | mutex_lock(&pci_pme_list_mutex); | ||
1345 | if (!list_empty(&pci_pme_list)) { | ||
1346 | list_for_each_entry(pme_dev, &pci_pme_list, list) | ||
1347 | pci_pme_wakeup(pme_dev->dev, NULL); | ||
1348 | schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); | ||
1349 | } | ||
1350 | mutex_unlock(&pci_pme_list_mutex); | ||
1351 | } | ||
1352 | |||
1353 | /** | ||
1354 | * pci_external_pme - is a device an external PCI PME source? | ||
1355 | * @dev: PCI device to check | ||
1356 | * | ||
1357 | */ | ||
1358 | |||
1359 | static bool pci_external_pme(struct pci_dev *dev) | ||
1360 | { | ||
1361 | if (pci_is_pcie(dev) || dev->bus->number == 0) | ||
1362 | return false; | ||
1363 | return true; | ||
1364 | } | ||
1365 | |||
1205 | /** | 1366 | /** |
1206 | * pci_pme_active - enable or disable PCI device's PME# function | 1367 | * pci_pme_active - enable or disable PCI device's PME# function |
1207 | * @dev: PCI device to handle. | 1368 | * @dev: PCI device to handle. |
@@ -1225,14 +1386,53 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1225 | 1386 | ||
1226 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | 1387 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
1227 | 1388 | ||
1389 | /* PCI (as opposed to PCIe) PME requires that the device have | ||
1390 | its PME# line hooked up correctly. Not all hardware vendors | ||
1391 | do this, so the PME never gets delivered and the device | ||
1392 | remains asleep. The easiest way around this is to | ||
1393 | periodically walk the list of suspended devices and check | ||
1394 | whether any have their PME flag set. The assumption is that | ||
1395 | we'll wake up often enough anyway that this won't be a huge | ||
1396 | hit, and the power savings from the devices will still be a | ||
1397 | win. */ | ||
1398 | |||
1399 | if (pci_external_pme(dev)) { | ||
1400 | struct pci_pme_device *pme_dev; | ||
1401 | if (enable) { | ||
1402 | pme_dev = kmalloc(sizeof(struct pci_pme_device), | ||
1403 | GFP_KERNEL); | ||
1404 | if (!pme_dev) | ||
1405 | goto out; | ||
1406 | pme_dev->dev = dev; | ||
1407 | mutex_lock(&pci_pme_list_mutex); | ||
1408 | list_add(&pme_dev->list, &pci_pme_list); | ||
1409 | if (list_is_singular(&pci_pme_list)) | ||
1410 | schedule_delayed_work(&pci_pme_work, | ||
1411 | msecs_to_jiffies(PME_TIMEOUT)); | ||
1412 | mutex_unlock(&pci_pme_list_mutex); | ||
1413 | } else { | ||
1414 | mutex_lock(&pci_pme_list_mutex); | ||
1415 | list_for_each_entry(pme_dev, &pci_pme_list, list) { | ||
1416 | if (pme_dev->dev == dev) { | ||
1417 | list_del(&pme_dev->list); | ||
1418 | kfree(pme_dev); | ||
1419 | break; | ||
1420 | } | ||
1421 | } | ||
1422 | mutex_unlock(&pci_pme_list_mutex); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | out: | ||
1228 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", | 1427 | dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", |
1229 | enable ? "enabled" : "disabled"); | 1428 | enable ? "enabled" : "disabled"); |
1230 | } | 1429 | } |
1231 | 1430 | ||
1232 | /** | 1431 | /** |
1233 | * pci_enable_wake - enable PCI device as wakeup event source | 1432 | * __pci_enable_wake - enable PCI device as wakeup event source |
1234 | * @dev: PCI device affected | 1433 | * @dev: PCI device affected |
1235 | * @state: PCI state from which device will issue wakeup events | 1434 | * @state: PCI state from which device will issue wakeup events |
1435 | * @runtime: True if the events are to be generated at run time | ||
1236 | * @enable: True to enable event generation; false to disable | 1436 | * @enable: True to enable event generation; false to disable |
1237 | * | 1437 | * |
1238 | * This enables the device as a wakeup event source, or disables it. | 1438 | * This enables the device as a wakeup event source, or disables it. |
@@ -1248,11 +1448,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1248 | * Error code depending on the platform is returned if both the platform and | 1448 | * Error code depending on the platform is returned if both the platform and |
1249 | * the native mechanism fail to enable the generation of wake-up events | 1449 | * the native mechanism fail to enable the generation of wake-up events |
1250 | */ | 1450 | */ |
1251 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1451 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, |
1452 | bool runtime, bool enable) | ||
1252 | { | 1453 | { |
1253 | int ret = 0; | 1454 | int ret = 0; |
1254 | 1455 | ||
1255 | if (enable && !device_may_wakeup(&dev->dev)) | 1456 | if (enable && !runtime && !device_may_wakeup(&dev->dev)) |
1256 | return -EINVAL; | 1457 | return -EINVAL; |
1257 | 1458 | ||
1258 | /* Don't do the same thing twice in a row for one device. */ | 1459 | /* Don't do the same thing twice in a row for one device. */ |
@@ -1272,19 +1473,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | |||
1272 | pci_pme_active(dev, true); | 1473 | pci_pme_active(dev, true); |
1273 | else | 1474 | else |
1274 | ret = 1; | 1475 | ret = 1; |
1275 | error = platform_pci_sleep_wake(dev, true); | 1476 | error = runtime ? platform_pci_run_wake(dev, true) : |
1477 | platform_pci_sleep_wake(dev, true); | ||
1276 | if (ret) | 1478 | if (ret) |
1277 | ret = error; | 1479 | ret = error; |
1278 | if (!ret) | 1480 | if (!ret) |
1279 | dev->wakeup_prepared = true; | 1481 | dev->wakeup_prepared = true; |
1280 | } else { | 1482 | } else { |
1281 | platform_pci_sleep_wake(dev, false); | 1483 | if (runtime) |
1484 | platform_pci_run_wake(dev, false); | ||
1485 | else | ||
1486 | platform_pci_sleep_wake(dev, false); | ||
1282 | pci_pme_active(dev, false); | 1487 | pci_pme_active(dev, false); |
1283 | dev->wakeup_prepared = false; | 1488 | dev->wakeup_prepared = false; |
1284 | } | 1489 | } |
1285 | 1490 | ||
1286 | return ret; | 1491 | return ret; |
1287 | } | 1492 | } |
1493 | EXPORT_SYMBOL(__pci_enable_wake); | ||
1288 | 1494 | ||
1289 | /** | 1495 | /** |
1290 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | 1496 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
@@ -1385,7 +1591,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev) | |||
1385 | * pci_back_from_sleep - turn PCI device on during system-wide transition into working state | 1591 | * pci_back_from_sleep - turn PCI device on during system-wide transition into working state |
1386 | * @dev: Device to handle. | 1592 | * @dev: Device to handle. |
1387 | * | 1593 | * |
1388 | * Disable device's sytem wake-up capability and put it into D0. | 1594 | * Disable device's system wake-up capability and put it into D0. |
1389 | */ | 1595 | */ |
1390 | int pci_back_from_sleep(struct pci_dev *dev) | 1596 | int pci_back_from_sleep(struct pci_dev *dev) |
1391 | { | 1597 | { |
@@ -1394,6 +1600,66 @@ int pci_back_from_sleep(struct pci_dev *dev) | |||
1394 | } | 1600 | } |
1395 | 1601 | ||
1396 | /** | 1602 | /** |
1603 | * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. | ||
1604 | * @dev: PCI device being suspended. | ||
1605 | * | ||
1606 | * Prepare @dev to generate wake-up events at run time and put it into a low | ||
1607 | * power state. | ||
1608 | */ | ||
1609 | int pci_finish_runtime_suspend(struct pci_dev *dev) | ||
1610 | { | ||
1611 | pci_power_t target_state = pci_target_state(dev); | ||
1612 | int error; | ||
1613 | |||
1614 | if (target_state == PCI_POWER_ERROR) | ||
1615 | return -EIO; | ||
1616 | |||
1617 | __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); | ||
1618 | |||
1619 | error = pci_set_power_state(dev, target_state); | ||
1620 | |||
1621 | if (error) | ||
1622 | __pci_enable_wake(dev, target_state, true, false); | ||
1623 | |||
1624 | return error; | ||
1625 | } | ||
1626 | |||
1627 | /** | ||
1628 | * pci_dev_run_wake - Check if device can generate run-time wake-up events. | ||
1629 | * @dev: Device to check. | ||
1630 | * | ||
1631 | * Return true if the device itself is capable of generating wake-up events | ||
1632 | * (through the platform or using the native PCIe PME) or if the device supports | ||
1633 | * PME and one of its upstream bridges can generate wake-up events. | ||
1634 | */ | ||
1635 | bool pci_dev_run_wake(struct pci_dev *dev) | ||
1636 | { | ||
1637 | struct pci_bus *bus = dev->bus; | ||
1638 | |||
1639 | if (device_run_wake(&dev->dev)) | ||
1640 | return true; | ||
1641 | |||
1642 | if (!dev->pme_support) | ||
1643 | return false; | ||
1644 | |||
1645 | while (bus->parent) { | ||
1646 | struct pci_dev *bridge = bus->self; | ||
1647 | |||
1648 | if (device_run_wake(&bridge->dev)) | ||
1649 | return true; | ||
1650 | |||
1651 | bus = bus->parent; | ||
1652 | } | ||
1653 | |||
1654 | /* We have reached the root bus. */ | ||
1655 | if (bus->bridge) | ||
1656 | return device_run_wake(bus->bridge); | ||
1657 | |||
1658 | return false; | ||
1659 | } | ||
1660 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); | ||
1661 | |||
1662 | /** | ||
1397 | * pci_pm_init - Initialize PM functions of given PCI device | 1663 | * pci_pm_init - Initialize PM functions of given PCI device |
1398 | * @dev: PCI device to handle. | 1664 | * @dev: PCI device to handle. |
1399 | */ | 1665 | */ |
@@ -1402,7 +1668,10 @@ void pci_pm_init(struct pci_dev *dev) | |||
1402 | int pm; | 1668 | int pm; |
1403 | u16 pmc; | 1669 | u16 pmc; |
1404 | 1670 | ||
1671 | pm_runtime_forbid(&dev->dev); | ||
1672 | device_enable_async_suspend(&dev->dev); | ||
1405 | dev->wakeup_prepared = false; | 1673 | dev->wakeup_prepared = false; |
1674 | |||
1406 | dev->pm_cap = 0; | 1675 | dev->pm_cap = 0; |
1407 | 1676 | ||
1408 | /* find PCI PM capability in list */ | 1677 | /* find PCI PM capability in list */ |
@@ -1450,7 +1719,6 @@ void pci_pm_init(struct pci_dev *dev) | |||
1450 | * let the user space enable it to wake up the system as needed. | 1719 | * let the user space enable it to wake up the system as needed. |
1451 | */ | 1720 | */ |
1452 | device_set_wakeup_capable(&dev->dev, true); | 1721 | device_set_wakeup_capable(&dev->dev, true); |
1453 | device_set_wakeup_enable(&dev->dev, false); | ||
1454 | /* Disable the PME# generation functionality */ | 1722 | /* Disable the PME# generation functionality */ |
1455 | pci_pme_active(dev, false); | 1723 | pci_pme_active(dev, false); |
1456 | } else { | 1724 | } else { |
@@ -1474,7 +1742,6 @@ void platform_pci_wakeup_init(struct pci_dev *dev) | |||
1474 | return; | 1742 | return; |
1475 | 1743 | ||
1476 | device_set_wakeup_capable(&dev->dev, true); | 1744 | device_set_wakeup_capable(&dev->dev, true); |
1477 | device_set_wakeup_enable(&dev->dev, false); | ||
1478 | platform_pci_sleep_wake(dev, false); | 1745 | platform_pci_sleep_wake(dev, false); |
1479 | } | 1746 | } |
1480 | 1747 | ||
@@ -2113,51 +2380,19 @@ void pci_msi_off(struct pci_dev *dev) | |||
2113 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 2380 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
2114 | } | 2381 | } |
2115 | } | 2382 | } |
2383 | EXPORT_SYMBOL_GPL(pci_msi_off); | ||
2116 | 2384 | ||
2117 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK | ||
2118 | /* | ||
2119 | * These can be overridden by arch-specific implementations | ||
2120 | */ | ||
2121 | int | ||
2122 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
2123 | { | ||
2124 | if (!pci_dma_supported(dev, mask)) | ||
2125 | return -EIO; | ||
2126 | |||
2127 | dev->dma_mask = mask; | ||
2128 | dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask)); | ||
2129 | |||
2130 | return 0; | ||
2131 | } | ||
2132 | |||
2133 | int | ||
2134 | pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | ||
2135 | { | ||
2136 | if (!pci_dma_supported(dev, mask)) | ||
2137 | return -EIO; | ||
2138 | |||
2139 | dev->dev.coherent_dma_mask = mask; | ||
2140 | dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask)); | ||
2141 | |||
2142 | return 0; | ||
2143 | } | ||
2144 | #endif | ||
2145 | |||
2146 | #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE | ||
2147 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) | 2385 | int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) |
2148 | { | 2386 | { |
2149 | return dma_set_max_seg_size(&dev->dev, size); | 2387 | return dma_set_max_seg_size(&dev->dev, size); |
2150 | } | 2388 | } |
2151 | EXPORT_SYMBOL(pci_set_dma_max_seg_size); | 2389 | EXPORT_SYMBOL(pci_set_dma_max_seg_size); |
2152 | #endif | ||
2153 | 2390 | ||
2154 | #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY | ||
2155 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) | 2391 | int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask) |
2156 | { | 2392 | { |
2157 | return dma_set_seg_boundary(&dev->dev, mask); | 2393 | return dma_set_seg_boundary(&dev->dev, mask); |
2158 | } | 2394 | } |
2159 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); | 2395 | EXPORT_SYMBOL(pci_set_dma_seg_boundary); |
2160 | #endif | ||
2161 | 2396 | ||
2162 | static int pcie_flr(struct pci_dev *dev, int probe) | 2397 | static int pcie_flr(struct pci_dev *dev, int probe) |
2163 | { | 2398 | { |
@@ -2304,7 +2539,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
2304 | if (!probe) { | 2539 | if (!probe) { |
2305 | pci_block_user_cfg_access(dev); | 2540 | pci_block_user_cfg_access(dev); |
2306 | /* block PM suspend, driver probe, etc. */ | 2541 | /* block PM suspend, driver probe, etc. */ |
2307 | down(&dev->dev.sem); | 2542 | device_lock(&dev->dev); |
2308 | } | 2543 | } |
2309 | 2544 | ||
2310 | rc = pci_dev_specific_reset(dev, probe); | 2545 | rc = pci_dev_specific_reset(dev, probe); |
@@ -2326,7 +2561,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
2326 | rc = pci_parent_bus_reset(dev, probe); | 2561 | rc = pci_parent_bus_reset(dev, probe); |
2327 | done: | 2562 | done: |
2328 | if (!probe) { | 2563 | if (!probe) { |
2329 | up(&dev->dev.sem); | 2564 | device_unlock(&dev->dev); |
2330 | pci_unblock_user_cfg_access(dev); | 2565 | pci_unblock_user_cfg_access(dev); |
2331 | } | 2566 | } |
2332 | 2567 | ||
@@ -2421,18 +2656,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function); | |||
2421 | */ | 2656 | */ |
2422 | int pcix_get_max_mmrbc(struct pci_dev *dev) | 2657 | int pcix_get_max_mmrbc(struct pci_dev *dev) |
2423 | { | 2658 | { |
2424 | int err, cap; | 2659 | int cap; |
2425 | u32 stat; | 2660 | u32 stat; |
2426 | 2661 | ||
2427 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2662 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2428 | if (!cap) | 2663 | if (!cap) |
2429 | return -EINVAL; | 2664 | return -EINVAL; |
2430 | 2665 | ||
2431 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2666 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2432 | if (err) | ||
2433 | return -EINVAL; | 2667 | return -EINVAL; |
2434 | 2668 | ||
2435 | return (stat & PCI_X_STATUS_MAX_READ) >> 12; | 2669 | return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); |
2436 | } | 2670 | } |
2437 | EXPORT_SYMBOL(pcix_get_max_mmrbc); | 2671 | EXPORT_SYMBOL(pcix_get_max_mmrbc); |
2438 | 2672 | ||
@@ -2445,18 +2679,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); | |||
2445 | */ | 2679 | */ |
2446 | int pcix_get_mmrbc(struct pci_dev *dev) | 2680 | int pcix_get_mmrbc(struct pci_dev *dev) |
2447 | { | 2681 | { |
2448 | int ret, cap; | 2682 | int cap; |
2449 | u32 cmd; | 2683 | u16 cmd; |
2450 | 2684 | ||
2451 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2685 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2452 | if (!cap) | 2686 | if (!cap) |
2453 | return -EINVAL; | 2687 | return -EINVAL; |
2454 | 2688 | ||
2455 | ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2689 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2456 | if (!ret) | 2690 | return -EINVAL; |
2457 | ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); | ||
2458 | 2691 | ||
2459 | return ret; | 2692 | return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); |
2460 | } | 2693 | } |
2461 | EXPORT_SYMBOL(pcix_get_mmrbc); | 2694 | EXPORT_SYMBOL(pcix_get_mmrbc); |
2462 | 2695 | ||
@@ -2471,28 +2704,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc); | |||
2471 | */ | 2704 | */ |
2472 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | 2705 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) |
2473 | { | 2706 | { |
2474 | int cap, err = -EINVAL; | 2707 | int cap; |
2475 | u32 stat, cmd, v, o; | 2708 | u32 stat, v, o; |
2709 | u16 cmd; | ||
2476 | 2710 | ||
2477 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) | 2711 | if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) |
2478 | goto out; | 2712 | return -EINVAL; |
2479 | 2713 | ||
2480 | v = ffs(mmrbc) - 10; | 2714 | v = ffs(mmrbc) - 10; |
2481 | 2715 | ||
2482 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 2716 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
2483 | if (!cap) | 2717 | if (!cap) |
2484 | goto out; | 2718 | return -EINVAL; |
2485 | 2719 | ||
2486 | err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat); | 2720 | if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) |
2487 | if (err) | 2721 | return -EINVAL; |
2488 | goto out; | ||
2489 | 2722 | ||
2490 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) | 2723 | if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) |
2491 | return -E2BIG; | 2724 | return -E2BIG; |
2492 | 2725 | ||
2493 | err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd); | 2726 | if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) |
2494 | if (err) | 2727 | return -EINVAL; |
2495 | goto out; | ||
2496 | 2728 | ||
2497 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; | 2729 | o = (cmd & PCI_X_CMD_MAX_READ) >> 2; |
2498 | if (o != v) { | 2730 | if (o != v) { |
@@ -2502,10 +2734,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | |||
2502 | 2734 | ||
2503 | cmd &= ~PCI_X_CMD_MAX_READ; | 2735 | cmd &= ~PCI_X_CMD_MAX_READ; |
2504 | cmd |= v << 2; | 2736 | cmd |= v << 2; |
2505 | err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd); | 2737 | if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) |
2738 | return -EIO; | ||
2506 | } | 2739 | } |
2507 | out: | 2740 | return 0; |
2508 | return err; | ||
2509 | } | 2741 | } |
2510 | EXPORT_SYMBOL(pcix_set_mmrbc); | 2742 | EXPORT_SYMBOL(pcix_set_mmrbc); |
2511 | 2743 | ||
@@ -2527,7 +2759,7 @@ int pcie_get_readrq(struct pci_dev *dev) | |||
2527 | 2759 | ||
2528 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); | 2760 | ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); |
2529 | if (!ret) | 2761 | if (!ret) |
2530 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); | 2762 | ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
2531 | 2763 | ||
2532 | return ret; | 2764 | return ret; |
2533 | } | 2765 | } |
@@ -2615,6 +2847,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | |||
2615 | return 0; | 2847 | return 0; |
2616 | } | 2848 | } |
2617 | 2849 | ||
2850 | /* Some architectures require additional programming to enable VGA */ | ||
2851 | static arch_set_vga_state_t arch_set_vga_state; | ||
2852 | |||
2853 | void __init pci_register_set_vga_state(arch_set_vga_state_t func) | ||
2854 | { | ||
2855 | arch_set_vga_state = func; /* NULL disables */ | ||
2856 | } | ||
2857 | |||
2858 | static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, | ||
2859 | unsigned int command_bits, bool change_bridge) | ||
2860 | { | ||
2861 | if (arch_set_vga_state) | ||
2862 | return arch_set_vga_state(dev, decode, command_bits, | ||
2863 | change_bridge); | ||
2864 | return 0; | ||
2865 | } | ||
2866 | |||
2618 | /** | 2867 | /** |
2619 | * pci_set_vga_state - set VGA decode state on device and parents if requested | 2868 | * pci_set_vga_state - set VGA decode state on device and parents if requested |
2620 | * @dev: the PCI device | 2869 | * @dev: the PCI device |
@@ -2628,9 +2877,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
2628 | struct pci_bus *bus; | 2877 | struct pci_bus *bus; |
2629 | struct pci_dev *bridge; | 2878 | struct pci_dev *bridge; |
2630 | u16 cmd; | 2879 | u16 cmd; |
2880 | int rc; | ||
2631 | 2881 | ||
2632 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); | 2882 | WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)); |
2633 | 2883 | ||
2884 | /* ARCH specific VGA enables */ | ||
2885 | rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge); | ||
2886 | if (rc) | ||
2887 | return rc; | ||
2888 | |||
2634 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 2889 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
2635 | if (decode == true) | 2890 | if (decode == true) |
2636 | cmd |= command_bits; | 2891 | cmd |= command_bits; |
@@ -2860,8 +3115,6 @@ EXPORT_SYMBOL(pci_set_mwi); | |||
2860 | EXPORT_SYMBOL(pci_try_set_mwi); | 3115 | EXPORT_SYMBOL(pci_try_set_mwi); |
2861 | EXPORT_SYMBOL(pci_clear_mwi); | 3116 | EXPORT_SYMBOL(pci_clear_mwi); |
2862 | EXPORT_SYMBOL_GPL(pci_intx); | 3117 | EXPORT_SYMBOL_GPL(pci_intx); |
2863 | EXPORT_SYMBOL(pci_set_dma_mask); | ||
2864 | EXPORT_SYMBOL(pci_set_consistent_dma_mask); | ||
2865 | EXPORT_SYMBOL(pci_assign_resource); | 3118 | EXPORT_SYMBOL(pci_assign_resource); |
2866 | EXPORT_SYMBOL(pci_find_parent_resource); | 3119 | EXPORT_SYMBOL(pci_find_parent_resource); |
2867 | EXPORT_SYMBOL(pci_select_bars); | 3120 | EXPORT_SYMBOL(pci_select_bars); |
@@ -2871,10 +3124,8 @@ EXPORT_SYMBOL(pci_save_state); | |||
2871 | EXPORT_SYMBOL(pci_restore_state); | 3124 | EXPORT_SYMBOL(pci_restore_state); |
2872 | EXPORT_SYMBOL(pci_pme_capable); | 3125 | EXPORT_SYMBOL(pci_pme_capable); |
2873 | EXPORT_SYMBOL(pci_pme_active); | 3126 | EXPORT_SYMBOL(pci_pme_active); |
2874 | EXPORT_SYMBOL(pci_enable_wake); | ||
2875 | EXPORT_SYMBOL(pci_wake_from_d3); | 3127 | EXPORT_SYMBOL(pci_wake_from_d3); |
2876 | EXPORT_SYMBOL(pci_target_state); | 3128 | EXPORT_SYMBOL(pci_target_state); |
2877 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 3129 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
2878 | EXPORT_SYMBOL(pci_back_from_sleep); | 3130 | EXPORT_SYMBOL(pci_back_from_sleep); |
2879 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); | 3131 | EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
2880 | |||