Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--   drivers/pci/pci.c | 79
1 file changed, 78 insertions(+), 1 deletion(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7fa3cbd742c5..e98c8104297b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems);
 
 unsigned int pci_pm_d3_delay;
 
+static void pci_pme_list_scan(struct work_struct *work);
+
+static LIST_HEAD(pci_pme_list);
+static DEFINE_MUTEX(pci_pme_list_mutex);
+static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+
+struct pci_pme_device {
+	struct list_head list;
+	struct pci_dev *dev;
+};
+
+#define PME_TIMEOUT 1000 /* How long between PME checks */
+
 static void pci_dev_d3_sleep(struct pci_dev *dev)
 {
 	unsigned int delay = dev->d3_delay;
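
The declarations added above set up the polling machinery: a list of suspended legacy PCI devices, a mutex protecting that list, and a single delayed work item that re-checks the list every PME_TIMEOUT milliseconds. A minimal sketch of the same self-rearming delayed-work pattern in isolation, using hypothetical names that are not part of this patch, looks roughly like this:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define POLL_INTERVAL_MS 1000		/* plays the role of PME_TIMEOUT */

	static void poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static void poll_fn(struct work_struct *work)
	{
		/* ... inspect hardware state here ... */

		/* re-arm so the check repeats until it is stopped */
		schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_INTERVAL_MS));
	}

	static void start_polling(void)
	{
		schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_INTERVAL_MS));
	}

	static void stop_polling(void)
	{
		cancel_delayed_work_sync(&poll_work);
	}

The patch itself only schedules the work while the list is non-empty and simply lets it lapse once the list drains, so it needs no explicit cancel path.
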
@@ -1331,6 +1344,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
 	return !!(dev->pme_support & (1 << state));
 }
 
+static void pci_pme_list_scan(struct work_struct *work)
+{
+	struct pci_pme_device *pme_dev;
+
+	mutex_lock(&pci_pme_list_mutex);
+	if (!list_empty(&pci_pme_list)) {
+		list_for_each_entry(pme_dev, &pci_pme_list, list)
+			pci_pme_wakeup(pme_dev->dev, NULL);
+		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+	}
+	mutex_unlock(&pci_pme_list_mutex);
+}
+
+/**
+ * pci_external_pme - is a device an external PCI PME source?
+ * @dev: PCI device to check
+ *
+ */
+
+static bool pci_external_pme(struct pci_dev *dev)
+{
+	if (pci_is_pcie(dev) || dev->bus->number == 0)
+		return false;
+	return true;
+}
+
 /**
  * pci_pme_active - enable or disable PCI device's PME# function
  * @dev: PCI device to handle.
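
Taken together, pci_pme_list_scan() walks every registered device, calls pci_pme_wakeup() on it, and re-arms itself for another PME_TIMEOUT milliseconds as long as the list stays non-empty, while pci_external_pme() picks out the devices that actually need this treatment: conventional PCI devices that are not on the root bus, i.e. the ones whose PME# signal depends on board wiring that may be missing. As a rough illustration (the helper name below is hypothetical, not part of the patch), the predicate is equivalent to:

	#include <linux/pci.h>

	/* Hypothetical equivalent of the new pci_external_pme():
	 * polling is only worthwhile for conventional PCI devices
	 * behind a bridge; PCIe and root-bus devices report PME
	 * through paths the core can already rely on. */
	static bool needs_pme_polling(struct pci_dev *pdev)
	{
		return !pci_is_pcie(pdev) && pdev->bus->number != 0;
	}
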
@@ -1354,6 +1393,44 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
+	/* PCI (as opposed to PCIe) PME requires that the device have
+	   its PME# line hooked up correctly. Not all hardware vendors
+	   do this, so the PME never gets delivered and the device
+	   remains asleep. The easiest way around this is to
+	   periodically walk the list of suspended devices and check
+	   whether any have their PME flag set. The assumption is that
+	   we'll wake up often enough anyway that this won't be a huge
+	   hit, and the power savings from the devices will still be a
+	   win. */
+
+	if (pci_external_pme(dev)) {
+		struct pci_pme_device *pme_dev;
+		if (enable) {
+			pme_dev = kmalloc(sizeof(struct pci_pme_device),
+					  GFP_KERNEL);
+			if (!pme_dev)
+				goto out;
+			pme_dev->dev = dev;
+			mutex_lock(&pci_pme_list_mutex);
+			list_add(&pme_dev->list, &pci_pme_list);
+			if (list_is_singular(&pci_pme_list))
+				schedule_delayed_work(&pci_pme_work,
+						      msecs_to_jiffies(PME_TIMEOUT));
+			mutex_unlock(&pci_pme_list_mutex);
+		} else {
+			mutex_lock(&pci_pme_list_mutex);
+			list_for_each_entry(pme_dev, &pci_pme_list, list) {
+				if (pme_dev->dev == dev) {
+					list_del(&pme_dev->list);
+					kfree(pme_dev);
+					break;
+				}
+			}
+			mutex_unlock(&pci_pme_list_mutex);
+		}
+	}
+
+out:
 	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
 			enable ? "enabled" : "disabled");
 }
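
Drivers do not call this path directly; they usually reach it through pci_enable_wake() while preparing a device for suspend, and with the change above a wake-enabled, PME-capable legacy device is also put on the polled list. A hedged sketch of such a caller, assuming a hypothetical legacy .suspend hook for a device named foo:

	#include <linux/pci.h>

	/* Hypothetical legacy suspend callback: arm PME# and let the
	 * PCI core decide whether the device must also be polled. */
	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		pci_save_state(pdev);
		/* for a wake-enabled, PME-capable device this ends up
		 * calling pci_pme_active(pdev, true) */
		pci_enable_wake(pdev, PCI_D3hot, true);
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}
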
@@ -2689,7 +2766,7 @@ int pcie_get_readrq(struct pci_dev *dev)
 
 	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
 	if (!ret)
-	ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
 
 	return ret;
 }
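
The final hunk is a whitespace-only adjustment; the logic it touches decodes the Max_Read_Request_Size field, bits 14:12 of the PCI Express Device Control register, as 128 << encoding bytes, so encodings 0 through 5 map to 128, 256, 512, 1024, 2048 and 4096 bytes. A standalone illustration of the decode, assuming a raw ctl value already read from config space:

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_EXP_DEVCTL_READRQ	0x7000	/* bits 14:12, as in pci_regs.h */

	int main(void)
	{
		uint16_t ctl = 0x2000;	/* example value: encoding 2 in bits 14:12 */
		int readrq = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

		printf("max read request: %d bytes\n", readrq);	/* prints 512 */
		return 0;
	}
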
