author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/pci/pci.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--  drivers/pci/pci.c | 496
1 file changed, 370 insertions(+), 126 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295a049f..37499127c801 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/pm.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -19,8 +20,8 @@
 #include <linux/pci-aspm.h>
 #include <linux/pm_wakeup.h>
 #include <linux/interrupt.h>
-#include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include <linux/device.h>
+#include <linux/pm_runtime.h>
 #include <asm/setup.h>
 #include "pci.h"
 
@@ -29,7 +30,23 @@ const char *pci_power_names[] = {
 };
 EXPORT_SYMBOL_GPL(pci_power_names);
 
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+	unsigned int delay = dev->d3_delay;
+
+	if (delay < pci_pm_d3_delay)
+		delay = pci_pm_d3_delay;
+
+	msleep(delay);
+}
 
 #ifdef CONFIG_PCI_DOMAINS
 int pci_domains_supported = 1;
@@ -47,6 +64,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
+/*
+ * The default CLS is used if arch didn't set CLS explicitly and not
+ * all pci devices agree on the same value.  Arch can override either
+ * the dfl or actual value as it sees fit.  Don't forget this is
+ * measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
 /**
  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  * @bus: pointer to PCI bus structure to search
@@ -278,6 +304,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
 }
 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 
+/**
+ * pci_bus_find_ext_capability - find an extended capability
+ * @bus:   the PCI bus to query
+ * @devfn: PCI device to query
+ * @cap:   capability code
+ *
+ * Like pci_find_ext_capability() but works for pci devices that do not have a
+ * pci_dev structure set up yet.
+ *
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it.
+ */
+int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
+				int cap)
+{
+	u32 header;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
+		return 0;
+	if (header == 0xffffffff || header == 0)
+		return 0;
+
+	while (ttl-- > 0) {
+		if (PCI_EXT_CAP_ID(header) == cap)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (pos < PCI_CFG_SPACE_SIZE)
+			break;
+
+		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
+			break;
+	}
+
+	return 0;
+}
+
 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 {
 	int rc, ttl = PCI_FIND_CAP_TTL;
@@ -361,10 +430,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 {
 	const struct pci_bus *bus = dev->bus;
 	int i;
-	struct resource *best = NULL;
+	struct resource *best = NULL, *r;
 
-	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *r = bus->resource[i];
+	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
 		if (res->start && !(res->start >= r->start && res->end <= r->end))
@@ -373,8 +441,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 			continue;	/* Wrong type */
 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 			return r;	/* Exact match */
-		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
-			best = r;	/* Approximating prefetchable by non-prefetchable */
+		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+		if (r->flags & IORESOURCE_PREFETCH)
+			continue;
+		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
+		if (!best)
+			best = r;
 	}
 	return best;
 }
@@ -434,6 +506,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 }
 
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return pci_platform_pm ?
+			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
 /**
  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
  *                           given PCI device
@@ -509,7 +587,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
-		msleep(pci_pm_d3_delay);
+		pci_dev_d3_sleep(dev);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 		udelay(PCI_PM_D2_DELAY);
 
@@ -601,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
  */
 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 {
-	return state > PCI_D0 ?
+	return state >= PCI_D0 ?
 			pci_platform_power_transition(dev, state) : -EINVAL;
 }
 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -638,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		 */
 		return 0;
 
-	/* Check if we're already there */
-	if (dev->current_state == state)
-		return 0;
-
 	__pci_start_power_transition(dev, state);
 
 	/* This device is quirked not to be put into D3, so
@@ -728,8 +802,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
 	u16 *cap;
 	u16 flags;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	if (pos <= 0)
+	pos = pci_pcie_cap(dev);
+	if (!pos)
 		return 0;
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +911,7 @@ pci_save_state(struct pci_dev *dev)
 	int i;
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
-		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 	dev->state_saved = true;
 	if ((i = pci_save_pcie_state(dev)) != 0)
 		return i;
@@ -1140,11 +1214,11 @@ pci_disable_device(struct pci_dev *dev)
 
 /**
  * pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
  *
  *
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
  * implementation. Architecture implementations can override this.
  */
 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1155,7 +1229,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
 
 /**
  * pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
  *
 *
@@ -1167,6 +1241,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
 }
 
 /**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set).  Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+	int pmcsr_pos;
+	u16 pmcsr;
+	bool ret = false;
+
+	if (!dev->pm_cap)
+		return false;
+
+	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+		return false;
+
+	/* Clear PME status. */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS;
+	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+		/* Disable PME to avoid interrupt flood. */
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+		ret = true;
+	}
+
+	pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+	return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @ign: Ignored.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+{
+	if (pci_check_pme_status(dev))
+		pm_request_resume(&dev->dev);
+	return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+	if (bus)
+		pci_walk_bus(bus, pci_pme_wakeup, NULL);
+}
+
+/**
  * pci_pme_capable - check the capability of PCI device to generate PME#
  * @dev: PCI device to handle.
  * @state: PCI state from which device will issue PME#.
@@ -1202,14 +1336,15 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
-	dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
 			enable ? "enabled" : "disabled");
 }
 
 /**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
  * @dev: PCI device affected
  * @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
  * @enable: True to enable event generation; false to disable
  *
  * This enables the device as a wakeup event source, or disables it.
@@ -1225,11 +1360,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
  * Error code depending on the platform is returned if both the platform and
  * the native mechanism fail to enable the generation of wake-up events
  */
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+		      bool runtime, bool enable)
 {
 	int ret = 0;
 
-	if (enable && !device_may_wakeup(&dev->dev))
+	if (enable && !runtime && !device_may_wakeup(&dev->dev))
 		return -EINVAL;
 
 	/* Don't do the same thing twice in a row for one device. */
@@ -1249,19 +1385,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 			pci_pme_active(dev, true);
 		else
 			ret = 1;
-		error = platform_pci_sleep_wake(dev, true);
+		error = runtime ? platform_pci_run_wake(dev, true) :
+					platform_pci_sleep_wake(dev, true);
 		if (ret)
 			ret = error;
 		if (!ret)
 			dev->wakeup_prepared = true;
 	} else {
-		platform_pci_sleep_wake(dev, false);
+		if (runtime)
+			platform_pci_run_wake(dev, false);
+		else
+			platform_pci_sleep_wake(dev, false);
 		pci_pme_active(dev, false);
 		dev->wakeup_prepared = false;
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL(__pci_enable_wake);
 
 /**
  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@@ -1371,6 +1512,66 @@ int pci_back_from_sleep(struct pci_dev *dev)
 }
 
 /**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+	pci_power_t target_state = pci_target_state(dev);
+	int error;
+
+	if (target_state == PCI_POWER_ERROR)
+		return -EIO;
+
+	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+	error = pci_set_power_state(dev, target_state);
+
+	if (error)
+		__pci_enable_wake(dev, target_state, true, false);
+
+	return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is cabable of generating wake-up events
+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+
+	if (device_run_wake(&dev->dev))
+		return true;
+
+	if (!dev->pme_support)
+		return false;
+
+	while (bus->parent) {
+		struct pci_dev *bridge = bus->self;
+
+		if (device_run_wake(&bridge->dev))
+			return true;
+
+		bus = bus->parent;
+	}
+
+	/* We have reached the root bus. */
+	if (bus->bridge)
+		return device_run_wake(bus->bridge);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
+/**
  * pci_pm_init - Initialize PM functions of given PCI device
  * @dev: PCI device to handle.
  */
@@ -1379,7 +1580,10 @@ void pci_pm_init(struct pci_dev *dev)
 	int pm;
 	u16 pmc;
 
+	pm_runtime_forbid(&dev->dev);
+	device_enable_async_suspend(&dev->dev);
 	dev->wakeup_prepared = false;
+
 	dev->pm_cap = 0;
 
 	/* find PCI PM capability in list */
@@ -1396,6 +1600,7 @@ void pci_pm_init(struct pci_dev *dev)
 	}
 
 	dev->pm_cap = pm;
+	dev->d3_delay = PCI_PM_D3_WAIT;
 
 	dev->d1_support = false;
 	dev->d2_support = false;
@@ -1413,7 +1618,8 @@ void pci_pm_init(struct pci_dev *dev)
 
 		pmc &= PCI_PM_CAP_PME_MASK;
 		if (pmc) {
-			dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+			dev_printk(KERN_DEBUG, &dev->dev,
+				 "PME# supported from%s%s%s%s%s\n",
 				(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
 				(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
 				(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1716,7 @@ void pci_enable_ari(struct pci_dev *dev)
 	u16 ctrl;
 	struct pci_dev *bridge;
 
-	if (!dev->is_pcie || dev->devfn)
+	if (!pci_is_pcie(dev) || dev->devfn)
 		return;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1724,10 @@ void pci_enable_ari(struct pci_dev *dev)
 		return;
 
 	bridge = dev->bus->self;
-	if (!bridge || !bridge->is_pcie)
+	if (!bridge || !pci_is_pcie(bridge))
 		return;
 
-	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(bridge);
 	if (!pos)
 		return;
 
@@ -1536,6 +1742,54 @@ void pci_enable_ari(struct pci_dev *dev)
 	bridge->ari_enabled = 1;
 }
 
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+	pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware support it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+	int pos;
+	u16 cap;
+	u16 ctrl;
+
+	if (!pci_acs_enable)
+		return;
+
+	if (!pci_is_pcie(dev))
+		return;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return;
+
+	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+	/* Source Validation */
+	ctrl |= (cap & PCI_ACS_SV);
+
+	/* P2P Request Redirect */
+	ctrl |= (cap & PCI_ACS_RR);
+
+	/* P2P Completion Redirect */
+	ctrl |= (cap & PCI_ACS_CR);
+
+	/* Upstream Forwarding */
+	ctrl |= (cap & PCI_ACS_UF);
+
+	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
 /**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
@@ -1669,9 +1923,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
 	return 0;
 
 err_out:
-	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
-		 bar,
-		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
 	return -EBUSY;
 }
@@ -1866,31 +2118,6 @@ void pci_clear_master(struct pci_dev *dev)
 	__pci_set_master(dev, false);
 }
 
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
-	return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
-	return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
 /**
  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
  * @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +2128,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
  *
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
 {
 	u8 cacheline_size;
 
 	if (!pci_cache_line_size)
-		return -EINVAL;		/* The system doesn't support MWI. */
+		return -EINVAL;
 
 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
 	   equal to or multiple of the right value. */
@@ -1928,6 +2154,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
 
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
 
 /**
  * pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2051,33 +2295,6 @@ void pci_msi_off(struct pci_dev *dev)
 	}
 }
 
-#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
-/*
- * These can be overridden by arch-specific implementations
- */
-int
-pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dma_mask = mask;
-
-	return 0;
-}
-
-int
-pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dev.coherent_dma_mask = mask;
-
-	return 0;
-}
-#endif
-
 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
 {
@@ -2099,9 +2316,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
 	int i;
 	int pos;
 	u32 cap;
-	u16 status;
+	u16 status, control;
 
-	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	pos = pci_pcie_cap(dev);
 	if (!pos)
 		return -ENOTTY;
 
@@ -2126,8 +2343,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
 			"proceeding with reset anyway\n");
 
 clear:
-	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
-				PCI_EXP_DEVCTL_BCR_FLR);
+	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+	control |= PCI_EXP_DEVCTL_BCR_FLR;
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
 	msleep(100);
 
 	return 0;
@@ -2191,12 +2410,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D3hot;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D0;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	return 0;
 }
@@ -2237,9 +2456,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
 	if (!probe) {
 		pci_block_user_cfg_access(dev);
 		/* block PM suspend, driver probe, etc. */
-		down(&dev->dev.sem);
+		device_lock(&dev->dev);
 	}
 
+	rc = pci_dev_specific_reset(dev, probe);
+	if (rc != -ENOTTY)
+		goto done;
+
 	rc = pcie_flr(dev, probe);
 	if (rc != -ENOTTY)
 		goto done;
@@ -2255,7 +2478,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
 	rc = pci_parent_bus_reset(dev, probe);
 done:
 	if (!probe) {
-		up(&dev->dev.sem);
+		device_unlock(&dev->dev);
 		pci_unblock_user_cfg_access(dev);
 	}
 
@@ -2350,18 +2573,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
  */
 int pcix_get_max_mmrbc(struct pci_dev *dev)
 {
-	int err, cap;
+	int cap;
 	u32 stat;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
 		return -EINVAL;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
-	if (err)
+	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
 		return -EINVAL;
 
-	return (stat & PCI_X_STATUS_MAX_READ) >> 12;
+	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
 }
 EXPORT_SYMBOL(pcix_get_max_mmrbc);
 
@@ -2374,18 +2596,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
  */
 int pcix_get_mmrbc(struct pci_dev *dev)
 {
-	int ret, cap;
-	u32 cmd;
+	int cap;
+	u16 cmd;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
 		return -EINVAL;
 
-	ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
-	if (!ret)
-		ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+		return -EINVAL;
 
-	return ret;
+	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
 }
 EXPORT_SYMBOL(pcix_get_mmrbc);
 
@@ -2400,28 +2621,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
  */
 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 {
-	int cap, err = -EINVAL;
-	u32 stat, cmd, v, o;
+	int cap;
+	u32 stat, v, o;
+	u16 cmd;
 
 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
-		goto out;
+		return -EINVAL;
 
 	v = ffs(mmrbc) - 10;
 
 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (!cap)
-		goto out;
+		return -EINVAL;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
-	if (err)
-		goto out;
+	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+		return -EINVAL;
 
 	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
 		return -E2BIG;
 
-	err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
-	if (err)
-		goto out;
+	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+		return -EINVAL;
 
 	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
 	if (o != v) {
@@ -2431,10 +2651,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 
 		cmd &= ~PCI_X_CMD_MAX_READ;
 		cmd |= v << 2;
-		err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
+		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
+			return -EIO;
 	}
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(pcix_set_mmrbc);
 
@@ -2450,7 +2670,7 @@ int pcie_get_readrq(struct pci_dev *dev)
 	int ret, cap;
 	u16 ctl;
 
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	cap = pci_pcie_cap(dev);
 	if (!cap)
 		return -EINVAL;
 
@@ -2480,7 +2700,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
 
 	v = (ffs(rq) - 8) << 12;
 
-	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	cap = pci_pcie_cap(dev);
 	if (!cap)
 		goto out;
 
@@ -2540,7 +2760,24 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
 		return reg;
 	}
 
-	dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+	return 0;
+}
+
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+	arch_set_vga_state = func;	/* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+			unsigned int command_bits, bool change_bridge)
+{
+	if (arch_set_vga_state)
+		return arch_set_vga_state(dev, decode, command_bits,
+						change_bridge);
 	return 0;
 }
 
@@ -2557,9 +2794,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 	struct pci_bus *bus;
 	struct pci_dev *bridge;
 	u16 cmd;
+	int rc;
 
 	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
 
+	/* ARCH specific VGA enables */
+	rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+	if (rc)
+		return rc;
+
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	if (decode == true)
 		cmd |= command_bits;
@@ -2590,7 +2833,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 
 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
 
 /**
  * pci_specified_resource_alignment - get resource alignment specified by user.
@@ -2723,6 +2966,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
 	return 1;
 }
 
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
 static int __init pci_setup(char *str)
 {
 	while (str) {
@@ -2784,8 +3032,6 @@ EXPORT_SYMBOL(pci_set_mwi);
 EXPORT_SYMBOL(pci_try_set_mwi);
 EXPORT_SYMBOL(pci_clear_mwi);
 EXPORT_SYMBOL_GPL(pci_intx);
-EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_set_consistent_dma_mask);
 EXPORT_SYMBOL(pci_assign_resource);
 EXPORT_SYMBOL(pci_find_parent_resource);
 EXPORT_SYMBOL(pci_select_bars);
@@ -2795,10 +3041,8 @@ EXPORT_SYMBOL(pci_save_state);
 EXPORT_SYMBOL(pci_restore_state);
 EXPORT_SYMBOL(pci_pme_capable);
 EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_enable_wake);
 EXPORT_SYMBOL(pci_wake_from_d3);
 EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
 EXPORT_SYMBOL(pci_back_from_sleep);
 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-