author    Ingo Molnar <mingo@elte.hu>  2009-01-27 06:01:51 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-01-27 06:01:51 -0500
commit    3ddeb51d9c83931c1ca6abf76a38934c5a1ed918 (patch)
tree      fc2efb59d627135ea2199a8a68415b162646b121 /drivers/pci
parent    5a611268b69f05262936dd177205acbce4471358 (diff)
parent    5ee810072175042775e39bdd3eaaa68884c27805 (diff)
Merge branch 'linus' into core/percpu
Conflicts:
arch/x86/kernel/setup_percpu.c
Diffstat (limited to 'drivers/pci')
 drivers/pci/hotplug/pciehp_core.c |  4
 drivers/pci/msi.c                 | 16
 drivers/pci/pci-driver.c          | 91
 drivers/pci/pci.c                 | 63
 drivers/pci/pci.h                 |  6
 5 files changed, 100 insertions(+), 80 deletions(-)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5482d4ed8256..c2485542f543 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -126,8 +126,10 @@ static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
 	mutex_lock(&slot->ctrl->crit_sect);
 
 	/* has it been >1 sec since our last toggle? */
-	if ((get_seconds() - slot->last_emi_toggle) < 1)
+	if ((get_seconds() - slot->last_emi_toggle) < 1) {
+		mutex_unlock(&slot->ctrl->crit_sect);
 		return -EINVAL;
+	}
 
 	/* see what our current state is */
 	retval = get_lock_status(hotplug_slot, &value);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index b4a90badd0a6..896a15d70f5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -398,21 +398,19 @@ static int msi_capability_init(struct pci_dev *dev)
 	entry->msi_attrib.masked = 1;
 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos = pos;
-	if (entry->msi_attrib.maskbit) {
-		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
-				entry->msi_attrib.is_64);
-	}
 	entry->dev = dev;
 	if (entry->msi_attrib.maskbit) {
-		unsigned int maskbits, temp;
+		unsigned int base, maskbits, temp;
+
+		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+		entry->mask_base = (void __iomem *)(long)base;
+
 		/* All MSIs are unmasked by default, Mask them all */
-		pci_read_config_dword(dev,
-			msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
-			&maskbits);
+		pci_read_config_dword(dev, base, &maskbits);
 		temp = (1 << multi_msi_capable(control));
 		temp = ((temp - 1) & ~temp);
 		maskbits |= temp;
-		pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits);
+		pci_write_config_dword(dev, base, maskbits);
 		entry->msi_attrib.maskbits_mask = temp;
 	}
 	list_add_tail(&entry->list, &dev->msi_list);
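Note: besides merging the two maskbit blocks, the hunk above fixes the config-space offset — the old code passed entry->msi_attrib.is_64 to pci_write_config_dword() instead of the mask-bits register. The mask itself comes from the (1 << n) / ((temp - 1) & ~temp) idiom, which sets one bit per implemented vector. A small user-space sketch of that arithmetic, assuming a made-up control word; multi_msi_capable() is re-declared here purely for illustration (in the kernel it comes from drivers/pci/msi.h):

#include <stdio.h>

/* Re-declared for illustration only; in the kernel these come from
 * linux/pci_regs.h and drivers/pci/msi.h. */
#define PCI_MSI_FLAGS_QMASK	0x0e
#define multi_msi_capable(control)	(1 << (((control) & PCI_MSI_FLAGS_QMASK) >> 1))

int main(void)
{
	unsigned int control = 0x0086;	/* made-up MSI control word: capable of 8 vectors */
	unsigned int maskbits = 0;	/* pretend the mask-bits register read back as 0 */
	unsigned int temp;

	/* Same arithmetic as the hunk: one mask bit per implemented vector. */
	temp = (1 << multi_msi_capable(control));
	temp = ((temp - 1) & ~temp);
	maskbits |= temp;

	printf("vectors=%u maskbits=0x%08x\n",
	       (unsigned int)multi_msi_capable(control), maskbits);
	return 0;
}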
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c697f2680856..9de07b75b993 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,17 +355,27 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 	int i = 0;
 
 	if (drv && drv->suspend) {
+		pci_dev->state_saved = false;
+
 		i = drv->suspend(pci_dev, state);
 		suspend_report_result(drv->suspend, i);
-	} else {
-		pci_save_state(pci_dev);
-		/*
-		 * This is for compatibility with existing code with legacy PM
-		 * support.
-		 */
-		pci_pm_set_unknown_state(pci_dev);
+		if (i)
+			return i;
+
+		if (pci_dev->state_saved)
+			goto Fixup;
+
+		if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
+			goto Fixup;
 	}
 
+	pci_save_state(pci_dev);
+	/*
+	 * This is for compatibility with existing code with legacy PM support.
+	 */
+	pci_pm_set_unknown_state(pci_dev);
+
+Fixup:
 	pci_fixup_device(pci_fixup_suspend, pci_dev);
 
 	return i;
@@ -386,81 +396,34 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
 
 static int pci_legacy_resume_early(struct device *dev)
 {
-	int error = 0;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 
-	pci_fixup_device(pci_fixup_resume_early, pci_dev);
-
-	if (drv && drv->resume_early)
-		error = drv->resume_early(pci_dev);
-	return error;
+	return drv && drv->resume_early ?
+			drv->resume_early(pci_dev) : 0;
 }
 
 static int pci_legacy_resume(struct device *dev)
 {
-	int error;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
-	if (drv && drv->resume) {
-		error = drv->resume(pci_dev);
-	} else {
-		/* restore the PCI config space */
-		pci_restore_state(pci_dev);
-		error = pci_pm_reenable_device(pci_dev);
-	}
-	return error;
+	return drv && drv->resume ?
+			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
 }
 
 /* Auxiliary functions used by the new power management framework */
 
-static int pci_restore_standard_config(struct pci_dev *pci_dev)
-{
-	struct pci_dev *parent = pci_dev->bus->self;
-	int error = 0;
-
-	/* Check if the device's bus is operational */
-	if (!parent || parent->current_state == PCI_D0) {
-		pci_restore_state(pci_dev);
-		pci_update_current_state(pci_dev, PCI_D0);
-	} else {
-		dev_warn(&pci_dev->dev, "unable to restore config, "
-			"bridge %s in low power state D%d\n", pci_name(parent),
-			parent->current_state);
-		pci_dev->current_state = PCI_UNKNOWN;
-		error = -EAGAIN;
-	}
-
-	return error;
-}
-
-static bool pci_is_bridge(struct pci_dev *pci_dev)
-{
-	return !!(pci_dev->subordinate);
-}
-
 static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
 {
-	if (pci_restore_standard_config(pci_dev))
-		pci_fixup_device(pci_fixup_resume_early, pci_dev);
+	pci_restore_standard_config(pci_dev);
+	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 }
 
 static int pci_pm_default_resume(struct pci_dev *pci_dev)
 {
-	/*
-	 * pci_restore_standard_config() should have been called once already,
-	 * but it would have failed if the device's parent bridge had not been
-	 * in power state D0 at that time. Check it and try again if necessary.
-	 */
-	if (pci_dev->current_state == PCI_UNKNOWN) {
-		int error = pci_restore_standard_config(pci_dev);
-		if (error)
-			return error;
-	}
-
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
 	if (!pci_is_bridge(pci_dev))
@@ -575,11 +538,11 @@ static int pci_pm_resume_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	pci_pm_default_resume_noirq(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_pm_default_resume_noirq(pci_dev);
-
 	if (drv && drv->pm && drv->pm->resume_noirq)
 		error = drv->pm->resume_noirq(dev);
 
@@ -730,11 +693,11 @@ static int pci_pm_restore_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	pci_pm_default_resume_noirq(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_pm_default_resume_noirq(pci_dev);
-
 	if (drv && drv->pm && drv->pm->restore_noirq)
 		error = drv->pm->restore_noirq(dev);
 
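Note: the pci_legacy_suspend() rework above keys the default save path off pci_dev->state_saved, which pci_save_state() now sets (see the pci.c hunk below). A toy, compilable model of the new control flow — the struct and flags are stand-ins for the real pci_dev fields, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct pci_dev: only the two facts the new flow checks. */
struct toy_dev {
	bool state_saved;	/* mirrors pci_dev->state_saved */
	bool in_d0;		/* mirrors current_state == PCI_D0 */
};

/* Pretend legacy ->suspend hook; a real driver may or may not save state. */
static int driver_suspend(struct toy_dev *dev)
{
	dev->state_saved = true;	/* as if it had called pci_save_state() */
	return 0;
}

static int legacy_suspend(struct toy_dev *dev, bool has_driver_hook)
{
	int err = 0;

	if (has_driver_hook) {
		dev->state_saved = false;	/* forget any stale save */
		err = driver_suspend(dev);
		if (err)
			return err;		/* error: no default save, no fixups */
		if (dev->state_saved || !dev->in_d0)
			goto fixup;		/* don't overwrite the driver's work */
	}
	dev->state_saved = true;		/* default path: save state ourselves */
fixup:
	printf("suspend fixups run (state_saved=%d)\n", dev->state_saved);
	return err;
}

int main(void)
{
	struct toy_dev dev = { .state_saved = false, .in_d0 = true };
	return legacy_suspend(&dev, true);
}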
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e491fdedf705..17bd9325a245 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,7 +22,7 @@
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
-unsigned int pci_pm_d3_delay = 10;
+unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
 
 #ifdef CONFIG_PCI_DOMAINS
 int pci_domains_supported = 1;
@@ -426,6 +426,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
  * given PCI device
  * @dev: PCI device to handle.
  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ * @wait: If 'true', wait for the device to change its power state
  *
  * RETURN VALUE:
  * -EINVAL if the requested state is invalid.
@@ -435,7 +436,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
  * 0 if device's power state has been successfully changed.
  */
 static int
-pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
+pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
 {
 	u16 pmcsr;
 	bool need_restore = false;
@@ -480,8 +481,10 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 		break;
 	case PCI_UNKNOWN: /* Boot-up */
 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
-		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
 			need_restore = true;
+			wait = true;
+		}
 		/* Fall-through: force to D0 */
 	default:
 		pmcsr = 0;
@@ -491,12 +494,15 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* enter specified state */
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
+	if (!wait)
+		return 0;
+
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 		msleep(pci_pm_d3_delay);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
-		udelay(200);
+		udelay(PCI_PM_D2_DELAY);
 
 	dev->current_state = state;
 
@@ -515,7 +521,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	if (need_restore)
 		pci_restore_bars(dev);
 
-	if (dev->bus->self)
+	if (wait && dev->bus->self)
 		pcie_aspm_pm_state_change(dev->bus->self);
 
 	return 0;
@@ -585,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 		return 0;
 
-	error = pci_raw_set_power_state(dev, state);
+	error = pci_raw_set_power_state(dev, state, true);
 
 	if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
 		/* Allow the platform to finalize the transition */
@@ -730,6 +736,7 @@ pci_save_state(struct pci_dev *dev)
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+	dev->state_saved = true;
 	if ((i = pci_save_pcie_state(dev)) != 0)
 		return i;
 	if ((i = pci_save_pcix_state(dev)) != 0)
@@ -1374,6 +1381,50 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 }
 
 /**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @dev: PCI device to handle
+ *
+ * This function assumes that the device's configuration space is accessible.
+ * If the device needs to be powered up, the function will wait for it to
+ * change the state.
+ */
+int pci_restore_standard_config(struct pci_dev *dev)
+{
+	pci_power_t prev_state;
+	int error;
+
+	pci_restore_state(dev);
+	pci_update_current_state(dev, PCI_D0);
+
+	prev_state = dev->current_state;
+	if (prev_state == PCI_D0)
+		return 0;
+
+	error = pci_raw_set_power_state(dev, PCI_D0, false);
+	if (error)
+		return error;
+
+	if (pci_is_bridge(dev)) {
+		if (prev_state > PCI_D1)
+			mdelay(PCI_PM_BUS_WAIT);
+	} else {
+		switch(prev_state) {
+		case PCI_D3cold:
+		case PCI_D3hot:
+			mdelay(pci_pm_d3_delay);
+			break;
+		case PCI_D2:
+			udelay(PCI_PM_D2_DELAY);
+			break;
+		}
+	}
+
+	dev->current_state = PCI_D0;
+
+	return 0;
+}
+
+/**
  * pci_enable_ari - enable ARI forwarding if hardware support it
  * @dev: the PCI device
  */
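Note: pci_restore_standard_config() above calls pci_raw_set_power_state() with wait == false and applies the transition delay itself, chosen by the state the device is coming from. A stand-alone sketch of that delay selection, with assumed values (10 ms and 200 us mirror the literals the old code used; the 50 ms bridge wait is a placeholder, not a value taken from this patch):

#include <stdio.h>

/* Power states numbered as in the kernel's pci_power_t (D0 = 0 ... D3cold = 4). */
enum pci_power { PCI_D0, PCI_D1, PCI_D2, PCI_D3hot, PCI_D3cold };

static unsigned int d3_delay_ms = 10;	/* matches the old hard-coded msleep(10) */

/* How long to wait after forcing a device back to D0, mirroring the switch
 * added in pci_restore_standard_config(). */
static unsigned long resume_delay_us(enum pci_power prev_state, int is_bridge)
{
	if (is_bridge)
		return prev_state > PCI_D1 ? 50 * 1000 : 0;	/* assumed bus settle time */

	switch (prev_state) {
	case PCI_D3cold:
	case PCI_D3hot:
		return d3_delay_ms * 1000UL;
	case PCI_D2:
		return 200;			/* matches the old udelay(200) */
	default:
		return 0;
	}
}

int main(void)
{
	printf("endpoint resuming from D3hot: %lu us\n", resume_delay_us(PCI_D3hot, 0));
	printf("bridge resuming from D3hot:   %lu us\n", resume_delay_us(PCI_D3hot, 1));
	return 0;
}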
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1351bb4addde..26ddf78ac300 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -49,6 +49,12 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+extern int pci_restore_standard_config(struct pci_dev *dev);
+
+static inline bool pci_is_bridge(struct pci_dev *pci_dev)
+{
+	return !!(pci_dev->subordinate);
+}
 
 extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
 extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);