| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-02 21:32:35 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-02 21:32:35 -0400 |
| commit | 16642a2e7be23bbda013fc32d8f6c68982eab603 (patch) | |
| tree | 346ae485f485f6901e5d8150f0d34d178a7dd448 /drivers/clocksource | |
| parent | 51562cba98939da0a1d10fe7c25359b77a069033 (diff) | |
| parent | b9142167a2bb979b58b98ffcd928a311b55cbd9f (diff) | |
Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J Wysocki:
- Improved system suspend/resume and runtime PM handling for the SH
TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).
- Generic PM domains framework extensions related to cpuidle support and
to looking up domain objects by name.
- ARM/shmobile power management updates including improved support for
the SH7372's A4S power domain containing the CPU core.
- cpufreq changes related to AMD CPU support from Matthew Garrett,
Andre Przywara and Borislav Petkov.
- cpu0 cpufreq driver from Shawn Guo.
- cpufreq governor fixes related to the relaxing of limits from Michal
Pecio.
- OMAP cpufreq updates from Axel Lin and Richard Zhao.
- cpuidle ladder governor fixes related to the disabling of states from
Carsten Emde and me.
- Runtime PM core updates related to the interactions with the system
suspend core from Alan Stern and Kevin Hilman.
- Wakeup sources modification from John Stultz allowing more helper
functions to be called from interrupt context, plus additional
diagnostic code from Todd Poynor.
- System suspend error code path fix from Feng Hong.
Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support for
hardware P-state chips. The changes were independent but somewhat
intertwined.
* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
cpuidle: rename function name "__cpuidle_register_driver", v2
cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
cpuidle: remove some empty lines
PM: Prevent runtime suspend during system resume
PM QoS: Use spinlock in the per-device PM QoS constraints code
PM / Sleep: use resume event when call dpm_resume_early
cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
ACPI / processor: remove pointless variable initialization
ACPI / processor: remove unused function parameter
cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
sections: fix section conflicts in drivers/cpufreq
cpufreq: conservative: update frequency when limits are relaxed
cpufreq / ondemand: update frequency when limits are relaxed
properly __init-annotate pm_sysrq_init()
cpufreq: Add a generic cpufreq-cpu0 driver
PM / OPP: Initialize OPP table from device tree
ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
cpufreq: Remove support for hardware P-state chips from powernow-k8
...
Diffstat (limited to 'drivers/clocksource')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/clocksource/sh_cmt.c | 71 |
| -rw-r--r-- | drivers/clocksource/sh_mtu2.c | 41 |
| -rw-r--r-- | drivers/clocksource/sh_tmu.c | 112 |

3 files changed, 204 insertions(+), 20 deletions(-)
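All three drivers below change their probe() functions in the same way: rather than marking the device "always on" in its PM domain, they activate runtime PM and, if the channel is actually going to serve as a clock source or clock event device, mark it IRQ-safe so the start/stop paths can take runtime PM references with interrupts disabled. The condensed sketch below only summarizes that shared shape; sh_timer_probe() and sh_timer_setup() are placeholder names rather than functions from the patch, and sh_mtu2 (which registers no clocksource) tests only cfg->clockevent_rating.

```c
/*
 * Condensed sketch of the probe-time runtime PM setup shared by sh_cmt,
 * sh_mtu2 and sh_tmu below.  sh_timer_probe() and sh_timer_setup() are
 * placeholders; see the diffs for the real per-driver code.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>

/* Stand-in for the driver-specific sh_*_setup() helpers. */
static int sh_timer_setup(struct platform_device *pdev)
{
	return 0;
}

static int sh_timer_probe(struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	void *p = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		/* The timer is already powered up: tell the runtime PM core. */
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	ret = sh_timer_setup(pdev);
	if (ret) {
		/* Nothing will use the device, so let it be suspended. */
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		/* The start/stop paths may run with interrupts disabled. */
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
```

The else branch matters for channels that are mapped but serve neither as a clocksource nor as a clockevent device: pm_runtime_idle() lets such a device be powered back down right away.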
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 98b06baafcc6..a5f7829f2799 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_cmt_priv {
 	void __iomem *mapbase;
@@ -52,6 +53,7 @@ struct sh_cmt_priv {
 	struct clock_event_device ced;
 	struct clocksource cs;
 	unsigned long total_cycles;
+	bool cs_enabled;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
@@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
 	int k, ret;
 
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
@@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 
 	/* stop clock */
 	clk_disable(p->clk);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
 }
 
 /* private flags */
@@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
 	int ret;
 	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 
+	WARN_ON(p->cs_enabled);
+
 	p->total_cycles = 0;
 
 	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
-	if (!ret)
+	if (!ret) {
 		__clocksource_updatefreq_hz(cs, p->rate);
+		p->cs_enabled = true;
+	}
 	return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 {
-	sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	WARN_ON(!p->cs_enabled);
+
+	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+	p->cs_enabled = false;
+}
+
+static void sh_cmt_clocksource_suspend(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+	pm_genpd_syscore_poweroff(&p->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
-	sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	pm_genpd_syscore_poweron(&p->pdev->dev);
+	sh_cmt_start(p, FLAG_CLOCKSOURCE);
 }
 
 static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
@@ -479,7 +507,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 	cs->read = sh_cmt_clocksource_read;
 	cs->enable = sh_cmt_clocksource_enable;
 	cs->disable = sh_cmt_clocksource_disable;
-	cs->suspend = sh_cmt_clocksource_disable;
+	cs->suspend = sh_cmt_clocksource_suspend;
 	cs->resume = sh_cmt_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
@@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta,
 	return 0;
 }
 
+static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
+static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
 static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 				       char *name, unsigned long rating)
 {
@@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_next_event = sh_cmt_clock_event_next;
 	ced->set_mode = sh_cmt_clock_event_mode;
+	ced->suspend = sh_cmt_clock_event_suspend;
+	ced->resume = sh_cmt_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
@@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 		dev_err(&p->pdev->dev, "registration failed\n");
 		goto err1;
 	}
+	p->cs_enabled = false;
 
 	ret = setup_irq(irq, &p->irqaction);
 	if (ret) {
@@ -688,14 +729,17 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
 	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating || cfg->clocksource_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_cmt_remove(struct platform_device *pdev)
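Within sh_cmt_enable()/sh_cmt_disable() (and their counterparts in the other two drivers) the pattern is to take a runtime PM reference before touching the hardware and to set the device's "syscore" flag while the channel is running; as I read dev_pm_syscore_device(), this tells the PM core to leave the device to the timekeeping suspend/resume hooks instead of the regular device suspend path. A minimal sketch of that bracketing, with my_timer_priv and my_timer_hw_start()/my_timer_hw_stop() as placeholders for the driver's private data and its existing clk/channel code:

```c
/*
 * Minimal sketch of the enable/disable bracketing added in this diff.
 * my_timer_priv, my_timer_hw_start() and my_timer_hw_stop() are
 * placeholders for the driver's private data and its existing
 * clk_enable()/channel-setup code.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct my_timer_priv {
	struct platform_device *pdev;
};

static int my_timer_hw_start(struct my_timer_priv *p) { return 0; }
static void my_timer_hw_stop(struct my_timer_priv *p) { }

static int my_timer_start(struct my_timer_priv *p)
{
	/* Power up the device (and its PM domain) before touching registers. */
	pm_runtime_get_sync(&p->pdev->dev);
	/* While the timer ticks, keep it out of the regular PM callbacks. */
	dev_pm_syscore_device(&p->pdev->dev, true);

	return my_timer_hw_start(p);
}

static void my_timer_stop(struct my_timer_priv *p)
{
	my_timer_hw_stop(p);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}
```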
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index d9b76ca64a61..c5eea858054a 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_mtu2_priv {
 	void __iomem *mapbase;
@@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
 	int ret;
 
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
@@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p)
 
 	/* stop clock */
 	clk_disable(p->clk);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
 }
 
 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
@@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 	}
 }
 
+static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
+static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
 static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 					char *name, unsigned long rating)
 {
@@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	ced->rating = rating;
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
+	ced->suspend = sh_mtu2_clock_event_suspend;
+	ced->resume = sh_mtu2_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
@@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
 	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_mtu2_remove(struct platform_device *pdev)
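sh_cmt, sh_mtu2 and sh_tmu all gain suspend/resume callbacks on their clock_event_device as well. The clockevents core calls these very late in suspend and very early in resume, with interrupts disabled, which appears to be why the drivers switch the PM domain off and on through pm_genpd_syscore_poweroff()/pm_genpd_syscore_poweron() rather than through an ordinary device suspend callback. A sketch of the wiring, with my_timer_priv and my_ced_to_priv() as placeholder names:

```c
/*
 * Sketch of the clock event suspend/resume wiring added in these diffs.
 * my_timer_priv and my_ced_to_priv() stand in for the drivers' private
 * data and their container_of() helpers (ced_to_sh_cmt() and friends).
 */
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

struct my_timer_priv {
	struct platform_device *pdev;
	struct clock_event_device ced;
};

static struct my_timer_priv *my_ced_to_priv(struct clock_event_device *ced)
{
	return container_of(ced, struct my_timer_priv, ced);
}

static void my_timer_clock_event_suspend(struct clock_event_device *ced)
{
	/* Runs with interrupts off: switch the PM domain off directly. */
	pm_genpd_syscore_poweroff(&my_ced_to_priv(ced)->pdev->dev);
}

static void my_timer_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&my_ced_to_priv(ced)->pdev->dev);
}

static void my_timer_register_clockevent(struct my_timer_priv *p)
{
	struct clock_event_device *ced = &p->ced;

	/* rating, cpumask, set_mode etc. are set up as in the diffs above */
	ced->suspend = my_timer_clock_event_suspend;
	ced->resume = my_timer_clock_event_resume;

	clockevents_register_device(ced);
}
```

Only the two callback assignments are new; clockevents_register_device() is the registration call the drivers were already using.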
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index c1b51d49d106..0cc4add88279 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_tmu_priv {
 	void __iomem *mapbase;
@@ -43,6 +44,8 @@ struct sh_tmu_priv {
 	unsigned long periodic;
 	struct clock_event_device ced;
 	struct clocksource cs;
+	bool cs_enabled;
+	unsigned int enable_count;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
@@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
 	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_priv *p)
 {
 	int ret;
 
@@ -135,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
 	return 0;
 }
 
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+	if (p->enable_count++ > 0)
+		return 0;
+
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
+	return __sh_tmu_enable(p);
+}
+
+static void __sh_tmu_disable(struct sh_tmu_priv *p)
 {
 	/* disable channel */
 	sh_tmu_start_stop_ch(p, 0);
@@ -147,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
 	clk_disable(p->clk);
 }
 
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+	if (WARN_ON(p->enable_count == 0))
+		return;
+
+	if (--p->enable_count > 0)
+		return;
+
+	__sh_tmu_disable(p);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
+}
+
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
 			    int periodic)
 {
@@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
 	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
 	int ret;
 
+	if (WARN_ON(p->cs_enabled))
+		return 0;
+
 	ret = sh_tmu_enable(p);
-	if (!ret)
+	if (!ret) {
 		__clocksource_updatefreq_hz(cs, p->rate);
+		p->cs_enabled = true;
+	}
+
 	return ret;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
 {
-	sh_tmu_disable(cs_to_sh_tmu(cs));
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (WARN_ON(!p->cs_enabled))
+		return;
+
+	sh_tmu_disable(p);
+	p->cs_enabled = false;
+}
+
+static void sh_tmu_clocksource_suspend(struct clocksource *cs)
+{
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (!p->cs_enabled)
+		return;
+
+	if (--p->enable_count == 0) {
+		__sh_tmu_disable(p);
+		pm_genpd_syscore_poweroff(&p->pdev->dev);
+	}
+}
+
+static void sh_tmu_clocksource_resume(struct clocksource *cs)
+{
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (!p->cs_enabled)
+		return;
+
+	if (p->enable_count++ == 0) {
+		pm_genpd_syscore_poweron(&p->pdev->dev);
+		__sh_tmu_enable(p);
+	}
 }
 
 static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
@@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
 	cs->read = sh_tmu_clocksource_read;
 	cs->enable = sh_tmu_clocksource_enable;
 	cs->disable = sh_tmu_clocksource_disable;
+	cs->suspend = sh_tmu_clocksource_suspend;
+	cs->resume = sh_tmu_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta,
 	return 0;
 }
 
+static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
+static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
 static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 				       char *name, unsigned long rating)
 {
@@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
+	ced->suspend = sh_tmu_clock_event_suspend;
+	ced->resume = sh_tmu_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 
@@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
+	p->cs_enabled = false;
+	p->enable_count = 0;
 
 	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
 			       cfg->clockevent_rating,
@@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
 	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating || cfg->clocksource_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_tmu_remove(struct platform_device *pdev)
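sh_tmu goes one step further than the other two drivers: the hardware bring-up is split into __sh_tmu_enable()/__sh_tmu_disable() and wrapped in a reference-counted sh_tmu_enable()/sh_tmu_disable(), so the new clocksource suspend/resume callbacks can release and re-acquire the hardware without unbalancing runtime PM (they skip the pm_runtime_* calls entirely, presumably because runtime PM is not usable at that point in the suspend sequence, and rely on the genpd syscore helpers instead). The counting logic, reduced to a sketch with placeholder hardware hooks my_hw_enable()/my_hw_disable():

```c
/*
 * Sketch of the reference-counted enable used by sh_tmu.  my_hw_enable()
 * and my_hw_disable() stand in for __sh_tmu_enable()/__sh_tmu_disable();
 * the struct is reduced to the fields the sketch needs.
 */
#include <linux/clocksource.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct my_tmu_priv {
	struct platform_device *pdev;
	struct clocksource cs;
	unsigned int enable_count;
	bool cs_enabled;
};

static int my_hw_enable(struct my_tmu_priv *p) { return 0; }
static void my_hw_disable(struct my_tmu_priv *p) { }

static int my_tmu_enable(struct my_tmu_priv *p)
{
	if (p->enable_count++ > 0)	/* channel already running */
		return 0;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);
	return my_hw_enable(p);
}

static void my_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct my_tmu_priv *p = container_of(cs, struct my_tmu_priv, cs);

	if (!p->cs_enabled)
		return;

	/* Drop our count; only the last user powers the domain off. */
	if (--p->enable_count == 0) {
		my_hw_disable(p);
		pm_genpd_syscore_poweroff(&p->pdev->dev);
	}
}

static void my_tmu_clocksource_resume(struct clocksource *cs)
{
	struct my_tmu_priv *p = container_of(cs, struct my_tmu_priv, cs);

	if (!p->cs_enabled)
		return;

	if (p->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&p->pdev->dev);
		my_hw_enable(p);
	}
}
```

The real driver additionally guards these paths with WARN_ON() checks against unbalanced enable/disable calls, as shown in the diff above.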
