-rw-r--r--  arch/arm/mach-omap1/pm_bus.c               |  69
-rw-r--r--  arch/arm/mach-omap2/Makefile               |   6
-rw-r--r--  arch/arm/mach-omap2/pm_bus.c               |  85
-rw-r--r--  arch/arm/mach-shmobile/pm_runtime.c        | 145
-rw-r--r--  arch/arm/plat-omap/omap_device.c           |  23
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c   |  33
-rw-r--r--  drivers/base/platform.c                    | 138
-rw-r--r--  drivers/base/power/Makefile                |   1
-rw-r--r--  drivers/base/power/clock_ops.c             | 431
-rw-r--r--  drivers/base/power/main.c                  |  64
-rw-r--r--  drivers/base/power/runtime.c               |  29
-rw-r--r--  include/linux/platform_device.h            |  63
-rw-r--r--  include/linux/pm.h                         |   1
-rw-r--r--  include/linux/pm_runtime.h                 |  42
-rw-r--r--  kernel/power/Kconfig                       |   4
15 files changed, 688 insertions(+), 446 deletions(-)
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 6588c22b8a64..fe31d933f0ed 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -24,75 +24,50 @@
 #ifdef CONFIG_PM_RUNTIME
 static int omap1_pm_runtime_suspend(struct device *dev)
 {
-	struct clk *iclk, *fclk;
-	int ret = 0;
+	int ret;
 
 	dev_dbg(dev, "%s\n", __func__);
 
 	ret = pm_generic_runtime_suspend(dev);
+	if (ret)
+		return ret;
 
-	fclk = clk_get(dev, "fck");
-	if (!IS_ERR(fclk)) {
-		clk_disable(fclk);
-		clk_put(fclk);
-	}
-
-	iclk = clk_get(dev, "ick");
-	if (!IS_ERR(iclk)) {
-		clk_disable(iclk);
-		clk_put(iclk);
+	ret = pm_runtime_clk_suspend(dev);
+	if (ret) {
+		pm_generic_runtime_resume(dev);
+		return ret;
 	}
 
 	return 0;
-};
+}
 
 static int omap1_pm_runtime_resume(struct device *dev)
 {
-	struct clk *iclk, *fclk;
-
 	dev_dbg(dev, "%s\n", __func__);
 
-	iclk = clk_get(dev, "ick");
-	if (!IS_ERR(iclk)) {
-		clk_enable(iclk);
-		clk_put(iclk);
-	}
-
-	fclk = clk_get(dev, "fck");
-	if (!IS_ERR(fclk)) {
-		clk_enable(fclk);
-		clk_put(fclk);
-	}
-
-	return pm_generic_runtime_resume(dev);
-};
+	pm_runtime_clk_resume(dev);
+	return pm_generic_runtime_resume(dev);
+}
+
+static struct dev_power_domain default_power_domain = {
+	.ops = {
+		.runtime_suspend = omap1_pm_runtime_suspend,
+		.runtime_resume = omap1_pm_runtime_resume,
+		USE_PLATFORM_PM_SLEEP_OPS
+	},
+};
+
+static struct pm_clk_notifier_block platform_bus_notifier = {
+	.pwr_domain = &default_power_domain,
+	.con_ids = { "ick", "fck", NULL, },
+};
 
 static int __init omap1_pm_runtime_init(void)
 {
-	const struct dev_pm_ops *pm;
-	struct dev_pm_ops *omap_pm;
-
 	if (!cpu_class_is_omap1())
 		return -ENODEV;
 
-	pm = platform_bus_get_pm_ops();
-	if (!pm) {
-		pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
-	if (!omap_pm) {
-		pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
-			__func__);
-		return -ENOMEM;
-	}
-
-	omap_pm->runtime_suspend = omap1_pm_runtime_suspend;
-	omap_pm->runtime_resume = omap1_pm_runtime_resume;
-
-	platform_bus_set_pm_ops(omap_pm);
+	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 512b15204450..66dfbccacd25 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -59,10 +59,10 @@ endif
 # Power Management
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_ARCH_OMAP2)		+= pm24xx.o
-obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o \
-					   cpuidle34xx.o pm_bus.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o pm_bus.o
+					   cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)		+= sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
deleted file mode 100644
index 5acd2ab298b1..000000000000
--- a/arch/arm/mach-omap2/pm_bus.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Runtime PM support code for OMAP
- *
- * Author: Kevin Hilman, Deep Root Systems, LLC
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-
-#include <plat/omap_device.h>
-#include <plat/omap-pm.h>
-
-#ifdef CONFIG_PM_RUNTIME
-static int omap_pm_runtime_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	int r, ret = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	ret = pm_generic_runtime_suspend(dev);
-
-	if (!ret && dev->parent == &omap_device_parent) {
-		r = omap_device_idle(pdev);
-		WARN_ON(r);
-	}
-
-	return ret;
-};
-
-static int omap_pm_runtime_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	int r;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (dev->parent == &omap_device_parent) {
-		r = omap_device_enable(pdev);
-		WARN_ON(r);
-	}
-
-	return pm_generic_runtime_resume(dev);
-};
-#else
-#define omap_pm_runtime_suspend NULL
-#define omap_pm_runtime_resume NULL
-#endif /* CONFIG_PM_RUNTIME */
-
-static int __init omap_pm_runtime_init(void)
-{
-	const struct dev_pm_ops *pm;
-	struct dev_pm_ops *omap_pm;
-
-	pm = platform_bus_get_pm_ops();
-	if (!pm) {
-		pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL);
-	if (!omap_pm) {
-		pr_err("%s: unable to alloc memory for new dev_pm_ops\n",
-			__func__);
-		return -ENOMEM;
-	}
-
-	omap_pm->runtime_suspend = omap_pm_runtime_suspend;
-	omap_pm->runtime_resume = omap_pm_runtime_resume;
-
-	platform_bus_set_pm_ops(omap_pm);
-
-	return 0;
-}
-core_initcall(omap_pm_runtime_init);
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 94912d3944d3..2d1b67a59e4a 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -18,152 +18,41 @@
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
 #include <linux/bitmap.h>
+#include <linux/slab.h>
 
 #ifdef CONFIG_PM_RUNTIME
-#define BIT_ONCE 0
-#define BIT_ACTIVE 1
-#define BIT_CLK_ENABLED 2
 
-struct pm_runtime_data {
-	unsigned long flags;
-	struct clk *clk;
-};
-
-static void __devres_release(struct device *dev, void *res)
-{
-	struct pm_runtime_data *prd = res;
-
-	dev_dbg(dev, "__devres_release()\n");
-
-	if (test_bit(BIT_CLK_ENABLED, &prd->flags))
-		clk_disable(prd->clk);
-
-	if (test_bit(BIT_ACTIVE, &prd->flags))
-		clk_put(prd->clk);
-}
-
-static struct pm_runtime_data *__to_prd(struct device *dev)
-{
-	return devres_find(dev, __devres_release, NULL, NULL);
-}
-
-static void platform_pm_runtime_init(struct device *dev,
-				     struct pm_runtime_data *prd)
-{
-	if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
-		prd->clk = clk_get(dev, NULL);
-		if (!IS_ERR(prd->clk)) {
-			set_bit(BIT_ACTIVE, &prd->flags);
-			dev_info(dev, "clocks managed by runtime pm\n");
-		}
-	}
-}
-
-static void platform_pm_runtime_bug(struct device *dev,
-				    struct pm_runtime_data *prd)
-{
-	if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
-		dev_err(dev, "runtime pm suspend before resume\n");
-}
-
-int platform_pm_runtime_suspend(struct device *dev)
-{
-	struct pm_runtime_data *prd = __to_prd(dev);
-
-	dev_dbg(dev, "platform_pm_runtime_suspend()\n");
-
-	platform_pm_runtime_bug(dev, prd);
-
-	if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
-		clk_disable(prd->clk);
-		clear_bit(BIT_CLK_ENABLED, &prd->flags);
-	}
-
-	return 0;
-}
-
-int platform_pm_runtime_resume(struct device *dev)
-{
-	struct pm_runtime_data *prd = __to_prd(dev);
-
-	dev_dbg(dev, "platform_pm_runtime_resume()\n");
-
-	platform_pm_runtime_init(dev, prd);
-
-	if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
-		clk_enable(prd->clk);
-		set_bit(BIT_CLK_ENABLED, &prd->flags);
-	}
-
-	return 0;
-}
-
-int platform_pm_runtime_idle(struct device *dev)
+static int default_platform_runtime_idle(struct device *dev)
 {
 	/* suspend synchronously to disable clocks immediately */
 	return pm_runtime_suspend(dev);
 }
 
-static int platform_bus_notify(struct notifier_block *nb,
-			       unsigned long action, void *data)
-{
-	struct device *dev = data;
-	struct pm_runtime_data *prd;
-
-	dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
-
-	if (action == BUS_NOTIFY_BIND_DRIVER) {
-		prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
-		if (prd)
-			devres_add(dev, prd);
-		else
-			dev_err(dev, "unable to alloc memory for runtime pm\n");
-	}
-
-	return 0;
-}
-
-#else /* CONFIG_PM_RUNTIME */
-
-static int platform_bus_notify(struct notifier_block *nb,
-			       unsigned long action, void *data)
-{
-	struct device *dev = data;
-	struct clk *clk;
+static struct dev_power_domain default_power_domain = {
+	.ops = {
+		.runtime_suspend = pm_runtime_clk_suspend,
+		.runtime_resume = pm_runtime_clk_resume,
+		.runtime_idle = default_platform_runtime_idle,
+		USE_PLATFORM_PM_SLEEP_OPS
+	},
+};
 
-	dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+#define DEFAULT_PWR_DOMAIN_PTR	(&default_power_domain)
 
-	switch (action) {
-	case BUS_NOTIFY_BIND_DRIVER:
-		clk = clk_get(dev, NULL);
-		if (!IS_ERR(clk)) {
-			clk_enable(clk);
-			clk_put(clk);
-			dev_info(dev, "runtime pm disabled, clock forced on\n");
-		}
-		break;
-	case BUS_NOTIFY_UNBOUND_DRIVER:
-		clk = clk_get(dev, NULL);
-		if (!IS_ERR(clk)) {
-			clk_disable(clk);
-			clk_put(clk);
-			dev_info(dev, "runtime pm disabled, clock forced off\n");
-		}
-		break;
-	}
+#else
 
-	return 0;
-}
+#define DEFAULT_PWR_DOMAIN_PTR	NULL
 
 #endif /* CONFIG_PM_RUNTIME */
 
-static struct notifier_block platform_bus_notifier = {
-	.notifier_call = platform_bus_notify
+static struct pm_clk_notifier_block platform_bus_notifier = {
+	.pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
+	.con_ids = { NULL, },
 };
 
 static int __init sh_pm_runtime_init(void)
 {
-	bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+	pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
 	return 0;
 }
 core_initcall(sh_pm_runtime_init);
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 9bbda9acb73b..a37b8eb65b76 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od)
 	return 0;
 }
 
+static int _od_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	return omap_device_idle(pdev);
+}
+
+static int _od_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	return omap_device_enable(pdev);
+}
+
+static struct dev_power_domain omap_device_power_domain = {
+	.ops = {
+		.runtime_suspend = _od_runtime_suspend,
+		.runtime_resume = _od_runtime_resume,
+		USE_PLATFORM_PM_SLEEP_OPS
+	}
+};
+
 /**
  * omap_device_register - register an omap_device with one omap_hwmod
  * @od: struct omap_device * to register
@@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od)
 	pr_debug("omap_device: %s: registering\n", od->pdev.name);
 
 	od->pdev.dev.parent = &omap_device_parent;
+	od->pdev.dev.pwr_domain = &omap_device_power_domain;
 	return platform_device_register(&od->pdev);
 }
 
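From the driver side nothing changes with this hookup: a driver bound to an omap_device still brackets hardware access with the usual runtime PM calls, and the core now routes them through dev->pwr_domain to omap_device_enable()/omap_device_idle(). A hypothetical driver fragment for illustration only (my_drv_* is not part of this patch):

	/* Illustrative only: a platform driver using runtime PM as before. */
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int my_drv_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);

		pm_runtime_get_sync(&pdev->dev);	/* resumes via omap_device_power_domain */
		/* ... access the hardware ... */
		pm_runtime_put(&pdev->dev);		/* allows _od_runtime_suspend() later */

		return 0;
	}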
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 6dcb8166a64d..22db127afa7b 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void)
 	queue_work(pm_wq, &hwblk_work);
 }
 
-int platform_pm_runtime_suspend(struct device *dev)
+static int default_platform_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct pdev_archdata *ad = &pdev->archdata;
@@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev)
 	int hwblk = ad->hwblk_id;
 	int ret = 0;
 
-	dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk);
+	dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
 
 	/* ignore off-chip platform devices */
 	if (!hwblk)
@@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev)
 	mutex_unlock(&ad->mutex);
 
 out:
-	dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n",
-		hwblk, ret);
+	dev_dbg(dev, "%s() [%d] returns %d\n",
+		__func__, hwblk, ret);
 
 	return ret;
 }
 
-int platform_pm_runtime_resume(struct device *dev)
+static int default_platform_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct pdev_archdata *ad = &pdev->archdata;
 	int hwblk = ad->hwblk_id;
 	int ret = 0;
 
-	dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk);
+	dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
 
 	/* ignore off-chip platform devices */
 	if (!hwblk)
@@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev)
 	 */
 	mutex_unlock(&ad->mutex);
 out:
-	dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n",
-		hwblk, ret);
+	dev_dbg(dev, "%s() [%d] returns %d\n",
+		__func__, hwblk, ret);
 
 	return ret;
 }
 
-int platform_pm_runtime_idle(struct device *dev)
+static int default_platform_runtime_idle(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int hwblk = pdev->archdata.hwblk_id;
 	int ret = 0;
 
-	dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk);
+	dev_dbg(dev, "%s() [%d]\n", __func__, hwblk);
 
 	/* ignore off-chip platform devices */
 	if (!hwblk)
@@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev)
 	/* suspend synchronously to disable clocks immediately */
 	ret = pm_runtime_suspend(dev);
 out:
-	dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk);
+	dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk);
 	return ret;
 }
 
+static struct dev_power_domain default_power_domain = {
+	.ops = {
+		.runtime_suspend = default_platform_runtime_suspend,
+		.runtime_resume = default_platform_runtime_resume,
+		.runtime_idle = default_platform_runtime_idle,
+		USE_PLATFORM_PM_SLEEP_OPS
+	},
+};
+
 static int platform_bus_notify(struct notifier_block *nb,
 			       unsigned long action, void *data)
 {
@@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
 		hwblk_disable(hwblk_info, hwblk);
 		/* make sure driver re-inits itself once */
 		__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+		dev->pwr_domain = &default_power_domain;
 		break;
 	/* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
 	case BUS_NOTIFY_BOUND_DRIVER:
@@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
 		__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
 		break;
 	case BUS_NOTIFY_DEL_DEVICE:
+		dev->pwr_domain = NULL;
 		break;
 	}
 	return 0;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e0e4fc24c46..48425f183029 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_prepare(struct device *dev)
+int platform_pm_prepare(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev)
 	return ret;
 }
 
-static void platform_pm_complete(struct device *dev)
+void platform_pm_complete(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 
@@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev)
 		drv->pm->complete(dev);
 }
 
-#else /* !CONFIG_PM_SLEEP */
-
-#define platform_pm_prepare		NULL
-#define platform_pm_complete		NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
-int __weak platform_pm_suspend(struct device *dev)
+int platform_pm_suspend(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_suspend_noirq(struct device *dev)
+int platform_pm_suspend_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume(struct device *dev)
+int platform_pm_resume(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev)
 	return ret;
 }
 
-int __weak platform_pm_resume_noirq(struct device *dev)
+int platform_pm_resume_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_SUSPEND */
-
-#define platform_pm_suspend		NULL
-#define platform_pm_resume		NULL
-#define platform_pm_suspend_noirq	NULL
-#define platform_pm_resume_noirq	NULL
-
-#endif /* !CONFIG_SUSPEND */
+#endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 
-static int platform_pm_freeze(struct device *dev)
+int platform_pm_freeze(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_freeze_noirq(struct device *dev)
+int platform_pm_freeze_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw(struct device *dev)
+int platform_pm_thaw(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_thaw_noirq(struct device *dev)
+int platform_pm_thaw_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff(struct device *dev)
+int platform_pm_poweroff(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_poweroff_noirq(struct device *dev)
+int platform_pm_poweroff_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore(struct device *dev)
+int platform_pm_restore(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev)
 	return ret;
 }
 
-static int platform_pm_restore_noirq(struct device *dev)
+int platform_pm_restore_noirq(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
@@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define platform_pm_freeze		NULL
-#define platform_pm_thaw		NULL
-#define platform_pm_poweroff		NULL
-#define platform_pm_restore		NULL
-#define platform_pm_freeze_noirq	NULL
-#define platform_pm_thaw_noirq		NULL
-#define platform_pm_poweroff_noirq	NULL
-#define platform_pm_restore_noirq	NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#ifdef CONFIG_PM_RUNTIME
-
-int __weak platform_pm_runtime_suspend(struct device *dev)
-{
-	return pm_generic_runtime_suspend(dev);
-};
-
-int __weak platform_pm_runtime_resume(struct device *dev)
-{
-	return pm_generic_runtime_resume(dev);
-};
-
-int __weak platform_pm_runtime_idle(struct device *dev)
-{
-	return pm_generic_runtime_idle(dev);
-};
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define platform_pm_runtime_suspend NULL
-#define platform_pm_runtime_resume NULL
-#define platform_pm_runtime_idle NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 
 static const struct dev_pm_ops platform_dev_pm_ops = {
-	.prepare = platform_pm_prepare,
-	.complete = platform_pm_complete,
-	.suspend = platform_pm_suspend,
-	.resume = platform_pm_resume,
-	.freeze = platform_pm_freeze,
-	.thaw = platform_pm_thaw,
-	.poweroff = platform_pm_poweroff,
-	.restore = platform_pm_restore,
-	.suspend_noirq = platform_pm_suspend_noirq,
-	.resume_noirq = platform_pm_resume_noirq,
-	.freeze_noirq = platform_pm_freeze_noirq,
-	.thaw_noirq = platform_pm_thaw_noirq,
-	.poweroff_noirq = platform_pm_poweroff_noirq,
-	.restore_noirq = platform_pm_restore_noirq,
-	.runtime_suspend = platform_pm_runtime_suspend,
-	.runtime_resume = platform_pm_runtime_resume,
-	.runtime_idle = platform_pm_runtime_idle,
+	.runtime_suspend = pm_generic_runtime_suspend,
+	.runtime_resume = pm_generic_runtime_resume,
+	.runtime_idle = pm_generic_runtime_idle,
+	USE_PLATFORM_PM_SLEEP_OPS
 };
 
 struct bus_type platform_bus_type = {
@@ -977,41 +916,6 @@ struct bus_type platform_bus_type = {
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
-/**
- * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops
- *
- * This function can be used by platform code to get the current
- * set of dev_pm_ops functions used by the platform_bus_type.
- */
-const struct dev_pm_ops * __init platform_bus_get_pm_ops(void)
-{
-	return platform_bus_type.pm;
-}
-
-/**
- * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type
- *
- * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type
- *
- * Platform code can override the dev_pm_ops methods of
- * platform_bus_type by using this function.  It is expected that
- * platform code will first do a platform_bus_get_pm_ops(), then
- * kmemdup it, then customize selected methods and pass a pointer to
- * the new struct dev_pm_ops to this function.
- *
- * Since platform-specific code is customizing methods for *all*
- * devices (not just platform-specific devices) it is expected that
- * any custom overrides of these functions will keep existing behavior
- * and simply extend it.  For example, any customization of the
- * runtime PM methods should continue to call the pm_generic_*
- * functions as the default ones do in addition to the
- * platform-specific behavior.
- */
-void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm)
-{
-	platform_bus_type.pm = pm;
-}
-
 int __init platform_bus_init(void)
 {
 	int error;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 118c1b92a511..06a7073f9027 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000000..c0dd09df7be8
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,431 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+struct pm_runtime_clk_data {
+	struct list_head clock_list;
+	struct mutex lock;
+};
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
+/**
+ * pm_runtime_clk_add - Start using a device clock for runtime PM.
+ * @dev: Device whose clock is going to be used for runtime PM.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the runtime PM of @dev.
+ */
+int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		dev_err(dev, "Not enough memory for clock entry.\n");
+		return -ENOMEM;
+	}
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&prd->lock);
+	list_add_tail(&ce->node, &prd->clock_list);
+	mutex_unlock(&prd->lock);
+	return 0;
+}
+
+/**
+ * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
+ * @ce: Runtime PM clock entry to destroy.
+ *
+ * This routine must be called under the mutex protecting the runtime PM list
+ * of clocks corresponding to the @ce's device.
+ */
+static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	list_del(&ce->node);
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED)
+			clk_put(ce->clk);
+	}
+
+	if (ce->con_id)
+		kfree(ce->con_id);
+
+	kfree(ce);
+}
+
+/**
+ * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
+ * @dev: Device whose clock should not be used for runtime PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the runtime PM of @dev.
+ */
+void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!prd)
+		return;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (!con_id && !ce->con_id) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		} else if (!con_id || !ce->con_id) {
+			continue;
+		} else if (!strcmp(con_id, ce->con_id)) {
+			__pm_runtime_clk_remove(ce);
+			break;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+}
+
+/**
+ * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for.
+ *
+ * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * make the @dev's power.subsys_data field point to it.
+ */
+int pm_runtime_clk_init(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd;
+
+	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
+	if (!prd) {
+		dev_err(dev, "Not enough memory for runtime PM data.\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&prd->clock_list);
+	mutex_init(&prd->lock);
+	dev->power.subsys_data = prd;
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
+ * @dev: Device to destroy the list of runtime PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * that object.
+ */
+void pm_runtime_clk_destroy(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce, *c;
+
+	if (!prd)
+		return;
+
+	dev->power.subsys_data = NULL;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
+		__pm_runtime_clk_remove(ce);
+
+	mutex_unlock(&prd->lock);
+
+	kfree(prd);
+}
+
+/**
+ * pm_runtime_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: Runtime PM clock entry holding the clock to be acquired.
+ */
+static void pm_runtime_clk_acquire(struct device *dev,
+				   struct pm_clock_entry *ce)
+{
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
+}
+
+/**
+ * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_runtime_clk_suspend(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_runtime_clk_resume(struct device *dev)
+{
+	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!prd)
+		return 0;
+
+	mutex_lock(&prd->lock);
+
+	list_for_each_entry(ce, &prd->clock_list, node) {
+		if (ce->status == PCE_STATUS_NONE)
+			pm_runtime_clk_acquire(dev, ce);
+
+		if (ce->status < PCE_STATUS_ERROR) {
+			clk_enable(ce->clk);
+			ce->status = PCE_STATUS_ENABLED;
+		}
+	}
+
+	mutex_unlock(&prd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's
+ * pwr_domain field and its con_ids member is used to populate the device's list
+ * of runtime PM clocks, depending on @action.
+ *
+ * If the device's pwr_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pwr_domain)
+			break;
+
+		error = pm_runtime_clk_init(dev);
+		if (error)
+			break;
+
+		dev->pwr_domain = clknb->pwr_domain;
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				pm_runtime_clk_add(dev, con_id);
+		} else {
+			pm_runtime_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pwr_domain != clknb->pwr_domain)
+			break;
+
+		dev->pwr_domain = NULL;
+		pm_runtime_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_runtime_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char *con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				enable_clock(dev, con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids[0]; *con_id; con_id++)
+				disable_clock(dev, con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+/**
+ * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_runtime_clk_notify().  However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_runtime_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
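Taken together, the new file replaces the old platform_bus_set_pm_ops() copy-and-patch scheme with a bus notifier: pm_runtime_clk_add_notifier() hooks a bus type, and on BUS_NOTIFY_ADD_DEVICE the notifier allocates the per-device clock list, fills it from con_ids, and points dev->pwr_domain at the supplied domain. A minimal sketch of what a platform's init code now needs, mirroring the shmobile and OMAP1 code in this patch (the my_plat_* names are illustrative, not part of the patch):

	/* Illustrative only: wiring a platform bus to the new clock framework.
	 * Mirrors the shmobile/OMAP1 code above; my_plat_* is hypothetical.
	 */
	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int my_plat_runtime_idle(struct device *dev)
	{
		/* suspend synchronously so clocks go off immediately */
		return pm_runtime_suspend(dev);
	}

	static struct dev_power_domain my_plat_power_domain = {
		.ops = {
			.runtime_suspend = pm_runtime_clk_suspend,
			.runtime_resume = pm_runtime_clk_resume,
			.runtime_idle = my_plat_runtime_idle,
			USE_PLATFORM_PM_SLEEP_OPS
		},
	};

	static struct pm_clk_notifier_block my_plat_clk_notifier = {
		.pwr_domain = &my_plat_power_domain,
		.con_ids = { "ick", "fck", NULL, },	/* or { NULL, } for the default clock */
	};

	static int __init my_plat_pm_init(void)
	{
		pm_runtime_clk_add_notifier(&platform_bus_type, &my_plat_clk_notifier);
		return 0;
	}
	core_initcall(my_plat_pm_init);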
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index abe3ab709e87..3b354560f306 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
-	if (dev->type && dev->type->pm) {
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 	} else if (dev->class && dev->class->pm) {
@@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
 	}
 
 	if (dev->type && dev->type->pm) {
@@ -629,12 +628,11 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+	if (dev->pwr_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		dev->pwr_domain->ops.complete(dev);
-	}
-
-	if (dev->type && dev->type->pm) {
+		if (dev->pwr_domain->ops.complete)
+			dev->pwr_domain->ops.complete(dev);
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
 			dev->type->pm->complete(dev);
@@ -732,7 +730,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "LATE power domain ");
+		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		if (error)
+			return error;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "LATE type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
 		if (error)
@@ -749,11 +752,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 		return error;
 	}
 
-	if (dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "LATE power domain ");
-		pm_noirq_op(dev, &dev->pwr_domain->ops, state);
-	}
-
 	return 0;
 }
 
@@ -841,21 +839,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto End;
 	}
 
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "power domain ");
+		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		goto End;
+	}
+
 	if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "type ");
 		error = pm_op(dev, dev->type->pm, state);
-		goto Domain;
+		goto End;
 	}
 
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
 			error = pm_op(dev, dev->class->pm, state);
-			goto Domain;
+			goto End;
 		} else if (dev->class->suspend) {
 			pm_dev_dbg(dev, state, "legacy class ");
 			error = legacy_suspend(dev, state, dev->class->suspend);
-			goto Domain;
+			goto End;
 		}
 	}
 
@@ -869,12 +873,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		}
 	}
 
- Domain:
-	if (!error && dev->pwr_domain) {
-		pm_dev_dbg(dev, state, "power domain ");
-		pm_op(dev, &dev->pwr_domain->ops, state);
-	}
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -965,7 +963,14 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->type && dev->type->pm) {
+	if (dev->pwr_domain) {
+		pm_dev_dbg(dev, state, "preparing power domain ");
+		if (dev->pwr_domain->ops.prepare)
+			error = dev->pwr_domain->ops.prepare(dev);
+		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (error)
+			goto End;
+	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "preparing type ");
 		if (dev->type->pm->prepare)
 			error = dev->type->pm->prepare(dev);
@@ -984,13 +989,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		if (dev->bus->pm->prepare)
 			error = dev->bus->pm->prepare(dev);
 		suspend_report_result(dev->bus->pm->prepare, error);
-		if (error)
-			goto End;
-	}
-
-	if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
-		pm_dev_dbg(dev, state, "preparing power domain ");
-		dev->pwr_domain->ops.prepare(dev);
 	}
 
  End:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3172c60d23a9..0d4587b15c55 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev)
 static int rpm_idle(struct device *dev, int rpmflags)
 {
 	int (*callback)(struct device *);
-	int (*domain_callback)(struct device *);
 	int retval;
 
 	retval = rpm_check_suspend_allowed(dev);
@@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_idle;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_idle;
@@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
-	if (dev->pwr_domain)
-		domain_callback = dev->pwr_domain->ops.runtime_idle;
-	else
-		domain_callback = NULL;
-
-	if (callback || domain_callback) {
+	if (callback) {
 		spin_unlock_irq(&dev->power.lock);
 
-		if (domain_callback)
-			retval = domain_callback(dev);
-
-		if (!retval && callback)
-			callback(dev);
+		callback(dev);
 
 		spin_lock_irq(&dev->power.lock);
 	}
@@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->type && dev->type->pm)
+	if (dev->pwr_domain)
+		callback = dev->pwr_domain->ops.runtime_suspend;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_suspend;
@@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	else
 		pm_runtime_cancel_pending(dev);
 	} else {
-		if (dev->pwr_domain)
-			rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
 no_callback:
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_deactivate_timer(dev);
@@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_RESUMING);
 
 	if (dev->pwr_domain)
-		rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
-
-	if (dev->type && dev->type->pm)
+		callback = dev->pwr_domain->ops.runtime_resume;
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->runtime_resume;
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 744942c95fec..ede1a80e3358 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -150,9 +150,6 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr
 					struct resource *res, unsigned int n_res,
 					const void *data, size_t size);
 
-extern const struct dev_pm_ops * platform_bus_get_pm_ops(void);
-extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm);
-
 /* early platform driver interface */
 struct early_platform_driver {
 	const char *class_str;
@@ -205,4 +202,64 @@ static inline char *early_platform_driver_setup_func(void)		\
 }
 #endif /* MODULE */
 
+#ifdef CONFIG_PM_SLEEP
+extern int platform_pm_prepare(struct device *dev);
+extern void platform_pm_complete(struct device *dev);
+#else
+#define platform_pm_prepare	NULL
+#define platform_pm_complete	NULL
+#endif
+
+#ifdef CONFIG_SUSPEND
+extern int platform_pm_suspend(struct device *dev);
+extern int platform_pm_suspend_noirq(struct device *dev);
+extern int platform_pm_resume(struct device *dev);
+extern int platform_pm_resume_noirq(struct device *dev);
+#else
+#define platform_pm_suspend		NULL
+#define platform_pm_resume		NULL
+#define platform_pm_suspend_noirq	NULL
+#define platform_pm_resume_noirq	NULL
+#endif
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern int platform_pm_freeze(struct device *dev);
+extern int platform_pm_freeze_noirq(struct device *dev);
+extern int platform_pm_thaw(struct device *dev);
+extern int platform_pm_thaw_noirq(struct device *dev);
+extern int platform_pm_poweroff(struct device *dev);
+extern int platform_pm_poweroff_noirq(struct device *dev);
+extern int platform_pm_restore(struct device *dev);
+extern int platform_pm_restore_noirq(struct device *dev);
+#else
+#define platform_pm_freeze		NULL
+#define platform_pm_thaw		NULL
+#define platform_pm_poweroff		NULL
+#define platform_pm_restore		NULL
+#define platform_pm_freeze_noirq	NULL
+#define platform_pm_thaw_noirq		NULL
+#define platform_pm_poweroff_noirq	NULL
+#define platform_pm_restore_noirq	NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define USE_PLATFORM_PM_SLEEP_OPS \
+	.prepare = platform_pm_prepare, \
+	.complete = platform_pm_complete, \
+	.suspend = platform_pm_suspend, \
+	.resume = platform_pm_resume, \
+	.freeze = platform_pm_freeze, \
+	.thaw = platform_pm_thaw, \
+	.poweroff = platform_pm_poweroff, \
+	.restore = platform_pm_restore, \
+	.suspend_noirq = platform_pm_suspend_noirq, \
+	.resume_noirq = platform_pm_resume_noirq, \
+	.freeze_noirq = platform_pm_freeze_noirq, \
+	.thaw_noirq = platform_pm_thaw_noirq, \
+	.poweroff_noirq = platform_pm_poweroff_noirq, \
+	.restore_noirq = platform_pm_restore_noirq,
+#else
+#define USE_PLATFORM_PM_SLEEP_OPS
+#endif
+
 #endif /* _PLATFORM_DEVICE_H_ */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3c053e2beb84..3cc3e7e589f0 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -460,6 +460,7 @@ struct dev_pm_info {
 	unsigned long		active_jiffies;
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
+	void			*subsys_data;  /* Owned by the subsystem. */
 #endif
 };
 
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 8de9aa6e7def..878cf84baeb1 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -245,4 +245,46 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
 	__pm_runtime_use_autosuspend(dev, false);
 }
 
+struct pm_clk_notifier_block {
+	struct notifier_block nb;
+	struct dev_power_domain *pwr_domain;
+	char *con_ids[];
+};
+
+#ifdef CONFIG_PM_RUNTIME_CLK
+extern int pm_runtime_clk_init(struct device *dev);
+extern void pm_runtime_clk_destroy(struct device *dev);
+extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
+extern void pm_runtime_clk_remove(struct device *dev, const char *con_id);
+extern int pm_runtime_clk_suspend(struct device *dev);
+extern int pm_runtime_clk_resume(struct device *dev);
+#else
+static inline int pm_runtime_clk_init(struct device *dev)
+{
+	return -EINVAL;
+}
+static inline void pm_runtime_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_runtime_clk_add(struct device *dev, const char *con_id)
+{
+	return -EINVAL;
+}
+static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_runtime_clk_suspend	NULL
+#define pm_runtime_clk_resume	NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_runtime_clk_add_notifier(struct bus_type *bus,
+					struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_runtime_clk_add_notifier(struct bus_type *bus,
+					struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
 #endif
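With these declarations in place, a subsystem that does not want the bus notifier can also manage the clock list by hand: initialize the list when a device appears, add the connection IDs it cares about, and let its runtime callbacks fall through to pm_runtime_clk_suspend()/pm_runtime_clk_resume(). A rough sketch under those assumptions (my_domain_* is illustrative, not part of the patch):

	/* Illustrative only: manual use of the pm_runtime_clk_* helpers from a
	 * subsystem that attaches devices to its own dev_power_domain.
	 */
	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int my_domain_attach(struct device *dev)
	{
		int ret;

		ret = pm_runtime_clk_init(dev);		/* allocates power.subsys_data */
		if (ret)
			return ret;

		ret = pm_runtime_clk_add(dev, "fck");	/* clocks are clk_get()'d lazily */
		if (ret)
			pm_runtime_clk_destroy(dev);

		return ret;
	}

	static void my_domain_detach(struct device *dev)
	{
		pm_runtime_clk_destroy(dev);		/* drops and frees every entry */
	}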
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6de9a8fc3417..d74ad4a90695 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -229,3 +229,7 @@ config PM_OPP
 	  representing individual voltage domains and provides SOC
 	  implementations a ready to use framework to manage OPPs.
 	  For more information, read <file:Documentation/power/opp.txt>
+
+config PM_RUNTIME_CLK
+	def_bool y
+	depends on PM_RUNTIME && HAVE_CLK