Diffstat (limited to 'arch/sh/kernel/cpu/shmobile')
-rw-r--r--  arch/sh/kernel/cpu/shmobile/Makefile     |   2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c    | 119
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c         | 134
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c | 308
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S      | 406
5 files changed, 871 insertions(+), 98 deletions(-)
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
index 08bfa7c7db29..a39f88ea1a85 100644
--- a/arch/sh/kernel/cpu/shmobile/Makefile
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -4,3 +4,5 @@
 
 # Power Management & Sleep mode
 obj-$(CONFIG_PM)		+= pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
+obj-$(CONFIG_PM_RUNTIME)	+= pm_runtime.o
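The two new objects only build when the matching Kconfig options are enabled. As an illustration (the option names are the standard kernel ones; which of them a given board turns on is up to its defconfig, not this patch), a configuration that exercises all of the code added here would contain:

CONFIG_PM=y
CONFIG_PM_RUNTIME=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_MENU=y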
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 000000000000..83972aa319c2
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,119 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/hwblk.h>
+
+static unsigned long cpuidle_mode[] = {
+	SUSP_SH_SLEEP, /* regular sleep mode */
+	SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+	SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+			       struct cpuidle_state *state)
+{
+	unsigned long allowed_mode = arch_hwblk_sleep_mode();
+	ktime_t before, after;
+	int requested_state = state - &dev->states[0];
+	int allowed_state;
+	int k;
+
+	/* convert allowed mode to allowed state */
+	for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+		if (cpuidle_mode[k] == allowed_mode)
+			break;
+
+	allowed_state = k;
+
+	/* take the following into account for sleep mode selection:
+	 * - allowed_state: best mode allowed by hardware (clock deps)
+	 * - requested_state: best mode allowed by software (latencies)
+	 */
+	k = min_t(int, allowed_state, requested_state);
+
+	dev->last_state = &dev->states[k];
+	before = ktime_get();
+	sh_mobile_call_standby(cpuidle_mode[k]);
+	after = ktime_get();
+	return ktime_to_ns(ktime_sub(after, before)) >> 10;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+	.name =		"sh_idle",
+	.owner =	THIS_MODULE,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+	struct cpuidle_device *dev = &cpuidle_dev;
+	struct cpuidle_state *state;
+	int i;
+
+	cpuidle_register_driver(&cpuidle_driver);
+
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		dev->states[i].name[0] = '\0';
+		dev->states[i].desc[0] = '\0';
+	}
+
+	i = CPUIDLE_DRIVER_STATE_START;
+
+	state = &dev->states[i++];
+	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+	state->exit_latency = 1;
+	state->target_residency = 1 * 2;
+	state->power_usage = 3;
+	state->flags = 0;
+	state->flags |= CPUIDLE_FLAG_SHALLOW;
+	state->flags |= CPUIDLE_FLAG_TIME_VALID;
+	state->enter = cpuidle_sleep_enter;
+
+	dev->safe_state = state;
+
+	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
+		state = &dev->states[i++];
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+		strncpy(state->desc, "SuperH Sleep Mode [SF]",
+			CPUIDLE_DESC_LEN);
+		state->exit_latency = 100;
+		state->target_residency = 1 * 2;
+		state->power_usage = 1;
+		state->flags = 0;
+		state->flags |= CPUIDLE_FLAG_TIME_VALID;
+		state->enter = cpuidle_sleep_enter;
+	}
+
+	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
+		state = &dev->states[i++];
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
+			CPUIDLE_DESC_LEN);
+		state->exit_latency = 2300;
+		state->target_residency = 1 * 2;
+		state->power_usage = 1;
+		state->flags = 0;
+		state->flags |= CPUIDLE_FLAG_TIME_VALID;
+		state->enter = cpuidle_sleep_enter;
+	}
+
+	dev->state_count = i;
+
+	cpuidle_register_device(dev);
+}
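For reference, cpuidle_sleep_enter() above clamps the governor-requested state to the deepest state the hardware clock tree allows, and reports the time spent asleep by shifting the ktime delta right by 10, i.e. dividing nanoseconds by 1024 as a cheap approximation of a nanoseconds-to-microseconds conversion. A small user-space sketch of just that arithmetic (all values invented for illustration, this is not kernel code):

#include <stdio.h>

int main(void)
{
	int allowed_state = 1;		/* deepest state the hardware permits */
	int requested_state = 2;	/* state the cpuidle governor asked for */
	int k = allowed_state < requested_state ? allowed_state : requested_state;

	long long slept_ns = 1500000;	/* 1.5 ms, as measured by a ktime_get() pair */

	/* ns >> 10 divides by 1024, approximating ns -> us */
	printf("entered state index %d, reported residency %lld us\n",
	       k, slept_ns >> 10);
	return 0;
}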
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 8c067adf6830..e55968712706 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -1,5 +1,5 @@
 /*
- * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c
+ * arch/sh/kernel/cpu/shmobile/pm.c
  *
  * Power management support code for SuperH Mobile
  *
@@ -15,6 +15,13 @@
 #include <linux/suspend.h>
 #include <asm/suspend.h>
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Notifier lists for pre/post sleep notification
+ */
+ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
+ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
 
 /*
  * Sleep modes available on SuperH Mobile:
@@ -26,50 +33,106 @@
 #define SUSP_MODE_SLEEP		(SUSP_SH_SLEEP)
 #define SUSP_MODE_SLEEP_SF	(SUSP_SH_SLEEP | SUSP_SH_SF)
 #define SUSP_MODE_STANDBY_SF	(SUSP_SH_STANDBY | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY_SF \
+	(SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
+ /*
+  * U-standby mode is unsupported since it needs bootloader hacks
+  */
 
-/*
- * The following modes are not there yet:
- *
- * R-standby mode is unsupported, but will be added in the future
- * U-standby mode is low priority since it needs bootloader hacks
- *
- * All modes should be tied in with cpuidle. But before that can
- * happen we need to keep track of enabled hardware blocks so we
- * can avoid entering sleep modes that stop clocks to hardware
- * blocks that are in use even though the cpu core is idle.
- */
-
-extern const unsigned char sh_mobile_standby[];
-extern const unsigned int sh_mobile_standby_size;
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+#define RAM_BASE 0xfd800000 /* RSMEM */
+#else
+#define RAM_BASE 0xe5200000 /* ILRAM */
+#endif
 
-static void sh_mobile_call_standby(unsigned long mode)
+void sh_mobile_call_standby(unsigned long mode)
 {
-	extern void *vbr_base;
-	void *onchip_mem = (void *)0xe5200000; /* ILRAM */
-	void (*standby_onchip_mem)(unsigned long) = onchip_mem;
+	void *onchip_mem = (void *)RAM_BASE;
+	struct sh_sleep_data *sdp = onchip_mem;
+	void (*standby_onchip_mem)(unsigned long, unsigned long);
 
-	/* Note: Wake up from sleep may generate exceptions!
-	 * Setup VBR to point to on-chip ram if self-refresh is
-	 * going to be used.
-	 */
-	if (mode & SUSP_SH_SF)
-		asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory");
-
-	/* Copy the assembly snippet to the otherwise ununsed ILRAM */
-	memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
-	wmb();
-	ctrl_barrier();
+	/* code located directly after data structure */
+	standby_onchip_mem = (void *)(sdp + 1);
+
+	atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
+				   mode, NULL);
+
+	/* flush the caches if MMU flag is set */
+	if (mode & SUSP_SH_MMU)
+		flush_cache_all();
 
 	/* Let assembly snippet in on-chip memory handle the rest */
-	standby_onchip_mem(mode);
+	standby_onchip_mem(mode, RAM_BASE);
+
+	atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
+				   mode, NULL);
+}
+
+extern char sh_mobile_sleep_enter_start;
+extern char sh_mobile_sleep_enter_end;
+
+extern char sh_mobile_sleep_resume_start;
+extern char sh_mobile_sleep_resume_end;
+
+unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
+
+void sh_mobile_register_self_refresh(unsigned long flags,
+				     void *pre_start, void *pre_end,
+				     void *post_start, void *post_end)
+{
+	void *onchip_mem = (void *)RAM_BASE;
+	void *vp;
+	struct sh_sleep_data *sdp;
+	int n;
 
-	/* Put VBR back in System RAM again */
-	if (mode & SUSP_SH_SF)
-		asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+	/* part 0: data area */
+	sdp = onchip_mem;
+	sdp->addr.stbcr = 0xa4150020; /* STBCR */
+	sdp->addr.bar = 0xa4150040; /* BAR */
+	sdp->addr.pteh = 0xff000000; /* PTEH */
+	sdp->addr.ptel = 0xff000004; /* PTEL */
+	sdp->addr.ttb = 0xff000008; /* TTB */
+	sdp->addr.tea = 0xff00000c; /* TEA */
+	sdp->addr.mmucr = 0xff000010; /* MMUCR */
+	sdp->addr.ptea = 0xff000034; /* PTEA */
+	sdp->addr.pascr = 0xff000070; /* PASCR */
+	sdp->addr.irmcr = 0xff000078; /* IRMCR */
+	sdp->addr.ccr = 0xff00001c; /* CCR */
+	sdp->addr.ramcr = 0xff000074; /* RAMCR */
+	vp = sdp + 1;
+
+	/* part 1: common code to enter sleep mode */
+	n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
+	memcpy(vp, &sh_mobile_sleep_enter_start, n);
+	vp += roundup(n, 4);
+
+	/* part 2: board specific code to enter self-refresh mode */
+	n = pre_end - pre_start;
+	memcpy(vp, pre_start, n);
+	sdp->sf_pre = (unsigned long)vp;
+	vp += roundup(n, 4);
+
+	/* part 3: board specific code to resume from self-refresh mode */
+	n = post_end - post_start;
+	memcpy(vp, post_start, n);
+	sdp->sf_post = (unsigned long)vp;
+	vp += roundup(n, 4);
+
+	/* part 4: common code to resume from sleep mode */
+	WARN_ON(vp > (onchip_mem + 0x600));
+	vp = onchip_mem + 0x600; /* located at interrupt vector */
+	n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
+	memcpy(vp, &sh_mobile_sleep_resume_start, n);
+	sdp->resume = (unsigned long)vp;
+
+	sh_mobile_sleep_supported |= flags;
 }
 
 static int sh_pm_enter(suspend_state_t state)
 {
+	if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
+		return -ENXIO;
+
 	local_irq_disable();
 	set_bl_bit();
 	sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
@@ -86,6 +149,7 @@ static struct platform_suspend_ops sh_pm_ops = {
 static int __init sh_pm_init(void)
 {
 	suspend_set_ops(&sh_pm_ops);
+	sh_mobile_setup_cpuidle();
 	return 0;
 }
 
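Board code is expected to hand its SDRAM self-refresh enter/leave snippets to sh_mobile_register_self_refresh() so that the SUSP_SH_SF modes become available; otherwise sh_pm_enter() now fails with -ENXIO. A hedged sketch of such a call, where the my_board_* symbols are invented for illustration and the prototype is assumed to be exported via <asm/suspend.h> in the same series:

/* illustrative board setup fragment, not part of this patch */
extern char my_board_sdram_enter_start, my_board_sdram_enter_end;
extern char my_board_sdram_leave_start, my_board_sdram_leave_end;

static void __init my_board_setup_pm(void)
{
	/* advertise standby + self-refresh support and register the snippets
	 * that sh_mobile_register_self_refresh() copies to on-chip RAM */
	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
					&my_board_sdram_enter_start,
					&my_board_sdram_enter_end,
					&my_board_sdram_leave_start,
					&my_board_sdram_leave_end);
}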
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
new file mode 100644
index 000000000000..6dcb8166a64d
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -0,0 +1,308 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <asm/hwblk.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+static LIST_HEAD(hwblk_idle_list);
+static struct work_struct hwblk_work;
+
+extern struct hwblk_info *hwblk_info;
+
+static void platform_pm_runtime_not_idle(struct platform_device *pdev)
+{
+	unsigned long flags;
+
+	/* remove device from idle list */
+	spin_lock_irqsave(&hwblk_lock, flags);
+	if (test_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags)) {
+		list_del(&pdev->archdata.entry);
+		__clear_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
+	}
+	spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static int __platform_pm_runtime_resume(struct platform_device *pdev)
+{
+	struct device *d = &pdev->dev;
+	struct pdev_archdata *ad = &pdev->archdata;
+	int hwblk = ad->hwblk_id;
+	int ret = -ENOSYS;
+
+	dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk);
+
+	if (d->driver) {
+		hwblk_enable(hwblk_info, hwblk);
+		ret = 0;
+
+		if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) {
+			if (d->driver->pm && d->driver->pm->runtime_resume)
+				ret = d->driver->pm->runtime_resume(d);
+
+			if (!ret)
+				clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
+			else
+				hwblk_disable(hwblk_info, hwblk);
+		}
+	}
+
+	dev_dbg(d, "__platform_pm_runtime_resume() [%d] - returns %d\n",
+		hwblk, ret);
+
+	return ret;
+}
+
+static int __platform_pm_runtime_suspend(struct platform_device *pdev)
+{
+	struct device *d = &pdev->dev;
+	struct pdev_archdata *ad = &pdev->archdata;
+	int hwblk = ad->hwblk_id;
+	int ret = -ENOSYS;
+
+	dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk);
+
+	if (d->driver) {
+		BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags));
+		ret = 0;
+
+		if (d->driver->pm && d->driver->pm->runtime_suspend) {
+			hwblk_enable(hwblk_info, hwblk);
+			ret = d->driver->pm->runtime_suspend(d);
+			hwblk_disable(hwblk_info, hwblk);
+		}
+
+		if (!ret) {
+			set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
+			platform_pm_runtime_not_idle(pdev);
+			hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+		}
+	}
+
+	dev_dbg(d, "__platform_pm_runtime_suspend() [%d] - returns %d\n",
+		hwblk, ret);
+
+	return ret;
+}
+
+static void platform_pm_runtime_work(struct work_struct *work)
+{
+	struct platform_device *pdev;
+	unsigned long flags;
+	int ret;
+
+	/* go through the idle list and suspend one device at a time */
+	do {
+		spin_lock_irqsave(&hwblk_lock, flags);
+		if (list_empty(&hwblk_idle_list))
+			pdev = NULL;
+		else
+			pdev = list_first_entry(&hwblk_idle_list,
+						struct platform_device,
+						archdata.entry);
+		spin_unlock_irqrestore(&hwblk_lock, flags);
+
+		if (pdev) {
+			mutex_lock(&pdev->archdata.mutex);
+			ret = __platform_pm_runtime_suspend(pdev);
+
+			/* at this point the platform device may be:
+			 * suspended: ret = 0, FLAG_SUSP set, clock stopped
+			 * failed: ret < 0, FLAG_IDLE set, clock stopped
+			 */
+			mutex_unlock(&pdev->archdata.mutex);
+		} else {
+			ret = -ENODEV;
+		}
+	} while (!ret);
+}
+
+/* this function gets called from cpuidle context when all devices in the
+ * main power domain are unused but some are counted as idle, ie the hwblk
+ * counter values are (HWBLK_CNT_USAGE == 0) && (HWBLK_CNT_IDLE != 0)
+ */
+void platform_pm_runtime_suspend_idle(void)
+{
+	queue_work(pm_wq, &hwblk_work);
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pdev_archdata *ad = &pdev->archdata;
+	unsigned long flags;
+	int hwblk = ad->hwblk_id;
+	int ret = 0;
+
+	dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk);
+
+	/* ignore off-chip platform devices */
+	if (!hwblk)
+		goto out;
+
+	/* interrupt context not allowed */
+	might_sleep();
+
+	/* catch misconfigured drivers not starting with resume */
+	if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* serialize */
+	mutex_lock(&ad->mutex);
+
+	/* disable clock */
+	hwblk_disable(hwblk_info, hwblk);
+
+	/* put device on idle list */
+	spin_lock_irqsave(&hwblk_lock, flags);
+	list_add_tail(&pdev->archdata.entry, &hwblk_idle_list);
+	__set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
+	spin_unlock_irqrestore(&hwblk_lock, flags);
+
+	/* increase idle count */
+	hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+
+	/* at this point the platform device is:
+	 * idle: ret = 0, FLAG_IDLE set, clock stopped
+	 */
+	mutex_unlock(&ad->mutex);
+
+out:
+	dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n",
+		hwblk, ret);
+
+	return ret;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pdev_archdata *ad = &pdev->archdata;
+	int hwblk = ad->hwblk_id;
+	int ret = 0;
+
+	dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk);
+
+	/* ignore off-chip platform devices */
+	if (!hwblk)
+		goto out;
+
+	/* interrupt context not allowed */
+	might_sleep();
+
+	/* serialize */
+	mutex_lock(&ad->mutex);
+
+	/* make sure device is removed from idle list */
+	platform_pm_runtime_not_idle(pdev);
+
+	/* decrease idle count */
+	if (!test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags) &&
+	    !test_bit(PDEV_ARCHDATA_FLAG_SUSP, &pdev->archdata.flags))
+		hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+
+	/* resume the device if needed */
+	ret = __platform_pm_runtime_resume(pdev);
+
+	/* the driver has been initialized now, so clear the init flag */
+	clear_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+
+	/* at this point the platform device may be:
+	 * resumed: ret = 0, flags = 0, clock started
+	 * failed: ret < 0, FLAG_SUSP set, clock stopped
+	 */
+	mutex_unlock(&ad->mutex);
+out:
+	dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n",
+		hwblk, ret);
+
+	return ret;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int hwblk = pdev->archdata.hwblk_id;
+	int ret = 0;
+
+	dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk);
+
+	/* ignore off-chip platform devices */
+	if (!hwblk)
+		goto out;
+
+	/* interrupt context not allowed, use pm_runtime_put()! */
+	might_sleep();
+
+	/* suspend synchronously to disable clocks immediately */
+	ret = pm_runtime_suspend(dev);
+out:
+	dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk);
+	return ret;
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+			       unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct platform_device *pdev = to_platform_device(dev);
+	int hwblk = pdev->archdata.hwblk_id;
+
+	/* ignore off-chip platform devices */
+	if (!hwblk)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		INIT_LIST_HEAD(&pdev->archdata.entry);
+		mutex_init(&pdev->archdata.mutex);
+		/* platform devices without drivers should be disabled */
+		hwblk_enable(hwblk_info, hwblk);
+		hwblk_disable(hwblk_info, hwblk);
+		/* make sure driver re-inits itself once */
+		__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+		break;
+	/* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
+	case BUS_NOTIFY_BOUND_DRIVER:
+		/* keep track of number of devices in use per hwblk */
+		hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
+		break;
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		/* keep track of number of devices in use per hwblk */
+		hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
+		/* make sure driver re-inits itself once */
+		__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block platform_bus_notifier = {
+	.notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+	INIT_WORK(&hwblk_work, platform_pm_runtime_work);
+
+	bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+	return 0;
+}
+core_initcall(sh_pm_runtime_init);
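From a driver's point of view the contract of this bus implementation is: the first runtime PM operation after the device is added must be a resume (the INIT flag check above otherwise returns -EINVAL), and the driver-level dev_pm_ops runtime callbacks are invoked with the hwblk clock enabled. A minimal sketch of a client driver; the foo_* names are invented for illustration and do not correspond to anything in this patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* clock is still running here; save register state before it stops */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* clock has been re-enabled by the bus; restore register state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend	= foo_runtime_suspend,
	.runtime_resume		= foo_runtime_resume,
};

static int foo_probe(struct platform_device *pdev)
{
	/* start powered up: the first runtime PM operation must be a resume */
	pm_runtime_resume(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_init);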
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index 5d888ef53d82..e6aac65f5750 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -16,35 +16,147 @@
 #include <asm/asm-offsets.h>
 #include <asm/suspend.h>
 
-/* manage self-refresh and enter standby mode.
+/*
+ * Kernel mode register usage, see entry.S:
+ *	k0	scratch
+ *	k1	scratch
+ */
+#define k0	r0
+#define k1	r1
+
+/* manage self-refresh and enter standby mode. must be self-contained.
  * this code will be copied to on-chip memory and executed from there.
  */
+	.balign 4
+ENTRY(sh_mobile_sleep_enter_start)
+
+	/* save mode flags */
+	mov.l	r4, @(SH_SLEEP_MODE, r5)
+
+	/* save original vbr */
+	stc	vbr, r0
+	mov.l	r0, @(SH_SLEEP_VBR, r5)
+
+	/* point vbr to our on-chip memory page */
+	ldc	r5, vbr
+
+	/* save return address */
+	sts	pr, r0
+	mov.l	r0, @(SH_SLEEP_SPC, r5)
+
+	/* save sr */
+	stc	sr, r0
+	mov.l	r0, @(SH_SLEEP_SR, r5)
+
+	/* save general purpose registers to stack if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_REGS, r0
+	bt	skip_regs_save
+
+	sts.l	pr, @-r15
+	mov.l	r14, @-r15
+	mov.l	r13, @-r15
+	mov.l	r12, @-r15
+	mov.l	r11, @-r15
+	mov.l	r10, @-r15
+	mov.l	r9, @-r15
+	mov.l	r8, @-r15
+
+	/* make sure bank0 is selected, save low registers */
+	mov.l	rb_bit, r9
+	not	r9, r9
+	bsr	set_sr
+	 mov	#0, r10
+
+	bsr	save_low_regs
+	 nop
+
+	/* switch to bank 1, save low registers */
+	mov.l	rb_bit, r10
+	bsr	set_sr
+	 mov	#-1, r9
+
+	bsr	save_low_regs
+	 nop
+
+	/* switch back to bank 0 */
+	mov.l	rb_bit, r9
+	not	r9, r9
+	bsr	set_sr
+	 mov	#0, r10
+
+skip_regs_save:
+
+	/* save sp, also set to internal ram */
+	mov.l	r15, @(SH_SLEEP_SP, r5)
+	mov	r5, r15
+
+	/* save stbcr */
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_STBCR, r0
+
+	/* save mmu and cache context if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_MMU, r0
+	bt	skip_mmu_save_disable
+
+	/* save mmu state */
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_PTEH, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_PTEL, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_TTB, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_TEA, r0
 
-	.balign 4096,0,4096
-ENTRY(sh_mobile_standby)
-	mov	r4, r0
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_MMUCR, r0
 
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_PTEA, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_PASCR, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_IRMCR, r0
+
+	/* invalidate TLBs and disable the MMU */
+	bsr	get_register
+	 mov	#SH_SLEEP_REG_MMUCR, r0
+	mov	#4, r1
+	mov.l	r1, @r0
+	icbi	@r0
+
+	/* save cache registers and disable caches */
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_CCR, r0
+
+	bsr	save_register
+	 mov	#SH_SLEEP_REG_RAMCR, r0
+
+	bsr	get_register
+	 mov	#SH_SLEEP_REG_CCR, r0
+	mov	#0, r1
+	mov.l	r1, @r0
+	icbi	@r0
+
+skip_mmu_save_disable:
+	/* call self-refresh entering code if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
 	tst	#SUSP_SH_SF, r0
 	bt	skip_set_sf
 
-	/* SDRAM: disable power down and put in self-refresh mode */
-	mov.l	1f, r4
-	mov.l	2f, r1
-	mov.l	@r4, r2
-	or	r1, r2
-	mov.l	3f, r3
-	and	r3, r2
-	mov.l	r2, @r4
+	mov.l	@(SH_SLEEP_SF_PRE, r5), r0
+	jsr	@r0
+	 nop
 
 skip_set_sf:
-	tst	#SUSP_SH_SLEEP, r0
-	bt	test_standby
-
-	/* set mode to "sleep mode" */
-	bra	do_sleep
-	 mov	#0x00, r1
-
-test_standby:
+	mov.l	@(SH_SLEEP_MODE, r5), r0
 	tst	#SUSP_SH_STANDBY, r0
 	bt	test_rstandby
 
@@ -56,70 +168,238 @@ test_rstandby:
 	tst	#SUSP_SH_RSTANDBY, r0
 	bt	test_ustandby
 
+	/* setup BAR register */
+	bsr	get_register
+	 mov	#SH_SLEEP_REG_BAR, r0
+	mov.l	@(SH_SLEEP_RESUME, r5), r1
+	mov.l	r1, @r0
+
 	/* set mode to "r-standby mode" */
 	bra	do_sleep
 	 mov	#0x20, r1
 
 test_ustandby:
 	tst	#SUSP_SH_USTANDBY, r0
-	bt	done_sleep
+	bt	force_sleep
 
 	/* set mode to "u-standby mode" */
-	mov	#0x10, r1
+	bra	do_sleep
+	 mov	#0x10, r1
+
+force_sleep:
 
-	/* fall-through */
+	/* set mode to "sleep mode" */
+	mov	#0x00, r1
 
 do_sleep:
 	/* setup and enter selected standby mode */
-	mov.l	5f, r4
-	mov.l	r1, @r4
+	bsr	get_register
+	 mov	#SH_SLEEP_REG_STBCR, r0
+	mov.l	r1, @r0
+again:
 	sleep
+	bra	again
+	 nop
 
-done_sleep:
-	/* reset standby mode to sleep mode */
-	mov.l	5f, r4
-	mov	#0x00, r1
-	mov.l	r1, @r4
+save_register:
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r1
+	add	#-SH_SLEEP_BASE_ADDR, r0
+	mov.l	@r1, r1
+	add	#SH_SLEEP_BASE_DATA, r0
+	mov.l	r1, @(r0, r5)
+	add	#-SH_SLEEP_BASE_DATA, r0
+	rts
+	 nop
+
+get_register:
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r0
+	rts
+	 nop
 
+set_sr:
+	stc	sr, r8
+	and	r9, r8
+	or	r10, r8
+	ldc	r8, sr
+	rts
+	 nop
+
+save_low_regs:
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	rts
+	 mov.l	r0, @-r15
+
+	.balign 4
+rb_bit:	.long	0x20000000 ! RB=1
+
+ENTRY(sh_mobile_sleep_enter_end)
+
+	.balign 4
+ENTRY(sh_mobile_sleep_resume_start)
+
+	/* figure out start address */
+	bsr	0f
+	 nop
+0:
+	sts	pr, k1
+	mov.l	1f, k0
+	and	k0, k1
+
+	/* store pointer to data area in VBR */
+	ldc	k1, vbr
+
+	/* setup sr with saved sr */
+	mov.l	@(SH_SLEEP_SR, k1), k0
+	ldc	k0, sr
+
+	/* now: user register set! */
+	stc	vbr, r5
+
+	/* setup spc with return address to c code */
+	mov.l	@(SH_SLEEP_SPC, r5), r0
+	ldc	r0, spc
+
+	/* restore vbr */
+	mov.l	@(SH_SLEEP_VBR, r5), r0
+	ldc	r0, vbr
+
+	/* setup ssr with saved sr */
+	mov.l	@(SH_SLEEP_SR, r5), r0
+	ldc	r0, ssr
+
+	/* restore sp */
+	mov.l	@(SH_SLEEP_SP, r5), r15
+
+	/* restore sleep mode register */
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_STBCR, r0
+
+	/* call self-refresh resume code if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
 	tst	#SUSP_SH_SF, r0
 	bt	skip_restore_sf
 
-	/* SDRAM: set auto-refresh mode */
-	mov.l	1f, r4
-	mov.l	@r4, r2
-	mov.l	4f, r3
-	and	r3, r2
-	mov.l	r2, @r4
-	mov.l	6f, r4
-	mov.l	7f, r1
-	mov.l	8f, r2
-	mov.l	@r4, r3
-	mov	#-1, r4
-	add	r4, r3
-	or	r2, r3
-	mov.l	r3, @r1
+	mov.l	@(SH_SLEEP_SF_POST, r5), r0
+	jsr	@r0
+	 nop
+
 skip_restore_sf:
-	rts
+	/* restore mmu and cache state if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_MMU, r0
+	bt	skip_restore_mmu
+
+	/* restore mmu state */
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_PTEH, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_PTEL, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_TTB, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_TEA, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_PTEA, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_PASCR, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_IRMCR, r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_MMUCR, r0
+	icbi	@r0
+
+	/* restore cache settings */
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_RAMCR, r0
+	icbi	@r0
+
+	bsr	restore_register
+	 mov	#SH_SLEEP_REG_CCR, r0
+	icbi	@r0
+
+skip_restore_mmu:
+
+	/* restore general purpose registers if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_REGS, r0
+	bt	skip_restore_regs
+
+	/* switch to bank 1, restore low registers */
+	mov.l	_rb_bit, r10
+	bsr	_set_sr
+	 mov	#-1, r9
+
+	bsr	restore_low_regs
 	 nop
 
-	.balign 4
-1:	.long	0xfe400008 /* SDCR0 */
-2:	.long	0x00000400
-3:	.long	0xffff7fff
-4:	.long	0xfffffbff
-5:	.long	0xa4150020 /* STBCR */
-6:	.long	0xfe40001c /* RTCOR */
-7:	.long	0xfe400018 /* RTCNT */
-8:	.long	0xa55a0000
-
-/* interrupt vector @ 0x600 */
-	.balign	0x400,0,0x400
-	.long	0xdeadbeef
-	.balign	0x200,0,0x200
-	/* sh7722 will end up here in sleep mode */
+	/* switch to bank0, restore low registers */
+	mov.l	_rb_bit, r9
+	not	r9, r9
+	bsr	_set_sr
+	 mov	#0, r10
+
+	bsr	restore_low_regs
+	 nop
+
+	/* restore the rest of the registers */
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	lds.l	@r15+, pr
+
+skip_restore_regs:
 	rte
 	nop
-sh_mobile_standby_end:
 
-ENTRY(sh_mobile_standby_size)
-	.long sh_mobile_standby_end - sh_mobile_standby
+restore_register:
+	add	#SH_SLEEP_BASE_DATA, r0
+	mov.l	@(r0, r5), r1
+	add	#-SH_SLEEP_BASE_DATA, r0
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r0
+	mov.l	r1, @r0
+	rts
+	 nop
+
+_set_sr:
+	stc	sr, r8
+	and	r9, r8
+	or	r10, r8
+	ldc	r8, sr
+	rts
+	 nop
+
+restore_low_regs:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	rts
+	 mov.l	@r15+, r7
+
+	.balign 4
+_rb_bit:	.long	0x20000000 ! RB=1
+1:	.long	~0x7ff
+ENTRY(sh_mobile_sleep_resume_end)
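The assembly above addresses the on-chip data area through SH_SLEEP_* offsets generated by asm-offsets. The structure behind those offsets is not part of this diff; as a rough sketch (recalled from <asm/suspend.h> of the same series, so field names and order may differ), it is laid out roughly as follows, with the entry code copied directly behind it in on-chip RAM:

struct sh_sleep_regs {
	unsigned long stbcr;
	unsigned long bar;
	/* MMU */
	unsigned long pteh;
	unsigned long ptel;
	unsigned long ttb;
	unsigned long tea;
	unsigned long mmucr;
	unsigned long ptea;
	unsigned long pascr;
	unsigned long irmcr;
	/* Cache */
	unsigned long ccr;
	unsigned long ramcr;
};

struct sh_sleep_data {
	/* current sleep mode (SUSP_SH_...) */
	unsigned long mode;
	/* addresses of the board specific self-refresh snippets */
	unsigned long sf_pre;
	unsigned long sf_post;
	/* address of the copied resume code (also programmed into BAR) */
	unsigned long resume;
	/* cpu state saved by sh_mobile_sleep_enter_start */
	unsigned long vbr;
	unsigned long spc;
	unsigned long sr;
	unsigned long sp;
	/* register addresses, reached via SH_SLEEP_BASE_ADDR + SH_SLEEP_REG_* */
	struct sh_sleep_regs addr;
	/* saved register values, reached via SH_SLEEP_BASE_DATA + SH_SLEEP_REG_* */
	struct sh_sleep_regs data;
};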