author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-08 16:10:57 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-08 16:10:57 -0500
commit    eb59c505f8a5906ad2e053d14fab50eb8574fd6f (patch)
tree      c6e875adc12b481b916e847e8f80b8881a0fb02c /include
parent    1619ed8f60959829d070d8f39cd2f8ca0e7135ce (diff)
parent    c233523b3d392e530033a7587d7970dc62a02361 (diff)
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  PM / Hibernate: Implement compat_ioctl for /dev/snapshot
  PM / Freezer: fix return value of freezable_schedule_timeout_killable()
  PM / shmobile: Allow the A4R domain to be turned off at run time
  PM / input / touchscreen: Make st1232 use device PM QoS constraints
  PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
  PM / shmobile: Remove the stay_on flag from SH7372's PM domains
  PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
  PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  ARM: S3C64XX: Implement basic power domain support
  PM / shmobile: Use common always on power domain governor
  ...

Fix up trivial conflict in fs/xfs/xfs_buf.c due to removal of unused
XBT_FORCE_SLEEP bit
Diffstat (limited to 'include')
-rw-r--r--  include/linux/freezer.h           159
-rw-r--r--  include/linux/kmod.h                2
-rw-r--r--  include/linux/kthread.h             1
-rw-r--r--  include/linux/platform_device.h    30
-rw-r--r--  include/linux/pm.h                 15
-rw-r--r--  include/linux/pm_domain.h         103
-rw-r--r--  include/linux/pm_qos.h              8
-rw-r--r--  include/linux/pm_runtime.h          5
-rw-r--r--  include/linux/sched.h               4
-rw-r--r--  include/linux/sh_intc.h             1
-rw-r--r--  include/linux/suspend.h            35
11 files changed, 210 insertions, 153 deletions
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee756..0ab54e16a91f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
 
 /*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
  */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete. However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
  */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
}
 
 /*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
  */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	long __retval;							\
+	freezer_do_not_count();						\
+	__retval = schedule_timeout_killable(timeout);			\
+	freezer_count();						\
+	__retval;							\
+})
 
 /*
 * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq,			\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
+
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 	wait_event_interruptible(wq, condition)
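
For illustration only (not part of this merge), a minimal sketch of how a driver's kernel thread might use the reworked freezer interface above; the wait queue, flag and thread names are hypothetical.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_work_pending;

static int demo_thread(void *unused)
{
	set_freezable();	/* kthreads stay PF_NOFREEZE unless they opt in */

	while (!kthread_should_stop()) {
		/* Sleeps without blocking freeze_processes(); a pending freeze
		 * request is honoured inside the macro via try_to_freeze(). */
		wait_event_freezable(demo_wq,
				     demo_work_pending || kthread_should_stop());

		if (demo_work_pending) {
			demo_work_pending = false;
			/* ... process the queued work ... */
		}
	}
	return 0;
}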
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f65390734..722f477c4ef7 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
 extern int usermodehelper_disable(void);
 extern void usermodehelper_enable(void);
 extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
 
 #endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b3a266..0714b24c0e45 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);
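
kthread_freezable_should_stop() is the freezable counterpart of kthread_should_stop(): it handles a pending freeze for the calling thread and, through the optional bool pointer, reports whether the thread was actually frozen. A hedged sketch of the intended main-loop shape (thread and work are hypothetical, not from this merge):

static int demo_kthread(void *unused)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen) {
			/* e.g. re-validate hardware state after a thaw */
			continue;
		}

		/* ... one unit of work, then a freezer-friendly sleep ... */
		freezable_schedule_timeout_killable(HZ);
	}
	return 0;
}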
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 5622fa24e97b..60e9994ef405 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -256,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
 }
 #endif /* MODULE */
 
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare NULL
-#define platform_pm_complete NULL
-#endif
-
 #ifdef CONFIG_SUSPEND
 extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
 extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
 #else
 #define platform_pm_suspend NULL
 #define platform_pm_resume NULL
-#define platform_pm_suspend_noirq NULL
-#define platform_pm_resume_noirq NULL
 #endif
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
 extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
 extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
 extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
 #else
 #define platform_pm_freeze NULL
 #define platform_pm_thaw NULL
 #define platform_pm_poweroff NULL
 #define platform_pm_restore NULL
-#define platform_pm_freeze_noirq NULL
-#define platform_pm_thaw_noirq NULL
-#define platform_pm_poweroff_noirq NULL
-#define platform_pm_restore_noirq NULL
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define USE_PLATFORM_PM_SLEEP_OPS \
-	.prepare = platform_pm_prepare, \
-	.complete = platform_pm_complete, \
 	.suspend = platform_pm_suspend, \
 	.resume = platform_pm_resume, \
 	.freeze = platform_pm_freeze, \
 	.thaw = platform_pm_thaw, \
 	.poweroff = platform_pm_poweroff, \
-	.restore = platform_pm_restore, \
-	.suspend_noirq = platform_pm_suspend_noirq, \
-	.resume_noirq = platform_pm_resume_noirq, \
-	.freeze_noirq = platform_pm_freeze_noirq, \
-	.thaw_noirq = platform_pm_thaw_noirq, \
-	.poweroff_noirq = platform_pm_poweroff_noirq, \
-	.restore_noirq = platform_pm_restore_noirq,
+	.restore = platform_pm_restore,
 #else
 #define USE_PLATFORM_PM_SLEEP_OPS
 #endif
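
With the forward-only callbacks gone from the platform bus type, a platform driver that needs sleep handling supplies its own dev_pm_ops and the PM core invokes it directly (per "PM: Run the driver callback directly if the subsystem one is not there" in the merge log). An illustrative sketch, assuming a hypothetical driver not present in this merge:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* re-enable the hardware */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo-device",
		.owner	= THIS_MODULE,
		.pm	= &demo_pm_ops,
	},
};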
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3f3ed83a9aa5..e4982ac3fbbc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS NULL
-#endif
-
 /**
  * PM_EVENT_ messages
  *
@@ -521,6 +508,8 @@ struct dev_pm_info {
 	unsigned long active_jiffies;
 	unsigned long suspended_jiffies;
 	unsigned long accounting_timestamp;
+	ktime_t suspend_time;
+	s64 max_time_suspended_ns;
 #endif
 	struct pm_subsys_data *subsys_data;  /* Owned by the subsystem. */
 	struct pm_qos_constraints *constraints;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 65633e5a2bc0..a03a0ad998b8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -10,6 +10,7 @@
 #define _LINUX_PM_DOMAIN_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {
 
 struct dev_power_governor {
 	bool (*power_down_ok)(struct dev_pm_domain *domain);
+	bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+	int (*start)(struct device *dev);
+	int (*stop)(struct device *dev);
+	int (*save_state)(struct device *dev);
+	int (*restore_state)(struct device *dev);
+	int (*suspend)(struct device *dev);
+	int (*suspend_late)(struct device *dev);
+	int (*resume_early)(struct device *dev);
+	int (*resume)(struct device *dev);
+	int (*freeze)(struct device *dev);
+	int (*freeze_late)(struct device *dev);
+	int (*thaw_early)(struct device *dev);
+	int (*thaw)(struct device *dev);
+	bool (*active_wakeup)(struct device *dev);
 };
 
 struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
+	char *name;
 	unsigned int in_progress;	/* Number of devices being suspended now */
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
 	bool suspend_power_off;	/* Power status before system suspend */
 	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
+	s64 power_off_latency_ns;
 	int (*power_on)(struct generic_pm_domain *domain);
-	int (*start_device)(struct device *dev);
-	int (*stop_device)(struct device *dev);
-	bool (*active_wakeup)(struct device *dev);
+	s64 power_on_latency_ns;
+	struct gpd_dev_ops dev_ops;
+	s64 break_even_ns;	/* Power break even for the entire domain. */
+	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
+	ktime_t power_off_time;
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
 	struct list_head slave_node;
 };
 
+struct gpd_timing_data {
+	s64 stop_latency_ns;
+	s64 start_latency_ns;
+	s64 save_state_latency_ns;
+	s64 restore_state_latency_ns;
+	s64 break_even_ns;
+};
+
 struct generic_pm_domain_data {
 	struct pm_domain_data base;
+	struct gpd_dev_ops ops;
+	struct gpd_timing_data td;
 	bool need_restore;
 };
 
@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
-			       struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+	return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+				 struct device *dev,
+				 struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+				  struct gpd_dev_ops *ops,
+				  struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
+
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
 #else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+					struct device *dev,
+					struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
 				      struct device *dev)
 {
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
-				 struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+					 struct gpd_dev_ops *ops,
+					 struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+	return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
+{
+}
 static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline bool default_stop_ok(struct device *dev)
+{
+	return false;
+}
+#define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+	return __pm_genpd_remove_callbacks(dev, true);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
 extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
 extern void pm_genpd_poweroff_unused(void);
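
As an illustrative sketch only (the domain, callbacks and latency values below are made up, not taken from this merge), the extended interface is meant to be used roughly like this: a platform registers a domain with pm_genpd_init(), then attaches each device together with its timing data via __pm_genpd_add_device() so the governors declared above can weigh latencies against QoS constraints.

#include <linux/pm_domain.h>

static int demo_pd_power_off(struct generic_pm_domain *domain)
{
	/* ... write to the platform's power controller ... */
	return 0;
}

static int demo_pd_power_on(struct generic_pm_domain *domain)
{
	return 0;
}

static struct generic_pm_domain demo_pd = {
	.name			= "demo-a4x",
	.power_off		= demo_pd_power_off,
	.power_off_latency_ns	= 250000,
	.power_on		= demo_pd_power_on,
	.power_on_latency_ns	= 250000,
};

static struct gpd_timing_data demo_td = {
	.stop_latency_ns	  = 20000,
	.start_latency_ns	  = 50000,
	.save_state_latency_ns	  = 35000,
	.restore_state_latency_ns = 35000,
};

static void demo_pd_setup(struct device *dev)
{
	pm_genpd_init(&demo_pd, &simple_qos_governor, false);
	/* Attach the device with its latency data for the governor to use. */
	__pm_genpd_add_device(&demo_pd, dev, &demo_td);
}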
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 83b0ea302a80..e5bbcbaa6f57 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_request_active(struct pm_qos_request *req);
 s32 pm_qos_read_value(struct pm_qos_constraints *c);
 
+s32 __dev_pm_qos_read_value(struct device *dev);
 s32 dev_pm_qos_read_value(struct device *dev);
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
 int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req, s32 value);
 #else
 static inline int pm_qos_update_target(struct pm_qos_constraints *c,
 				       struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
 static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
 			{ return 0; }
 
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+			{ return 0; }
 static inline s32 dev_pm_qos_read_value(struct device *dev)
 			{ return 0; }
 static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
 {
 	dev->power.power_state = PMSG_INVALID;
 }
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+		    struct dev_pm_qos_request *req, s32 value)
+			{ return 0; }
 #endif
 
 #endif
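
dev_pm_qos_add_ancestor_request() registers a latency request against an ancestor of the given device rather than the device itself; the st1232 touchscreen patch in this series is its first user. A hedged usage sketch (the device, request object and value are hypothetical):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request demo_latency_req;

static void demo_start_io(struct device *dev)
{
	/* Ask the device's ancestor (e.g. its bus controller) to keep
	 * its latency within 100 us while I/O is in flight. */
	dev_pm_qos_add_ancestor_request(dev, &demo_latency_req, 100);
}

static void demo_stop_io(void)
{
	dev_pm_qos_remove_request(&demo_latency_req);
}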
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d3085e72a0ee..609daae7a014 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+						 s64 delta_ns);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
 static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+							s64 delta_ns) {}
+
 #endif /* !CONFIG_PM_RUNTIME */
 
 static inline int pm_runtime_idle(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cf0eb342bcba..ad93e1ec8c65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-	 (task->flags & PF_FREEZING) == 0)
+	 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value) \
 	do { (tsk)->state = (state_value); } while (0)
@@ -1787,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1803,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 5812fefbcedf..b160645f5599 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -95,6 +95,7 @@ struct intc_desc {
 	unsigned int num_resources;
 	intc_enum force_enable;
 	intc_enum force_disable;
+	bool skip_syscore_suspend;
 	struct intc_hw_desc hw;
 };
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a692432f8a..95040cc33107 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <linux/freezer.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
 #define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
 #define PM_POST_RESTORE		0x0006 /* Restore failed */
 
+extern struct mutex pm_mutex;
+
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
 extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+	freezer_do_not_count();
+	mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+	mutex_unlock(&pm_mutex);
+	freezer_count();
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri) do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
 
-#ifndef CONFIG_HIBERNATE_CALLBACKS
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
 
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
-	mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
-	mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
 /*
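
The relocated lock_system_sleep()/unlock_system_sleep() now mark the caller PF_FREEZER_SKIP around pm_mutex, so a task blocked on the lock does not hold up the freezer. An illustrative caller in the style of memory hot-add (the function is hypothetical, not part of this merge):

#include <linux/suspend.h>

static int demo_hot_add_region(void)
{
	int ret = 0;

	lock_system_sleep();	/* skipped by the freezer while waiting for pm_mutex */
	/* ... operate on state that must not change across suspend/hibernate ... */
	unlock_system_sleep();	/* rejoins the freezer and honours a pending request */

	return ret;
}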