aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/power/freezing-of-tasks.txt5
-rw-r--r--Documentation/power/runtime_pm.txt4
-rw-r--r--Documentation/trace/events-power.txt27
-rw-r--r--arch/arm/mach-omap2/pm34xx.c2
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/cpufreq/cpufreq.c1
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--include/linux/freezer.h5
-rw-r--r--include/linux/pm_runtime.h7
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/trace/events/power.h92
-rw-r--r--kernel/power/autosleep.c2
-rw-r--r--kernel/power/main.c29
-rw-r--r--kernel/power/process.c4
-rw-r--r--kernel/power/qos.c9
-rw-r--r--kernel/power/suspend.c69
-rw-r--r--kernel/trace/Kconfig15
-rw-r--r--kernel/trace/power-traces.c3
21 files changed, 125 insertions, 174 deletions
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 6ec291ea1c78..85894d83b352 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -223,3 +223,8 @@ since they ask the freezer to skip freezing this task, since it is anyway
223only after the entire suspend/hibernation sequence is complete. 223only after the entire suspend/hibernation sequence is complete.
224So, to summarize, use [un]lock_system_sleep() instead of directly using 224So, to summarize, use [un]lock_system_sleep() instead of directly using
225mutex_[un]lock(&pm_mutex). That would prevent freezing failures. 225mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
226
227V. Miscellaneous
228/sys/power/pm_freeze_timeout controls how long it will take at most to freeze
229all user space processes or all freezable kernel threads, in milliseconds.
230The default value is 20000, within the range of an unsigned integer.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 03591a750f99..6c9f5d9aa115 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -426,6 +426,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
426 'power.runtime_error' is set or 'power.disable_depth' is greater than 426 'power.runtime_error' is set or 'power.disable_depth' is greater than
427 zero) 427 zero)
428 428
429 bool pm_runtime_active(struct device *dev);
430 - return true if the device's runtime PM status is 'active' or its
431 'power.disable_depth' field is not equal to zero, or false otherwise
432
429 bool pm_runtime_suspended(struct device *dev); 433 bool pm_runtime_suspended(struct device *dev);
430 - return true if the device's runtime PM status is 'suspended' and its 434 - return true if the device's runtime PM status is 'suspended' and its
431 'power.disable_depth' field is equal to zero, or false otherwise 435 'power.disable_depth' field is equal to zero, or false otherwise
diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt
index cf794af22855..e1498ff8cf94 100644
--- a/Documentation/trace/events-power.txt
+++ b/Documentation/trace/events-power.txt
@@ -17,7 +17,7 @@ Cf. include/trace/events/power.h for the events definitions.
171. Power state switch events 171. Power state switch events
18============================ 18============================
19 19
201.1 New trace API 201.1 Trace API
21----------------- 21-----------------
22 22
23A 'cpu' event class gathers the CPU-related events: cpuidle and 23A 'cpu' event class gathers the CPU-related events: cpuidle and
@@ -41,31 +41,6 @@ The event which has 'state=4294967295' in the trace is very important to the use
41space tools which are using it to detect the end of the current state, and so to 41space tools which are using it to detect the end of the current state, and so to
42correctly draw the states diagrams and to calculate accurate statistics etc. 42correctly draw the states diagrams and to calculate accurate statistics etc.
43 43
441.2 DEPRECATED trace API
45------------------------
46
47A new Kconfig option CONFIG_EVENT_POWER_TRACING_DEPRECATED with the default value of
48'y' has been created. This allows the legacy trace power API to be used conjointly
49with the new trace API.
50The Kconfig option, the old trace API (in include/trace/events/power.h) and the
51old trace points will disappear in a future release (namely 2.6.41).
52
53power_start "type=%lu state=%lu cpu_id=%lu"
54power_frequency "type=%lu state=%lu cpu_id=%lu"
55power_end "cpu_id=%lu"
56
57The 'type' parameter takes one of those macros:
58 . POWER_NONE = 0,
59 . POWER_CSTATE = 1, /* C-State */
60 . POWER_PSTATE = 2, /* Frequency change or DVFS */
61
62The 'state' parameter is set depending on the type:
63 . Target C-state for type=POWER_CSTATE,
64 . Target frequency for type=POWER_PSTATE,
65
66power_end is used to indicate the exit of a state, corresponding to the latest
67power_start event.
68
692. Clocks events 442. Clocks events
70================ 45================
71The clock events are used for clock enable/disable and for 46The clock events are used for clock enable/disable and for
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc85..2d93d8b23835 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
351 if (omap_irq_pending()) 351 if (omap_irq_pending())
352 goto out; 352 goto out;
353 353
354 trace_power_start(POWER_CSTATE, 1, smp_processor_id());
355 trace_cpu_idle(1, smp_processor_id()); 354 trace_cpu_idle(1, smp_processor_id());
356 355
357 omap_sram_idle(); 356 omap_sram_idle();
358 357
359 trace_power_end(smp_processor_id());
360 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 358 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
361 359
362out: 360out:
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..dcfc1f410dc4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -375,7 +375,6 @@ void cpu_idle(void)
375 */ 375 */
376void default_idle(void) 376void default_idle(void)
377{ 377{
378 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
379 trace_cpu_idle_rcuidle(1, smp_processor_id()); 378 trace_cpu_idle_rcuidle(1, smp_processor_id());
380 current_thread_info()->status &= ~TS_POLLING; 379 current_thread_info()->status &= ~TS_POLLING;
381 /* 380 /*
@@ -389,7 +388,6 @@ void default_idle(void)
389 else 388 else
390 local_irq_enable(); 389 local_irq_enable();
391 current_thread_info()->status |= TS_POLLING; 390 current_thread_info()->status |= TS_POLLING;
392 trace_power_end_rcuidle(smp_processor_id());
393 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 391 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
394} 392}
395#ifdef CONFIG_APM_MODULE 393#ifdef CONFIG_APM_MODULE
@@ -423,7 +421,6 @@ void stop_this_cpu(void *dummy)
423static void mwait_idle(void) 421static void mwait_idle(void)
424{ 422{
425 if (!need_resched()) { 423 if (!need_resched()) {
426 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
427 trace_cpu_idle_rcuidle(1, smp_processor_id()); 424 trace_cpu_idle_rcuidle(1, smp_processor_id());
428 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) 425 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
429 clflush((void *)&current_thread_info()->flags); 426 clflush((void *)&current_thread_info()->flags);
@@ -434,7 +431,6 @@ static void mwait_idle(void)
434 __sti_mwait(0, 0); 431 __sti_mwait(0, 0);
435 else 432 else
436 local_irq_enable(); 433 local_irq_enable();
437 trace_power_end_rcuidle(smp_processor_id());
438 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 434 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
439 } else 435 } else
440 local_irq_enable(); 436 local_irq_enable();
@@ -447,12 +443,10 @@ static void mwait_idle(void)
447 */ 443 */
448static void poll_idle(void) 444static void poll_idle(void)
449{ 445{
450 trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
451 trace_cpu_idle_rcuidle(0, smp_processor_id()); 446 trace_cpu_idle_rcuidle(0, smp_processor_id());
452 local_irq_enable(); 447 local_irq_enable();
453 while (!need_resched()) 448 while (!need_resched())
454 cpu_relax(); 449 cpu_relax();
455 trace_power_end_rcuidle(smp_processor_id());
456 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 450 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
457} 451}
458 452
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index bd22f8667eed..908b02d5da1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -787,7 +787,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
787 787
788 acpi_irq_handler = handler; 788 acpi_irq_handler = handler;
789 acpi_irq_context = context; 789 acpi_irq_context = context;
790 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 790 if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
791 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 791 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
792 acpi_irq_handler = NULL; 792 acpi_irq_handler = NULL;
793 return AE_NOT_ACQUIRED; 793 return AE_NOT_ACQUIRED;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index acc3a8ded29d..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
433 */ 433 */
434void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 434void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
435{ 435{
436 if (!work_pending(&genpd->power_off_work)) 436 queue_work(pm_wq, &genpd->power_off_work);
437 queue_work(pm_wq, &genpd->power_off_work);
438} 437}
439 438
440/** 439/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
382{ 382{
383 unsigned int cec; 383 unsigned int cec;
384 384
385 /*
386 * active wakeup source should bring the system
387 * out of PM_SUSPEND_FREEZE state
388 */
389 freeze_wake();
390
385 ws->active = true; 391 ws->active = true;
386 ws->active_count++; 392 ws->active_count++;
387 ws->last_time = ktime_get(); 393 ws->last_time = ktime_get();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..99faadf454ec 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -294,7 +294,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
294 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 294 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
295 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, 295 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
296 (unsigned long)freqs->cpu); 296 (unsigned long)freqs->cpu);
297 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
298 trace_cpu_frequency(freqs->new, freqs->cpu); 297 trace_cpu_frequency(freqs->new, freqs->cpu);
299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 298 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
300 CPUFREQ_POSTCHANGE, freqs); 299 CPUFREQ_POSTCHANGE, freqs);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e1f6860e069c..eba69290e074 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -144,7 +144,6 @@ int cpuidle_idle_call(void)
144 return 0; 144 return 0;
145 } 145 }
146 146
147 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
148 trace_cpu_idle_rcuidle(next_state, dev->cpu); 147 trace_cpu_idle_rcuidle(next_state, dev->cpu);
149 148
150 if (cpuidle_state_is_coupled(dev, drv, next_state)) 149 if (cpuidle_state_is_coupled(dev, drv, next_state))
@@ -153,7 +152,6 @@ int cpuidle_idle_call(void)
153 else 152 else
154 entered_state = cpuidle_enter_state(dev, drv, next_state); 153 entered_state = cpuidle_enter_state(dev, drv, next_state);
155 154
156 trace_power_end_rcuidle(dev->cpu);
157 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 155 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
158 156
159 /* give the governor an opportunity to reflect on the outcome */ 157 /* give the governor an opportunity to reflect on the outcome */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e4238ceaa4d6..e70df40d84f6 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -13,6 +13,11 @@ extern bool pm_freezing; /* PM freezing in effect */
13extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ 13extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
14 14
15/* 15/*
16 * Timeout for stopping processes
17 */
18extern unsigned int freeze_timeout_msecs;
19
20/*
16 * Check if a process has been frozen 21 * Check if a process has been frozen
17 */ 22 */
18static inline bool frozen(struct task_struct *p) 23static inline bool frozen(struct task_struct *p)
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index f271860c78d5..c785c215abfc 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -80,6 +80,12 @@ static inline bool pm_runtime_suspended(struct device *dev)
80 && !dev->power.disable_depth; 80 && !dev->power.disable_depth;
81} 81}
82 82
83static inline bool pm_runtime_active(struct device *dev)
84{
85 return dev->power.runtime_status == RPM_ACTIVE
86 || dev->power.disable_depth;
87}
88
83static inline bool pm_runtime_status_suspended(struct device *dev) 89static inline bool pm_runtime_status_suspended(struct device *dev)
84{ 90{
85 return dev->power.runtime_status == RPM_SUSPENDED; 91 return dev->power.runtime_status == RPM_SUSPENDED;
@@ -132,6 +138,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
132static inline bool device_run_wake(struct device *dev) { return false; } 138static inline bool device_run_wake(struct device *dev) { return false; }
133static inline void device_set_run_wake(struct device *dev, bool enable) {} 139static inline void device_set_run_wake(struct device *dev, bool enable) {}
134static inline bool pm_runtime_suspended(struct device *dev) { return false; } 140static inline bool pm_runtime_suspended(struct device *dev) { return false; }
141static inline bool pm_runtime_active(struct device *dev) { return true; }
135static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } 142static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
136static inline bool pm_runtime_enabled(struct device *dev) { return false; } 143static inline bool pm_runtime_enabled(struct device *dev) { return false; }
137 144
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c808d7fa579..d4e3f16d5e89 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -34,8 +34,10 @@ static inline void pm_restore_console(void)
34typedef int __bitwise suspend_state_t; 34typedef int __bitwise suspend_state_t;
35 35
36#define PM_SUSPEND_ON ((__force suspend_state_t) 0) 36#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
37#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1) 37#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
38#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
38#define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 39#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
40#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
39#define PM_SUSPEND_MAX ((__force suspend_state_t) 4) 41#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
40 42
41enum suspend_stat_step { 43enum suspend_stat_step {
@@ -192,6 +194,7 @@ struct platform_suspend_ops {
192 */ 194 */
193extern void suspend_set_ops(const struct platform_suspend_ops *ops); 195extern void suspend_set_ops(const struct platform_suspend_ops *ops);
194extern int suspend_valid_only_mem(suspend_state_t state); 196extern int suspend_valid_only_mem(suspend_state_t state);
197extern void freeze_wake(void);
195 198
196/** 199/**
197 * arch_suspend_disable_irqs - disable IRQs for suspend 200 * arch_suspend_disable_irqs - disable IRQs for suspend
@@ -217,6 +220,7 @@ extern int pm_suspend(suspend_state_t state);
217 220
218static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 221static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
219static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 222static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
223static inline void freeze_wake(void) {}
220#endif /* !CONFIG_SUSPEND */ 224#endif /* !CONFIG_SUSPEND */
221 225
222/* struct pbe is used for creating lists of pages that should be restored 226/* struct pbe is used for creating lists of pages that should be restored
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 0c9783841a30..427acab5d69a 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -99,98 +99,6 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
99 TP_ARGS(name, state) 99 TP_ARGS(name, state)
100); 100);
101 101
102#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
103
104/*
105 * The power events are used for cpuidle & suspend (power_start, power_end)
106 * and for cpufreq (power_frequency)
107 */
108DECLARE_EVENT_CLASS(power,
109
110 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
111
112 TP_ARGS(type, state, cpu_id),
113
114 TP_STRUCT__entry(
115 __field( u64, type )
116 __field( u64, state )
117 __field( u64, cpu_id )
118 ),
119
120 TP_fast_assign(
121 __entry->type = type;
122 __entry->state = state;
123 __entry->cpu_id = cpu_id;
124 ),
125
126 TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
127 (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
128);
129
130DEFINE_EVENT(power, power_start,
131
132 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
133
134 TP_ARGS(type, state, cpu_id)
135);
136
137DEFINE_EVENT(power, power_frequency,
138
139 TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
140
141 TP_ARGS(type, state, cpu_id)
142);
143
144TRACE_EVENT(power_end,
145
146 TP_PROTO(unsigned int cpu_id),
147
148 TP_ARGS(cpu_id),
149
150 TP_STRUCT__entry(
151 __field( u64, cpu_id )
152 ),
153
154 TP_fast_assign(
155 __entry->cpu_id = cpu_id;
156 ),
157
158 TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
159
160);
161
162/* Deprecated dummy functions must be protected against multi-declaration */
163#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
164#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
165
166enum {
167 POWER_NONE = 0,
168 POWER_CSTATE = 1,
169 POWER_PSTATE = 2,
170};
171#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
172
173#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
174
175#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
176#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
177enum {
178 POWER_NONE = 0,
179 POWER_CSTATE = 1,
180 POWER_PSTATE = 2,
181};
182
183/* These dummy declarations have to be ripped out when the deprecated
184 events get removed */
185static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
186static inline void trace_power_end(u64 cpuid) {};
187static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
188static inline void trace_power_end_rcuidle(u64 cpuid) {};
189static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
190#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
191
192#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
193
194/* 102/*
195 * The clock events are used for clock enable/disable and for 103 * The clock events are used for clock enable/disable and for
196 * clock rate change 104 * clock rate change
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index ca304046d9e2..c6422ffeda9a 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -66,7 +66,7 @@ static DECLARE_WORK(suspend_work, try_to_suspend);
66 66
67void queue_up_suspend_work(void) 67void queue_up_suspend_work(void)
68{ 68{
69 if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON) 69 if (autosleep_state > PM_SUSPEND_ON)
70 queue_work(autosleep_wq, &suspend_work); 70 queue_work(autosleep_wq, &suspend_work);
71} 71}
72 72
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 1c16f9167de1..d77663bfedeb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -313,7 +313,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
313static suspend_state_t decode_state(const char *buf, size_t n) 313static suspend_state_t decode_state(const char *buf, size_t n)
314{ 314{
315#ifdef CONFIG_SUSPEND 315#ifdef CONFIG_SUSPEND
316 suspend_state_t state = PM_SUSPEND_STANDBY; 316 suspend_state_t state = PM_SUSPEND_MIN;
317 const char * const *s; 317 const char * const *s;
318#endif 318#endif
319 char *p; 319 char *p;
@@ -553,6 +553,30 @@ power_attr(pm_trace_dev_match);
553 553
554#endif /* CONFIG_PM_TRACE */ 554#endif /* CONFIG_PM_TRACE */
555 555
556#ifdef CONFIG_FREEZER
557static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
558 struct kobj_attribute *attr, char *buf)
559{
560 return sprintf(buf, "%u\n", freeze_timeout_msecs);
561}
562
563static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
564 struct kobj_attribute *attr,
565 const char *buf, size_t n)
566{
567 unsigned long val;
568
569 if (kstrtoul(buf, 10, &val))
570 return -EINVAL;
571
572 freeze_timeout_msecs = val;
573 return n;
574}
575
576power_attr(pm_freeze_timeout);
577
578#endif /* CONFIG_FREEZER*/
579
556static struct attribute * g[] = { 580static struct attribute * g[] = {
557 &state_attr.attr, 581 &state_attr.attr,
558#ifdef CONFIG_PM_TRACE 582#ifdef CONFIG_PM_TRACE
@@ -576,6 +600,9 @@ static struct attribute * g[] = {
576 &pm_print_times_attr.attr, 600 &pm_print_times_attr.attr,
577#endif 601#endif
578#endif 602#endif
603#ifdef CONFIG_FREEZER
604 &pm_freeze_timeout_attr.attr,
605#endif
579 NULL, 606 NULL,
580}; 607};
581 608
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d5a258b60c6f..98088e0e71e8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -21,7 +21,7 @@
21/* 21/*
22 * Timeout for stopping processes 22 * Timeout for stopping processes
23 */ 23 */
24#define TIMEOUT (20 * HZ) 24unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
25 25
26static int try_to_freeze_tasks(bool user_only) 26static int try_to_freeze_tasks(bool user_only)
27{ 27{
@@ -36,7 +36,7 @@ static int try_to_freeze_tasks(bool user_only)
36 36
37 do_gettimeofday(&start); 37 do_gettimeofday(&start);
38 38
39 end_time = jiffies + TIMEOUT; 39 end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
40 40
41 if (!user_only) 41 if (!user_only)
42 freeze_workqueues_begin(); 42 freeze_workqueues_begin();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 9322ff7eaad6..587dddeebf15 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -359,8 +359,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
359 return; 359 return;
360 } 360 }
361 361
362 if (delayed_work_pending(&req->work)) 362 cancel_delayed_work_sync(&req->work);
363 cancel_delayed_work_sync(&req->work);
364 363
365 if (new_value != req->node.prio) 364 if (new_value != req->node.prio)
366 pm_qos_update_target( 365 pm_qos_update_target(
@@ -386,8 +385,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
386 "%s called for unknown object.", __func__)) 385 "%s called for unknown object.", __func__))
387 return; 386 return;
388 387
389 if (delayed_work_pending(&req->work)) 388 cancel_delayed_work_sync(&req->work);
390 cancel_delayed_work_sync(&req->work);
391 389
392 if (new_value != req->node.prio) 390 if (new_value != req->node.prio)
393 pm_qos_update_target( 391 pm_qos_update_target(
@@ -416,8 +414,7 @@ void pm_qos_remove_request(struct pm_qos_request *req)
416 return; 414 return;
417 } 415 }
418 416
419 if (delayed_work_pending(&req->work)) 417 cancel_delayed_work_sync(&req->work);
420 cancel_delayed_work_sync(&req->work);
421 418
422 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, 419 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
423 &req->node, PM_QOS_REMOVE_REQ, 420 &req->node, PM_QOS_REMOVE_REQ,
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c8b7446b27df..d4feda084a3a 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -30,12 +30,38 @@
30#include "power.h" 30#include "power.h"
31 31
32const char *const pm_states[PM_SUSPEND_MAX] = { 32const char *const pm_states[PM_SUSPEND_MAX] = {
33 [PM_SUSPEND_FREEZE] = "freeze",
33 [PM_SUSPEND_STANDBY] = "standby", 34 [PM_SUSPEND_STANDBY] = "standby",
34 [PM_SUSPEND_MEM] = "mem", 35 [PM_SUSPEND_MEM] = "mem",
35}; 36};
36 37
37static const struct platform_suspend_ops *suspend_ops; 38static const struct platform_suspend_ops *suspend_ops;
38 39
40static bool need_suspend_ops(suspend_state_t state)
41{
42 return !!(state > PM_SUSPEND_FREEZE);
43}
44
45static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
46static bool suspend_freeze_wake;
47
48static void freeze_begin(void)
49{
50 suspend_freeze_wake = false;
51}
52
53static void freeze_enter(void)
54{
55 wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
56}
57
58void freeze_wake(void)
59{
60 suspend_freeze_wake = true;
61 wake_up(&suspend_freeze_wait_head);
62}
63EXPORT_SYMBOL_GPL(freeze_wake);
64
39/** 65/**
40 * suspend_set_ops - Set the global suspend method table. 66 * suspend_set_ops - Set the global suspend method table.
41 * @ops: Suspend operations to use. 67 * @ops: Suspend operations to use.
@@ -50,8 +76,11 @@ EXPORT_SYMBOL_GPL(suspend_set_ops);
50 76
51bool valid_state(suspend_state_t state) 77bool valid_state(suspend_state_t state)
52{ 78{
79 if (state == PM_SUSPEND_FREEZE)
80 return true;
53 /* 81 /*
54 * All states need lowlevel support and need to be valid to the lowlevel 82 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
83 * support and need to be valid to the lowlevel
55 * implementation, no valid callback implies that none are valid. 84 * implementation, no valid callback implies that none are valid.
56 */ 85 */
57 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); 86 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
@@ -89,11 +118,11 @@ static int suspend_test(int level)
89 * hibernation). Run suspend notifiers, allocate the "suspend" console and 118 * hibernation). Run suspend notifiers, allocate the "suspend" console and
90 * freeze processes. 119 * freeze processes.
91 */ 120 */
92static int suspend_prepare(void) 121static int suspend_prepare(suspend_state_t state)
93{ 122{
94 int error; 123 int error;
95 124
96 if (!suspend_ops || !suspend_ops->enter) 125 if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
97 return -EPERM; 126 return -EPERM;
98 127
99 pm_prepare_console(); 128 pm_prepare_console();
@@ -137,7 +166,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
137{ 166{
138 int error; 167 int error;
139 168
140 if (suspend_ops->prepare) { 169 if (need_suspend_ops(state) && suspend_ops->prepare) {
141 error = suspend_ops->prepare(); 170 error = suspend_ops->prepare();
142 if (error) 171 if (error)
143 goto Platform_finish; 172 goto Platform_finish;
@@ -149,12 +178,23 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
149 goto Platform_finish; 178 goto Platform_finish;
150 } 179 }
151 180
152 if (suspend_ops->prepare_late) { 181 if (need_suspend_ops(state) && suspend_ops->prepare_late) {
153 error = suspend_ops->prepare_late(); 182 error = suspend_ops->prepare_late();
154 if (error) 183 if (error)
155 goto Platform_wake; 184 goto Platform_wake;
156 } 185 }
157 186
187 /*
188 * PM_SUSPEND_FREEZE equals
189 * frozen processes + suspended devices + idle processors.
190 * Thus we should invoke freeze_enter() soon after
191 * all the devices are suspended.
192 */
193 if (state == PM_SUSPEND_FREEZE) {
194 freeze_enter();
195 goto Platform_wake;
196 }
197
158 if (suspend_test(TEST_PLATFORM)) 198 if (suspend_test(TEST_PLATFORM))
159 goto Platform_wake; 199 goto Platform_wake;
160 200
@@ -182,13 +222,13 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
182 enable_nonboot_cpus(); 222 enable_nonboot_cpus();
183 223
184 Platform_wake: 224 Platform_wake:
185 if (suspend_ops->wake) 225 if (need_suspend_ops(state) && suspend_ops->wake)
186 suspend_ops->wake(); 226 suspend_ops->wake();
187 227
188 dpm_resume_start(PMSG_RESUME); 228 dpm_resume_start(PMSG_RESUME);
189 229
190 Platform_finish: 230 Platform_finish:
191 if (suspend_ops->finish) 231 if (need_suspend_ops(state) && suspend_ops->finish)
192 suspend_ops->finish(); 232 suspend_ops->finish();
193 233
194 return error; 234 return error;
@@ -203,11 +243,11 @@ int suspend_devices_and_enter(suspend_state_t state)
203 int error; 243 int error;
204 bool wakeup = false; 244 bool wakeup = false;
205 245
206 if (!suspend_ops) 246 if (need_suspend_ops(state) && !suspend_ops)
207 return -ENOSYS; 247 return -ENOSYS;
208 248
209 trace_machine_suspend(state); 249 trace_machine_suspend(state);
210 if (suspend_ops->begin) { 250 if (need_suspend_ops(state) && suspend_ops->begin) {
211 error = suspend_ops->begin(state); 251 error = suspend_ops->begin(state);
212 if (error) 252 if (error)
213 goto Close; 253 goto Close;
@@ -226,7 +266,7 @@ int suspend_devices_and_enter(suspend_state_t state)
226 266
227 do { 267 do {
228 error = suspend_enter(state, &wakeup); 268 error = suspend_enter(state, &wakeup);
229 } while (!error && !wakeup 269 } while (!error && !wakeup && need_suspend_ops(state)
230 && suspend_ops->suspend_again && suspend_ops->suspend_again()); 270 && suspend_ops->suspend_again && suspend_ops->suspend_again());
231 271
232 Resume_devices: 272 Resume_devices:
@@ -236,13 +276,13 @@ int suspend_devices_and_enter(suspend_state_t state)
236 ftrace_start(); 276 ftrace_start();
237 resume_console(); 277 resume_console();
238 Close: 278 Close:
239 if (suspend_ops->end) 279 if (need_suspend_ops(state) && suspend_ops->end)
240 suspend_ops->end(); 280 suspend_ops->end();
241 trace_machine_suspend(PWR_EVENT_EXIT); 281 trace_machine_suspend(PWR_EVENT_EXIT);
242 return error; 282 return error;
243 283
244 Recover_platform: 284 Recover_platform:
245 if (suspend_ops->recover) 285 if (need_suspend_ops(state) && suspend_ops->recover)
246 suspend_ops->recover(); 286 suspend_ops->recover();
247 goto Resume_devices; 287 goto Resume_devices;
248} 288}
@@ -278,12 +318,15 @@ static int enter_state(suspend_state_t state)
278 if (!mutex_trylock(&pm_mutex)) 318 if (!mutex_trylock(&pm_mutex))
279 return -EBUSY; 319 return -EBUSY;
280 320
321 if (state == PM_SUSPEND_FREEZE)
322 freeze_begin();
323
281 printk(KERN_INFO "PM: Syncing filesystems ... "); 324 printk(KERN_INFO "PM: Syncing filesystems ... ");
282 sys_sync(); 325 sys_sync();
283 printk("done.\n"); 326 printk("done.\n");
284 327
285 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 328 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
286 error = suspend_prepare(); 329 error = suspend_prepare(state);
287 if (error) 330 if (error)
288 goto Unlock; 331 goto Unlock;
289 332
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5d89335a485f..ad0a067ad4b3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -78,21 +78,6 @@ config EVENT_TRACING
78 select CONTEXT_SWITCH_TRACER 78 select CONTEXT_SWITCH_TRACER
79 bool 79 bool
80 80
81config EVENT_POWER_TRACING_DEPRECATED
82 depends on EVENT_TRACING
83 bool "Deprecated power event trace API, to be removed"
84 default y
85 help
86 Provides old power event types:
87 C-state/idle accounting events:
88 power:power_start
89 power:power_end
90 and old cpufreq accounting event:
91 power:power_frequency
92 This is for userspace compatibility
93 and will vanish after 5 kernel iterations,
94 namely 3.1.
95
96config CONTEXT_SWITCH_TRACER 81config CONTEXT_SWITCH_TRACER
97 bool 82 bool
98 83
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index f55fcf61b223..1c71382b283d 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -13,8 +13,5 @@
13#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
14#include <trace/events/power.h> 14#include <trace/events/power.h>
15 15
16#ifdef EVENT_POWER_TRACING_DEPRECATED
17EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
18#endif
19EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); 16EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
20 17