-rw-r--r--  drivers/staging/android/binder.c    5
-rw-r--r--  fs/cifs/transport.c                 2
-rw-r--r--  fs/eventpoll.c                      4
-rw-r--r--  fs/nfs/inode.c                      2
-rw-r--r--  fs/nfs/nfs3proc.c                   2
-rw-r--r--  fs/nfs/nfs4proc.c                   4
-rw-r--r--  fs/select.c                         4
-rw-r--r--  include/linux/debug_locks.h         4
-rw-r--r--  include/linux/freezer.h             171
-rw-r--r--  kernel/exit.c                       2
-rw-r--r--  kernel/freezer.c                    12
-rw-r--r--  kernel/futex.c                      3
-rw-r--r--  kernel/hrtimer.c                    3
-rw-r--r--  kernel/lockdep.c                    17
-rw-r--r--  kernel/power/process.c              26
-rw-r--r--  kernel/signal.c                     2
-rw-r--r--  net/sunrpc/sched.c                  2
-rw-r--r--  net/unix/af_unix.c                  3
18 files changed, 200 insertions(+), 68 deletions(-)
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 1567ac296b39..1ffc2ebdf612 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/fdtable.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
@@ -2140,13 +2141,13 @@ retry:
 			if (!binder_has_proc_work(proc, thread))
 				ret = -EAGAIN;
 		} else
-			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
 	} else {
 		if (non_block) {
 			if (!binder_has_thread_work(thread))
 				ret = -EAGAIN;
 		} else
-			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
 	}
 
 	binder_lock(__func__);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index bfbf4700d160..b70aa7c91394 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -447,7 +447,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
 	int error;
 
-	error = wait_event_freezekillable(server->response_q,
+	error = wait_event_freezekillable_unsafe(server->response_q,
 				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 	if (error < 0)
 		return -ERESTARTSYS;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index deecc7294a67..0cff4434880d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1602,7 +1603,8 @@ fetch_events:
 		}
 
 		spin_unlock_irqrestore(&ep->lock, flags);
-		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+		if (!freezable_schedule_hrtimeout_range(to, slack,
+							HRTIMER_MODE_ABS))
 			timed_out = 1;
 
 		spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c1c7a9d78722..ce727047ee87 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -79,7 +79,7 @@ int nfs_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	freezable_schedule();
+	freezable_schedule_unsafe();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 43ea96ced28c..ce90eb4775c2 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -33,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d7ba5616989c..28241a42f363 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -268,7 +268,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	freezable_schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable_unsafe(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -4528,7 +4528,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	freezable_schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable_unsafe(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/select.c b/fs/select.c
index 8c1c96c27062..6b14dc7df3a4 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -236,7 +237,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 
 	set_current_state(state);
 	if (!pwq->triggered)
-		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+		rc = freezable_schedule_hrtimeout_range(expires, slack,
+							HRTIMER_MODE_ABS);
 	__set_current_state(TASK_RUNNING);
 
 	/*
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 21ca773f77bf..822c1354f3a6 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e70df40d84f6..7fd81b8c4897 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -46,7 +47,11 @@ extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 extern void thaw_kernel_threads(void);
 
-static inline bool try_to_freeze(void)
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning it means the caller may deadlock
+ */
+static inline bool try_to_freeze_unsafe(void)
 {
 	might_sleep();
 	if (likely(!freezing(current)))
@@ -54,6 +59,13 @@ static inline bool try_to_freeze(void)
 	return __refrigerator(false);
 }
 
+static inline bool try_to_freeze(void)
+{
+	if (!(current->flags & PF_NOFREEZE))
+		debug_check_no_locks_held();
+	return try_to_freeze_unsafe();
+}
+
 extern bool freeze_task(struct task_struct *p);
 extern bool set_freezable(void);
 
@@ -115,6 +127,14 @@ static inline void freezer_count(void)
 	try_to_freeze();
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+	current->flags &= ~PF_FREEZER_SKIP;
+	smp_mb();
+	try_to_freeze_unsafe();
+}
+
 /**
  * freezer_should_skip - whether to skip a task when determining frozen
  *			 state is reached
@@ -139,28 +159,86 @@ static inline bool freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * These macros are intended to be used whenever you want allow a sleeping
+ * These functions are intended to be used whenever you want to allow a sleeping
  * task to be frozen.  Note that none of them returns any clear indication of
  * whether a freeze event happened while in this function.
  */
 
 /* Like schedule(), but should not block the freezer. */
-#define freezable_schedule()						\
-({									\
-	freezer_do_not_count();						\
-	schedule();							\
-	freezer_count();						\
-})
+static inline void freezable_schedule(void)
+{
+	freezer_do_not_count();
+	schedule();
+	freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+	freezer_do_not_count();
+	schedule();
+	freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout(timeout);
+	freezer_count();
+	return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer.
+ * Do not call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_interruptible(timeout);
+	freezer_count();
+	return __retval;
+}
 
 /* Like schedule_timeout_killable(), but should not block the freezer. */
-#define freezable_schedule_timeout_killable(timeout)			\
-({									\
-	long __retval;							\
-	freezer_do_not_count();						\
-	__retval = schedule_timeout_killable(timeout);			\
-	freezer_count();						\
-	__retval;							\
-})
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_killable(timeout);
+	freezer_count();
+	return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_killable(timeout);
+	freezer_count_unsafe();
+	return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+		unsigned long delta, const enum hrtimer_mode mode)
+{
+	int __retval;
+	freezer_do_not_count();
+	__retval = schedule_hrtimeout_range(expires, delta, mode);
+	freezer_count();
+	return __retval;
+}
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -177,33 +255,45 @@ static inline bool freezer_should_skip(struct task_struct *p)
 	__retval;							\
 })
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition)			\
+({									\
+	int __retval;							\
+	freezer_do_not_count();						\
+	__retval = wait_event_killable(wq, (condition));		\
+	freezer_count_unsafe();						\
+	__retval;							\
+})
+
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	for (;;) {							\
-		__retval = wait_event_interruptible(wq,			\
-				(condition) || freezing(current));	\
-		if (__retval || (condition))				\
-			break;						\
-		try_to_freeze();					\
-	}								\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible(wq, (condition));		\
+	freezer_count();						\
 	__retval;							\
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	for (;;) {							\
-		__retval = wait_event_interruptible_timeout(wq,		\
-				(condition) || freezing(current),	\
-				__retval);				\
-		if (__retval <= 0 || (condition))			\
-			break;						\
-		try_to_freeze();					\
-	}								\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible_timeout(wq, (condition),	\
+				__retval);				\
+	freezer_count();						\
 	__retval;							\
 })
 
+#define wait_event_freezable_exclusive(wq, condition)			\
+({									\
+	int __retval;							\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible_exclusive(wq, condition);	\
+	freezer_count();						\
+	__retval;							\
+})
+
+
 #else /* !CONFIG_FREEZER */
 static inline bool frozen(struct task_struct *p) { return false; }
 static inline bool freezing(struct task_struct *p) { return false; }
@@ -225,18 +315,37 @@ static inline void set_freezable(void) {}
 
 #define freezable_schedule()  schedule()
 
+#define freezable_schedule_unsafe()  schedule()
+
+#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout)		\
+	schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)			\
 	schedule_timeout_killable(timeout)
 
+#define freezable_schedule_timeout_killable_unsafe(timeout)		\
+	schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
+	schedule_hrtimeout_range(expires, delta, mode)
+
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 		wait_event_interruptible_timeout(wq, condition, timeout)
 
+#define wait_event_freezable_exclusive(wq, condition)			\
+		wait_event_interruptible_exclusive(wq, condition)
+
 #define wait_event_freezekillable(wq, condition)			\
 		wait_event_killable(wq, condition)
 
+#define wait_event_freezekillable_unsafe(wq, condition)			\
+		wait_event_killable(wq, condition)
+
 #endif /* !CONFIG_FREEZER */
 
 #endif /* FREEZER_H_INCLUDED */
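
For context, a minimal sketch of how a kernel thread would consume the freezable helpers reworked above. It is not part of the patch: demo_wq, demo_should_run() and demo_work() are hypothetical placeholders, while set_freezable(), kthread_should_stop() and wait_event_freezable() are the real APIs.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical wait queue */

static int demo_thread(void *unused)
{
	set_freezable();	/* kthreads stay PF_NOFREEZE until they opt in */

	while (!kthread_should_stop()) {
		/*
		 * Sleeps with PF_FREEZER_SKIP set, so the freezer does not
		 * have to wake this task; try_to_freeze() runs on the way
		 * out via freezer_count(), where lockdep now verifies that
		 * no locks are held.
		 */
		if (wait_event_freezable(demo_wq, demo_should_run() ||
					 kthread_should_stop()))
			continue;	/* -ERESTARTSYS: interrupted */
		demo_work();		/* hypothetical payload */
	}
	return 0;
}
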
diff --git a/kernel/exit.c b/kernel/exit.c
index 7bb73f9d09db..6a057750ebbb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -835,7 +835,7 @@ void do_exit(long code)
 	/*
 	 * Make sure we are holding no locks:
 	 */
-	debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held();
 	/*
 	 * We can do this unlocked here. The futex code uses this flag
 	 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/freezer.c b/kernel/freezer.c
index c38893b0efba..8b2afc1c9df0 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -110,6 +110,18 @@ bool freeze_task(struct task_struct *p)
 {
 	unsigned long flags;
 
+	/*
+	 * This check can race with freezer_do_not_count(), but worst case that
+	 * will result in an extra wakeup being sent to the task.  It does not
+	 * race with freezer_count(); the barriers in freezer_count() and
+	 * freezer_should_skip() ensure that either freezer_count() sees
+	 * freezing == true in try_to_freeze() and freezes, or
+	 * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
+	 * normally.
+	 */
+	if (freezer_should_skip(p))
+		return false;
+
 	spin_lock_irqsave(&freezer_lock, flags);
 	if (!freezing(p) || frozen(p)) {
 		spin_unlock_irqrestore(&freezer_lock, flags);
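
To make the comment above concrete, the two halves of the PF_FREEZER_SKIP handshake it describes reduce to roughly the following (condensed from the freezer.h helpers earlier in this series; not new code):

/* Sleeping task, e.g. inside freezable_schedule(): */
current->flags |= PF_FREEZER_SKIP;	/* freezer_do_not_count() */
schedule();
current->flags &= ~PF_FREEZER_SKIP;	/* freezer_count()...         */
smp_mb();				/* ...pairs with the barrier  */
try_to_freeze();			/* in freezer_should_skip()   */

/* Freezer, in freeze_task() above: */
if (freezer_should_skip(p))	/* PF_FREEZER_SKIP still set: the task */
	return false;		/* will freeze itself; skip the wakeup */
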
diff --git a/kernel/futex.c b/kernel/futex.c
index b26dcfc02c94..d710fae8abbe 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -61,6 +61,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ptrace.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/futex.h>
 
@@ -1807,7 +1808,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 		 * is no timeout, or if it has yet to expire.
 		 */
 		if (!timeout || timeout->task)
-			schedule();
+			freezable_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index fd4b13b131f8..3ee4d06c6fc2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -47,6 +47,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -1545,7 +1546,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
 		t->task = NULL;
 
 	if (likely(t->task))
-		schedule();
+		freezable_schedule();
 
 	hrtimer_cancel(&t->timer);
 	mode = HRTIMER_MODE_ABS;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1f3186b37fd5..e16c45b9ee77 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4090,7 +4090,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
 	if (!debug_locks_off())
 		return;
@@ -4099,22 +4099,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
 	printk("\n");
 	printk("=====================================\n");
-	printk("[ BUG: lock held at task exit time! ]\n");
+	printk("[ BUG: %s/%d still has locks held! ]\n",
+	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	printk("-------------------------------------\n");
-	printk("%s/%d is exiting with locks still held!\n",
-	       curr->comm, task_pid_nr(curr));
-	lockdep_print_held_locks(curr);
-
+	lockdep_print_held_locks(current);
 	printk("\nstack backtrace:\n");
 	dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-	if (unlikely(task->lockdep_depth > 0))
-		print_held_locks_bug(task);
+	if (unlikely(current->lockdep_depth > 0))
+		print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 98088e0e71e8..fc0df8486449 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -30,9 +30,10 @@ static int try_to_freeze_tasks(bool user_only)
 	unsigned int todo;
 	bool wq_busy = false;
 	struct timeval start, end;
-	u64 elapsed_csecs64;
-	unsigned int elapsed_csecs;
+	u64 elapsed_msecs64;
+	unsigned int elapsed_msecs;
 	bool wakeup = false;
+	int sleep_usecs = USEC_PER_MSEC;
 
 	do_gettimeofday(&start);
 
@@ -68,22 +69,25 @@ static int try_to_freeze_tasks(bool user_only)
 
 		/*
 		 * We need to retry, but first give the freezing tasks some
-		 * time to enter the refrigerator.
+		 * time to enter the refrigerator.  Start with an initial
+		 * 1 ms sleep followed by exponential backoff until 8 ms.
 		 */
-		msleep(10);
+		usleep_range(sleep_usecs / 2, sleep_usecs);
+		if (sleep_usecs < 8 * USEC_PER_MSEC)
+			sleep_usecs *= 2;
 	}
 
 	do_gettimeofday(&end);
-	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
-	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
-	elapsed_csecs = elapsed_csecs64;
+	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+	do_div(elapsed_msecs64, NSEC_PER_MSEC);
+	elapsed_msecs = elapsed_msecs64;
 
 	if (todo) {
 		printk("\n");
-		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
+		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
 		       wakeup ? "aborted" : "failed",
-		       elapsed_csecs / 100, elapsed_csecs % 100,
+		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
 		if (!wakeup) {
@@ -96,8 +100,8 @@ static int try_to_freeze_tasks(bool user_only)
 			read_unlock(&tasklist_lock);
 		}
 	} else {
-		printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
-		       elapsed_csecs % 100);
+		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+		       elapsed_msecs % 1000);
 	}
 
 	return todo ? -EBUSY : 0;
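
With sleep_usecs starting at USEC_PER_MSEC, the usleep_range() calls above draw from roughly [0.5, 1] ms, [1, 2] ms, [2, 4] ms and [4, 8] ms on successive retries, then stay in the [4, 8] ms range; the old code slept a fixed 10 ms per retry regardless of how quickly tasks froze.
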
diff --git a/kernel/signal.c b/kernel/signal.c
index 113411bfe8b1..50e41075ac77 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2848,7 +2848,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		recalc_sigpending();
 		spin_unlock_irq(&tsk->sighand->siglock);
 
-		timeout = schedule_timeout_interruptible(timeout);
+		timeout = freezable_schedule_timeout_interruptible(timeout);
 
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5356b120dbf8..77d251e02593 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -254,7 +254,7 @@ static int rpc_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	freezable_schedule();
+	freezable_schedule_unsafe();
 	return 0;
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 826e09938bff..c4ce243824bb 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -114,6 +114,7 @@
 #include <linux/mount.h>
 #include <net/checksum.h>
 #include <linux/security.h>
+#include <linux/freezer.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -1879,7 +1880,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 		unix_state_unlock(sk);
-		timeo = schedule_timeout(timeo);
+		timeo = freezable_schedule_timeout(timeo);
 		unix_state_lock(sk);
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}