57 files changed, 303 insertions, 401 deletions
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba187f4..3ab9fbd2800a 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default. However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged). From this point it is regarded as freezable
+directly is not allowed). From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
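For reference, the usage pattern this part of the documentation describes, set_freezable() once at thread start and a freezer-aware sleep in the main loop, looks roughly like the sketch below. This is a minimal illustration only, not code from this patch; the example_* names are placeholders for a real driver's wait queue and work functions.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);	/* hypothetical wait queue */

static bool example_has_work(void *data);	/* placeholder: the thread's real condition */
static void example_do_work(void *data);	/* placeholder: the thread's real job */

static int example_kthread(void *data)
{
	set_freezable();	/* clear PF_NOFREEZE: this thread promises to handle freezing */

	while (!kthread_should_stop()) {
		/*
		 * Sleep until there is work; the _freezable variant enters
		 * __refrigerator() transparently whenever the freezer asks.
		 */
		wait_event_freezable(example_waitq,
				     example_has_work(data) ||
				     kthread_should_stop());

		while (example_has_work(data))
			example_do_work(data);
	}
	return 0;
}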
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db022342..28335bd40e40 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return. */
 #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06e..0f30c3a78fc1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG 16
 #define TIF_USING_IWMMXT 17
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_RESTORE_SIGMASK 20
 #define TIF_SECCOMP 21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03dcb0b6..e5deda4691db 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
-#define TIF_FREEZE 29
 #define TIF_DEBUG 30 /* debugging enabled */
 #define TIF_USERSPACE 31 /* true if FS sets userspace */
 
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a121..53ad10005ae3 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
 				   TIF_NEED_RESCHED */
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 6 /* is freezing for suspend */
 #define TIF_IRQ_SYNC 7 /* sync pipeline stage */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c54557..29b92884d793 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73dc119..92d83ea99ae5 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784bfdee..9c126e0c09aa 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 4 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84e7bcc..e054bcc4273c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@ struct thread_info {
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
 #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
-#define TIF_FREEZE 20 /* is freezing for suspend */
 #define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba44068..bf8fa3c06f4e 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 790988967ba7..294df1592de5 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_DELAYED_TRACE 14 /* single step a syscall */
 #define TIF_SYSCALL_TRACE 15 /* syscall trace active */
 #define TIF_MEMDIE 16 /* is terminating due to OOM killer */
-#define TIF_FREEZE 17 /* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
 
 #endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2ac21b3..1a8ab6a5c03f 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 6 /* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
 #define TIF_SECCOMP 10 /* secure computing */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_IRET (1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
 
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6639e7..0d85d8e440c5 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
 #define TIF_FIXADE 20 /* Fix address errors in software */
 #define TIF_LOGADE 21 /* Log address errors to syslog */
 #define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_FIXADE (1<<TIF_FIXADE)
 #define _TIF_LOGADE (1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c213002d4c..28cf52100baa 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
 #define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE +(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de727e90b..6d9c7c7973d0 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@ struct thread_info {
 #define TIF_32BIT 4 /* 32 bit binary */
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
-#define TIF_FREEZE 7 /* is freezing for suspend */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SINGLESTEP 9 /* single stepping? */
 #define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT (1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231ec1f0..964714940961 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR 12 /* Force successful syscall return */
 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
 #define _TIF_NOERROR (1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a23183423b14..a73038155e0d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP 20 /* This task is single stepped */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
 
 #define _TIF_SYSCALL (1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT (1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d5089de1e..20ee40af16e9 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa5753233410..5cc5888ad5a3 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
 				 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE 10 /* is terminating due to OOM killer */
-#define TIF_FREEZE 11 /* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
 				    _TIF_SIGPENDING | \
 				    _TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be1a533..01d057fe6a3f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 12 is available */
 #define TIF_MEMDIE 13 /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG 14
-#define TIF_FREEZE 15 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
 			     _TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad33fab..200c4ab1240c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT 6
 #define TIF_RESTORE_SIGMASK 7
-#define TIF_FREEZE 16 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE (1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 
 #endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e04861..89f7557583b8 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
 #define TIF_SYSCALL_TRACE 8
 #define TIF_MEMDIE 18
-#define TIF_FREEZE 19
 #define TIF_RESTORE_SIGMASK 20
 
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
 
 /*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a1fe5c127b52..32125af20d32 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -90,7 +90,6 @@ struct thread_info {
 #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
 #define TIF_DEBUG 21 /* uses debug registers */
 #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-#define TIF_FREEZE 23 /* is freezing for suspend */
 #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
@@ -112,7 +111,6 @@ struct thread_info {
 #define _TIF_FORK (1 << TIF_FORK)
 #define _TIF_DEBUG (1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
 #define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8accb0b0c..6abbedd09d85 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE 17 /* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_IRET (1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread *thread = data;
+	struct dmatest_done done = { .wait = &done_wait };
 	struct dma_chan *chan;
 	const char *thread_name;
 	unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
 	int i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@ static void dmatest_callback(void *completion)
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -391,9 +399,9 @@ static void dmatest_callback(void *completion)
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static void dmatest_callback(void *completion)
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait. To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing. For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
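Outside of the diff context, the "poor man's completion" idea above reduces to a small, reusable pattern: a flag plus a wait queue that the asynchronous callback kicks, so the waiter can sleep in wait_event_freezable_timeout() instead of an unfreezable completion wait. The sketch below is an illustration under that assumption only; example_submit() is a hypothetical asynchronous submission helper, not a real API.

#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_done {
	bool			done;
	wait_queue_head_t	*wait;
};

/* hypothetical: hand the callback and its argument to some async engine */
static void example_submit(void (*cb)(void *), void *arg);

static void example_callback(void *arg)
{
	struct example_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static int example_wait_for_dma(unsigned int timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct example_done done = { .wait = &done_wait };

	example_submit(example_callback, &done);

	/* interruptible sleep that also freezes and thaws with the system */
	wait_event_freezable_timeout(done_wait, done.done,
				     msecs_to_jiffies(timeout_ms));

	return done.done ? 0 : -ETIMEDOUT;
}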
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb096..c6b456ad7342 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 41c96b3d8152..e880c79d7bd8 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
 			if (change_speed(stir, stir->speed))
 				break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b828680b21d..4b11fc91fa7d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
 			t = 100; /* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;
@@ -2528,10 +2529,6 @@ exit:
 static void hotkey_poll_stop_sync(void)
 {
 	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
 		mutex_lock(&hotkey_thread_mutex);
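kthread_freezable_should_stop() combines the stop check with freezing: it puts the thread into the refrigerator if the freezer has requested it, returns true when the thread should exit, and optionally reports through its argument whether a freeze actually happened, which is how the hotkey poller above learns that stale state must be discarded. A minimal sketch of that loop shape, with example_* placeholders standing in for the real poll and reset work, could look like this:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

static void example_reset_state(void *data);	/* placeholder: drop stale state after a thaw */
static void example_poll_once(void *data);	/* placeholder: one polling pass */

static int example_poll_thread(void *data)
{
	bool was_frozen;

	set_freezable();

	while (!kthread_freezable_should_stop(&was_frozen)) {
		/* we may have just spent a long time in the refrigerator */
		if (was_frozen)
			example_reset_state(data);

		example_poll_once(data);
		msleep_interruptible(100);	/* poll period chosen arbitrarily for the sketch */
	}
	return 0;
}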
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4de..8a7803cf88d2 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a1..aa84b3d77274 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
 
 	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
 	/*
 	 * Wait for the timeout to expire or for a disconnect
 	 *
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
 	 * fail to freeze, but we can't be non-freezable either. Nor can
 	 * khubd freeze while waiting for scanning to complete as it may
 	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
 	 */
 	if (delay_use > 0) {
 		dev_dbg(dev, "waiting for device to settle "
 				"before scanning\n");
 		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
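The comment in the hunk above describes a middle ground: a thread that must not block the freezer but also cannot use wait_event_freezable() outright, so it adds freezing(current) to the wake-up condition of an ordinary interruptible wait and handles the freeze on its normal path. A generic sketch of that idea (not the full usb-storage logic; example_waitq and example_condition() are placeholders) might be:

#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);

static bool example_condition(void);	/* placeholder wake-up condition */

static void example_settle_wait(unsigned long timeout)
{
	/*
	 * Wake up early if the condition fires, the timeout expires,
	 * or the system starts freezing tasks.
	 */
	wait_event_interruptible_timeout(example_waitq,
			example_condition() || freezing(current),
			timeout);

	try_to_freeze();	/* actually enter the refrigerator if asked to */
}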
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef1..98ab240072e5 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -340,7 +340,7 @@ again:
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 632f8f3cc9db..b09175901521 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg) | |||
1579 | btrfs_run_defrag_inodes(root->fs_info); | 1579 | btrfs_run_defrag_inodes(root->fs_info); |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | if (freezing(current)) { | 1582 | if (!try_to_freeze()) { |
1583 | refrigerator(); | ||
1584 | } else { | ||
1585 | set_current_state(TASK_INTERRUPTIBLE); | 1583 | set_current_state(TASK_INTERRUPTIBLE); |
1586 | if (!kthread_should_stop()) | 1584 | if (!kthread_should_stop()) |
1587 | schedule(); | 1585 | schedule(); |
@@ -1635,9 +1633,7 @@ sleep: | |||
1635 | wake_up_process(root->fs_info->cleaner_kthread); | 1633 | wake_up_process(root->fs_info->cleaner_kthread); |
1636 | mutex_unlock(&root->fs_info->transaction_kthread_mutex); | 1634 | mutex_unlock(&root->fs_info->transaction_kthread_mutex); |
1637 | 1635 | ||
1638 | if (freezing(current)) { | 1636 | if (!try_to_freeze()) { |
1639 | refrigerator(); | ||
1640 | } else { | ||
1641 | set_current_state(TASK_INTERRUPTIBLE); | 1637 | set_current_state(TASK_INTERRUPTIBLE); |
1642 | if (!kthread_should_stop() && | 1638 | if (!kthread_should_stop() && |
1643 | !btrfs_transaction_blocked(root->fs_info)) | 1639 | !btrfs_transaction_blocked(root->fs_info)) |
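Since try_to_freeze() now reports whether the task actually froze, the old two-branch freezing()/refrigerator() structure collapses into a single test, as in the two btrfs kthreads above. A minimal sketch of the resulting idiom:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int cleaner_like_thread(void *unused)
    {
        do {
            /* ... one round of background cleanup work ... */

            if (!try_to_freeze()) {
                /* Not freezing: sleep until woken up or stopped. */
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())
                    schedule();
                __set_current_state(TASK_RUNNING);
            }
        } while (!kthread_should_stop());
        return 0;
    }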
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3858767ec672..1c7bbd00e7e5 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -2884,8 +2884,7 @@ cont_thread: | |||
2884 | } | 2884 | } |
2885 | mutex_unlock(&eli->li_list_mtx); | 2885 | mutex_unlock(&eli->li_list_mtx); |
2886 | 2886 | ||
2887 | if (freezing(current)) | 2887 | try_to_freeze(); |
2888 | refrigerator(); | ||
2889 | 2888 | ||
2890 | cur = jiffies; | 2889 | cur = jiffies; |
2891 | if ((time_after_eq(cur, next_wakeup)) || | 2890 | if ((time_after_eq(cur, next_wakeup)) || |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 73c3992b2bb4..271fde50f0ee 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -947,7 +947,7 @@ int bdi_writeback_thread(void *data) | |||
947 | 947 | ||
948 | trace_writeback_thread_start(bdi); | 948 | trace_writeback_thread_start(bdi); |
949 | 949 | ||
950 | while (!kthread_should_stop()) { | 950 | while (!kthread_freezable_should_stop(NULL)) { |
951 | /* | 951 | /* |
952 | * Remove own delayed wake-up timer, since we are already awake | 952 | * Remove own delayed wake-up timer, since we are already awake |
953 | * and we'll take care of the periodic write-back. | 953 | * and we'll take care of the periodic write-back. |
@@ -977,8 +977,6 @@ int bdi_writeback_thread(void *data) | |||
977 | */ | 977 | */ |
978 | schedule(); | 978 | schedule(); |
979 | } | 979 | } |
980 | |||
981 | try_to_freeze(); | ||
982 | } | 980 | } |
983 | 981 | ||
984 | /* Flush any work that raced with us exiting */ | 982 | /* Flush any work that raced with us exiting */ |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 598646434362..8154d42e4647 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -951,8 +951,8 @@ int gfs2_logd(void *data) | |||
951 | wake_up(&sdp->sd_log_waitq); | 951 | wake_up(&sdp->sd_log_waitq); |
952 | 952 | ||
953 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; | 953 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; |
954 | if (freezing(current)) | 954 | |
955 | refrigerator(); | 955 | try_to_freeze(); |
956 | 956 | ||
957 | do { | 957 | do { |
958 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, | 958 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 7e528dc14f85..d49669e92652 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data) | |||
1427 | /* Check for & recover partially truncated inodes */ | 1427 | /* Check for & recover partially truncated inodes */ |
1428 | quotad_check_trunc_list(sdp); | 1428 | quotad_check_trunc_list(sdp); |
1429 | 1429 | ||
1430 | if (freezing(current)) | 1430 | try_to_freeze(); |
1431 | refrigerator(); | 1431 | |
1432 | t = min(quotad_timeo, statfs_timeo); | 1432 | t = min(quotad_timeo, statfs_timeo); |
1433 | 1433 | ||
1434 | prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); | 1434 | prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index fea8dd661d2b..a96cff0c5f1d 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -166,7 +166,7 @@ loop: | |||
166 | */ | 166 | */ |
167 | jbd_debug(1, "Now suspending kjournald\n"); | 167 | jbd_debug(1, "Now suspending kjournald\n"); |
168 | spin_unlock(&journal->j_state_lock); | 168 | spin_unlock(&journal->j_state_lock); |
169 | refrigerator(); | 169 | try_to_freeze(); |
170 | spin_lock(&journal->j_state_lock); | 170 | spin_lock(&journal->j_state_lock); |
171 | } else { | 171 | } else { |
172 | /* | 172 | /* |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 0fa0123151d3..c0a5f9f1b127 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -173,7 +173,7 @@ loop: | |||
173 | */ | 173 | */ |
174 | jbd_debug(1, "Now suspending kjournald2\n"); | 174 | jbd_debug(1, "Now suspending kjournald2\n"); |
175 | write_unlock(&journal->j_state_lock); | 175 | write_unlock(&journal->j_state_lock); |
176 | refrigerator(); | 176 | try_to_freeze(); |
177 | write_lock(&journal->j_state_lock); | 177 | write_lock(&journal->j_state_lock); |
178 | } else { | 178 | } else { |
179 | /* | 179 | /* |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index cc5f811ed383..2eb952c41a69 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg) | |||
2349 | 2349 | ||
2350 | if (freezing(current)) { | 2350 | if (freezing(current)) { |
2351 | spin_unlock_irq(&log_redrive_lock); | 2351 | spin_unlock_irq(&log_redrive_lock); |
2352 | refrigerator(); | 2352 | try_to_freeze(); |
2353 | } else { | 2353 | } else { |
2354 | set_current_state(TASK_INTERRUPTIBLE); | 2354 | set_current_state(TASK_INTERRUPTIBLE); |
2355 | spin_unlock_irq(&log_redrive_lock); | 2355 | spin_unlock_irq(&log_redrive_lock); |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index af9606057dde..bb8b661bcc50 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c | |||
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg) | |||
2800 | 2800 | ||
2801 | if (freezing(current)) { | 2801 | if (freezing(current)) { |
2802 | LAZY_UNLOCK(flags); | 2802 | LAZY_UNLOCK(flags); |
2803 | refrigerator(); | 2803 | try_to_freeze(); |
2804 | } else { | 2804 | } else { |
2805 | DECLARE_WAITQUEUE(wq, current); | 2805 | DECLARE_WAITQUEUE(wq, current); |
2806 | 2806 | ||
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg) | |||
2994 | 2994 | ||
2995 | if (freezing(current)) { | 2995 | if (freezing(current)) { |
2996 | TXN_UNLOCK(); | 2996 | TXN_UNLOCK(); |
2997 | refrigerator(); | 2997 | try_to_freeze(); |
2998 | } else { | 2998 | } else { |
2999 | set_current_state(TASK_INTERRUPTIBLE); | 2999 | set_current_state(TASK_INTERRUPTIBLE); |
3000 | TXN_UNLOCK(); | 3000 | TXN_UNLOCK(); |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index bb24ab6c282f..0e72ad6f22aa 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg) | |||
2470 | 2470 | ||
2471 | if (freezing(current)) { | 2471 | if (freezing(current)) { |
2472 | spin_unlock(&sci->sc_state_lock); | 2472 | spin_unlock(&sci->sc_state_lock); |
2473 | refrigerator(); | 2473 | try_to_freeze(); |
2474 | spin_lock(&sci->sc_state_lock); | 2474 | spin_lock(&sci->sc_state_lock); |
2475 | } else { | 2475 | } else { |
2476 | DEFINE_WAIT(wait); | 2476 | DEFINE_WAIT(wait); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index cf0ac056815f..018829936d6d 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1703,7 +1703,7 @@ xfsbufd( | |||
1703 | 1703 | ||
1704 | if (unlikely(freezing(current))) { | 1704 | if (unlikely(freezing(current))) { |
1705 | set_bit(XBT_FORCE_SLEEP, &target->bt_flags); | 1705 | set_bit(XBT_FORCE_SLEEP, &target->bt_flags); |
1706 | refrigerator(); | 1706 | try_to_freeze(); |
1707 | } else { | 1707 | } else { |
1708 | clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); | 1708 | clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); |
1709 | } | 1709 | } |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index a5386e3ee756..09570ac22be6 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -5,71 +5,58 @@ | |||
5 | 5 | ||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
8 | #include <linux/atomic.h> | ||
8 | 9 | ||
9 | #ifdef CONFIG_FREEZER | 10 | #ifdef CONFIG_FREEZER |
11 | extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ | ||
12 | extern bool pm_freezing; /* PM freezing in effect */ | ||
13 | extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ | ||
14 | |||
10 | /* | 15 | /* |
11 | * Check if a process has been frozen | 16 | * Check if a process has been frozen |
12 | */ | 17 | */ |
13 | static inline int frozen(struct task_struct *p) | 18 | static inline bool frozen(struct task_struct *p) |
14 | { | 19 | { |
15 | return p->flags & PF_FROZEN; | 20 | return p->flags & PF_FROZEN; |
16 | } | 21 | } |
17 | 22 | ||
18 | /* | 23 | extern bool freezing_slow_path(struct task_struct *p); |
19 | * Check if there is a request to freeze a process | ||
20 | */ | ||
21 | static inline int freezing(struct task_struct *p) | ||
22 | { | ||
23 | return test_tsk_thread_flag(p, TIF_FREEZE); | ||
24 | } | ||
25 | |||
26 | /* | ||
27 | * Request that a process be frozen | ||
28 | */ | ||
29 | static inline void set_freeze_flag(struct task_struct *p) | ||
30 | { | ||
31 | set_tsk_thread_flag(p, TIF_FREEZE); | ||
32 | } | ||
33 | 24 | ||
34 | /* | 25 | /* |
35 | * Sometimes we may need to cancel the previous 'freeze' request | 26 | * Check if there is a request to freeze a process |
36 | */ | 27 | */ |
37 | static inline void clear_freeze_flag(struct task_struct *p) | 28 | static inline bool freezing(struct task_struct *p) |
38 | { | ||
39 | clear_tsk_thread_flag(p, TIF_FREEZE); | ||
40 | } | ||
41 | |||
42 | static inline bool should_send_signal(struct task_struct *p) | ||
43 | { | 29 | { |
44 | return !(p->flags & PF_FREEZER_NOSIG); | 30 | if (likely(!atomic_read(&system_freezing_cnt))) |
31 | return false; | ||
32 | return freezing_slow_path(p); | ||
45 | } | 33 | } |
46 | 34 | ||
47 | /* Takes and releases task alloc lock using task_lock() */ | 35 | /* Takes and releases task alloc lock using task_lock() */ |
48 | extern int thaw_process(struct task_struct *p); | 36 | extern void __thaw_task(struct task_struct *t); |
49 | 37 | ||
50 | extern void refrigerator(void); | 38 | extern bool __refrigerator(bool check_kthr_stop); |
51 | extern int freeze_processes(void); | 39 | extern int freeze_processes(void); |
52 | extern int freeze_kernel_threads(void); | 40 | extern int freeze_kernel_threads(void); |
53 | extern void thaw_processes(void); | 41 | extern void thaw_processes(void); |
54 | 42 | ||
55 | static inline int try_to_freeze(void) | 43 | static inline bool try_to_freeze(void) |
56 | { | 44 | { |
57 | if (freezing(current)) { | 45 | might_sleep(); |
58 | refrigerator(); | 46 | if (likely(!freezing(current))) |
59 | return 1; | 47 | return false; |
60 | } else | 48 | return __refrigerator(false); |
61 | return 0; | ||
62 | } | 49 | } |
63 | 50 | ||
64 | extern bool freeze_task(struct task_struct *p, bool sig_only); | 51 | extern bool freeze_task(struct task_struct *p); |
65 | extern void cancel_freezing(struct task_struct *p); | 52 | extern bool set_freezable(void); |
66 | 53 | ||
67 | #ifdef CONFIG_CGROUP_FREEZER | 54 | #ifdef CONFIG_CGROUP_FREEZER |
68 | extern int cgroup_freezing_or_frozen(struct task_struct *task); | 55 | extern bool cgroup_freezing(struct task_struct *task); |
69 | #else /* !CONFIG_CGROUP_FREEZER */ | 56 | #else /* !CONFIG_CGROUP_FREEZER */ |
70 | static inline int cgroup_freezing_or_frozen(struct task_struct *task) | 57 | static inline bool cgroup_freezing(struct task_struct *task) |
71 | { | 58 | { |
72 | return 0; | 59 | return false; |
73 | } | 60 | } |
74 | #endif /* !CONFIG_CGROUP_FREEZER */ | 61 | #endif /* !CONFIG_CGROUP_FREEZER */ |
75 | 62 | ||
@@ -118,23 +105,6 @@ static inline int freezer_should_skip(struct task_struct *p) | |||
118 | } | 105 | } |
119 | 106 | ||
120 | /* | 107 | /* |
121 | * Tell the freezer that the current task should be frozen by it | ||
122 | */ | ||
123 | static inline void set_freezable(void) | ||
124 | { | ||
125 | current->flags &= ~PF_NOFREEZE; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Tell the freezer that the current task should be frozen by it and that it | ||
130 | * should send a fake signal to the task to freeze it. | ||
131 | */ | ||
132 | static inline void set_freezable_with_signal(void) | ||
133 | { | ||
134 | current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG); | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Freezer-friendly wrappers around wait_event_interruptible(), | 108 | * Freezer-friendly wrappers around wait_event_interruptible(), |
139 | * wait_event_killable() and wait_event_interruptible_timeout(), originally | 109 | * wait_event_killable() and wait_event_interruptible_timeout(), originally |
140 | * defined in <linux/wait.h> | 110 | * defined in <linux/wait.h> |
@@ -152,47 +122,45 @@ static inline void set_freezable_with_signal(void) | |||
152 | #define wait_event_freezable(wq, condition) \ | 122 | #define wait_event_freezable(wq, condition) \ |
153 | ({ \ | 123 | ({ \ |
154 | int __retval; \ | 124 | int __retval; \ |
155 | do { \ | 125 | for (;;) { \ |
156 | __retval = wait_event_interruptible(wq, \ | 126 | __retval = wait_event_interruptible(wq, \ |
157 | (condition) || freezing(current)); \ | 127 | (condition) || freezing(current)); \ |
158 | if (__retval && !freezing(current)) \ | 128 | if (__retval || (condition)) \ |
159 | break; \ | 129 | break; \ |
160 | else if (!(condition)) \ | 130 | try_to_freeze(); \ |
161 | __retval = -ERESTARTSYS; \ | 131 | } \ |
162 | } while (try_to_freeze()); \ | ||
163 | __retval; \ | 132 | __retval; \ |
164 | }) | 133 | }) |
165 | 134 | ||
166 | |||
167 | #define wait_event_freezable_timeout(wq, condition, timeout) \ | 135 | #define wait_event_freezable_timeout(wq, condition, timeout) \ |
168 | ({ \ | 136 | ({ \ |
169 | long __retval = timeout; \ | 137 | long __retval = timeout; \ |
170 | do { \ | 138 | for (;;) { \ |
171 | __retval = wait_event_interruptible_timeout(wq, \ | 139 | __retval = wait_event_interruptible_timeout(wq, \ |
172 | (condition) || freezing(current), \ | 140 | (condition) || freezing(current), \ |
173 | __retval); \ | 141 | __retval); \ |
174 | } while (try_to_freeze()); \ | 142 | if (__retval <= 0 || (condition)) \ |
143 | break; \ | ||
144 | try_to_freeze(); \ | ||
145 | } \ | ||
175 | __retval; \ | 146 | __retval; \ |
176 | }) | 147 | }) |
148 | |||
177 | #else /* !CONFIG_FREEZER */ | 149 | #else /* !CONFIG_FREEZER */ |
178 | static inline int frozen(struct task_struct *p) { return 0; } | 150 | static inline bool frozen(struct task_struct *p) { return false; } |
179 | static inline int freezing(struct task_struct *p) { return 0; } | 151 | static inline bool freezing(struct task_struct *p) { return false; } |
180 | static inline void set_freeze_flag(struct task_struct *p) {} | ||
181 | static inline void clear_freeze_flag(struct task_struct *p) {} | ||
182 | static inline int thaw_process(struct task_struct *p) { return 1; } | ||
183 | 152 | ||
184 | static inline void refrigerator(void) {} | 153 | static inline bool __refrigerator(bool check_kthr_stop) { return false; } |
185 | static inline int freeze_processes(void) { return -ENOSYS; } | 154 | static inline int freeze_processes(void) { return -ENOSYS; } |
186 | static inline int freeze_kernel_threads(void) { return -ENOSYS; } | 155 | static inline int freeze_kernel_threads(void) { return -ENOSYS; } |
187 | static inline void thaw_processes(void) {} | 156 | static inline void thaw_processes(void) {} |
188 | 157 | ||
189 | static inline int try_to_freeze(void) { return 0; } | 158 | static inline bool try_to_freeze(void) { return false; } |
190 | 159 | ||
191 | static inline void freezer_do_not_count(void) {} | 160 | static inline void freezer_do_not_count(void) {} |
192 | static inline void freezer_count(void) {} | 161 | static inline void freezer_count(void) {} |
193 | static inline int freezer_should_skip(struct task_struct *p) { return 0; } | 162 | static inline int freezer_should_skip(struct task_struct *p) { return 0; } |
194 | static inline void set_freezable(void) {} | 163 | static inline void set_freezable(void) {} |
195 | static inline void set_freezable_with_signal(void) {} | ||
196 | 164 | ||
197 | #define wait_event_freezable(wq, condition) \ | 165 | #define wait_event_freezable(wq, condition) \ |
198 | wait_event_interruptible(wq, condition) | 166 | wait_event_interruptible(wq, condition) |
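The reworked wait_event_freezable() no longer converts a freezer wakeup into -ERESTARTSYS; it freezes and retries the wait until the condition holds or a real signal arrives. A hedged sketch of a freezable kthread built on it, with work_wq and work_available() as placeholders:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(work_wq);

    static bool work_available(void)    /* placeholder condition */
    {
        return false;
    }

    static int freezable_worker(void *unused)
    {
        set_freezable();
        while (!kthread_should_stop()) {
            /*
             * Sleeps until the condition is true; a freezer wakeup is
             * handled internally by freezing and retrying the wait.
             */
            if (wait_event_freezable(work_wq, work_available() ||
                                     kthread_should_stop()))
                continue;    /* woken by a real signal */

            /* ... drain the available work ... */
        }
        return 0;
    }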
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 5cac19b3a266..0714b24c0e45 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
35 | void kthread_bind(struct task_struct *k, unsigned int cpu); | 35 | void kthread_bind(struct task_struct *k, unsigned int cpu); |
36 | int kthread_stop(struct task_struct *k); | 36 | int kthread_stop(struct task_struct *k); |
37 | int kthread_should_stop(void); | 37 | int kthread_should_stop(void); |
38 | bool kthread_freezable_should_stop(bool *was_frozen); | ||
38 | void *kthread_data(struct task_struct *k); | 39 | void *kthread_data(struct task_struct *k); |
39 | 40 | ||
40 | int kthreadd(void *unused); | 41 | int kthreadd(void *unused); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c4f3e9b9bc5..d81cce933869 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
220 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) | 220 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
221 | #define task_contributes_to_load(task) \ | 221 | #define task_contributes_to_load(task) \ |
222 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ | 222 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
223 | (task->flags & PF_FREEZING) == 0) | 223 | (task->flags & PF_FROZEN) == 0) |
224 | 224 | ||
225 | #define __set_task_state(tsk, state_value) \ | 225 | #define __set_task_state(tsk, state_value) \ |
226 | do { (tsk)->state = (state_value); } while (0) | 226 | do { (tsk)->state = (state_value); } while (0) |
@@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1772 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ | 1772 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
1773 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ | 1773 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ |
1774 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ | 1774 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
1775 | #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ | ||
1776 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ | 1775 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
1777 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ | 1776 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ |
1778 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ | 1777 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
@@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1788 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1787 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1789 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1788 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1790 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ | 1789 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1791 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | ||
1792 | 1790 | ||
1793 | /* | 1791 | /* |
1794 | * Only the _current_ task can read/write to tsk->flags, but other | 1792 | * Only the _current_ task can read/write to tsk->flags, but other |
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 5e828a2ca8e6..e411a60cc2c8 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task) | |||
48 | struct freezer, css); | 48 | struct freezer, css); |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline int __cgroup_freezing_or_frozen(struct task_struct *task) | 51 | bool cgroup_freezing(struct task_struct *task) |
52 | { | 52 | { |
53 | enum freezer_state state = task_freezer(task)->state; | 53 | enum freezer_state state; |
54 | return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN); | 54 | bool ret; |
55 | } | ||
56 | 55 | ||
57 | int cgroup_freezing_or_frozen(struct task_struct *task) | 56 | rcu_read_lock(); |
58 | { | 57 | state = task_freezer(task)->state; |
59 | int result; | 58 | ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN; |
60 | task_lock(task); | 59 | rcu_read_unlock(); |
61 | result = __cgroup_freezing_or_frozen(task); | 60 | |
62 | task_unlock(task); | 61 | return ret; |
63 | return result; | ||
64 | } | 62 | } |
65 | 63 | ||
66 | /* | 64 | /* |
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys; | |||
102 | * freezer_can_attach(): | 100 | * freezer_can_attach(): |
103 | * cgroup_mutex (held by caller of can_attach) | 101 | * cgroup_mutex (held by caller of can_attach) |
104 | * | 102 | * |
105 | * cgroup_freezing_or_frozen(): | ||
106 | * task->alloc_lock (to get task's cgroup) | ||
107 | * | ||
108 | * freezer_fork() (preserving fork() performance means can't take cgroup_mutex): | 103 | * freezer_fork() (preserving fork() performance means can't take cgroup_mutex): |
109 | * freezer->lock | 104 | * freezer->lock |
110 | * sighand->siglock (if the cgroup is freezing) | 105 | * sighand->siglock (if the cgroup is freezing) |
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys; | |||
130 | * write_lock css_set_lock (cgroup iterator start) | 125 | * write_lock css_set_lock (cgroup iterator start) |
131 | * task->alloc_lock | 126 | * task->alloc_lock |
132 | * read_lock css_set_lock (cgroup iterator start) | 127 | * read_lock css_set_lock (cgroup iterator start) |
133 | * task->alloc_lock (inside thaw_process(), prevents race with refrigerator()) | 128 | * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator()) |
134 | * sighand->siglock | 129 | * sighand->siglock |
135 | */ | 130 | */ |
136 | static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss, | 131 | static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss, |
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss, | |||
150 | static void freezer_destroy(struct cgroup_subsys *ss, | 145 | static void freezer_destroy(struct cgroup_subsys *ss, |
151 | struct cgroup *cgroup) | 146 | struct cgroup *cgroup) |
152 | { | 147 | { |
153 | kfree(cgroup_freezer(cgroup)); | 148 | struct freezer *freezer = cgroup_freezer(cgroup); |
149 | |||
150 | if (freezer->state != CGROUP_THAWED) | ||
151 | atomic_dec(&system_freezing_cnt); | ||
152 | kfree(freezer); | ||
154 | } | 153 | } |
155 | 154 | ||
156 | /* | 155 | /* |
@@ -177,13 +176,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss, | |||
177 | 176 | ||
178 | static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | 177 | static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
179 | { | 178 | { |
180 | rcu_read_lock(); | 179 | return cgroup_freezing(tsk) ? -EBUSY : 0; |
181 | if (__cgroup_freezing_or_frozen(tsk)) { | ||
182 | rcu_read_unlock(); | ||
183 | return -EBUSY; | ||
184 | } | ||
185 | rcu_read_unlock(); | ||
186 | return 0; | ||
187 | } | 180 | } |
188 | 181 | ||
189 | static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task) | 182 | static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task) |
@@ -213,7 +206,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task) | |||
213 | 206 | ||
214 | /* Locking avoids race with FREEZING -> THAWED transitions. */ | 207 | /* Locking avoids race with FREEZING -> THAWED transitions. */ |
215 | if (freezer->state == CGROUP_FREEZING) | 208 | if (freezer->state == CGROUP_FREEZING) |
216 | freeze_task(task, true); | 209 | freeze_task(task); |
217 | spin_unlock_irq(&freezer->lock); | 210 | spin_unlock_irq(&freezer->lock); |
218 | } | 211 | } |
219 | 212 | ||
@@ -231,7 +224,7 @@ static void update_if_frozen(struct cgroup *cgroup, | |||
231 | cgroup_iter_start(cgroup, &it); | 224 | cgroup_iter_start(cgroup, &it); |
232 | while ((task = cgroup_iter_next(cgroup, &it))) { | 225 | while ((task = cgroup_iter_next(cgroup, &it))) { |
233 | ntotal++; | 226 | ntotal++; |
234 | if (frozen(task)) | 227 | if (freezing(task) && frozen(task)) |
235 | nfrozen++; | 228 | nfrozen++; |
236 | } | 229 | } |
237 | 230 | ||
@@ -279,10 +272,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer) | |||
279 | struct task_struct *task; | 272 | struct task_struct *task; |
280 | unsigned int num_cant_freeze_now = 0; | 273 | unsigned int num_cant_freeze_now = 0; |
281 | 274 | ||
282 | freezer->state = CGROUP_FREEZING; | ||
283 | cgroup_iter_start(cgroup, &it); | 275 | cgroup_iter_start(cgroup, &it); |
284 | while ((task = cgroup_iter_next(cgroup, &it))) { | 276 | while ((task = cgroup_iter_next(cgroup, &it))) { |
285 | if (!freeze_task(task, true)) | 277 | if (!freeze_task(task)) |
286 | continue; | 278 | continue; |
287 | if (frozen(task)) | 279 | if (frozen(task)) |
288 | continue; | 280 | continue; |
@@ -300,12 +292,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer) | |||
300 | struct task_struct *task; | 292 | struct task_struct *task; |
301 | 293 | ||
302 | cgroup_iter_start(cgroup, &it); | 294 | cgroup_iter_start(cgroup, &it); |
303 | while ((task = cgroup_iter_next(cgroup, &it))) { | 295 | while ((task = cgroup_iter_next(cgroup, &it))) |
304 | thaw_process(task); | 296 | __thaw_task(task); |
305 | } | ||
306 | cgroup_iter_end(cgroup, &it); | 297 | cgroup_iter_end(cgroup, &it); |
307 | |||
308 | freezer->state = CGROUP_THAWED; | ||
309 | } | 298 | } |
310 | 299 | ||
311 | static int freezer_change_state(struct cgroup *cgroup, | 300 | static int freezer_change_state(struct cgroup *cgroup, |
@@ -319,20 +308,24 @@ static int freezer_change_state(struct cgroup *cgroup, | |||
319 | spin_lock_irq(&freezer->lock); | 308 | spin_lock_irq(&freezer->lock); |
320 | 309 | ||
321 | update_if_frozen(cgroup, freezer); | 310 | update_if_frozen(cgroup, freezer); |
322 | if (goal_state == freezer->state) | ||
323 | goto out; | ||
324 | 311 | ||
325 | switch (goal_state) { | 312 | switch (goal_state) { |
326 | case CGROUP_THAWED: | 313 | case CGROUP_THAWED: |
314 | if (freezer->state != CGROUP_THAWED) | ||
315 | atomic_dec(&system_freezing_cnt); | ||
316 | freezer->state = CGROUP_THAWED; | ||
327 | unfreeze_cgroup(cgroup, freezer); | 317 | unfreeze_cgroup(cgroup, freezer); |
328 | break; | 318 | break; |
329 | case CGROUP_FROZEN: | 319 | case CGROUP_FROZEN: |
320 | if (freezer->state == CGROUP_THAWED) | ||
321 | atomic_inc(&system_freezing_cnt); | ||
322 | freezer->state = CGROUP_FREEZING; | ||
330 | retval = try_to_freeze_cgroup(cgroup, freezer); | 323 | retval = try_to_freeze_cgroup(cgroup, freezer); |
331 | break; | 324 | break; |
332 | default: | 325 | default: |
333 | BUG(); | 326 | BUG(); |
334 | } | 327 | } |
335 | out: | 328 | |
336 | spin_unlock_irq(&freezer->lock); | 329 | spin_unlock_irq(&freezer->lock); |
337 | 330 | ||
338 | return retval; | 331 | return retval; |
diff --git a/kernel/exit.c b/kernel/exit.c index d0b7d988f873..95a4141d07e7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk) | |||
679 | tsk->mm = NULL; | 679 | tsk->mm = NULL; |
680 | up_read(&mm->mmap_sem); | 680 | up_read(&mm->mmap_sem); |
681 | enter_lazy_tlb(mm, current); | 681 | enter_lazy_tlb(mm, current); |
682 | /* We don't want this task to be frozen prematurely */ | ||
683 | clear_freeze_flag(tsk); | ||
684 | task_unlock(tsk); | 682 | task_unlock(tsk); |
685 | mm_update_next_owner(mm); | 683 | mm_update_next_owner(mm); |
686 | mmput(mm); | 684 | mmput(mm); |
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code) | |||
1040 | exit_rcu(); | 1038 | exit_rcu(); |
1041 | /* causes final put_task_struct in finish_task_switch(). */ | 1039 | /* causes final put_task_struct in finish_task_switch(). */ |
1042 | tsk->state = TASK_DEAD; | 1040 | tsk->state = TASK_DEAD; |
1041 | tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */ | ||
1043 | schedule(); | 1042 | schedule(); |
1044 | BUG(); | 1043 | BUG(); |
1045 | /* Avoid "noreturn function does return". */ | 1044 | /* Avoid "noreturn function does return". */ |
diff --git a/kernel/fork.c b/kernel/fork.c index da4a6a10d088..827808613847 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p) | |||
992 | new_flags |= PF_FORKNOEXEC; | 992 | new_flags |= PF_FORKNOEXEC; |
993 | new_flags |= PF_STARTING; | 993 | new_flags |= PF_STARTING; |
994 | p->flags = new_flags; | 994 | p->flags = new_flags; |
995 | clear_freeze_flag(p); | ||
996 | } | 995 | } |
997 | 996 | ||
998 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) | 997 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
diff --git a/kernel/freezer.c b/kernel/freezer.c index 7be56c534397..9815b8d1eed5 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c | |||
@@ -9,101 +9,114 @@ | |||
9 | #include <linux/export.h> | 9 | #include <linux/export.h> |
10 | #include <linux/syscalls.h> | 10 | #include <linux/syscalls.h> |
11 | #include <linux/freezer.h> | 11 | #include <linux/freezer.h> |
12 | #include <linux/kthread.h> | ||
12 | 13 | ||
13 | /* | 14 | /* total number of freezing conditions in effect */ |
14 | * freezing is complete, mark current process as frozen | 15 | atomic_t system_freezing_cnt = ATOMIC_INIT(0); |
16 | EXPORT_SYMBOL(system_freezing_cnt); | ||
17 | |||
18 | /* indicate whether PM freezing is in effect, protected by pm_mutex */ | ||
19 | bool pm_freezing; | ||
20 | bool pm_nosig_freezing; | ||
21 | |||
22 | /* protects freezing and frozen transitions */ | ||
23 | static DEFINE_SPINLOCK(freezer_lock); | ||
24 | |||
25 | /** | ||
26 | * freezing_slow_path - slow path for testing whether a task needs to be frozen | ||
27 | * @p: task to be tested | ||
28 | * | ||
29 | * This function is called by freezing() if system_freezing_cnt isn't zero | ||
30 | * and tests whether @p needs to enter and stay in frozen state. Can be | ||
31 | * called under any context. The freezers are responsible for ensuring the | ||
32 | * target tasks see the updated state. | ||
15 | */ | 33 | */ |
16 | static inline void frozen_process(void) | 34 | bool freezing_slow_path(struct task_struct *p) |
17 | { | 35 | { |
18 | if (!unlikely(current->flags & PF_NOFREEZE)) { | 36 | if (p->flags & PF_NOFREEZE) |
19 | current->flags |= PF_FROZEN; | 37 | return false; |
20 | smp_wmb(); | 38 | |
21 | } | 39 | if (pm_nosig_freezing || cgroup_freezing(p)) |
22 | clear_freeze_flag(current); | 40 | return true; |
41 | |||
42 | if (pm_freezing && !(p->flags & PF_KTHREAD)) | ||
43 | return true; | ||
44 | |||
45 | return false; | ||
23 | } | 46 | } |
47 | EXPORT_SYMBOL(freezing_slow_path); | ||
24 | 48 | ||
25 | /* Refrigerator is place where frozen processes are stored :-). */ | 49 | /* Refrigerator is place where frozen processes are stored :-). */ |
26 | void refrigerator(void) | 50 | bool __refrigerator(bool check_kthr_stop) |
27 | { | 51 | { |
28 | /* Hmm, should we be allowed to suspend when there are realtime | 52 | /* Hmm, should we be allowed to suspend when there are realtime |
29 | processes around? */ | 53 | processes around? */ |
30 | long save; | 54 | bool was_frozen = false; |
55 | long save = current->state; | ||
31 | 56 | ||
32 | task_lock(current); | ||
33 | if (freezing(current)) { | ||
34 | frozen_process(); | ||
35 | task_unlock(current); | ||
36 | } else { | ||
37 | task_unlock(current); | ||
38 | return; | ||
39 | } | ||
40 | save = current->state; | ||
41 | pr_debug("%s entered refrigerator\n", current->comm); | 57 | pr_debug("%s entered refrigerator\n", current->comm); |
42 | 58 | ||
43 | spin_lock_irq(¤t->sighand->siglock); | ||
44 | recalc_sigpending(); /* We sent fake signal, clean it up */ | ||
45 | spin_unlock_irq(¤t->sighand->siglock); | ||
46 | |||
47 | /* prevent accounting of that task to load */ | ||
48 | current->flags |= PF_FREEZING; | ||
49 | |||
50 | for (;;) { | 59 | for (;;) { |
51 | set_current_state(TASK_UNINTERRUPTIBLE); | 60 | set_current_state(TASK_UNINTERRUPTIBLE); |
52 | if (!frozen(current)) | 61 | |
62 | spin_lock_irq(&freezer_lock); | ||
63 | current->flags |= PF_FROZEN; | ||
64 | if (!freezing(current) || | ||
65 | (check_kthr_stop && kthread_should_stop())) | ||
66 | current->flags &= ~PF_FROZEN; | ||
67 | spin_unlock_irq(&freezer_lock); | ||
68 | |||
69 | if (!(current->flags & PF_FROZEN)) | ||
53 | break; | 70 | break; |
71 | was_frozen = true; | ||
54 | schedule(); | 72 | schedule(); |
55 | } | 73 | } |
56 | 74 | ||
57 | /* Remove the accounting blocker */ | ||
58 | current->flags &= ~PF_FREEZING; | ||
59 | |||
60 | pr_debug("%s left refrigerator\n", current->comm); | 75 | pr_debug("%s left refrigerator\n", current->comm); |
61 | __set_current_state(save); | 76 | |
77 | /* | ||
78 | * Restore saved task state before returning. The mb'd version | ||
79 | * needs to be used; otherwise, it might silently break | ||
80 | * synchronization which depends on ordered task state change. | ||
81 | */ | ||
82 | set_current_state(save); | ||
83 | |||
84 | return was_frozen; | ||
62 | } | 85 | } |
63 | EXPORT_SYMBOL(refrigerator); | 86 | EXPORT_SYMBOL(__refrigerator); |
64 | 87 | ||
65 | static void fake_signal_wake_up(struct task_struct *p) | 88 | static void fake_signal_wake_up(struct task_struct *p) |
66 | { | 89 | { |
67 | unsigned long flags; | 90 | unsigned long flags; |
68 | 91 | ||
69 | spin_lock_irqsave(&p->sighand->siglock, flags); | 92 | if (lock_task_sighand(p, &flags)) { |
70 | signal_wake_up(p, 0); | 93 | signal_wake_up(p, 0); |
71 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 94 | unlock_task_sighand(p, &flags); |
95 | } | ||
72 | } | 96 | } |
73 | 97 | ||
74 | /** | 98 | /** |
75 | * freeze_task - send a freeze request to given task | 99 | * freeze_task - send a freeze request to given task |
76 | * @p: task to send the request to | 100 | * @p: task to send the request to |
77 | * @sig_only: if set, the request will only be sent if the task has the | 101 | * |
78 | * PF_FREEZER_NOSIG flag unset | 102 | * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE |
79 | * Return value: 'false', if @sig_only is set and the task has | 103 | * flag and either sending a fake signal to it or waking it up, depending |
80 | * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise | 104 | * on whether it has %PF_FREEZER_NOSIG set. |
81 | * | 105 | * |
82 | * The freeze request is sent by setting the tasks's TIF_FREEZE flag and | 106 | * RETURNS: |
83 | * either sending a fake signal to it or waking it up, depending on whether | 107 | * %false, if @p is not freezing or already frozen; %true, otherwise |
84 | * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task | ||
85 | * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its | ||
86 | * TIF_FREEZE flag will not be set. | ||
87 | */ | 108 | */ |
88 | bool freeze_task(struct task_struct *p, bool sig_only) | 109 | bool freeze_task(struct task_struct *p) |
89 | { | 110 | { |
90 | /* | 111 | unsigned long flags; |
91 | * We first check if the task is freezing and next if it has already | 112 | |
92 | * been frozen to avoid the race with frozen_process() which first marks | 113 | spin_lock_irqsave(&freezer_lock, flags); |
93 | * the task as frozen and next clears its TIF_FREEZE. | 114 | if (!freezing(p) || frozen(p)) { |
94 | */ | 115 | spin_unlock_irqrestore(&freezer_lock, flags); |
95 | if (!freezing(p)) { | 116 | return false; |
96 | smp_rmb(); | ||
97 | if (frozen(p)) | ||
98 | return false; | ||
99 | |||
100 | if (!sig_only || should_send_signal(p)) | ||
101 | set_freeze_flag(p); | ||
102 | else | ||
103 | return false; | ||
104 | } | 117 | } |
105 | 118 | ||
106 | if (should_send_signal(p)) { | 119 | if (!(p->flags & PF_KTHREAD)) { |
107 | fake_signal_wake_up(p); | 120 | fake_signal_wake_up(p); |
108 | /* | 121 | /* |
109 | * fake_signal_wake_up() goes through p's scheduler | 122 | * fake_signal_wake_up() goes through p's scheduler |
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only) | |||
111 | * TASK_RUNNING transition can't race with task state | 124 | * TASK_RUNNING transition can't race with task state |
112 | * testing in try_to_freeze_tasks(). | 125 | * testing in try_to_freeze_tasks(). |
113 | */ | 126 | */ |
114 | } else if (sig_only) { | ||
115 | return false; | ||
116 | } else { | 127 | } else { |
117 | wake_up_state(p, TASK_INTERRUPTIBLE); | 128 | wake_up_state(p, TASK_INTERRUPTIBLE); |
118 | } | 129 | } |
119 | 130 | ||
131 | spin_unlock_irqrestore(&freezer_lock, flags); | ||
120 | return true; | 132 | return true; |
121 | } | 133 | } |
122 | 134 | ||
123 | void cancel_freezing(struct task_struct *p) | 135 | void __thaw_task(struct task_struct *p) |
124 | { | 136 | { |
125 | unsigned long flags; | 137 | unsigned long flags; |
126 | 138 | ||
127 | if (freezing(p)) { | 139 | /* |
128 | pr_debug(" clean up: %s\n", p->comm); | 140 | * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to |
129 | clear_freeze_flag(p); | 141 | * be visible to @p as waking up implies wmb. Waking up inside |
130 | spin_lock_irqsave(&p->sighand->siglock, flags); | 142 | * freezer_lock also prevents wakeups from leaking outside |
131 | recalc_sigpending_and_wake(p); | 143 | * refrigerator. |
132 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 144 | */ |
133 | } | 145 | spin_lock_irqsave(&freezer_lock, flags); |
134 | } | 146 | if (frozen(p)) |
135 | 147 | wake_up_process(p); | |
136 | static int __thaw_process(struct task_struct *p) | 148 | spin_unlock_irqrestore(&freezer_lock, flags); |
137 | { | ||
138 | if (frozen(p)) { | ||
139 | p->flags &= ~PF_FROZEN; | ||
140 | return 1; | ||
141 | } | ||
142 | clear_freeze_flag(p); | ||
143 | return 0; | ||
144 | } | 149 | } |
145 | 150 | ||
146 | /* | 151 | /** |
147 | * Wake up a frozen process | 152 | * set_freezable - make %current freezable |
148 | * | 153 | * |
149 | * task_lock() is needed to prevent the race with refrigerator() which may | 154 | * Mark %current freezable and enter refrigerator if necessary. |
150 | * occur if the freezing of tasks fails. Namely, without the lock, if the | ||
151 | * freezing of tasks failed, thaw_tasks() might have run before a task in | ||
152 | * refrigerator() could call frozen_process(), in which case the task would be | ||
153 | * frozen and no one would thaw it. | ||
154 | */ | 155 | */ |
155 | int thaw_process(struct task_struct *p) | 156 | bool set_freezable(void) |
156 | { | 157 | { |
157 | task_lock(p); | 158 | might_sleep(); |
158 | if (__thaw_process(p) == 1) { | 159 | |
159 | task_unlock(p); | 160 | /* |
160 | wake_up_process(p); | 161 | * Modify flags while holding freezer_lock. This ensures the |
161 | return 1; | 162 | * freezer notices that we aren't frozen yet or the freezing |
162 | } | 163 | * condition is visible to try_to_freeze() below. |
163 | task_unlock(p); | 164 | */ |
164 | return 0; | 165 | spin_lock_irq(&freezer_lock); |
166 | current->flags &= ~PF_NOFREEZE; | ||
167 | spin_unlock_irq(&freezer_lock); | ||
168 | |||
169 | return try_to_freeze(); | ||
165 | } | 170 | } |
166 | EXPORT_SYMBOL(thaw_process); | 171 | EXPORT_SYMBOL(set_freezable); |
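Under the new scheme a freezer thaws tasks in two steps: retract the freezing condition first, then wake each frozen task so it can re-evaluate freezing() inside __refrigerator() and drop PF_FROZEN on its own. The sketch below only illustrates that ordering as used by the PM and cgroup freezers in this patch; it is not an interface for drivers:

    #include <linux/freezer.h>
    #include <linux/sched.h>
    #include <linux/atomic.h>

    static void thaw_my_tasks(struct task_struct **tasks, int n)
    {
        int i;

        /* First retract the freezing condition ... */
        atomic_dec(&system_freezing_cnt);

        /*
         * ... then kick the frozen tasks; each re-checks freezing()
         * inside __refrigerator(), sees it false, clears PF_FROZEN and
         * resumes whatever it was doing.
         */
        for (i = 0; i < n; i++)
            __thaw_task(tasks[i]);
    }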
diff --git a/kernel/kthread.c b/kernel/kthread.c index b6d216a92639..3d3de633702e 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -59,6 +59,31 @@ int kthread_should_stop(void) | |||
59 | EXPORT_SYMBOL(kthread_should_stop); | 59 | EXPORT_SYMBOL(kthread_should_stop); |
60 | 60 | ||
61 | /** | 61 | /** |
62 | * kthread_freezable_should_stop - should this freezable kthread return now? | ||
63 | * @was_frozen: optional out parameter, indicates whether %current was frozen | ||
64 | * | ||
65 | * kthread_should_stop() for freezable kthreads, which will enter | ||
66 | * refrigerator if necessary. This function is safe from kthread_stop() / | ||
67 | * freezer deadlock and freezable kthreads should use this function instead | ||
68 | * of calling try_to_freeze() directly. | ||
69 | */ | ||
70 | bool kthread_freezable_should_stop(bool *was_frozen) | ||
71 | { | ||
72 | bool frozen = false; | ||
73 | |||
74 | might_sleep(); | ||
75 | |||
76 | if (unlikely(freezing(current))) | ||
77 | frozen = __refrigerator(true); | ||
78 | |||
79 | if (was_frozen) | ||
80 | *was_frozen = frozen; | ||
81 | |||
82 | return kthread_should_stop(); | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(kthread_freezable_should_stop); | ||
85 | |||
86 | /** | ||
62 | * kthread_data - return data value specified on kthread creation | 87 | * kthread_data - return data value specified on kthread creation |
63 | * @task: kthread task in question | 88 | * @task: kthread task in question |
64 | * | 89 | * |
@@ -257,7 +282,7 @@ int kthreadd(void *unused) | |||
257 | set_cpus_allowed_ptr(tsk, cpu_all_mask); | 282 | set_cpus_allowed_ptr(tsk, cpu_all_mask); |
258 | set_mems_allowed(node_states[N_HIGH_MEMORY]); | 283 | set_mems_allowed(node_states[N_HIGH_MEMORY]); |
259 | 284 | ||
260 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; | 285 | current->flags |= PF_NOFREEZE; |
261 | 286 | ||
262 | for (;;) { | 287 | for (;;) { |
263 | set_current_state(TASK_INTERRUPTIBLE); | 288 | set_current_state(TASK_INTERRUPTIBLE); |
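kthread_freezable_should_stop() closes the kthread_stop()-vs-refrigerator deadlock: __refrigerator(true) re-checks the stop request every time it wakes, so a frozen thread can still be stopped. A hedged sketch of the intended calling pattern, with the work body as a placeholder:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/printk.h>

    static int freezable_kthread(void *unused)
    {
        bool was_frozen;

        set_freezable();
        while (!kthread_freezable_should_stop(&was_frozen)) {
            if (was_frozen)
                pr_debug("back from the refrigerator\n");

            /* ... one unit of background work ... */
            msleep_interruptible(1000);    /* placeholder pacing */
        }
        return 0;
    }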
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index a6b0503574ee..e7ffa8952083 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -611,17 +611,6 @@ static void power_down(void) | |||
611 | while(1); | 611 | while(1); |
612 | } | 612 | } |
613 | 613 | ||
614 | static int prepare_processes(void) | ||
615 | { | ||
616 | int error = 0; | ||
617 | |||
618 | if (freeze_processes()) { | ||
619 | error = -EBUSY; | ||
620 | thaw_processes(); | ||
621 | } | ||
622 | return error; | ||
623 | } | ||
624 | |||
625 | /** | 614 | /** |
626 | * hibernate - Carry out system hibernation, including saving the image. | 615 | * hibernate - Carry out system hibernation, including saving the image. |
627 | */ | 616 | */ |
@@ -654,7 +643,7 @@ int hibernate(void) | |||
654 | sys_sync(); | 643 | sys_sync(); |
655 | printk("done.\n"); | 644 | printk("done.\n"); |
656 | 645 | ||
657 | error = prepare_processes(); | 646 | error = freeze_processes(); |
658 | if (error) | 647 | if (error) |
659 | goto Finish; | 648 | goto Finish; |
660 | 649 | ||
@@ -815,7 +804,7 @@ static int software_resume(void) | |||
815 | goto close_finish; | 804 | goto close_finish; |
816 | 805 | ||
817 | pr_debug("PM: Preparing processes for restore.\n"); | 806 | pr_debug("PM: Preparing processes for restore.\n"); |
818 | error = prepare_processes(); | 807 | error = freeze_processes(); |
819 | if (error) { | 808 | if (error) { |
820 | swsusp_close(FMODE_READ); | 809 | swsusp_close(FMODE_READ); |
821 | goto Done; | 810 | goto Done; |
diff --git a/kernel/power/process.c b/kernel/power/process.c index addbbe5531bc..77274c9ba2f1 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -22,16 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | #define TIMEOUT (20 * HZ) | 23 | #define TIMEOUT (20 * HZ) |
24 | 24 | ||
25 | static inline int freezable(struct task_struct * p) | 25 | static int try_to_freeze_tasks(bool user_only) |
26 | { | ||
27 | if ((p == current) || | ||
28 | (p->flags & PF_NOFREEZE) || | ||
29 | (p->exit_state != 0)) | ||
30 | return 0; | ||
31 | return 1; | ||
32 | } | ||
33 | |||
34 | static int try_to_freeze_tasks(bool sig_only) | ||
35 | { | 26 | { |
36 | struct task_struct *g, *p; | 27 | struct task_struct *g, *p; |
37 | unsigned long end_time; | 28 | unsigned long end_time; |
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only) | |||
46 | 37 | ||
47 | end_time = jiffies + TIMEOUT; | 38 | end_time = jiffies + TIMEOUT; |
48 | 39 | ||
49 | if (!sig_only) | 40 | if (!user_only) |
50 | freeze_workqueues_begin(); | 41 | freeze_workqueues_begin(); |
51 | 42 | ||
52 | while (true) { | 43 | while (true) { |
53 | todo = 0; | 44 | todo = 0; |
54 | read_lock(&tasklist_lock); | 45 | read_lock(&tasklist_lock); |
55 | do_each_thread(g, p) { | 46 | do_each_thread(g, p) { |
56 | if (frozen(p) || !freezable(p)) | 47 | if (p == current || !freeze_task(p)) |
57 | continue; | ||
58 | |||
59 | if (!freeze_task(p, sig_only)) | ||
60 | continue; | 48 | continue; |
61 | 49 | ||
62 | /* | 50 | /* |
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
77 | } while_each_thread(g, p); | 65 | } while_each_thread(g, p); |
78 | read_unlock(&tasklist_lock); | 66 | read_unlock(&tasklist_lock); |
79 | 67 | ||
80 | if (!sig_only) { | 68 | if (!user_only) { |
81 | wq_busy = freeze_workqueues_busy(); | 69 | wq_busy = freeze_workqueues_busy(); |
82 | todo += wq_busy; | 70 | todo += wq_busy; |
83 | } | 71 | } |
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only) | |||
103 | elapsed_csecs = elapsed_csecs64; | 91 | elapsed_csecs = elapsed_csecs64; |
104 | 92 | ||
105 | if (todo) { | 93 | if (todo) { |
106 | /* This does not unfreeze processes that are already frozen | ||
107 | * (we have slightly ugly calling convention in that respect, | ||
108 | * and caller must call thaw_processes() if something fails), | ||
109 | * but it cleans up leftover PF_FREEZE requests. | ||
110 | */ | ||
111 | printk("\n"); | 94 | printk("\n"); |
112 | printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " | 95 | printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " |
113 | "(%d tasks refusing to freeze, wq_busy=%d):\n", | 96 | "(%d tasks refusing to freeze, wq_busy=%d):\n", |
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only) | |||
115 | elapsed_csecs / 100, elapsed_csecs % 100, | 98 | elapsed_csecs / 100, elapsed_csecs % 100, |
116 | todo - wq_busy, wq_busy); | 99 | todo - wq_busy, wq_busy); |
117 | 100 | ||
118 | thaw_workqueues(); | ||
119 | |||
120 | read_lock(&tasklist_lock); | 101 | read_lock(&tasklist_lock); |
121 | do_each_thread(g, p) { | 102 | do_each_thread(g, p) { |
122 | task_lock(p); | 103 | if (!wakeup && !freezer_should_skip(p) && |
123 | if (!wakeup && freezing(p) && !freezer_should_skip(p)) | 104 | p != current && freezing(p) && !frozen(p)) |
124 | sched_show_task(p); | 105 | sched_show_task(p); |
125 | cancel_freezing(p); | ||
126 | task_unlock(p); | ||
127 | } while_each_thread(g, p); | 106 | } while_each_thread(g, p); |
128 | read_unlock(&tasklist_lock); | 107 | read_unlock(&tasklist_lock); |
129 | } else { | 108 | } else { |
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only) | |||
136 | 115 | ||
137 | /** | 116 | /** |
138 | * freeze_processes - Signal user space processes to enter the refrigerator. | 117 | * freeze_processes - Signal user space processes to enter the refrigerator. |
118 | * | ||
119 | * On success, returns 0. On failure, -errno and system is fully thawed. | ||
139 | */ | 120 | */ |
140 | int freeze_processes(void) | 121 | int freeze_processes(void) |
141 | { | 122 | { |
142 | int error; | 123 | int error; |
143 | 124 | ||
125 | if (!pm_freezing) | ||
126 | atomic_inc(&system_freezing_cnt); | ||
127 | |||
144 | printk("Freezing user space processes ... "); | 128 | printk("Freezing user space processes ... "); |
129 | pm_freezing = true; | ||
145 | error = try_to_freeze_tasks(true); | 130 | error = try_to_freeze_tasks(true); |
146 | if (!error) { | 131 | if (!error) { |
147 | printk("done."); | 132 | printk("done."); |
@@ -150,17 +135,22 @@ int freeze_processes(void) | |||
150 | printk("\n"); | 135 | printk("\n"); |
151 | BUG_ON(in_atomic()); | 136 | BUG_ON(in_atomic()); |
152 | 137 | ||
138 | if (error) | ||
139 | thaw_processes(); | ||
153 | return error; | 140 | return error; |
154 | } | 141 | } |
155 | 142 | ||
156 | /** | 143 | /** |
157 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. | 144 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. |
145 | * | ||
146 | * On success, returns 0. On failure, -errno and system is fully thawed. | ||
158 | */ | 147 | */ |
159 | int freeze_kernel_threads(void) | 148 | int freeze_kernel_threads(void) |
160 | { | 149 | { |
161 | int error; | 150 | int error; |
162 | 151 | ||
163 | printk("Freezing remaining freezable tasks ... "); | 152 | printk("Freezing remaining freezable tasks ... "); |
153 | pm_nosig_freezing = true; | ||
164 | error = try_to_freeze_tasks(false); | 154 | error = try_to_freeze_tasks(false); |
165 | if (!error) | 155 | if (!error) |
166 | printk("done."); | 156 | printk("done."); |
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void) | |||
168 | printk("\n"); | 158 | printk("\n"); |
169 | BUG_ON(in_atomic()); | 159 | BUG_ON(in_atomic()); |
170 | 160 | ||
161 | if (error) | ||
162 | thaw_processes(); | ||
171 | return error; | 163 | return error; |
172 | } | 164 | } |
173 | 165 | ||
174 | static void thaw_tasks(bool nosig_only) | 166 | void thaw_processes(void) |
175 | { | 167 | { |
176 | struct task_struct *g, *p; | 168 | struct task_struct *g, *p; |
177 | 169 | ||
178 | read_lock(&tasklist_lock); | 170 | if (pm_freezing) |
179 | do_each_thread(g, p) { | 171 | atomic_dec(&system_freezing_cnt); |
180 | if (!freezable(p)) | 172 | pm_freezing = false; |
181 | continue; | 173 | pm_nosig_freezing = false; |
182 | 174 | ||
183 | if (nosig_only && should_send_signal(p)) | 175 | oom_killer_enable(); |
184 | continue; | 176 | |
177 | printk("Restarting tasks ... "); | ||
185 | 178 | ||
186 | if (cgroup_freezing_or_frozen(p)) | 179 | thaw_workqueues(); |
187 | continue; | ||
188 | 180 | ||
189 | thaw_process(p); | 181 | read_lock(&tasklist_lock); |
182 | do_each_thread(g, p) { | ||
183 | __thaw_task(p); | ||
190 | } while_each_thread(g, p); | 184 | } while_each_thread(g, p); |
191 | read_unlock(&tasklist_lock); | 185 | read_unlock(&tasklist_lock); |
192 | } | ||
193 | 186 | ||
194 | void thaw_processes(void) | ||
195 | { | ||
196 | oom_killer_enable(); | ||
197 | |||
198 | printk("Restarting tasks ... "); | ||
199 | thaw_workqueues(); | ||
200 | thaw_tasks(true); | ||
201 | thaw_tasks(false); | ||
202 | schedule(); | 187 | schedule(); |
203 | printk("done.\n"); | 188 | printk("done.\n"); |
204 | } | 189 | } |
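The freeze entry points now clean up after themselves: on failure freeze_processes() and freeze_kernel_threads() thaw everything before returning, which is why hibernate(), software_resume() and the snapshot ioctl lose their explicit thaw-on-error calls. A hedged sketch of a caller under the new contract, with enter_low_power_state() as a stand-in:

    #include <linux/freezer.h>

    static int enter_low_power_state(void)    /* hypothetical platform step */
    {
        return 0;
    }

    static int my_pm_transition(void)
    {
        int error;

        error = freeze_processes();
        if (error)
            return error;    /* the freezer has already thawed everything */

        error = enter_low_power_state();

        thaw_processes();    /* undo the successful freeze */
        return error;
    }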
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4953dc054c53..d336b27d1104 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -106,13 +106,11 @@ static int suspend_prepare(void) | |||
106 | goto Finish; | 106 | goto Finish; |
107 | 107 | ||
108 | error = suspend_freeze_processes(); | 108 | error = suspend_freeze_processes(); |
109 | if (error) { | 109 | if (!error) |
110 | suspend_stats.failed_freeze++; | ||
111 | dpm_save_failed_step(SUSPEND_FREEZE); | ||
112 | } else | ||
113 | return 0; | 110 | return 0; |
114 | 111 | ||
115 | suspend_thaw_processes(); | 112 | suspend_stats.failed_freeze++; |
113 | dpm_save_failed_step(SUSPEND_FREEZE); | ||
116 | usermodehelper_enable(); | 114 | usermodehelper_enable(); |
117 | Finish: | 115 | Finish: |
118 | pm_notifier_call_chain(PM_POST_SUSPEND); | 116 | pm_notifier_call_chain(PM_POST_SUSPEND); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 6d8f535c2b88..7cc3f5bc5c24 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -257,10 +257,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
257 | break; | 257 | break; |
258 | 258 | ||
259 | error = freeze_processes(); | 259 | error = freeze_processes(); |
260 | if (error) { | 260 | if (error) |
261 | thaw_processes(); | ||
262 | usermodehelper_enable(); | 261 | usermodehelper_enable(); |
263 | } | ||
264 | if (!error) | 262 | if (!error) |
265 | data->frozen = 1; | 263 | data->frozen = 1; |
266 | break; | 264 | break; |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 71034f41a2ba..7ba8feae11b8 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) | |||
600 | 600 | ||
601 | /* | 601 | /* |
602 | * Finally, kill the kernel thread. We don't need to be RCU | 602 | * Finally, kill the kernel thread. We don't need to be RCU |
603 | * safe anymore, since the bdi is gone from visibility. Force | 603 | * safe anymore, since the bdi is gone from visibility. |
604 | * unfreeze of the thread before calling kthread_stop(), otherwise | ||
605 | * it would never exit if it is currently stuck in the refrigerator. | |||
606 | */ | 604 | */ |
607 | if (bdi->wb.task) { | 605 | if (bdi->wb.task) |
608 | thaw_process(bdi->wb.task); | ||
609 | kthread_stop(bdi->wb.task); | 606 | kthread_stop(bdi->wb.task); |
610 | } | ||
611 | } | 607 | } |
612 | 608 | ||
613 | /* | 609 | /* |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 76f2c5ae908e..3134ee2fb2e8 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
328 | */ | 328 | */ |
329 | if (test_tsk_thread_flag(p, TIF_MEMDIE)) { | 329 | if (test_tsk_thread_flag(p, TIF_MEMDIE)) { |
330 | if (unlikely(frozen(p))) | 330 | if (unlikely(frozen(p))) |
331 | thaw_process(p); | 331 | __thaw_task(p); |
332 | return ERR_PTR(-1UL); | 332 | return ERR_PTR(-1UL); |
333 | } | 333 | } |
334 | if (!p->mm) | 334 | if (!p->mm) |