Diffstat (limited to 'include/linux/freezer.h')
-rw-r--r--  include/linux/freezer.h | 159
1 files changed, 76 insertions, 83 deletions
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee756..0ab54e16a91f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
 
 /*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
  */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
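For orientation (not part of the patch; the thread function and its body are hypothetical), a freezable kernel thread under the reworked API opts in with set_freezable() and then calls try_to_freeze() in its main loop, so the atomic_read() fast path in freezing() keeps the common, non-freezing case cheap:

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* Hypothetical example thread; not taken from this patch. */
static int example_kthread_fn(void *data)
{
	set_freezable();		/* clear PF_NOFREEZE so the freezer counts this thread */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* parks in __refrigerator() while freezing(current) */
		msleep_interruptible(1000);	/* stand-in for the thread's real work */
	}
	return 0;
}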
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
  * appropriately in case the child has exited before the freezing of tasks is
  * complete. However, we don't want kernel threads to be frozen in unexpected
  * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
  */
 
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
 static inline void freezer_do_not_count(void)
 {
-	if (current->mm)
-		current->flags |= PF_FREEZER_SKIP;
+	current->flags |= PF_FREEZER_SKIP;
 }
 
 /*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
  */
 static inline void freezer_count(void)
 {
-	if (current->mm) {
-		current->flags &= ~PF_FREEZER_SKIP;
-		try_to_freeze();
-	}
+	current->flags &= ~PF_FREEZER_SKIP;
+	try_to_freeze();
 }
 
 /*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want allow a task that's
+ * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
+ * that neither return any clear indication of whether a freeze event happened
+ * while in this function.
  */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
 
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule()						\
+({									\
+	freezer_do_not_count();						\
+	schedule();							\
+	freezer_count();						\
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout)			\
+({									\
+	long __retval;							\
+	freezer_do_not_count();						\
+	__retval = schedule_timeout_killable(timeout);			\
+	freezer_count();						\
+	__retval;							\
+})
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
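As a sketch of where the new freezable_schedule() helper is meant to slot in (an assumed caller; the wait queue, flag, and function names below are invented for illustration), a killable wait can now sleep without holding up freeze_processes():

#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_reply_wq);	/* hypothetical wait queue */
static bool example_reply_ready;			/* hypothetical completion flag */

/* Wait killably for a reply without blocking the freezer meanwhile. */
static int example_wait_for_reply(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&example_reply_wq, &wait, TASK_KILLABLE);
		if (example_reply_ready)
			break;
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		freezable_schedule();	/* PF_FREEZER_SKIP is set around schedule() */
	}
	finish_wait(&example_reply_wq, &wait);
	return ret;
}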
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq,			\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval);				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
+
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule()  schedule()
+
+#define freezable_schedule_timeout_killable(timeout)			\
+	schedule_timeout_killable(timeout)
 
 #define wait_event_freezable(wq, condition)				\
 	wait_event_interruptible(wq, condition)
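Finally, a usage sketch for the reworked wait_event_freezable() (again an assumed consumer, not from the patch; the queue, flag, and thread names are hypothetical): the thread sleeps interruptibly on its condition, and the wrapper calls try_to_freeze() itself whenever a freeze request wakes it:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical wait queue */
static bool example_work_pending;		/* hypothetical condition */

static int example_wait_thread(void *data)
{
	set_freezable();

	while (!kthread_should_stop()) {
		/* Returns 0 once the condition is true, -ERESTARTSYS if interrupted;
		 * freezes transparently if the freezer asks in between. */
		if (wait_event_freezable(example_wq,
					 example_work_pending ||
					 kthread_should_stop()))
			continue;
		example_work_pending = false;	/* stand-in for consuming the work */
	}
	return 0;
}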