Diffstat (limited to 'include')

 include/linux/completion.h   |  1
 include/linux/mutex.h        |  5
 include/linux/nfs_fs.h       |  9
 include/linux/nfs_mount.h    |  2
 include/linux/pagemap.h      | 14
 include/linux/sched.h        | 36
 include/linux/sunrpc/clnt.h  |  4
 include/linux/sunrpc/sched.h |  2
 include/linux/wait.h         | 52
 9 files changed, 102 insertions(+), 23 deletions(-)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33d6aaf94447..d2961b66d53d 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -44,6 +44,7 @@ static inline void init_completion(struct completion *x)
 
 extern void wait_for_completion(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
 						   unsigned long timeout);
 extern unsigned long wait_for_completion_interruptible_timeout(
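The new wait_for_completion_killable() follows the same calling convention as wait_for_completion_interruptible(): it returns 0 once the completion fires and a negative error if the sleeping task receives a fatal signal. A minimal caller sketch, not part of the patch (struct my_dev, my_dev_start_io() and the io_done/io_status fields are invented for illustration):

/* Hypothetical driver-style caller; my_dev and my_dev_start_io() are
 * invented for this sketch. */
static int my_dev_wait_for_io(struct my_dev *dev)
{
	my_dev_start_io(dev);

	/* Sleeps in TASK_KILLABLE: immune to ordinary signals, but a
	 * fatal signal (e.g. SIGKILL) wakes the task so we can back out. */
	if (wait_for_completion_killable(&dev->io_done))
		return -ERESTARTSYS;

	return dev->io_status;
}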
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 601479772b98..05c590352dd7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
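Like mutex_lock_interruptible(), mutex_lock_killable() is __must_check: it returns 0 with the lock held, or a negative error if a fatal signal arrives while waiting, in which case the caller must unwind without touching the protected data. A hedged sketch (struct my_object and its fields are invented for the example; the -EINTR return is the caller's own choice):

/* Hypothetical caller; struct my_object is invented for this sketch. */
static int my_object_update(struct my_object *obj, int value)
{
	/* Nonzero only if the task was fatally signalled while waiting
	 * for the mutex; the lock is NOT held in that case. */
	if (mutex_lock_killable(&obj->lock))
		return -EINTR;

	obj->value = value;
	mutex_unlock(&obj->lock);
	return 0;
}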
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 099ddb4481c0..a69ba80f2dfe 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -556,14 +556,7 @@ extern void * nfs_root_data(void);
 
 #define nfs_wait_event(clnt, wq, condition)				\
 ({									\
-	int __retval = 0;						\
-	if (clnt->cl_intr) {						\
-		sigset_t oldmask;					\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
+	int __retval = wait_event_killable(wq, condition);		\
 	__retval;							\
 })
 
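The old body blocked and restored non-fatal signals around an interruptible sleep, but only on "intr" mounts; with a killable sleep the sigmask juggling is unnecessary because only fatal signals can wake the waiter, so every mount now behaves the same way. A hedged sketch of a caller checking the result (the wait condition and helper are invented for the example):

/* Hypothetical NFS-style caller; my_request_done() is invented. */
static int my_nfs_wait(struct rpc_clnt *clnt, wait_queue_head_t *wq,
		       struct my_request *req)
{
	int error = nfs_wait_event(clnt, *wq, my_request_done(req));

	/* -ERESTARTSYS here means the process received a fatal signal;
	 * 0 means the condition became true. */
	return error;
}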
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index a3ade89a64d2..df7c6b7a7ebb 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */
 
 #define NFS_MOUNT_SOFT		0x0001	/* 1 */
-#define NFS_MOUNT_INTR		0x0002	/* 1 */
+#define NFS_MOUNT_INTR		0x0002	/* 1 */	/* now unused, but ABI */
 #define NFS_MOUNT_SECURE	0x0004	/* 1 */
 #define NFS_MOUNT_POSIX		0x0008	/* 1 */
 #define NFS_MOUNT_NOCTO		0x0010	/* 1 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410ae9e1..4b62a105622b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
+/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
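lock_page_killable() targets paths where a process waiting for page I/O previously slept uninterruptibly; on -EINTR the caller abandons the operation instead of continuing with an unlocked page. A hedged caller sketch (the surrounding helper name is invented for the example):

/* Hypothetical read-path fragment; my_readpage_wait() is invented. */
static int my_readpage_wait(struct page *page)
{
	/* Sleep killably rather than uninterruptibly for the page lock. */
	if (lock_page_killable(page))
		return -EINTR;	/* fatal signal: give up on this read */

	if (!PageUptodate(page)) {
		unlock_page(page);
		return -EIO;
	}
	unlock_page(page);
	return 0;
}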
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9d4797609aa5..6c333579d9da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -172,13 +172,35 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
-#define TASK_STOPPED		4
-#define TASK_TRACED		8
+#define __TASK_STOPPED		4
+#define __TASK_TRACED		8
 /* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
 /* in tsk->state again */
 #define TASK_DEAD		64
+#define TASK_WAKEKILL		128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
+				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+				 __TASK_TRACED)
+
+#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)	\
+			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task)	\
+				((task->state & TASK_UNINTERRUPTIBLE) != 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
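TASK_WAKEKILL is an additive bit: TASK_KILLABLE is an uninterruptible sleep that a fatal signal may still wake, and the composite TASK_STOPPED/TASK_TRACED now carry the same bit. One consequence is that an open-coded bitwise test against the new composite TASK_STOPPED would also match a TASK_KILLABLE sleeper through the shared TASK_WAKEKILL bit, which is why state tests are expected to move to the task_is_*() helpers above. A hedged before/after sketch (the wrapper function is invented for the example):

/* Hypothetical caller, illustrating the intended conversion. */
static int my_task_is_paused(struct task_struct *p)
{
	/* old style: return (p->state & (TASK_STOPPED | TASK_TRACED)) != 0;
	 * which, with the composite definitions above, would also be true
	 * for a task sleeping in TASK_KILLABLE. */
	return task_is_stopped_or_traced(p);
}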
@@ -302,6 +324,7 @@ extern int in_sched_functions(unsigned long addr);
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
@@ -1892,7 +1915,14 @@ static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
+extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
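fatal_signal_pending() is the test a killable sleep uses to decide whether to bail out: it only reaches the out-of-line __fatal_signal_pending() when some signal is pending at all. A hedged sketch combining it with the new schedule_timeout_killable() (the polling loop, struct my_hw and my_hw_ready() are invented for the example):

/* Hypothetical polling loop; my_hw and my_hw_ready() are invented. */
static int my_wait_for_hw(struct my_hw *hw)
{
	while (!my_hw_ready(hw)) {
		if (fatal_signal_pending(current))
			return -EINTR;	/* only a fatal signal ends the wait early */
		/* sleep killably for up to 10ms, then re-check the hardware */
		schedule_timeout_killable(msecs_to_jiffies(10));
	}
	return 0;
}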
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3e9addc741c1..129a86e25d29 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -41,7 +41,6 @@ struct rpc_clnt {
 	struct rpc_iostats *	cl_metrics;	/* per-client statistics */
 
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
-				cl_intr     : 1,/* interruptible */
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1;/* use getport() */
 
@@ -111,7 +110,6 @@ struct rpc_create_args {
 
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
-#define RPC_CLNT_CREATE_INTR		(1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 3)
 #define RPC_CLNT_CREATE_NOPING		(1UL << 4)
@@ -137,8 +135,6 @@ int		rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
 			       int flags);
 void		rpc_restart_call(struct rpc_task *);
-void		rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
-void		rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
 void		rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t		rpc_max_payload(struct rpc_clnt *);
 void		rpc_force_rebind(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ce3d1b132729..f689f02e6793 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -137,7 +137,6 @@ struct rpc_task_setup {
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
 #define RPC_TASK_KILLED		0x0100		/* task was killed */
 #define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
-#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -145,7 +144,6 @@ struct rpc_task_setup {
 #define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING	0
 #define RPC_TASK_QUEUED		1
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0e686280450b..1f4fb0a81ecd 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -152,14 +152,15 @@ int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
 int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
 wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
 
-#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
-#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 
 #define __wait_event(wq, condition) 					\
 do {									\
@@ -345,6 +346,47 @@ do {									\
 	__ret;								\
 })
 
+#define __wait_event_killable(wq, condition, ret)			\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
+		if (condition)						\
+			break;						\
+		if (!fatal_signal_pending(current)) {			\
+			schedule();					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)				\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__wait_event_killable(wq, condition, __ret);		\
+	__ret;								\
+})
+
 /*
  * Must be called with the spinlock in the wait_queue_head_t held.
  */
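wait_event_killable() is the drop-in for wait_event_interruptible() in code that must not be disturbed by ordinary signals: it returns 0 when the condition becomes true and -ERESTARTSYS when the task is fatally signalled. A hedged caller sketch, not part of the patch (the wait queue, flag and my_consume() are invented for the example):

/* Hypothetical consumer; my_wq, my_data_ready and my_consume() are
 * invented for this sketch. */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_data_ready;

static int my_consumer(void)
{
	int err;

	/* Sleeps in TASK_KILLABLE until a producer sets my_data_ready
	 * and calls wake_up(&my_wq), or until the task is killed. */
	err = wait_event_killable(my_wq, my_data_ready);
	if (err)
		return err;		/* -ERESTARTSYS: fatal signal */

	return my_consume();
}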