author     David Howells <dhowells@redhat.com>  2017-11-02 11:27:44 -0400
committer  David Howells <dhowells@redhat.com>  2017-11-13 10:38:16 -0500
commit     5e4def20381678ba3ce0a4e117f97e378ecd81bc (patch)
tree       455d2c682d9430a06bf2eb91b12e7eba9b1a645e
parent     81445e63e67a1e98b1c2575fa2b406d4289d2754 (diff)
Pass mode to wait_on_atomic_t() action funcs and provide default actions
Make wait_on_atomic_t() pass the TASK_* mode on to its action function as an extra argument and make it 'unsigned int' throughout.

Also, consolidate a bunch of identical action functions into a default function that can do the appropriate thing for the mode.

Also, change the argument name in the bit_wait*() function declarations to reflect the fact that it's the mode and not the bit number.

[Peter Z gives this a grudging ACK, but thinks that the whole atomic_t wait should be done differently, though he's not immediately sure as to how.]

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
cc: Ingo Molnar <mingo@kernel.org>
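For illustration only (not part of the patch): a minimal sketch of what a call site can look like after this change, assuming a hypothetical struct foo with an 'outstanding' atomic_t counter that the decrementing side wakes with wake_up_atomic_t() when it reaches zero. The custom action shows the new two-argument signature; most call sites can simply pass the consolidated atomic_t_wait default. Real callers are in the diff below.

#include <linux/atomic.h>
#include <linux/sched/signal.h>
#include <linux/wait_bit.h>

struct foo {
        atomic_t outstanding;           /* hypothetical in-flight counter */
};

/* Custom action: same body as the default, but per-driver tweaks
 * (tracing, a timeout, ...) could go here.  The mode argument lets the
 * action honour signals for killable/interruptible waits.
 */
static int foo_wait_outstanding(atomic_t *counter, unsigned int mode)
{
        schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}

static void foo_drain_uninterruptible(struct foo *f)
{
        /* Most call sites now just pass the default action. */
        wait_on_atomic_t(&f->outstanding, atomic_t_wait,
                         TASK_UNINTERRUPTIBLE);
}

static int foo_drain_killable(struct foo *f)
{
        /* A killable wait can use a custom action (or atomic_t_wait). */
        return wait_on_atomic_t(&f->outstanding, foo_wait_outstanding,
                                TASK_KILLABLE);
}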
-rw-r--r--  arch/mips/kernel/traps.c                             14
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c                      8
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c   10
-rw-r--r--  drivers/media/platform/qcom/venus/hfi.c               8
-rw-r--r--  fs/afs/rxrpc.c                                        8
-rw-r--r--  fs/btrfs/extent-tree.c                               27
-rw-r--r--  fs/fscache/cookie.c                                   2
-rw-r--r--  fs/fscache/internal.h                                 2
-rw-r--r--  fs/fscache/main.c                                     9
-rw-r--r--  fs/nfs/inode.c                                        4
-rw-r--r--  fs/nfs/internal.h                                     2
-rw-r--r--  fs/ocfs2/filecheck.c                                  8
-rw-r--r--  include/linux/wait_bit.h                             15
-rw-r--r--  kernel/sched/wait_bit.c                              18
14 files changed, 37 insertions, 98 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5669d3b8bd38..5d19ed07e99d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1233,18 +1233,6 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static int wait_on_fp_mode_switch(atomic_t *p)
-{
-	/*
-	 * The FP mode for this task is currently being switched. That may
-	 * involve modifications to the format of this tasks FP context which
-	 * make it unsafe to proceed with execution for the moment. Instead,
-	 * schedule some other task.
-	 */
-	schedule();
-	return 0;
-}
-
 static int enable_restore_fp_context(int msa)
 {
 	int err, was_fpu_owner, prior_msa;
@@ -1254,7 +1242,7 @@ static int enable_restore_fp_context(int msa)
 	 * complete before proceeding.
 	 */
 	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
-			 wait_on_fp_mode_switch, TASK_KILLABLE);
+			 atomic_t_wait, TASK_KILLABLE);
 
 	if (!used_math()) {
 		/* First time FP context user. */
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index d34e5096887a..053044201e31 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -263,12 +263,6 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
 	return aux_dev;
 }
 
-static int auxdev_wait_atomic_t(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
-
 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 {
 	struct drm_dp_aux_dev *aux_dev;
@@ -283,7 +277,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
 	mutex_unlock(&aux_idr_mutex);
 
 	atomic_dec(&aux_dev->usecount);
-	wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+	wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
 			 TASK_UNINTERRUPTIBLE);
 
 	minor = aux_dev->index;
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 828904b7d468..54fc571b1102 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,13 +271,7 @@ struct igt_wakeup {
 	u32 seqno;
 };
 
-static int wait_atomic(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
-
-static int wait_atomic_timeout(atomic_t *p)
+static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
 {
 	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
 }
@@ -348,7 +342,7 @@ static void igt_wake_all_sync(atomic_t *ready,
 	atomic_set(ready, 0);
 	wake_up_all(wq);
 
-	wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+	wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
 	atomic_set(ready, count);
 	atomic_set(done, count);
 }
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index c09490876516..e374c7d1a618 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -88,12 +88,6 @@ unlock:
 	return ret;
 }
 
-static int core_deinit_wait_atomic_t(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
-
 int hfi_core_deinit(struct venus_core *core, bool blocking)
 {
 	int ret = 0, empty;
@@ -112,7 +106,7 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)
 
 	if (!empty) {
 		mutex_unlock(&core->lock);
-		wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t,
+		wait_on_atomic_t(&core->insts_count, atomic_t_wait,
 				 TASK_UNINTERRUPTIBLE);
 		mutex_lock(&core->lock);
 	}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index bb1e2caa1720..77f5420a1a24 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -41,12 +41,6 @@ static void afs_charge_preallocation(struct work_struct *);
 
 static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);
 
-static int afs_wait_atomic_t(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
-
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -121,7 +115,7 @@ void afs_close_socket(void)
 	}
 
 	_debug("outstanding %u", atomic_read(&afs_outstanding_calls));
-	wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
+	wait_on_atomic_t(&afs_outstanding_calls, atomic_t_wait,
 			 TASK_UNINTERRUPTIBLE);
 	_debug("no outstanding calls");
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e2d7e86b51d1..24cefde30e30 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4016,16 +4016,9 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
 	btrfs_put_block_group(bg);
 }
 
-static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
-{
-	schedule();
-	return 0;
-}
-
 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
 {
-	wait_on_atomic_t(&bg->nocow_writers,
-			 btrfs_wait_nocow_writers_atomic_t,
+	wait_on_atomic_t(&bg->nocow_writers, atomic_t_wait,
 			 TASK_UNINTERRUPTIBLE);
 }
 
@@ -6595,12 +6588,6 @@ void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 	btrfs_put_block_group(bg);
 }
 
-static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
-{
-	schedule();
-	return 0;
-}
-
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 {
 	struct btrfs_space_info *space_info = bg->space_info;
@@ -6623,8 +6610,7 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 	down_write(&space_info->groups_sem);
 	up_write(&space_info->groups_sem);
 
-	wait_on_atomic_t(&bg->reservations,
-			 btrfs_wait_bg_reservations_atomic_t,
+	wait_on_atomic_t(&bg->reservations, atomic_t_wait,
 			 TASK_UNINTERRUPTIBLE);
 }
 
@@ -11106,12 +11092,6 @@ int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
 	return 1;
 }
 
-static int wait_snapshotting_atomic_t(atomic_t *a)
-{
-	schedule();
-	return 0;
-}
-
 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
 {
 	while (true) {
@@ -11120,8 +11100,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
 		ret = btrfs_start_write_no_snapshotting(root);
 		if (ret)
 			break;
-		wait_on_atomic_t(&root->will_be_snapshotted,
-				 wait_snapshotting_atomic_t,
+		wait_on_atomic_t(&root->will_be_snapshotted, atomic_t_wait,
 				 TASK_UNINTERRUPTIBLE);
 	}
 }
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 40d61077bead..ff84258132bb 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -558,7 +558,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 	 * have completed.
 	 */
 	if (!atomic_dec_and_test(&cookie->n_active))
-		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
+		wait_on_atomic_t(&cookie->n_active, atomic_t_wait,
 				 TASK_UNINTERRUPTIBLE);
 
 	/* Make sure any pending writes are cancelled. */
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 97ec45110957..0ff4b49a0037 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -97,8 +97,6 @@ static inline bool fscache_object_congested(void)
 	return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
 }
 
-extern int fscache_wait_atomic_t(atomic_t *);
-
 /*
  * object.c
  */
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index b39d487ccfb0..249968dcbf5c 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -195,12 +195,3 @@ static void __exit fscache_exit(void)
 }
 
 module_exit(fscache_exit);
-
-/*
- * wait_on_atomic_t() sleep function for uninterruptible waiting
- */
-int fscache_wait_atomic_t(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 134d9f560240..1629056aa2c9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -85,9 +85,9 @@ int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
 
-int nfs_wait_atomic_killable(atomic_t *p)
+int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode)
 {
-	return nfs_wait_killable(TASK_KILLABLE);
+	return nfs_wait_killable(mode);
 }
 
 /**
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f9a4a5524bd5..5ab17fd4700a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -388,7 +388,7 @@ extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
 extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
 extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
-extern int nfs_wait_atomic_killable(atomic_t *p);
+extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
 
 /* super.c */
 extern const struct super_operations nfs_sops;
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index 2cabbcf2f28e..e87279e49ba3 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -129,19 +129,13 @@ static struct kobj_attribute ocfs2_attr_filecheck_set =
 			ocfs2_filecheck_show,
 			ocfs2_filecheck_store);
 
-static int ocfs2_filecheck_sysfs_wait(atomic_t *p)
-{
-	schedule();
-	return 0;
-}
-
 static void
 ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry)
 {
 	struct ocfs2_filecheck_entry *p;
 
 	if (!atomic_dec_and_test(&entry->fs_count))
-		wait_on_atomic_t(&entry->fs_count, ocfs2_filecheck_sysfs_wait,
+		wait_on_atomic_t(&entry->fs_count, atomic_t_wait,
 				 TASK_UNINTERRUPTIBLE);
 
 	spin_lock(&entry->fs_fcheck->fc_lock);
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index af0d495430d7..61b39eaf7cad 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -26,6 +26,8 @@ struct wait_bit_queue_entry {
 	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
 
 typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
+typedef int wait_atomic_t_action_f(atomic_t *counter, unsigned int mode);
+
 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
 int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
 int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
@@ -34,7 +36,7 @@ void wake_up_atomic_t(atomic_t *p);
 int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
 int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
 int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
-int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
+int out_of_line_wait_on_atomic_t(atomic_t *p, wait_atomic_t_action_f action, unsigned int mode);
 struct wait_queue_head *bit_waitqueue(void *word, int bit);
 extern void __init wait_bit_init(void);
 
@@ -51,10 +53,11 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
 		},							\
 	}
 
-extern int bit_wait(struct wait_bit_key *key, int bit);
-extern int bit_wait_io(struct wait_bit_key *key, int bit);
-extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
-extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
+extern int bit_wait(struct wait_bit_key *key, int mode);
+extern int bit_wait_io(struct wait_bit_key *key, int mode);
+extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
+extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
+extern int atomic_t_wait(atomic_t *counter, unsigned int mode);
 
 /**
  * wait_on_bit - wait for a bit to be cleared
@@ -251,7 +254,7 @@ wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
  * outside of the target 'word'.
  */
 static inline
-int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
+int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode)
 {
 	might_sleep();
 	if (atomic_read(val) == 0)
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index f8159698aa4d..84cb3acd9260 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -183,7 +183,7 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo
  */
 static __sched
 int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-		       int (*action)(atomic_t *), unsigned mode)
+		       wait_atomic_t_action_f action, unsigned int mode)
 {
 	atomic_t *val;
 	int ret = 0;
@@ -193,7 +193,7 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en
 		val = wbq_entry->key.flags;
 		if (atomic_read(val) == 0)
 			break;
-		ret = (*action)(val);
+		ret = (*action)(val, mode);
 	} while (!ret && atomic_read(val) != 0);
 	finish_wait(wq_head, &wbq_entry->wq_entry);
 	return ret;
@@ -210,8 +210,9 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en
 		},							\
 	}
 
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
-					 unsigned mode)
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p,
+					 wait_atomic_t_action_f action,
+					 unsigned int mode)
 {
 	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
 	DEFINE_WAIT_ATOMIC_T(wq_entry, p);
@@ -220,6 +221,15 @@ __sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
 }
 EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
+__sched int atomic_t_wait(atomic_t *counter, unsigned int mode)
+{
+	schedule();
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL(atomic_t_wait);
+
 /**
  * wake_up_atomic_t - Wake up a waiter on a atomic_t
  * @p: The atomic_t being waited on, a kernel virtual address