38 files changed, 195 insertions, 275 deletions
diff --git a/Documentation/filesystems/caching/operations.txt b/Documentation/filesystems/caching/operations.txt
index bee2a5f93d60..a1c052cbba35 100644
--- a/Documentation/filesystems/caching/operations.txt
+++ b/Documentation/filesystems/caching/operations.txt
@@ -90,7 +90,7 @@ operations:
      to be cleared before proceeding:
 
 	wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-		    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+		    TASK_UNINTERRUPTIBLE);
 
 
  (2) The operation may be fast asynchronous (FSCACHE_OP_FAST), in which case it
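For context, a minimal sketch of the two sides of such a flag-bit wait under the new two-argument form documented above. This is illustrative only, not taken from FS-Cache itself; the waker side is an assumption about how the bit is normally released:

    /* waiter: sleeps until FSCACHE_OP_WAITING is cleared */
    wait_on_bit(&op->flags, FSCACHE_OP_WAITING, TASK_UNINTERRUPTIBLE);

    /* waker (sketch): clear the bit, then wake the hashed waitqueue */
    clear_bit(FSCACHE_OP_WAITING, &op->flags);
    smp_mb__after_atomic();             /* order the clear before the wakeup */
    wake_up_bit(&op->flags, FSCACHE_OP_WAITING);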
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4e84095833db..96c92b75452f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -615,16 +615,6 @@ static void write_endio(struct bio *bio, int error)
 }
 
 /*
- * This function is called when wait_on_bit is actually waiting.
- */
-static int do_io_schedule(void *word)
-{
-	io_schedule();
-
-	return 0;
-}
-
-/*
  * Initiate a write on a dirty buffer, but don't wait for it.
  *
  * - If the buffer is not dirty, exit.
@@ -640,8 +630,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
 		return;
 
 	clear_bit(B_DIRTY, &b->state);
-	wait_on_bit_lock(&b->state, B_WRITING,
-			 do_io_schedule, TASK_UNINTERRUPTIBLE);
+	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
 	if (!write_list)
 		submit_io(b, WRITE, b->block, write_endio);
@@ -675,9 +664,9 @@ static void __make_buffer_clean(struct dm_buffer *b)
 	if (!b->state)	/* fast case */
 		return;
 
-	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 	__write_dirty_buffer(b, NULL);
-	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 }
 
 /*
@@ -1030,7 +1019,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	if (need_submit)
 		submit_io(b, READ, b->block, read_endio);
 
-	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
 	if (b->read_error) {
 		int error = b->read_error;
@@ -1209,15 +1198,13 @@ again:
 			dropped_lock = 1;
 			b->hold_count++;
 			dm_bufio_unlock(c);
-			wait_on_bit(&b->state, B_WRITING,
-				    do_io_schedule,
-				    TASK_UNINTERRUPTIBLE);
+			wait_on_bit_io(&b->state, B_WRITING,
+				       TASK_UNINTERRUPTIBLE);
 			dm_bufio_lock(c);
 			b->hold_count--;
 		} else
-			wait_on_bit(&b->state, B_WRITING,
-				    do_io_schedule,
-				    TASK_UNINTERRUPTIBLE);
+			wait_on_bit_io(&b->state, B_WRITING,
+				       TASK_UNINTERRUPTIBLE);
 	}
 
 	if (!test_bit(B_DIRTY, &b->state) &&
@@ -1321,15 +1308,15 @@ retry:
 
 	__write_dirty_buffer(b, NULL);
 	if (b->hold_count == 1) {
-		wait_on_bit(&b->state, B_WRITING,
-			    do_io_schedule, TASK_UNINTERRUPTIBLE);
+		wait_on_bit_io(&b->state, B_WRITING,
+			       TASK_UNINTERRUPTIBLE);
 		set_bit(B_DIRTY, &b->state);
 		__unlink_buffer(b);
 		__link_buffer(b, new_block, LIST_DIRTY);
 	} else {
 		sector_t old_block;
-		wait_on_bit_lock(&b->state, B_WRITING,
-				 do_io_schedule, TASK_UNINTERRUPTIBLE);
+		wait_on_bit_lock_io(&b->state, B_WRITING,
+				    TASK_UNINTERRUPTIBLE);
 		/*
 		 * Relink buffer to "new_block" so that write_callback
 		 * sees "new_block" as a block number.
@@ -1341,8 +1328,8 @@ retry:
 		__unlink_buffer(b);
 		__link_buffer(b, new_block, b->list_mode);
 		submit_io(b, WRITE, new_block, write_endio);
-		wait_on_bit(&b->state, B_WRITING,
-			    do_io_schedule, TASK_UNINTERRUPTIBLE);
+		wait_on_bit_io(&b->state, B_WRITING,
+			       TASK_UNINTERRUPTIBLE);
 		__unlink_buffer(b);
 		__link_buffer(b, old_block, b->list_mode);
 	}
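These waits only complete because the I/O completion path clears the state bit and kicks the hashed waitqueue. A rough sketch of the waker side that pairs with the wait_on_bit_io() calls above; it is not part of this patch, and the bi_private/write_error details are assumptions for illustration:

    static void write_endio_sketch(struct bio *bio, int error)
    {
            struct dm_buffer *b = bio->bi_private;  /* assumed field */

            b->write_error = error;                 /* assumed field */
            clear_bit(B_WRITING, &b->state);        /* publish completion... */
            smp_mb__after_atomic();                 /* ...before waking waiters */
            wake_up_bit(&b->state, B_WRITING);      /* wakes wait_on_bit_io() */
    }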
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5bd2290cfb1e..864b03f47727 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1032,21 +1032,13 @@ static void start_merge(struct dm_snapshot *s)
 	snapshot_merge_next_chunks(s);
 }
 
-static int wait_schedule(void *ptr)
-{
-	schedule();
-
-	return 0;
-}
-
 /*
  * Stop the merging process and wait until it finishes.
  */
 static void stop_merge(struct dm_snapshot *s)
 {
 	set_bit(SHUTDOWN_MERGE, &s->state_bits);
-	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
-		    TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
 	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
 }
 
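The simplest class of conversion: any action that just called schedule() is dropped and the waiter passes only word, bit and mode. A self-contained sketch of the resulting handshake pattern; the flag word and bit name here are placeholders, not dm-snap symbols:

    static unsigned long my_flags;
    #define MY_BUSY 0                       /* illustrative bit number */

    static void consumer_wait(void)
    {
            /* sleeps until MY_BUSY is cleared and the waitqueue is woken */
            wait_on_bit(&my_flags, MY_BUSY, TASK_UNINTERRUPTIBLE);
    }

    static void producer_done(void)
    {
            clear_bit(MY_BUSY, &my_flags);
            smp_mb__after_atomic();         /* order the clear before the wakeup */
            wake_up_bit(&my_flags, MY_BUSY);
    }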
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index e35580618936..f296394bb7c5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -253,13 +253,6 @@ static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
 	return usb_urb_exitv2(&adap->stream);
 }
 
-static int wait_schedule(void *ptr)
-{
-	schedule();
-
-	return 0;
-}
-
 static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
 {
 	struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
@@ -273,8 +266,7 @@ static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
 			dvbdmxfeed->pid, dvbdmxfeed->index);
 
 	/* wait init is done */
-	wait_on_bit(&adap->state_bits, ADAP_INIT, wait_schedule,
-		    TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&adap->state_bits, ADAP_INIT, TASK_UNINTERRUPTIBLE);
 
 	if (adap->active_fe == -1)
 		return -EINVAL;
@@ -568,7 +560,7 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
 
 	if (!adap->suspend_resume_active) {
 		set_bit(ADAP_SLEEP, &adap->state_bits);
-		wait_on_bit(&adap->state_bits, ADAP_STREAMING, wait_schedule,
+		wait_on_bit(&adap->state_bits, ADAP_STREAMING,
 			    TASK_UNINTERRUPTIBLE);
 	}
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a389820d158b..3e11aab9f391 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3437,16 +3437,10 @@ done_unlocked:
 	return 0;
 }
 
-static int eb_wait(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
 {
-	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
-		    TASK_UNINTERRUPTIBLE);
+	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+		       TASK_UNINTERRUPTIBLE);
 }
 
 static noinline_for_stack int
diff --git a/fs/buffer.c b/fs/buffer.c
index eba6e4f621ce..8f05111bbb8b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -61,16 +61,9 @@ inline void touch_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(touch_buffer);
 
-static int sleep_on_buffer(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
-							TASK_UNINTERRUPTIBLE);
+	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
@@ -123,7 +116,7 @@ EXPORT_SYMBOL(buffer_check_dirty_writeback);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
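The _lock variants implement a bit-lock: the call returns with the bit set, so the caller owns it until it clears the bit and wakes the queue. A minimal sketch of the lock/unlock pairing behind __lock_buffer(); the unlock sequence shown is the conventional pattern, not a quote of fs/buffer.c:

    /* acquire: sleep (accounted as iowait) until BH_Lock can be set */
    wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);

    /* ... exclusive access to the buffer ... */

    /* release: clear the lock bit and wake anyone queued on it */
    clear_bit_unlock(BH_Lock, &bh->b_state);
    smp_mb__after_atomic();
    wake_up_bit(&bh->b_state, BH_Lock);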
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 20d75b8ddb26..b98366f21f9e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3934,13 +3934,6 @@ cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
 	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
 }
 
-static int
-cifs_sb_tcon_pending_wait(void *unused)
-{
-	schedule();
-	return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /* find and return a tlink with given uid */
 static struct tcon_link *
 tlink_rb_search(struct rb_root *root, kuid_t uid)
@@ -4039,11 +4032,10 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 	} else {
 wait_for_construction:
 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
-				  cifs_sb_tcon_pending_wait,
 				  TASK_INTERRUPTIBLE);
 		if (ret) {
 			cifs_put_tlink(tlink);
-			return ERR_PTR(ret);
+			return ERR_PTR(-ERESTARTSYS);
 		}
 
 		/* if it's good, return it */
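Note the return-value shift in this hunk: the old per-caller action returned -ERESTARTSYS itself, while the shared bit_wait() only reports "a signal woke us" as a non-zero value. Callers passing TASK_INTERRUPTIBLE therefore choose the errno themselves, which is the pattern the sketch below restates:

    ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
                      TASK_INTERRUPTIBLE);
    if (ret) {
            /* non-zero means interrupted; map it to an errno here */
            cifs_put_tlink(tlink);
            return ERR_PTR(-ERESTARTSYS);
    }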
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e90a1e9aa627..b88b1ade4d3d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3618,13 +3618,6 @@ static int cifs_launder_page(struct page *page)
 	return rc;
 }
 
-static int
-cifs_pending_writers_wait(void *unused)
-{
-	schedule();
-	return 0;
-}
-
 void cifs_oplock_break(struct work_struct *work)
 {
 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -3636,7 +3629,7 @@ void cifs_oplock_break(struct work_struct *work)
 	int rc = 0;
 
 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
-			cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE);
+			TASK_UNINTERRUPTIBLE);
 
 	server->ops->downgrade_oplock(server, cinode,
 		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a174605f6afa..213c4580b4e3 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1794,8 +1794,8 @@ cifs_revalidate_mapping(struct inode *inode)
 	int rc;
 	unsigned long *flags = &CIFS_I(inode)->flags;
 
-	rc = wait_on_bit_lock(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
-				TASK_KILLABLE);
+	rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
+				     TASK_KILLABLE);
 	if (rc)
 		return rc;
 
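Callers that still need a non-default sleep keep their callback and simply move to the *_action spelling, as cifs does here with cifs_wait_bit_killable. A sketch of the general shape of such an action function (illustrative only; the real cifs helper may differ in detail): it returns 0 to keep waiting after the sleep, or non-zero to abort the wait.

    static int example_wait_bit_killable(void *word)
    {
            if (fatal_signal_pending(current))
                    return -ERESTARTSYS;    /* abort the wait */
            schedule();                     /* or a freezable/killable sleep */
            return 0;                       /* re-check the bit and wait again */
    }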
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 3b0c62e622da..6bf55d0ed494 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -582,7 +582,7 @@ int cifs_get_writer(struct cifsInodeInfo *cinode)
 
 start:
 	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
-			 cifs_oplock_break_wait, TASK_KILLABLE);
+			 TASK_KILLABLE);
 	if (rc)
 		return rc;
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index be568b7311d6..ef9bef118342 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -342,7 +342,8 @@ static void __inode_wait_for_writeback(struct inode *inode)
 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
 	while (inode->i_state & I_SYNC) {
 		spin_unlock(&inode->i_lock);
-		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
+		__wait_on_bit(wqh, &wq, bit_wait,
+			      TASK_UNINTERRUPTIBLE);
 		spin_lock(&inode->i_lock);
 	}
 }
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index aec01be91b0a..89acec742e0b 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -160,7 +160,7 @@ void __fscache_enable_cookie(struct fscache_cookie *cookie,
 	_enter("%p", cookie);
 
 	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
-			 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+			 TASK_UNINTERRUPTIBLE);
 
 	if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
 		goto out_unlock;
@@ -255,7 +255,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 	if (!fscache_defer_lookup) {
 		_debug("non-deferred lookup %p", &cookie->flags);
 		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+			    TASK_UNINTERRUPTIBLE);
 		_debug("complete");
 		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
 			goto unavailable;
@@ -463,7 +463,6 @@ void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
 	_enter("%p", cookie);
 
 	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
-		    fscache_wait_bit_interruptible,
 		    TASK_UNINTERRUPTIBLE);
 
 	_leave("");
@@ -525,7 +524,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 	}
 
 	wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
-			 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+			 TASK_UNINTERRUPTIBLE);
 	if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
 		goto out_unlock_enable;
 
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index bc6c08fcfddd..7872a62ef30c 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -97,8 +97,6 @@ static inline bool fscache_object_congested(void)
 	return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
 }
 
-extern int fscache_wait_bit(void *);
-extern int fscache_wait_bit_interruptible(void *);
 extern int fscache_wait_atomic_t(atomic_t *);
 
 /*
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 63f868e869b9..a31b83c5cbd9 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -197,24 +197,6 @@ static void __exit fscache_exit(void)
 module_exit(fscache_exit);
 
 /*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-int fscache_wait_bit(void *flags)
-{
-	schedule();
-	return 0;
-}
-
-/*
- * wait_on_bit() sleep function for interruptible waiting
- */
-int fscache_wait_bit_interruptible(void *flags)
-{
-	schedule();
-	return signal_pending(current);
-}
-
-/*
  * wait_on_atomic_t() sleep function for uninterruptible waiting
  */
 int fscache_wait_atomic_t(atomic_t *p)
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index ed70714503fa..85332b9d19d1 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -298,7 +298,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
 
 	jif = jiffies;
 	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
-			fscache_wait_bit_interruptible,
 			TASK_INTERRUPTIBLE) != 0) {
 		fscache_stat(&fscache_n_retrievals_intr);
 		_leave(" = -ERESTARTSYS");
@@ -342,7 +341,6 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
 		if (stat_op_waits)
 			fscache_stat(stat_op_waits);
 		if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-				fscache_wait_bit_interruptible,
 				TASK_INTERRUPTIBLE) != 0) {
 			ret = fscache_cancel_op(op, do_cancel);
 			if (ret == 0)
@@ -351,7 +349,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
 			/* it's been removed from the pending queue by another party,
 			 * so we should get to run shortly */
 			wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-				    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+				    TASK_UNINTERRUPTIBLE);
 		}
 		_debug("<<< GO");
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..770e16716d81 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -856,27 +856,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 }
 
 /**
- * gfs2_glock_holder_wait
- * @word: unused
- *
- * This function and gfs2_glock_demote_wait both show up in the WCHAN
- * field. Thus I've separated these otherwise identical functions in
- * order to be more informative to the user.
- */
-
-static int gfs2_glock_holder_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
-static int gfs2_glock_demote_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
-/**
  * gfs2_glock_wait - wait on a glock acquisition
  * @gh: the glock holder
  *
@@ -888,7 +867,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
 	unsigned long time1 = jiffies;
 
 	might_sleep();
-	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
 	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
 		/* Lengthen the minimum hold time. */
 		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
@@ -1128,7 +1107,7 @@ void gfs2_glock_dq_wait(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	gfs2_glock_dq(gh);
 	might_sleep();
-	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
 }
 
 /**
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..992ca5b1e045 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -936,12 +936,6 @@ fail:
 	return error;
 }
 
-static int dlm_recovery_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
 static int control_first_done(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -976,7 +970,7 @@ restart:
 		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);
 
 		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
-			    dlm_recovery_wait, TASK_UNINTERRUPTIBLE);
+			    TASK_UNINTERRUPTIBLE);
 		goto restart;
 	}
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index bc564c0d6d16..d3eae244076e 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1024,20 +1024,13 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp)
 		lm->lm_unmount(sdp);
 }
 
-static int gfs2_journalid_wait(void *word)
-{
-	if (signal_pending(current))
-		return -EINTR;
-	schedule();
-	return 0;
-}
-
 static int wait_on_journal(struct gfs2_sbd *sdp)
 {
 	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
 		return 0;
 
-	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE);
+	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
+		? -EINTR : 0;
 }
 
 void gfs2_online_uevent(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 94555d4c5698..573bd3b758fa 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -591,12 +591,6 @@ done:
 	wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
 }
 
-static int gfs2_recovery_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
 int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
 {
 	int rv;
@@ -609,7 +603,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
 	BUG_ON(!rv);
 
 	if (wait)
-		wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait,
+		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
 			    TASK_UNINTERRUPTIBLE);
 
 	return wait ? jd->jd_recover_error : 0;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 1319b5c4ec68..2607ff13d486 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -864,12 +864,6 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	return error;
 }
 
-static int gfs2_umount_recovery_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
 /**
  * gfs2_put_super - Unmount the filesystem
  * @sb: The VFS superblock
@@ -894,7 +888,7 @@ restart:
 			continue;
 		spin_unlock(&sdp->sd_jindex_spin);
 		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
-			    gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
+			    TASK_UNINTERRUPTIBLE);
 		goto restart;
 	}
 	spin_unlock(&sdp->sd_jindex_spin);
diff --git a/fs/inode.c b/fs/inode.c
index 6eecb7ff0b9a..5938f3928944 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1695,13 +1695,6 @@ int inode_needs_sync(struct inode *inode)
 }
 EXPORT_SYMBOL(inode_needs_sync);
 
-int inode_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-EXPORT_SYMBOL(inode_wait);
-
 /*
  * If we try to find an inode in the inode hash while it is being
  * deleted, we have to wait until the filesystem completes its
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6f0f590cc5a3..5f09370c90a8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -763,12 +763,6 @@ static void warn_dirty_buffer(struct buffer_head *bh)
 	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
-static int sleep_on_shadow_bh(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 /*
  * If the buffer is already part of the current transaction, then there
  * is nothing we need to do. If it is already part of a prior
@@ -906,8 +900,8 @@ repeat:
 		if (buffer_shadow(bh)) {
 			JBUFFER_TRACE(jh, "on shadow: sleep");
 			jbd_unlock_bh_state(bh);
-			wait_on_bit(&bh->b_state, BH_Shadow,
-				    sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
+			wait_on_bit_io(&bh->b_state, BH_Shadow,
+				       TASK_UNINTERRUPTIBLE);
 			goto repeat;
 		}
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 4042ff58fe3f..524dd80d1898 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -361,8 +361,8 @@ start:
 	 * Prevent starvation issues if someone is doing a consistency
 	 * sync-to-disk
 	 */
-	ret = wait_on_bit(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
-			nfs_wait_bit_killable, TASK_KILLABLE);
+	ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
+			nfs_wait_bit_killable, TASK_KILLABLE);
 	if (ret)
 		return ret;
 
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 44bf0140a4c7..e2a0361e24c6 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -783,8 +783,8 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
 static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
 {
 	might_sleep();
-	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+	wait_on_bit_action(&ds->ds_state, NFS4DS_CONNECTING,
 			nfs_wait_bit_killable, TASK_KILLABLE);
 }
 
 static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 9927913c97c2..b7b710e7d08e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1074,8 +1074,8 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
 	 * the bit lock here if it looks like we're going to be doing that.
 	 */
 	for (;;) {
-		ret = wait_on_bit(bitlock, NFS_INO_INVALIDATING,
+		ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
 				  nfs_wait_bit_killable, TASK_KILLABLE);
 		if (ret)
 			goto out;
 		spin_lock(&inode->i_lock);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 848f6853c59e..42f121182167 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1251,8 +1251,8 @@ int nfs4_wait_clnt_recover(struct nfs_client *clp)
 	might_sleep();
 
 	atomic_inc(&clp->cl_count);
-	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
+	res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
 			nfs_wait_bit_killable, TASK_KILLABLE);
 	if (res)
 		goto out;
 	if (clp->cl_cons_state < 0)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..6104d3500b49 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -138,12 +138,6 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
 	return __nfs_iocounter_wait(c);
 }
 
-static int nfs_wait_bit_uninterruptible(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
@@ -158,7 +152,6 @@ nfs_page_group_lock(struct nfs_page *req)
 	WARN_ON_ONCE(head != head->wb_head);
 
 	wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-		nfs_wait_bit_uninterruptible,
 		TASK_UNINTERRUPTIBLE);
 }
 
@@ -425,9 +418,8 @@ void nfs_release_request(struct nfs_page *req)
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	return wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_uninterruptible,
-			TASK_UNINTERRUPTIBLE);
+	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
+			      TASK_UNINTERRUPTIBLE);
 }
 
 /*
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 6fdcd233d6f7..a8914b335617 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1885,7 +1885,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
 		if (!sync)
 			goto out;
-		status = wait_on_bit_lock(&nfsi->flags,
+		status = wait_on_bit_lock_action(&nfsi->flags,
 					   NFS_INO_LAYOUTCOMMITTING,
 					   nfs_wait_bit_killable,
 					   TASK_KILLABLE);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..f05f321f9d3d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -397,7 +397,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	int err;
 
 	/* Stop dirtying of new pages while we sync */
-	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
+	err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
 			nfs_wait_bit_killable, TASK_KILLABLE);
 	if (err)
 		goto out_err;
@@ -1475,7 +1475,7 @@ int nfs_commit_inode(struct inode *inode, int how)
 			return error;
 		if (!may_wait)
 			goto out_mark_dirty;
-		error = wait_on_bit(&NFS_I(inode)->flags,
+		error = wait_on_bit_action(&NFS_I(inode)->flags,
 				NFS_INO_COMMIT,
 				nfs_wait_bit_killable,
 				TASK_KILLABLE);
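NFS keeps its own nfs_wait_bit_killable action throughout, so only the helper name changes in these hunks. Per the new wait.h definitions further down, the _action spelling with the shared bit_wait helper is equivalent to the plain two-argument call; the sketch below is illustrative of that equivalence, not a suggested change to NFS:

    /* equivalent to wait_on_bit(&flags, NFS_INO_COMMIT, TASK_KILLABLE) */
    error = wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_COMMIT,
                               bit_wait, TASK_KILLABLE);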
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bd68819f0815..73960ff09e56 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -854,11 +854,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 		(wait)->flags = 0;						\
 	} while (0)
 
+
+extern int bit_wait(void *);
+extern int bit_wait_io(void *);
+
 /**
  * wait_on_bit - wait for a bit to be cleared
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
  *
  * There is a standard hashed waitqueue table for generic use. This
@@ -867,9 +870,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  * call wait_on_bit() in threads waiting for the bit to clear.
  * One uses wait_on_bit() where one is waiting for the bit to clear,
  * but has no intention of setting it.
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit(void *word, int bit, unsigned mode)
+{
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait,
+				       mode);
+}
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared.  This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(void *word, int bit, unsigned mode)
+{
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait_io,
+				       mode);
+}
+
+/**
+ * wait_on_bit_action - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared, and allow the waiting action to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
  */
 static inline int
-wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_action(void *word, int bit, int (*action)(void *), unsigned mode)
 {
 	if (!test_bit(bit, word))
 		return 0;
@@ -880,7 +936,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
  *
  * There is a standard hashed waitqueue table for generic use. This
@@ -891,9 +946,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  * wait_on_bit() in threads waiting to be able to set the bit.
  * One uses wait_on_bit_lock() where one is waiting for the bit to
  * clear with the intention of setting it, and when done, clearing it.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock(void *word, int bit, unsigned mode)
+{
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+}
+
+/**
+ * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to atomically set it.  This is similar
+ * to wait_on_bit(), but calls io_schedule() instead of schedule()
+ * for the actual waiting.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+{
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
+}
+
+/**
+ * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to set it, and allow the waiting action
+ * to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_lock_action(void *word, int bit, int (*action)(void *), unsigned mode)
 {
 	if (!test_and_set_bit(bit, word))
 		return 0;
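Taken together, the new header offers three flavours of each operation: plain (schedule), _io (io_schedule, accounted as iowait) and _action (caller-supplied sleep). A short, self-contained sketch of how a caller might pick between them; the flag word and bit number are made up for illustration:

    #define EXAMPLE_BUSY 0                  /* illustrative bit number */
    static unsigned long example_flags;

    static void example_wait_variants(void)
    {
            /* plain wait: uninterruptible sleep via schedule() */
            wait_on_bit(&example_flags, EXAMPLE_BUSY, TASK_UNINTERRUPTIBLE);

            /* I/O wait: same semantics, sleeps via io_schedule() */
            wait_on_bit_io(&example_flags, EXAMPLE_BUSY, TASK_UNINTERRUPTIBLE);

            /* interruptible wait: non-zero return means a signal woke us */
            if (wait_on_bit(&example_flags, EXAMPLE_BUSY, TASK_INTERRUPTIBLE))
                    return;                 /* caller chooses -EINTR/-ERESTARTSYS */

            /* lock-style wait: returns with the bit set, i.e. "lock" held */
            if (!wait_on_bit_lock(&example_flags, EXAMPLE_BUSY, TASK_UNINTERRUPTIBLE)) {
                    /* ... critical section ... */
                    clear_bit_unlock(EXAMPLE_BUSY, &example_flags);
                    smp_mb__after_atomic();
                    wake_up_bit(&example_flags, EXAMPLE_BUSY);
            }
    }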
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 5777c13849ba..a219be961c0a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -90,7 +90,6 @@ struct writeback_control {
  * fs/fs-writeback.c
  */
 struct bdi_writeback;
-int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 							enum wb_reason reason);
@@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode);
 static inline void wait_on_inode(struct inode *inode)
 {
 	might_sleep();
-	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
 }
 
 /*
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index adf98622cb32..54e75226c2c4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,12 +28,6 @@
 #include <linux/compat.h>
 
 
-static int ptrace_trapping_sleep_fn(void *flags)
-{
-	schedule();
-	return 0;
-}
-
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
@@ -371,7 +365,7 @@ unlock_creds:
 out:
 	if (!retval) {
 		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
-			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+			    TASK_UNINTERRUPTIBLE);
 		proc_ptrace_connector(task, PTRACE_ATTACH);
 	}
 
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 0ffa20ae657b..a104879e88f2 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -502,3 +502,21 @@ void wake_up_atomic_t(atomic_t *p)
 	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
 }
 EXPORT_SYMBOL(wake_up_atomic_t);
+
+__sched int bit_wait(void *word)
+{
+	if (signal_pending_state(current->state, current))
+		return 1;
+	schedule();
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait);
+
+__sched int bit_wait_io(void *word)
+{
+	if (signal_pending_state(current->state, current))
+		return 1;
+	io_schedule();
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait_io);
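The two exported helpers above define the contract for action functions: check signal_pending_state() against the mode recorded in current->state, sleep, and return 0 to keep waiting or non-zero to abort. A hypothetical action built to the same contract (not part of this patch) that bounds each sleep, usable with the retained wait_on_bit_action():

    /* hypothetical: like bit_wait(), but gives up the CPU for at most 1s
     * per iteration before re-checking the bit */
    __sched int example_bit_wait_timeout(void *word)
    {
            if (signal_pending_state(current->state, current))
                    return 1;               /* abort: a permitted signal arrived */
            schedule_timeout(HZ);           /* task state already set by the core */
            return 0;                       /* re-check the bit and wait again */
    }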
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09..d175917e2411 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -241,18 +241,6 @@ void delete_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
-static int sleep_on_page(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
-static int sleep_on_page_killable(void *word)
-{
-	sleep_on_page(word);
-	return fatal_signal_pending(current) ? -EINTR : 0;
-}
-
 static int filemap_check_errors(struct address_space *mapping)
 {
 	int ret = 0;
@@ -692,7 +680,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
 	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
+		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
 			      TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -705,7 +693,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 		return 0;
 
 	return __wait_on_bit(page_waitqueue(page), &wait,
-			     sleep_on_page_killable, TASK_KILLABLE);
+			     bit_wait_io, TASK_KILLABLE);
 }
 
 /**
@@ -806,7 +794,7 @@ void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
+	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
 			TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -816,7 +804,7 @@ int __lock_page_killable(struct page *page)
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
-					sleep_on_page_killable, TASK_KILLABLE);
+					bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1978,18 +1978,12 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static int just_wait(void *word)
-{
-	schedule();
-	return 0;
-}
-
 static void wait_while_offlining(void)
 {
 	while (ksm_run & KSM_RUN_OFFLINE) {
 		mutex_unlock(&ksm_thread_mutex);
 		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
-			    just_wait, TASK_UNINTERRUPTIBLE);
+			    TASK_UNINTERRUPTIBLE);
 		mutex_lock(&ksm_thread_mutex);
 	}
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0a43cce9a914..e090bffe1bf8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2186,12 +2186,6 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 }
 
-static int wait_inquiry(void *word)
-{
-	schedule();
-	return signal_pending(current);
-}
-
 int hci_inquiry(void __user *arg)
 {
 	__u8 __user *ptr = arg;
@@ -2242,7 +2236,7 @@ int hci_inquiry(void __user *arg)
 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
 		 * cleared). If it is interrupted by a signal, return -EINTR.
 		 */
-		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
+		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
 				TASK_INTERRUPTIBLE))
 			return -EINTR;
 	}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index d3222b6d7d59..9609a7f0faea 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -92,15 +92,6 @@ static void key_gc_timer_func(unsigned long data)
 }
 
 /*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int key_gc_wait_bit(void *flags)
-{
-	schedule();
-	return 0;
-}
-
-/*
  * Reap keys of dead type.
  *
  * We use three flags to make sure we see three complete cycles of the garbage
@@ -123,7 +114,7 @@ void key_gc_keytype(struct key_type *ktype)
 	schedule_work(&key_gc_work);
 
 	kdebug("sleep");
-	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
 		    TASK_UNINTERRUPTIBLE);
 
 	key_gc_dead_keytype = NULL;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 381411941cc1..26a94f18af94 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -21,24 +21,6 @@
 
 #define key_negative_timeout	60	/* default timeout on a negative key's existence */
 
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int key_wait_bit(void *flags)
-{
-	schedule();
-	return 0;
-}
-
-/*
- * wait_on_bit() sleep function for interruptible waiting
- */
-static int key_wait_bit_intr(void *flags)
-{
-	schedule();
-	return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /**
  * complete_request_key - Complete the construction of a key.
  * @cons: The key construction record.
@@ -592,10 +574,9 @@ int wait_for_key_construction(struct key *key, bool intr)
 	int ret;
 
 	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
-			  intr ? key_wait_bit_intr : key_wait_bit,
 			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-	if (ret < 0)
-		return ret;
+	if (ret)
+		return -ERESTARTSYS;
 	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
 		smp_rmb();
 		return key->type_data.reject_error;
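This last hunk shows the whole conversion in miniature: the mode alone decides whether signals can end the wait (with TASK_UNINTERRUPTIBLE the call always returns 0; with TASK_INTERRUPTIBLE or TASK_KILLABLE a non-zero result means a signal arrived), and the caller maps that result to an errno. Restated as a sketch:

    ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
                      intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
    if (ret)
            return -ERESTARTSYS;    /* only reachable when intr was true */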