diff options
 fs/btrfs/compression.c   |  7
 fs/btrfs/delayed-inode.c |  9
 fs/btrfs/dev-replace.c   | 10
 fs/btrfs/extent-tree.c   |  7
 fs/btrfs/inode.c         |  9
 fs/btrfs/locking.c       | 34
 fs/btrfs/ordered-data.c  | 14
 fs/btrfs/transaction.c   |  7
 fs/btrfs/tree-log.c      | 34
 9 files changed, 40 insertions(+), 91 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1061575a7d25..d3e447b45bf7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -990,12 +990,7 @@ static void __free_workspace(int type, struct list_head *workspace,
990 | btrfs_compress_op[idx]->free_workspace(workspace); | 990 | btrfs_compress_op[idx]->free_workspace(workspace); |
991 | atomic_dec(total_ws); | 991 | atomic_dec(total_ws); |
992 | wake: | 992 | wake: |
993 | /* | 993 | cond_wake_up(ws_wait); |
994 | * Make sure counter is updated before we wake up waiters. | ||
995 | */ | ||
996 | smp_mb(); | ||
997 | if (waitqueue_active(ws_wait)) | ||
998 | wake_up(ws_wait); | ||
999 | } | 994 | } |
1000 | 995 | ||
1001 | static void free_workspace(int type, struct list_head *ws) | 996 | static void free_workspace(int type, struct list_head *ws) |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index a8d492dbd3e7..fe6caa7e698b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -460,13 +460,10 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
460 | { | 460 | { |
461 | int seq = atomic_inc_return(&delayed_root->items_seq); | 461 | int seq = atomic_inc_return(&delayed_root->items_seq); |
462 | 462 | ||
463 | /* | 463 | /* atomic_dec_return implies a barrier */ |
464 | * atomic_dec_return implies a barrier for waitqueue_active | ||
465 | */ | ||
466 | if ((atomic_dec_return(&delayed_root->items) < | 464 | if ((atomic_dec_return(&delayed_root->items) < |
467 | BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) && | 465 | BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)) |
468 | waitqueue_active(&delayed_root->wait)) | 466 | cond_wake_up_nomb(&delayed_root->wait); |
469 | wake_up(&delayed_root->wait); | ||
470 | } | 467 | } |
471 | 468 | ||
472 | static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) | 469 | static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) |
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 12f703e127dd..89946285203d 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -1009,9 +1009,9 @@ void btrfs_dev_replace_clear_lock_blocking(
1009 | ASSERT(atomic_read(&dev_replace->read_locks) > 0); | 1009 | ASSERT(atomic_read(&dev_replace->read_locks) > 0); |
1010 | ASSERT(atomic_read(&dev_replace->blocking_readers) > 0); | 1010 | ASSERT(atomic_read(&dev_replace->blocking_readers) > 0); |
1011 | read_lock(&dev_replace->lock); | 1011 | read_lock(&dev_replace->lock); |
1012 | if (atomic_dec_and_test(&dev_replace->blocking_readers) && | 1012 | /* Barrier implied by atomic_dec_and_test */ |
1013 | waitqueue_active(&dev_replace->read_lock_wq)) | 1013 | if (atomic_dec_and_test(&dev_replace->blocking_readers)) |
1014 | wake_up(&dev_replace->read_lock_wq); | 1014 | cond_wake_up_nomb(&dev_replace->read_lock_wq); |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) | 1017 | void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) |
@@ -1022,9 +1022,7 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
1022 | void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) | 1022 | void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) |
1023 | { | 1023 | { |
1024 | percpu_counter_sub(&fs_info->bio_counter, amount); | 1024 | percpu_counter_sub(&fs_info->bio_counter, amount); |
1025 | 1025 | cond_wake_up_nomb(&fs_info->replace_wait); | |
1026 | if (waitqueue_active(&fs_info->replace_wait)) | ||
1027 | wake_up(&fs_info->replace_wait); | ||
1028 | } | 1026 | } |
1029 | 1027 | ||
1030 | void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info) | 1028 | void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info) |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5be54cedb56f..fa2ed14532c1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -11081,12 +11081,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11081 | void btrfs_end_write_no_snapshotting(struct btrfs_root *root) | 11081 | void btrfs_end_write_no_snapshotting(struct btrfs_root *root) |
11082 | { | 11082 | { |
11083 | percpu_counter_dec(&root->subv_writers->counter); | 11083 | percpu_counter_dec(&root->subv_writers->counter); |
11084 | /* | 11084 | cond_wake_up(&root->subv_writers->wait); |
11085 | * Make sure counter is updated before we wake up waiters. | ||
11086 | */ | ||
11087 | smp_mb(); | ||
11088 | if (waitqueue_active(&root->subv_writers->wait)) | ||
11089 | wake_up(&root->subv_writers->wait); | ||
11090 | } | 11085 | } |
11091 | 11086 | ||
11092 | int btrfs_start_write_no_snapshotting(struct btrfs_root *root) | 11087 | int btrfs_start_write_no_snapshotting(struct btrfs_root *root) |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6757fe136177..563e63fa2fce 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1156,13 +1156,10 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1156 | nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> | 1156 | nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> |
1157 | PAGE_SHIFT; | 1157 | PAGE_SHIFT; |
1158 | 1158 | ||
1159 | /* | 1159 | /* atomic_sub_return implies a barrier */ |
1160 | * atomic_sub_return implies a barrier for waitqueue_active | ||
1161 | */ | ||
1162 | if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < | 1160 | if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < |
1163 | 5 * SZ_1M && | 1161 | 5 * SZ_1M) |
1164 | waitqueue_active(&fs_info->async_submit_wait)) | 1162 | cond_wake_up_nomb(&fs_info->async_submit_wait); |
1165 | wake_up(&fs_info->async_submit_wait); | ||
1166 | 1163 | ||
1167 | if (async_cow->inode) | 1164 | if (async_cow->inode) |
1168 | submit_compressed_extents(async_cow->inode, async_cow); | 1165 | submit_compressed_extents(async_cow->inode, async_cow); |
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index e4faefac9d16..1da768e5ef75 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -66,22 +66,16 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
66 | write_lock(&eb->lock); | 66 | write_lock(&eb->lock); |
67 | WARN_ON(atomic_read(&eb->spinning_writers)); | 67 | WARN_ON(atomic_read(&eb->spinning_writers)); |
68 | atomic_inc(&eb->spinning_writers); | 68 | atomic_inc(&eb->spinning_writers); |
69 | /* | 69 | /* atomic_dec_and_test implies a barrier */ |
70 | * atomic_dec_and_test implies a barrier for waitqueue_active | 70 | if (atomic_dec_and_test(&eb->blocking_writers)) |
71 | */ | 71 | cond_wake_up_nomb(&eb->write_lock_wq); |
72 | if (atomic_dec_and_test(&eb->blocking_writers) && | ||
73 | waitqueue_active(&eb->write_lock_wq)) | ||
74 | wake_up(&eb->write_lock_wq); | ||
75 | } else if (rw == BTRFS_READ_LOCK_BLOCKING) { | 72 | } else if (rw == BTRFS_READ_LOCK_BLOCKING) { |
76 | BUG_ON(atomic_read(&eb->blocking_readers) == 0); | 73 | BUG_ON(atomic_read(&eb->blocking_readers) == 0); |
77 | read_lock(&eb->lock); | 74 | read_lock(&eb->lock); |
78 | atomic_inc(&eb->spinning_readers); | 75 | atomic_inc(&eb->spinning_readers); |
79 | /* | 76 | /* atomic_dec_and_test implies a barrier */ |
80 | * atomic_dec_and_test implies a barrier for waitqueue_active | 77 | if (atomic_dec_and_test(&eb->blocking_readers)) |
81 | */ | 78 | cond_wake_up_nomb(&eb->read_lock_wq); |
82 | if (atomic_dec_and_test(&eb->blocking_readers) && | ||
83 | waitqueue_active(&eb->read_lock_wq)) | ||
84 | wake_up(&eb->read_lock_wq); | ||
85 | } | 79 | } |
86 | } | 80 | } |
87 | 81 | ||
@@ -221,12 +215,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
221 | } | 215 | } |
222 | btrfs_assert_tree_read_locked(eb); | 216 | btrfs_assert_tree_read_locked(eb); |
223 | WARN_ON(atomic_read(&eb->blocking_readers) == 0); | 217 | WARN_ON(atomic_read(&eb->blocking_readers) == 0); |
224 | /* | 218 | /* atomic_dec_and_test implies a barrier */ |
225 | * atomic_dec_and_test implies a barrier for waitqueue_active | 219 | if (atomic_dec_and_test(&eb->blocking_readers)) |
226 | */ | 220 | cond_wake_up_nomb(&eb->read_lock_wq); |
227 | if (atomic_dec_and_test(&eb->blocking_readers) && | ||
228 | waitqueue_active(&eb->read_lock_wq)) | ||
229 | wake_up(&eb->read_lock_wq); | ||
230 | atomic_dec(&eb->read_locks); | 221 | atomic_dec(&eb->read_locks); |
231 | } | 222 | } |
232 | 223 | ||
@@ -275,12 +266,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
275 | if (blockers) { | 266 | if (blockers) { |
276 | WARN_ON(atomic_read(&eb->spinning_writers)); | 267 | WARN_ON(atomic_read(&eb->spinning_writers)); |
277 | atomic_dec(&eb->blocking_writers); | 268 | atomic_dec(&eb->blocking_writers); |
278 | /* | 269 | /* Use the lighter barrier after atomic */ |
279 | * Make sure counter is updated before we wake up waiters. | ||
280 | */ | ||
281 | smp_mb__after_atomic(); | 270 | smp_mb__after_atomic(); |
282 | if (waitqueue_active(&eb->write_lock_wq)) | 271 | cond_wake_up_nomb(&eb->write_lock_wq); |
283 | wake_up(&eb->write_lock_wq); | ||
284 | } else { | 272 | } else { |
285 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); | 273 | WARN_ON(atomic_read(&eb->spinning_writers) != 1); |
286 | atomic_dec(&eb->spinning_writers); | 274 | atomic_dec(&eb->spinning_writers); |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6db8bb2f2c28..2e1a1694a33d 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -343,11 +343,8 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
343 | 343 | ||
344 | if (entry->bytes_left == 0) { | 344 | if (entry->bytes_left == 0) { |
345 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); | 345 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); |
346 | /* | 346 | /* test_and_set_bit implies a barrier */ |
347 | * Implicit memory barrier after test_and_set_bit | 347 | cond_wake_up_nomb(&entry->wait); |
348 | */ | ||
349 | if (waitqueue_active(&entry->wait)) | ||
350 | wake_up(&entry->wait); | ||
351 | } else { | 348 | } else { |
352 | ret = 1; | 349 | ret = 1; |
353 | } | 350 | } |
@@ -410,11 +407,8 @@ have_entry:
410 | 407 | ||
411 | if (entry->bytes_left == 0) { | 408 | if (entry->bytes_left == 0) { |
412 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); | 409 | ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); |
413 | /* | 410 | /* test_and_set_bit implies a barrier */ |
414 | * Implicit memory barrier after test_and_set_bit | 411 | cond_wake_up_nomb(&entry->wait); |
415 | */ | ||
416 | if (waitqueue_active(&entry->wait)) | ||
417 | wake_up(&entry->wait); | ||
418 | } else { | 412 | } else { |
419 | ret = 1; | 413 | ret = 1; |
420 | } | 414 | } |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c944b4769e3c..ff841abb756e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -877,12 +877,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
877 | atomic_dec(&cur_trans->num_writers); | 877 | atomic_dec(&cur_trans->num_writers); |
878 | extwriter_counter_dec(cur_trans, trans->type); | 878 | extwriter_counter_dec(cur_trans, trans->type); |
879 | 879 | ||
880 | /* | 880 | cond_wake_up(&cur_trans->writer_wait); |
881 | * Make sure counter is updated before we wake up waiters. | ||
882 | */ | ||
883 | smp_mb(); | ||
884 | if (waitqueue_active(&cur_trans->writer_wait)) | ||
885 | wake_up(&cur_trans->writer_wait); | ||
886 | btrfs_put_transaction(cur_trans); | 881 | btrfs_put_transaction(cur_trans); |
887 | 882 | ||
888 | if (current->journal_info == trans) | 883 | if (current->journal_info == trans) |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2009cea65d89..f8220ec02036 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -222,11 +222,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
222 | void btrfs_end_log_trans(struct btrfs_root *root) | 222 | void btrfs_end_log_trans(struct btrfs_root *root) |
223 | { | 223 | { |
224 | if (atomic_dec_and_test(&root->log_writers)) { | 224 | if (atomic_dec_and_test(&root->log_writers)) { |
225 | /* | 225 | /* atomic_dec_and_test implies a barrier */ |
226 | * Implicit memory barrier after atomic_dec_and_test | 226 | cond_wake_up_nomb(&root->log_writer_wait); |
227 | */ | ||
228 | if (waitqueue_active(&root->log_writer_wait)) | ||
229 | wake_up(&root->log_writer_wait); | ||
230 | } | 227 | } |
231 | } | 228 | } |
232 | 229 | ||
@@ -2988,11 +2985,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2988 | 2985 | ||
2989 | mutex_lock(&log_root_tree->log_mutex); | 2986 | mutex_lock(&log_root_tree->log_mutex); |
2990 | if (atomic_dec_and_test(&log_root_tree->log_writers)) { | 2987 | if (atomic_dec_and_test(&log_root_tree->log_writers)) { |
2991 | /* | 2988 | /* atomic_dec_and_test implies a barrier */ |
2992 | * Implicit memory barrier after atomic_dec_and_test | 2989 | cond_wake_up_nomb(&log_root_tree->log_writer_wait); |
2993 | */ | ||
2994 | if (waitqueue_active(&log_root_tree->log_writer_wait)) | ||
2995 | wake_up(&log_root_tree->log_writer_wait); | ||
2996 | } | 2990 | } |
2997 | 2991 | ||
2998 | if (ret) { | 2992 | if (ret) { |
@@ -3116,13 +3110,11 @@ out_wake_log_root:
3116 | mutex_unlock(&log_root_tree->log_mutex); | 3110 | mutex_unlock(&log_root_tree->log_mutex); |
3117 | 3111 | ||
3118 | /* | 3112 | /* |
3119 | * The barrier before waitqueue_active is needed so all the updates | 3113 | * The barrier before waitqueue_active (in cond_wake_up) is needed so |
3120 | * above are seen by the woken threads. It might not be necessary, but | 3114 | * all the updates above are seen by the woken threads. It might not be |
3121 | * proving that seems to be hard. | 3115 | * necessary, but proving that seems to be hard. |
3122 | */ | 3116 | */ |
3123 | smp_mb(); | 3117 | cond_wake_up(&log_root_tree->log_commit_wait[index2]); |
3124 | if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) | ||
3125 | wake_up(&log_root_tree->log_commit_wait[index2]); | ||
3126 | out: | 3118 | out: |
3127 | mutex_lock(&root->log_mutex); | 3119 | mutex_lock(&root->log_mutex); |
3128 | btrfs_remove_all_log_ctxs(root, index1, ret); | 3120 | btrfs_remove_all_log_ctxs(root, index1, ret); |
@@ -3131,13 +3123,11 @@ out:
3131 | mutex_unlock(&root->log_mutex); | 3123 | mutex_unlock(&root->log_mutex); |
3132 | 3124 | ||
3133 | /* | 3125 | /* |
3134 | * The barrier before waitqueue_active is needed so all the updates | 3126 | * The barrier before waitqueue_active (in cond_wake_up) is needed so |
3135 | * above are seen by the woken threads. It might not be necessary, but | 3127 | * all the updates above are seen by the woken threads. It might not be |
3136 | * proving that seems to be hard. | 3128 | * necessary, but proving that seems to be hard. |
3137 | */ | 3129 | */ |
3138 | smp_mb(); | 3130 | cond_wake_up(&root->log_commit_wait[index1]); |
3139 | if (waitqueue_active(&root->log_commit_wait[index1])) | ||
3140 | wake_up(&root->log_commit_wait[index1]); | ||
3141 | return ret; | 3131 | return ret; |
3142 | } | 3132 | } |
3143 | 3133 | ||