author		David Sterba <dsterba@suse.com>	2015-02-16 13:41:40 -0500
committer	David Sterba <dsterba@suse.com>	2015-10-10 12:42:00 -0400
commit		ee86395458072760d62e66aad10a5e9e8902b8cf (patch)
tree		b3293bde46967a886b5d066028c5eceebd810dac
parent		779adf0f647651f5a45eeee3442c881300ce989e (diff)
btrfs: comment the rest of implicit barriers before waitqueue_active
There are atomic operations that imply the barrier for waitqueue_active
mixed in an if-condition.

Signed-off-by: David Sterba <dsterba@suse.com>
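[Editor's note] For context on the pattern this patch annotates: waitqueue_active() is an unlocked peek at the wait queue, so the waking side needs a full memory barrier between updating the wakeup condition and that check, or a wakeup can be missed. Value-returning atomic RMW operations (atomic_dec_return, atomic_sub_return, atomic_dec_and_test) are fully ordered and already provide that barrier, which is all the added comments document. A minimal sketch of the waker side, with hypothetical names (wake_waiters, pending, wq) that are not taken from the patch:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	/* Hypothetical helper illustrating the commented pattern. */
	static void wake_waiters(atomic_t *pending, wait_queue_head_t *wq)
	{
		/*
		 * atomic_dec_return() is a fully ordered RMW, so it already
		 * implies the barrier that must precede the unlocked
		 * waitqueue_active() check; no explicit smp_mb() is needed.
		 */
		if (atomic_dec_return(pending) == 0 && waitqueue_active(wq))
			wake_up(wq);
	}

The waiter side pairs with this through wait_event() or prepare_to_wait()/finish_wait(), which order queueing the task against re-checking the condition.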
-rw-r--r--	fs/btrfs/delayed-inode.c	4
-rw-r--r--	fs/btrfs/disk-io.c		3
-rw-r--r--	fs/btrfs/inode.c		3
-rw-r--r--	fs/btrfs/locking.c		9
-rw-r--r--	fs/btrfs/volumes.c		3
5 files changed, 22 insertions, 0 deletions
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index a2ae42720a6a..e0941fbb913c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -463,6 +463,10 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
 	int seq = atomic_inc_return(&delayed_root->items_seq);
+
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if ((atomic_dec_return(&delayed_root->items) <
 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 	    waitqueue_active(&delayed_root->wait))
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 295795aebe0b..379526ffd84d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -802,6 +802,9 @@ static void run_one_async_done(struct btrfs_work *work)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
 	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 611b66d73e80..7be4abe25e06 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1096,6 +1096,9 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
 		PAGE_CACHE_SHIFT;
 
+	/*
+	 * atomic_sub_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
 	    5 * 1024 * 1024 &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 03f8630dbaf2..8077461fc56a 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -79,6 +79,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		write_lock(&eb->lock);
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_inc(&eb->spinning_writers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_writers) &&
 		    waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
@@ -86,6 +89,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 		read_lock(&eb->lock);
 		atomic_inc(&eb->spinning_readers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_readers) &&
 		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
@@ -229,6 +235,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+	/*
+	 * atomic_dec_and_test implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_and_test(&eb->blocking_readers) &&
 	    waitqueue_active(&eb->read_lock_wq))
 		wake_up(&eb->read_lock_wq);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6fc735869c18..ff3527192409 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -345,6 +345,9 @@ loop_lock:
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
 
+		/*
+		 * atomic_dec_return implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
 		    waitqueue_active(&fs_info->async_submit_wait))
 			wake_up(&fs_info->async_submit_wait);