aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/disk-io.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-07-06 11:59:41 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-07-06 11:59:41 -0400
commita4c20b9a574b9720acf6c647eaff5e7e1e688086 (patch)
treed25c1d51c6f55de593028fde1276ea9976214014 /fs/btrfs/disk-io.c
parent9b51f04424e17051a89ab32d892ca66b2a104825 (diff)
parente3efe3db932b55ed34ba32862f568abae32046d0 (diff)
Merge branch 'for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo: "These are the percpu changes for the v4.13-rc1 merge window. There are a couple visibility related changes - tracepoints and allocator stats through debugfs, along with __ro_after_init markings and a cosmetic rename in percpu_counter. Please note that the simple O(#elements_in_the_chunk) area allocator used by percpu allocator is again showing scalability issues, primarily with bpf allocating and freeing large number of counters. Dennis is working on the replacement allocator and the percpu allocator will be seeing increased churns in the coming cycles" * 'for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: percpu: fix static checker warnings in pcpu_destroy_chunk percpu: fix early calls for spinlock in pcpu_stats percpu: resolve err may not be initialized in pcpu_alloc percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch percpu: add tracepoint support for percpu memory percpu: expose statistics about percpu memory via debugfs percpu: migrate percpu data structures to internal header percpu: add missing lockdep_assert_held to func pcpu_free_area mark most percpu globals as __ro_after_init
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--fs/btrfs/disk-io.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5abcbdc743fa..086dcbadce09 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1256,9 +1256,9 @@ void clean_tree_block(struct btrfs_fs_info *fs_info,
 	btrfs_assert_tree_locked(buf);
 
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
-		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
+		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 				     -buf->len,
 				     fs_info->dirty_metadata_batch);
 		/* ugh, clear_extent_buffer_dirty needs to lock the page */
 		btrfs_set_lock_blocking(buf);
 		clear_extent_buffer_dirty(buf);
@@ -4047,9 +4047,9 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 			buf->start, transid, fs_info->generation);
 	was_dirty = set_extent_buffer_dirty(buf);
 	if (!was_dirty)
-		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
+		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 				     buf->len,
 				     fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
 		btrfs_print_leaf(fs_info, buf);