about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorRyusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>2015-04-16 15:46:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-17 09:04:03 -0400
commitead8ecffa3e180202c1096a39f14bbecffb139a1 (patch)
treee62f1c805c06e493bc0810a0951fe9db71352b84 /fs
parent6fb7a61e98ac311a65bc652a12611d9899994f49 (diff)
nilfs2: use set_mask_bits() for operations on buffer state bitmap
nilfs_forget_buffer(), nilfs_clear_dirty_page(), and nilfs_segctor_complete_write() are using a bunch of atomic bit operations against buffer state bitmap. This reduces the number of them by utilizing set_mask_bits() macro. Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--fs/nilfs2/page.c24
-rw-r--r--fs/nilfs2/segment.c14
2 files changed, 18 insertions, 20 deletions
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 700ecbcca55d..45d650addd56 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -89,18 +89,16 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
89void nilfs_forget_buffer(struct buffer_head *bh) 89void nilfs_forget_buffer(struct buffer_head *bh)
90{ 90{
91 struct page *page = bh->b_page; 91 struct page *page = bh->b_page;
92 const unsigned long clear_bits =
93 (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
94 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
95 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
92 96
93 lock_buffer(bh); 97 lock_buffer(bh);
94 clear_buffer_nilfs_volatile(bh); 98 set_mask_bits(&bh->b_state, clear_bits, 0);
95 clear_buffer_nilfs_checked(bh);
96 clear_buffer_nilfs_redirected(bh);
97 clear_buffer_async_write(bh);
98 clear_buffer_dirty(bh);
99 if (nilfs_page_buffers_clean(page)) 99 if (nilfs_page_buffers_clean(page))
100 __nilfs_clear_page_dirty(page); 100 __nilfs_clear_page_dirty(page);
101 101
102 clear_buffer_uptodate(bh);
103 clear_buffer_mapped(bh);
104 bh->b_blocknr = -1; 102 bh->b_blocknr = -1;
105 ClearPageUptodate(page); 103 ClearPageUptodate(page);
106 ClearPageMappedToDisk(page); 104 ClearPageMappedToDisk(page);
@@ -421,6 +419,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
421 419
422 if (page_has_buffers(page)) { 420 if (page_has_buffers(page)) {
423 struct buffer_head *bh, *head; 421 struct buffer_head *bh, *head;
422 const unsigned long clear_bits =
423 (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
424 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
425 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
424 426
425 bh = head = page_buffers(page); 427 bh = head = page_buffers(page);
426 do { 428 do {
@@ -430,13 +432,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
430 "discard block %llu, size %zu", 432 "discard block %llu, size %zu",
431 (u64)bh->b_blocknr, bh->b_size); 433 (u64)bh->b_blocknr, bh->b_size);
432 } 434 }
433 clear_buffer_async_write(bh); 435 set_mask_bits(&bh->b_state, clear_bits, 0);
434 clear_buffer_dirty(bh);
435 clear_buffer_nilfs_volatile(bh);
436 clear_buffer_nilfs_checked(bh);
437 clear_buffer_nilfs_redirected(bh);
438 clear_buffer_uptodate(bh);
439 clear_buffer_mapped(bh);
440 unlock_buffer(bh); 436 unlock_buffer(bh);
441 } while (bh = bh->b_this_page, bh != head); 437 } while (bh = bh->b_this_page, bh != head);
442 } 438 }
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index c9a4e6013445..c6abbad9b8e3 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -24,6 +24,7 @@
24#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25#include <linux/buffer_head.h> 25#include <linux/buffer_head.h>
26#include <linux/writeback.h> 26#include <linux/writeback.h>
27#include <linux/bitops.h>
27#include <linux/bio.h> 28#include <linux/bio.h>
28#include <linux/completion.h> 29#include <linux/completion.h>
29#include <linux/blkdev.h> 30#include <linux/blkdev.h>
@@ -1785,12 +1786,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1785 */ 1786 */
1786 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1787 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1787 b_assoc_buffers) { 1788 b_assoc_buffers) {
1788 set_buffer_uptodate(bh); 1789 const unsigned long set_bits = (1 << BH_Uptodate);
1789 clear_buffer_dirty(bh); 1790 const unsigned long clear_bits =
1790 clear_buffer_async_write(bh); 1791 (1 << BH_Dirty | 1 << BH_Async_Write |
1791 clear_buffer_delay(bh); 1792 1 << BH_Delay | 1 << BH_NILFS_Volatile |
1792 clear_buffer_nilfs_volatile(bh); 1793 1 << BH_NILFS_Redirected);
1793 clear_buffer_nilfs_redirected(bh); 1794
1795 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1794 if (bh == segbuf->sb_super_root) { 1796 if (bh == segbuf->sb_super_root) {
1795 if (bh->b_page != bd_page) { 1797 if (bh->b_page != bd_page) {
1796 end_page_writeback(bd_page); 1798 end_page_writeback(bd_page);