author     Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>   2011-04-03 23:53:28 -0400
committer  Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>   2011-05-10 09:21:44 -0400
commit     1cb2d38cb3e59d58e8321a0592e84b5761afb063 (patch)
tree       151e7ffccd72f786a3da511b0f23906961835a22 /fs/nilfs2
parent     eaae0f37d83bed7ccd0c6d0f52de1de44f92aecc (diff)
nilfs2: get rid of private page allocator
Previously, nilfs cloned pages in mmapped regions to freeze their data
and keep checksums consistent during writeback cycles, and a private
page allocator was used for this cloning.  We no longer need to do
that: clear_page_dirty_for_io() write-protects the page table entries
of a mapped page, so vm_ops->page_mkwrite is called right before an
mmapped page is modified, and nilfs_page_mkwrite() can safely wait for
the page to be written back to disk.
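The fault-side half of that mechanism amounts to waiting on PG_writeback
from ->page_mkwrite.  Below is a minimal sketch of such a handler against
the 2.6.39-era vm_operations_struct interface; example_page_mkwrite is a
hypothetical name, and the real nilfs_page_mkwrite (see the file.c hunk
further down) additionally maps the block and runs inside a nilfs
transaction:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only: once clear_page_dirty_for_io() has write-protected the
 * PTEs of a mapped page, any later store into the mapping faults into
 * this handler, which simply waits for the in-flight writeback to
 * finish instead of requiring a frozen private copy of the page.
 */
static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	wait_on_page_writeback(page);	/* data stays stable until log I/O ends */
	return VM_FAULT_LOCKED;		/* page is handed back still locked */
}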
So this patch stops making a copy of mmapped pages during writeback and
eliminates the private page allocation and deallocation functions from
nilfs.
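On the writeback side, what remains is the standard page-writeback
protocol.  A condensed sketch of the per-page prepare/complete steps,
assuming the same 2.6.39-era page APIs (the example_* names are
hypothetical; the actual code is in the segment.c hunks below):

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

/*
 * Sketch only: mark the page as under writeback before its buffers go
 * into the log.  clear_page_dirty_for_io() also write-protects mapped
 * PTEs, so racing mmapped writers are diverted into ->page_mkwrite,
 * where they block on PG_writeback instead of scribbling on the data
 * that is being checksummed.
 */
static void example_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		return;			/* already queued via another buffer */

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}

/* Sketch only: once the log I/O completes, wake the waiters. */
static void example_end_page_io(struct page *page)
{
	end_page_writeback(page);
}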
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Diffstat (limited to 'fs/nilfs2')
-rw-r--r--  fs/nilfs2/file.c    |   1
-rw-r--r--  fs/nilfs2/page.c    |  53
-rw-r--r--  fs/nilfs2/page.h    |   4
-rw-r--r--  fs/nilfs2/segbuf.c  |  12
-rw-r--r--  fs/nilfs2/segment.c | 153
-rw-r--r--  fs/nilfs2/segment.h |   2
6 files changed, 18 insertions(+), 207 deletions(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 397e73258631..d7eeca62febd 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -111,7 +111,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	nilfs_transaction_commit(inode->i_sb);
 
 mapped:
-	SetPageChecked(page);
 	wait_on_page_writeback(page);
 	return VM_FAULT_LOCKED;
 }
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 1168059c7efd..b3b988c2018f 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -37,8 +37,7 @@
 
 #define NILFS_BUFFER_INHERENT_BITS  \
 	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
-	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \
-	 (1UL << BH_NILFS_Checked))
+	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
 
 static struct buffer_head *
 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -217,56 +216,6 @@ void nilfs_page_bug(struct page *page)
 }
 
 /**
- * nilfs_alloc_private_page - allocate a private page with buffer heads
- *
- * Return Value: On success, a pointer to the allocated page is returned.
- * On error, NULL is returned.
- */
-struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
-				      unsigned long state)
-{
-	struct buffer_head *bh, *head, *tail;
-	struct page *page;
-
-	page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
-	if (unlikely(!page))
-		return NULL;
-
-	lock_page(page);
-	head = alloc_page_buffers(page, size, 0);
-	if (unlikely(!head)) {
-		unlock_page(page);
-		__free_page(page);
-		return NULL;
-	}
-
-	bh = head;
-	do {
-		bh->b_state = (1UL << BH_NILFS_Allocated) | state;
-		tail = bh;
-		bh->b_bdev = bdev;
-		bh = bh->b_this_page;
-	} while (bh);
-
-	tail->b_this_page = head;
-	attach_page_buffers(page, head);
-
-	return page;
-}
-
-void nilfs_free_private_page(struct page *page)
-{
-	BUG_ON(!PageLocked(page));
-	BUG_ON(page->mapping);
-
-	if (page_has_buffers(page) && !try_to_free_buffers(page))
-		NILFS_PAGE_BUG(page, "failed to free page");
-
-	unlock_page(page);
-	__free_page(page);
-}
-
-/**
  * nilfs_copy_page -- copy the page with buffers
  * @dst: destination page
  * @src: source page
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index f06b79ad7493..f827afabd548 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -38,7 +38,6 @@ enum {
 	BH_NILFS_Redirected,
 };
 
-BUFFER_FNS(NILFS_Allocated, nilfs_allocated)	/* nilfs private buffers */
 BUFFER_FNS(NILFS_Node, nilfs_node)		/* nilfs node buffers */
 BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
 BUFFER_FNS(NILFS_Checked, nilfs_checked)	/* buffer is verified */
@@ -54,9 +53,6 @@ void nilfs_forget_buffer(struct buffer_head *);
 void nilfs_copy_buffer(struct buffer_head *, struct buffer_head *);
 int nilfs_page_buffers_clean(struct page *);
 void nilfs_page_bug(struct page *);
-struct page *nilfs_alloc_private_page(struct block_device *, int,
-				      unsigned long);
-void nilfs_free_private_page(struct page *);
 
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2853ff20f85a..410ec2b1af4f 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -254,18 +254,6 @@ static void nilfs_release_buffers(struct list_head *list)
 
 	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
 		list_del_init(&bh->b_assoc_buffers);
-		if (buffer_nilfs_allocated(bh)) {
-			struct page *clone_page = bh->b_page;
-
-			/* remove clone page */
-			brelse(bh);
-			page_cache_release(clone_page); /* for each bh */
-			if (page_count(clone_page) <= 2) {
-				lock_page(clone_page);
-				nilfs_free_private_page(clone_page);
-			}
-			continue;
-		}
 		brelse(bh);
 	}
 }
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 5deeadda9083..abbfab974700 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1556,83 +1556,24 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
 	return 0;
 }
 
-static int
-nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
-{
-	struct page *clone_page;
-	struct buffer_head *bh, *head, *bh2;
-	void *kaddr;
-
-	bh = head = page_buffers(page);
-
-	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
-	if (unlikely(!clone_page))
-		return -ENOMEM;
-
-	bh2 = page_buffers(clone_page);
-	kaddr = kmap_atomic(page, KM_USER0);
-	do {
-		if (list_empty(&bh->b_assoc_buffers))
-			continue;
-		get_bh(bh2);
-		page_cache_get(clone_page); /* for each bh */
-		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
-		bh2->b_blocknr = bh->b_blocknr;
-		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
-		list_add_tail(&bh->b_assoc_buffers, out);
-	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
-	kunmap_atomic(kaddr, KM_USER0);
-
-	if (!TestSetPageWriteback(clone_page))
-		account_page_writeback(clone_page);
-	unlock_page(clone_page);
-
-	return 0;
-}
-
-static int nilfs_test_page_to_be_frozen(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-
-	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
-		return 0;
-
-	if (page_mapped(page)) {
-		ClearPageChecked(page);
-		return 1;
-	}
-	return PageChecked(page);
-}
-
-static int nilfs_begin_page_io(struct page *page, struct list_head *out)
+static void nilfs_begin_page_io(struct page *page)
 {
 	if (!page || PageWriteback(page))
 		/* For split b-tree node pages, this function may be called
 		   twice.  We ignore the 2nd or later calls by this check. */
-		return 0;
+		return;
 
 	lock_page(page);
 	clear_page_dirty_for_io(page);
 	set_page_writeback(page);
 	unlock_page(page);
-
-	if (nilfs_test_page_to_be_frozen(page)) {
-		int err = nilfs_copy_replace_page_buffers(page, out);
-		if (unlikely(err))
-			return err;
-	}
-	return 0;
 }
 
-static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
-				       struct page **failed_page)
+static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct page *bd_page = NULL, *fs_page = NULL;
-	struct list_head *list = &sci->sc_copied_buffers;
-	int err;
 
-	*failed_page = NULL;
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
 		struct buffer_head *bh;
 
@@ -1662,11 +1603,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
 				break;
 			}
 			if (bh->b_page != fs_page) {
-				err = nilfs_begin_page_io(fs_page, list);
-				if (unlikely(err)) {
-					*failed_page = fs_page;
-					goto out;
-				}
+				nilfs_begin_page_io(fs_page);
 				fs_page = bh->b_page;
 			}
 		}
@@ -1677,11 +1614,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
 		set_page_writeback(bd_page);
 		unlock_page(bd_page);
 	}
-	err = nilfs_begin_page_io(fs_page, list);
-	if (unlikely(err))
-		*failed_page = fs_page;
- out:
-	return err;
+	nilfs_begin_page_io(fs_page);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
@@ -1694,24 +1627,6 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
 	return ret;
 }
 
-static void __nilfs_end_page_io(struct page *page, int err)
-{
-	if (!err) {
-		if (!nilfs_page_buffers_clean(page))
-			__set_page_dirty_nobuffers(page);
-		ClearPageError(page);
-	} else {
-		__set_page_dirty_nobuffers(page);
-		SetPageError(page);
-	}
-
-	if (buffer_nilfs_allocated(page_buffers(page))) {
-		if (TestClearPageWriteback(page))
-			dec_zone_page_state(page, NR_WRITEBACK);
-	} else
-		end_page_writeback(page);
-}
-
 static void nilfs_end_page_io(struct page *page, int err)
 {
 	if (!page)
@@ -1738,40 +1653,19 @@ static void nilfs_end_page_io(struct page *page, int err)
 		return;
 	}
 
-	__nilfs_end_page_io(page, err);
-}
-
-static void nilfs_clear_copied_buffers(struct list_head *list, int err)
-{
-	struct buffer_head *bh, *head;
-	struct page *page;
-
-	while (!list_empty(list)) {
-		bh = list_entry(list->next, struct buffer_head,
-				b_assoc_buffers);
-		page = bh->b_page;
-		page_cache_get(page);
-		head = bh = page_buffers(page);
-		do {
-			if (!list_empty(&bh->b_assoc_buffers)) {
-				list_del_init(&bh->b_assoc_buffers);
-				if (!err) {
-					set_buffer_uptodate(bh);
-					clear_buffer_dirty(bh);
-					clear_buffer_delay(bh);
-					clear_buffer_nilfs_volatile(bh);
-				}
-				brelse(bh); /* for b_assoc_buffers */
-			}
-		} while ((bh = bh->b_this_page) != head);
-
-		__nilfs_end_page_io(page, err);
-		page_cache_release(page);
+	if (!err) {
+		if (!nilfs_page_buffers_clean(page))
+			__set_page_dirty_nobuffers(page);
+		ClearPageError(page);
+	} else {
+		__set_page_dirty_nobuffers(page);
+		SetPageError(page);
 	}
+
+	end_page_writeback(page);
 }
 
-static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
-			     int err)
+static void nilfs_abort_logs(struct list_head *logs, int err)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct page *bd_page = NULL, *fs_page = NULL;
@@ -1801,8 +1695,6 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
 		}
 		if (bh->b_page != fs_page) {
 			nilfs_end_page_io(fs_page, err);
-			if (fs_page && fs_page == failed_page)
-				return;
 			fs_page = bh->b_page;
 		}
 	}
@@ -1821,12 +1713,11 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
 
 	list_splice_tail_init(&sci->sc_write_logs, &logs);
 	ret = nilfs_wait_on_logs(&logs);
-	nilfs_abort_logs(&logs, NULL, ret ? : err);
+	nilfs_abort_logs(&logs, ret ? : err);
 
 	list_splice_tail_init(&sci->sc_segbufs, &logs);
 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
 	nilfs_free_incomplete_logs(&logs, nilfs);
-	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
 
 	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
@@ -1920,8 +1811,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 
 	nilfs_end_page_io(fs_page, 0);
 
-	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
-
 	nilfs_drop_collected_inodes(&sci->sc_dirty_files);
 
 	if (nilfs_doing_gc())
@@ -2024,7 +1913,6 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 {
 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
-	struct page *failed_page;
 	int err;
 
 	sci->sc_stage.scnt = NILFS_ST_INIT;
@@ -2079,11 +1967,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
 
 		/* Write partial segments */
-		err = nilfs_segctor_prepare_write(sci, &failed_page);
-		if (err) {
-			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
-			goto failed_to_write;
-		}
+		nilfs_segctor_prepare_write(sci);
 
 		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
 					    nilfs->ns_crc_seed);
@@ -2685,7 +2569,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
 	INIT_LIST_HEAD(&sci->sc_segbufs);
 	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
-	INIT_LIST_HEAD(&sci->sc_copied_buffers);
 	init_timer(&sci->sc_timer);
 
 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2739,8 +2622,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 	if (flag || !nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);
 
-	WARN_ON(!list_empty(&sci->sc_copied_buffers));
-
 	if (!list_empty(&sci->sc_dirty_files)) {
 		nilfs_warning(sci->sc_super, __func__,
 			      "dirty file(s) after the final construction\n");
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 6c02a86745fb..38a1d0013314 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -92,7 +92,6 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
- * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -136,7 +135,6 @@ struct nilfs_sc_info {
 
 	struct list_head	sc_dirty_files;
 	struct list_head	sc_gc_inodes;
-	struct list_head	sc_copied_buffers;
 
 	__u64		       *sc_freesegs;
 	size_t			sc_nfreesegs;