Diffstat (limited to 'fs')
39 files changed, 199 insertions, 263 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 106e43db1115..11dd0526b96b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -308,7 +308,7 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
 	_enter("{%x:%u},%lx-%lx",
 	       vnode->fid.vid, vnode->fid.vnode, first, last);
 
-	pagevec_init(&pv, 0);
+	pagevec_init(&pv);
 
 	do {
 		_debug("kill %lx-%lx", first, last);
@@ -497,20 +497,13 @@ static int afs_writepages_region(struct address_space *mapping,
 	_enter(",,%lx,%lx,", index, end);
 
 	do {
-		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
-				       1, &page);
+		n = find_get_pages_range_tag(mapping, &index, end,
+					     PAGECACHE_TAG_DIRTY, 1, &page);
 		if (!n)
 			break;
 
 		_debug("wback %lx", page->index);
 
-		if (page->index > end) {
-			*_next = index;
-			put_page(page);
-			_leave(" = 0 [%lx]", *_next);
-			return 0;
-		}
-
 		/* at this point we hold neither mapping->tree_lock nor lock on
 		 * the page itself: the page may be truncated or invalidated
 		 * (changing page->mapping to NULL), or even swizzled back from
@@ -609,7 +602,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 
 	ASSERT(wb != NULL);
 
-	pagevec_init(&pv, 0);
+	pagevec_init(&pv);
 
 	do {
 		_debug("done %lx-%lx", first, last);
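Most hunks in this series repeat the two conversions seen above: pagevec_init() loses its cold argument, and open-coded loops that look up tagged pages and then bail out once page->index passes the end of the range are replaced by ranged lookup helpers that stop at the end themselves. A minimal sketch of the post-patch calling convention, assuming the helper prototypes introduced by this series (the function name and the per-page work are illustrative, not part of the patch):

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* Illustrative only: visit every dirty page in [start, end]. */
	static void example_walk_dirty_range(struct address_space *mapping,
					     pgoff_t start, pgoff_t end)
	{
		struct pagevec pvec;
		pgoff_t index = start;
		unsigned int i, nr;

		pagevec_init(&pvec);	/* no "cold" argument any more */
		while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY))) {
			for (i = 0; i < nr; i++) {
				struct page *page = pvec.pages[i];

				/* the helper never returns pages past 'end',
				 * so no "page->index > end" check is needed */
				lock_page(page);
				/* ... per-page work ... */
				unlock_page(page);
			}
			pagevec_release(&pvec);
			cond_resched();
		}
	}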
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index adbbc017191c..16045ea86fc1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3797,7 +3797,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 	int scanned = 0;
 	int tag;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
@@ -3814,8 +3814,8 @@ retry:
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+			tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -3825,11 +3825,6 @@ retry:
 			if (!PagePrivate(page))
 				continue;
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				break;
-			}
-
 			spin_lock(&mapping->private_lock);
 			if (!PagePrivate(page)) {
 				spin_unlock(&mapping->private_lock);
@@ -3941,7 +3936,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 	if (!igrab(inode))
 		return 0;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
@@ -3961,8 +3956,8 @@ retry:
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
+			&index, end, tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -3987,12 +3982,6 @@ retry:
 				continue;
 			}
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				unlock_page(page);
-				continue;
-			}
-
 			if (wbc->sync_mode != WB_SYNC_NONE) {
 				if (PageWriteback(page))
 					flush_fn(data);
diff --git a/fs/buffer.c b/fs/buffer.c
index 1c18a22a6013..0736a6a2e2f0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1592,7 +1592,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 	struct buffer_head *head;
 
 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
 		count = pagevec_count(&pvec);
 		for (i = 0; i < count; i++) {
@@ -3514,7 +3514,7 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 	if (length <= 0)
 		return -ENOENT;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
 	do {
 		unsigned nr_pages, i;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 18d7aa61ef0f..883bc7bb12c5 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -256,8 +256,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
 			goto backing_page_already_present;
 
 		if (!newpage) {
-			newpage = __page_cache_alloc(cachefiles_gfp |
-						     __GFP_COLD);
+			newpage = __page_cache_alloc(cachefiles_gfp);
 			if (!newpage)
 				goto nomem_monitor;
 		}
@@ -493,8 +492,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 			goto backing_page_already_present;
 
 		if (!newpage) {
-			newpage = __page_cache_alloc(cachefiles_gfp |
-						     __GFP_COLD);
+			newpage = __page_cache_alloc(cachefiles_gfp);
 			if (!newpage)
 				goto nomem;
 		}
@@ -710,7 +708,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 	/* calculate the shift required to use bmap */
 	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
-	pagevec_init(&pagevec, 0);
+	pagevec_init(&pagevec);
 
 	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
 	op->op.flags |= FSCACHE_OP_ASYNC;
@@ -844,7 +842,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
 
 	ret = cachefiles_has_space(cache, 0, *nr_pages);
 	if (ret == 0) {
-		pagevec_init(&pagevec, 0);
+		pagevec_init(&pagevec);
 
 		list_for_each_entry(page, pages, lru) {
 			if (pagevec_add(&pagevec, page) == 0)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4d622654bfbc..dbf07051aacd 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -680,7 +680,7 @@ static void ceph_release_pages(struct page **pages, int num)
 	struct pagevec pvec;
 	int i;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	for (i = 0; i < num; i++) {
 		if (pagevec_add(&pvec, pages[i]) == 0)
 			pagevec_release(&pvec);
@@ -811,7 +811,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	if (fsc->mount_options->wsize < wsize)
 		wsize = fsc->mount_options->wsize;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
 	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
 	index = start_index;
@@ -870,15 +870,10 @@ retry:
 		max_pages = wsize >> PAGE_SHIFT;
 
 get_more_pages:
-		pvec_pages = min_t(unsigned, PAGEVEC_SIZE,
-				   max_pages - locked_pages);
-		if (end - index < (u64)(pvec_pages - 1))
-			pvec_pages = (unsigned)(end - index) + 1;
-
-		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-						PAGECACHE_TAG_DIRTY,
-						pvec_pages);
-		dout("pagevec_lookup_tag got %d\n", pvec_pages);
+		pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
+						end, PAGECACHE_TAG_DIRTY,
+						max_pages - locked_pages);
+		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
 		if (!pvec_pages && !locked_pages)
 			break;
 		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
@@ -896,16 +891,6 @@ get_more_pages:
 				unlock_page(page);
 				continue;
 			}
-			if (page->index > end) {
-				dout("end of range %p\n", page);
-				/* can't be range_cyclic (1st pass) because
-				 * end == -1 in that case. */
-				stop = true;
-				if (ceph_wbc.head_snapc)
-					done = true;
-				unlock_page(page);
-				break;
-			}
 			if (strip_unit_end && (page->index > strip_unit_end)) {
 				dout("end of strip unit %p\n", page);
 				unlock_page(page);
@@ -1177,8 +1162,7 @@ release_pvec_pages:
 		index = 0;
 		while ((index <= end) &&
 		       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
-						PAGECACHE_TAG_WRITEBACK,
-						PAGEVEC_SIZE))) {
+						PAGECACHE_TAG_WRITEBACK))) {
 			for (i = 0; i < nr; i++) {
 				page = pvec.pages[i];
 				if (page_snap_context(page) != snapc)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 92fdf9c35de2..df9f682708c6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1963,8 +1963,6 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
 			  pgoff_t end, pgoff_t *index,
 			  unsigned int *found_pages)
 {
-	unsigned int nr_pages;
-	struct page **pages;
 	struct cifs_writedata *wdata;
 
 	wdata = cifs_writedata_alloc((unsigned int)tofind,
@@ -1972,23 +1970,8 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
 	if (!wdata)
 		return NULL;
 
-	/*
-	 * find_get_pages_tag seems to return a max of 256 on each
-	 * iteration, so we must call it several times in order to
-	 * fill the array or the wsize is effectively limited to
-	 * 256 * PAGE_SIZE.
-	 */
-	*found_pages = 0;
-	pages = wdata->pages;
-	do {
-		nr_pages = find_get_pages_tag(mapping, index,
-					      PAGECACHE_TAG_DIRTY, tofind,
-					      pages);
-		*found_pages += nr_pages;
-		tofind -= nr_pages;
-		pages += nr_pages;
-	} while (nr_pages && tofind && *index <= end);
-
+	*found_pages = find_get_pages_range_tag(mapping, index, end,
+				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
 	return wdata;
 }
 
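The cifs hunk above drops its retry loop because the ranged helper honours both the end index and the requested count in one call. A minimal sketch of that usage, assuming the find_get_pages_range_tag() prototype added by this series (the function and array names are illustrative):

	#include <linux/pagemap.h>

	/* Illustrative only: gather up to 'tofind' dirty pages from
	 * [*index, end] into a caller-supplied array, then drop the
	 * page references once they have been processed. */
	static void example_gather_dirty_pages(struct address_space *mapping,
					       pgoff_t *index, pgoff_t end,
					       unsigned int tofind,
					       struct page **pages)
	{
		unsigned int found, i;

		/* *index is advanced past the last page returned. */
		found = find_get_pages_range_tag(mapping, index, end,
						 PAGECACHE_TAG_DIRTY, tofind,
						 pages);
		for (i = 0; i < found; i++) {
			/* ... use pages[i] ... */
			put_page(pages[i]);
		}
	}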
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -565,7 +565,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
 		WARN_ON_ONCE(ret != entry);
 		__radix_tree_replace(page_tree, node, slot,
-				     new_entry, NULL, NULL);
+				     new_entry, NULL);
 		entry = new_entry;
 	}
 
@@ -614,6 +614,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
 			continue;
 
+		/*
+		 * No need to call mmu_notifier_invalidate_range() as we are
+		 * downgrading page table protection not changing it to point
+		 * to a new page.
+		 *
+		 * See Documentation/vm/mmu_notifier.txt
+		 */
 		if (pmdp) {
 #ifdef CONFIG_FS_DAX_PMD
 			pmd_t pmd;
@@ -628,7 +635,6 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 			pmd = pmd_wrprotect(pmd);
 			pmd = pmd_mkclean(pmd);
 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
 			spin_unlock(ptl);
#endif
@@ -643,7 +649,6 @@ unlock_pmd:
 			pte = pte_wrprotect(pte);
 			pte = pte_mkclean(pte);
 			set_pte_at(vma->vm_mm, address, ptep, pte);
-			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
 			pte_unmap_unlock(ptep, ptl);
 		}
@@ -789,7 +794,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 
 	tag_pages_for_writeback(mapping, start_index, end_index);
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	while (!done) {
 		pvec.nr = find_get_entries_tag(mapping, start_index,
 				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
diff --git a/fs/dcache.c b/fs/dcache.c
index bcc9f6981569..5c7df1df81ff 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2705,8 +2705,6 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
 			 */
 			unsigned int i;
 			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
-			kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
-			kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
 			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
 				swap(((long *) &dentry->d_iname)[i],
 				     ((long *) &target->d_iname)[i]);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 6b801186baa5..25aeaa7328ba 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -660,7 +660,7 @@ static struct ecryptfs_cache_info {
 	struct kmem_cache **cache;
 	const char *name;
 	size_t size;
-	unsigned long flags;
+	slab_flags_t flags;
 	void (*ctor)(void *obj);
 } ecryptfs_cache_infos[] = {
 	{
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2633150e41b9..8d2b582fb141 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1719,7 +1719,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 		ext4_es_remove_extent(inode, start, last - start + 1);
 	}
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	while (index <= end) {
 		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
 		if (nr_pages == 0)
@@ -2345,7 +2345,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	lblk = start << bpp_bits;
 	pblock = mpd->map.m_pblk;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	while (start <= end) {
 		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
 						&start, end);
@@ -2616,12 +2616,12 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	mpd->map.m_len = 0;
 	mpd->next_page = index;
 	while (index <= end) {
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			goto out;
 
@@ -2629,16 +2629,6 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			struct page *page = pvec.pages[i];
 
 			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping. However, page->index will not change
-			 * because we have a reference on the page.
-			 */
-			if (page->index > end)
-				goto out;
-
-			/*
 			 * Accumulated enough dirty pages? This doesn't apply
 			 * to WB_SYNC_ALL mode. For integrity sync we have to
 			 * keep going because someone may be concurrently
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 04fe1df052b2..0bb8e2c022d3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -305,25 +305,22 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 				long nr_to_write, enum iostat_type io_type)
 {
 	struct address_space *mapping = META_MAPPING(sbi);
-	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
+	pgoff_t index = 0, prev = ULONG_MAX;
 	struct pagevec pvec;
 	long nwritten = 0;
+	int nr_pages;
 	struct writeback_control wbc = {
 		.for_reclaim = 0,
 	};
 	struct blk_plug plug;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
 	blk_start_plug(&plug);
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (unlikely(nr_pages == 0))
-			break;
+	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 36b535207c88..7b3ad5d8e2e9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1635,7 +1635,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	int range_whole = 0;
 	int tag;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
 	if (get_dirty_pages(mapping->host) <=
 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
@@ -1669,8 +1669,8 @@ retry:
 	while (!done && (index <= end)) {
 		int i;
 
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			break;
 
@@ -1678,11 +1678,6 @@ retry:
 			struct page *page = pvec.pages[i];
 			bool submitted = false;
 
-			if (page->index > end) {
-				done = 1;
-				break;
-			}
-
 			done_index = page->index;
retry_write:
 			lock_page(page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 517e112c8a9a..f78b76ec4707 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -313,18 +313,19 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
 						pgoff_t pgofs, int whence)
 {
-	struct pagevec pvec;
+	struct page *page;
 	int nr_pages;
 
 	if (whence != SEEK_DATA)
 		return 0;
 
 	/* find first dirty page index */
-	pagevec_init(&pvec, 0);
-	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
-				      PAGECACHE_TAG_DIRTY, 1);
-	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
-	pagevec_release(&pvec);
+	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
+				      1, &page);
+	if (!nr_pages)
+		return ULONG_MAX;
+	pgofs = page->index;
+	put_page(page);
 	return pgofs;
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index fca87835a1da..b33dac9592ca 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1277,21 +1277,17 @@ release_page:
 
 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	struct pagevec pvec;
 	struct page *last_page = NULL;
+	int nr_pages;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	index = 0;
-	end = ULONG_MAX;
-
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -1425,13 +1421,14 @@ static int f2fs_write_node_page(struct page *page,
int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			struct writeback_control *wbc, bool atomic)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	pgoff_t last_idx = ULONG_MAX;
 	struct pagevec pvec;
 	int ret = 0;
 	struct page *last_page = NULL;
 	bool marked = false;
 	nid_t ino = inode->i_ino;
+	int nr_pages;
 
 	if (atomic) {
 		last_page = last_fsync_dnode(sbi, ino);
@@ -1439,17 +1436,12 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			return PTR_ERR_OR_ZERO(last_page);
 	}
retry:
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	index = 0;
-	end = ULONG_MAX;
-
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -1548,25 +1540,21 @@ out:
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
 				bool do_balance, enum iostat_type io_type)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	struct pagevec pvec;
 	int step = 0;
 	int nwritten = 0;
 	int ret = 0;
+	int nr_pages;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
next_step:
 	index = 0;
-	end = ULONG_MAX;
-
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -1655,27 +1643,20 @@ out:
 
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	pgoff_t index = 0, end = ULONG_MAX;
+	pgoff_t index = 0;
 	struct pagevec pvec;
 	int ret2, ret = 0;
+	int nr_pages;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_WRITEBACK,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_WRITEBACK))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			/* until radix tree lookup accepts end_index */
-			if (unlikely(page->index > end))
-				continue;
-
 			if (ino && ino_of_node(page) == ino) {
 				f2fs_wait_on_page_writeback(page, NODE, true);
 				if (TestClearPageError(page))
diff --git a/fs/file_table.c b/fs/file_table.c
index 49e1f2f1a4cb..2dc9f38bd195 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -312,7 +312,7 @@ void put_filp(struct file *file)
void __init files_init(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
 
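The filp cache change above opts struct file allocations into kmemcg accounting via SLAB_ACCOUNT. A minimal sketch of the same flag applied to a hypothetical cache (the cache name and object type are illustrative, not part of the patch):

	#include <linux/slab.h>

	struct example_obj {
		int id;
	};

	/* Objects from this cache are charged to the allocating task's
	 * memory cgroup because of SLAB_ACCOUNT. */
	static struct kmem_cache *example_cachep;

	static int __init example_cache_init(void)
	{
		example_cachep = kmem_cache_create("example_obj",
				sizeof(struct example_obj), 0,
				SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
		return example_cachep ? 0 : -ENOMEM;
	}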
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 0ad3fd3ad0b4..961029e04027 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -1175,7 +1175,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		return;
 	}
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 	next = 0;
 	do {
 		if (!pagevec_lookup(&pvec, mapping, &next))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a42d89371748..17f0d05bfd4c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1636,7 +1636,7 @@ out_finish:
 
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
-	release_pages(req->pages, req->num_pages, false);
+	release_pages(req->pages, req->num_pages);
}
 
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a79e320349cd..2f504d615d92 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1273,9 +1273,9 @@ static int __init fuse_fs_init(void)
	int err;
 
	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
+			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 68ed06962537..1daf15a1f00c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -280,22 +280,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];
 
-		/*
-		 * At this point, the page may be truncated or
-		 * invalidated (changing page->mapping to NULL), or
-		 * even swizzled back from swapper_space to tmpfs file
-		 * mapping. However, page->index will not change
-		 * because we have a reference on the page.
-		 */
-		if (page->index > end) {
-			/*
-			 * can't be range_cyclic (1st pass) because
-			 * end == -1 in that case.
-			 */
-			ret = 1;
-			break;
-		}
-
		*done_index = page->index;
 
		lock_page(page);
@@ -387,7 +371,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
	int range_whole = 0;
	int tag;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
@@ -413,8 +397,8 @@ retry:
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ed113ea17aff..1e76730aac0d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -407,7 +407,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
@@ -668,7 +668,6 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
		return error;
 
	if (ia_valid & ATTR_SIZE) {
-		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 06ffa135dfa6..16a7a67a11c9 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2156,10 +2156,10 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
	     level++)
		INIT_LIST_HEAD(&lists[level]);
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
-	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, btcache, &index,
+					PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 8616c46d33da..68241512d7c1 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -255,10 +255,9 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
	pgoff_t index = 0;
	int err = 0;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
repeat:
-	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
-				PAGEVEC_SIZE))
+	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;
 
	for (i = 0; i < pagevec_count(&pvec); i++) {
@@ -310,7 +309,7 @@ void nilfs_copy_back_pages(struct address_space *dmap,
	pgoff_t index = 0;
	int err;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
@@ -374,10 +373,10 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
	unsigned int i;
	pgoff_t index = 0;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
-	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, mapping, &index,
+					PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
 
@@ -519,7 +518,7 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 70ded52dc1dd..f65392fecb5c 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -708,21 +708,17 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
repeat:
	if (unlikely(index > last) ||
-	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				min_t(pgoff_t, last - index,
-				      PAGEVEC_SIZE - 1) + 1))
+	    !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
+					PAGECACHE_TAG_DIRTY))
		return ndirties;
 
	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];
 
-		if (unlikely(page->index > last))
-			break;
-
		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, i_blocksize(inode), 0);
@@ -757,10 +753,10 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
	unsigned int i;
	pgoff_t index = 0;
 
-	pagevec_init(&pvec, 0);
+	pagevec_init(&pvec);
 
-	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, mapping, &index,
+					PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index addd7c5f2d3e..ab5105f9767e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3585,8 +3585,6 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
		 * The easy case - we can just plop the record right in.
		 */
		*left_rec = *split_rec;
-
-		has_empty_extent = 0;
	} else
		le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
 
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 88a31e9340a0..d1516327b787 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -134,6 +134,19 @@ bail:
	return err;
}
 
+static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
+		    struct buffer_head *bh_result, int create)
+{
+	int ret = 0;
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+	down_read(&oi->ip_alloc_sem);
+	ret = ocfs2_get_block(inode, iblock, bh_result, create);
+	up_read(&oi->ip_alloc_sem);
+
+	return ret;
+}
+
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
@@ -2128,7 +2141,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode,
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *					fs_count, map_bh, dio->rw == WRITE);
 */
-static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
+static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2154,12 +2167,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
	 * while file size will be changed.
	 */
	if (pos + total_len <= i_size_read(inode)) {
-		down_read(&oi->ip_alloc_sem);
-		/* This is the fast path for re-write. */
-		ret = ocfs2_get_block(inode, iblock, bh_result, create);
-
-		up_read(&oi->ip_alloc_sem);
 
+		/* This is the fast path for re-write. */
+		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
		if (buffer_mapped(bh_result) &&
		    !buffer_new(bh_result) &&
		    ret == 0)
@@ -2424,9 +2434,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
		return 0;
 
	if (iov_iter_rw(iter) == READ)
-		get_block = ocfs2_get_block;
+		get_block = ocfs2_lock_get_block;
	else
-		get_block = ocfs2_dio_get_block;
+		get_block = ocfs2_dio_wr_get_block;
 
	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
				    iter, get_block,
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
index b97bcc6dde7c..b1bb70c8ca4d 100644
--- a/fs/ocfs2/buffer_head_io.h
+++ b/fs/ocfs2/buffer_head_io.h
@@ -28,9 +28,6 @@
 
#include <linux/buffer_head.h>
 
-void ocfs2_end_buffer_io_sync(struct buffer_head *bh,
-			     int uptodate);
-
int ocfs2_write_block(struct ocfs2_super *osb,
		      struct buffer_head  *bh,
		      struct ocfs2_caching_info *ci);
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
index 3ef5137dc362..a9e67efc0004 100644
--- a/fs/ocfs2/cluster/heartbeat.h
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -79,10 +79,8 @@ void o2hb_fill_node_map(unsigned long *map,
			unsigned bytes);
void o2hb_exit(void);
int o2hb_init(void);
-int o2hb_check_node_heartbeating(u8 node_num);
int o2hb_check_node_heartbeating_no_sem(u8 node_num);
int o2hb_check_node_heartbeating_from_callback(u8 node_num);
-int o2hb_check_local_node_heartbeating(void);
void o2hb_stop_all_regions(void);
int o2hb_get_all_regions(char *region_uuids, u8 numregions);
int o2hb_global_heartbeat_active(void);
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index a51200ece93d..da64c3a20eeb 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c | |||
@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { | |||
40 | "panic", /* O2NM_FENCE_PANIC */ | 40 | "panic", /* O2NM_FENCE_PANIC */ |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static inline void o2nm_lock_subsystem(void); | ||
44 | static inline void o2nm_unlock_subsystem(void); | ||
45 | |||
43 | struct o2nm_node *o2nm_get_node_by_num(u8 node_num) | 46 | struct o2nm_node *o2nm_get_node_by_num(u8 node_num) |
44 | { | 47 | { |
45 | struct o2nm_node *node = NULL; | 48 | struct o2nm_node *node = NULL; |
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) | |||
181 | { | 184 | { |
182 | /* through the first node_set .parent | 185 | /* through the first node_set .parent |
183 | * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ | 186 | * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ |
184 | return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); | 187 | if (node->nd_item.ci_parent) |
188 | return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); | ||
189 | else | ||
190 | return NULL; | ||
185 | } | 191 | } |
186 | 192 | ||
187 | enum { | 193 | enum { |
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, | |||
194 | size_t count) | 200 | size_t count) |
195 | { | 201 | { |
196 | struct o2nm_node *node = to_o2nm_node(item); | 202 | struct o2nm_node *node = to_o2nm_node(item); |
197 | struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); | 203 | struct o2nm_cluster *cluster; |
198 | unsigned long tmp; | 204 | unsigned long tmp; |
199 | char *p = (char *)page; | 205 | char *p = (char *)page; |
200 | int ret = 0; | 206 | int ret = 0; |
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, | |||
214 | !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) | 220 | !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) |
215 | return -EINVAL; /* XXX */ | 221 | return -EINVAL; /* XXX */ |
216 | 222 | ||
223 | o2nm_lock_subsystem(); | ||
224 | cluster = to_o2nm_cluster_from_node(node); | ||
225 | if (!cluster) { | ||
226 | o2nm_unlock_subsystem(); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
217 | write_lock(&cluster->cl_nodes_lock); | 230 | write_lock(&cluster->cl_nodes_lock); |
218 | if (cluster->cl_nodes[tmp]) | 231 | if (cluster->cl_nodes[tmp]) |
219 | ret = -EEXIST; | 232 | ret = -EEXIST; |
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, | |||
226 | set_bit(tmp, cluster->cl_nodes_bitmap); | 239 | set_bit(tmp, cluster->cl_nodes_bitmap); |
227 | } | 240 | } |
228 | write_unlock(&cluster->cl_nodes_lock); | 241 | write_unlock(&cluster->cl_nodes_lock); |
242 | o2nm_unlock_subsystem(); | ||
243 | |||
229 | if (ret) | 244 | if (ret) |
230 | return ret; | 245 | return ret; |
231 | 246 | ||
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, | |||
269 | size_t count) | 284 | size_t count) |
270 | { | 285 | { |
271 | struct o2nm_node *node = to_o2nm_node(item); | 286 | struct o2nm_node *node = to_o2nm_node(item); |
272 | struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); | 287 | struct o2nm_cluster *cluster; |
273 | int ret, i; | 288 | int ret, i; |
274 | struct rb_node **p, *parent; | 289 | struct rb_node **p, *parent; |
275 | unsigned int octets[4]; | 290 | unsigned int octets[4]; |
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, | |||
286 | be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); | 301 | be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); |
287 | } | 302 | } |
288 | 303 | ||
304 | o2nm_lock_subsystem(); | ||
305 | cluster = to_o2nm_cluster_from_node(node); | ||
306 | if (!cluster) { | ||
307 | o2nm_unlock_subsystem(); | ||
308 | return -EINVAL; | ||
309 | } | ||
310 | |||
289 | ret = 0; | 311 | ret = 0; |
290 | write_lock(&cluster->cl_nodes_lock); | 312 | write_lock(&cluster->cl_nodes_lock); |
291 | if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) | 313 | if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) |
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, | |||
298 | rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); | 320 | rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); |
299 | } | 321 | } |
300 | write_unlock(&cluster->cl_nodes_lock); | 322 | write_unlock(&cluster->cl_nodes_lock); |
323 | o2nm_unlock_subsystem(); | ||
324 | |||
301 | if (ret) | 325 | if (ret) |
302 | return ret; | 326 | return ret; |
303 | 327 | ||
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, | |||
315 | size_t count) | 339 | size_t count) |
316 | { | 340 | { |
317 | struct o2nm_node *node = to_o2nm_node(item); | 341 | struct o2nm_node *node = to_o2nm_node(item); |
318 | struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); | 342 | struct o2nm_cluster *cluster; |
319 | unsigned long tmp; | 343 | unsigned long tmp; |
320 | char *p = (char *)page; | 344 | char *p = (char *)page; |
321 | ssize_t ret; | 345 | ssize_t ret; |
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, | |||
333 | !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) | 357 | !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) |
334 | return -EINVAL; /* XXX */ | 358 | return -EINVAL; /* XXX */ |
335 | 359 | ||
360 | o2nm_lock_subsystem(); | ||
361 | cluster = to_o2nm_cluster_from_node(node); | ||
362 | if (!cluster) { | ||
363 | ret = -EINVAL; | ||
364 | goto out; | ||
365 | } | ||
366 | |||
336 | /* the only failure case is trying to set a new local node | 367 | /* the only failure case is trying to set a new local node |
337 | * when a different one is already set */ | 368 | * when a different one is already set */ |
338 | if (tmp && tmp == cluster->cl_has_local && | 369 | if (tmp && tmp == cluster->cl_has_local && |
339 | cluster->cl_local_node != node->nd_num) | 370 | cluster->cl_local_node != node->nd_num) { |
340 | return -EBUSY; | 371 | ret = -EBUSY; |
372 | goto out; | ||
373 | } | ||
341 | 374 | ||
342 | /* bring up the rx thread if we're setting the new local node. */ | 375 | /* bring up the rx thread if we're setting the new local node. */ |
343 | if (tmp && !cluster->cl_has_local) { | 376 | if (tmp && !cluster->cl_has_local) { |
344 | ret = o2net_start_listening(node); | 377 | ret = o2net_start_listening(node); |
345 | if (ret) | 378 | if (ret) |
346 | return ret; | 379 | goto out; |
347 | } | 380 | } |
348 | 381 | ||
349 | if (!tmp && cluster->cl_has_local && | 382 | if (!tmp && cluster->cl_has_local && |
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, | |||
358 | cluster->cl_local_node = node->nd_num; | 391 | cluster->cl_local_node = node->nd_num; |
359 | } | 392 | } |
360 | 393 | ||
361 | return count; | 394 | ret = count; |
395 | |||
396 | out: | ||
397 | o2nm_unlock_subsystem(); | ||
398 | return ret; | ||
362 | } | 399 | } |
363 | 400 | ||
364 | CONFIGFS_ATTR(o2nm_node_, num); | 401 | CONFIGFS_ATTR(o2nm_node_, num); |
@@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = { | |||
738 | }, | 775 | }, |
739 | }; | 776 | }; |
740 | 777 | ||
778 | static inline void o2nm_lock_subsystem(void) | ||
779 | { | ||
780 | mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); | ||
781 | } | ||
782 | |||
783 | static inline void o2nm_unlock_subsystem(void) | ||
784 | { | ||
785 | mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); | ||
786 | } | ||
787 | |||
741 | int o2nm_depend_item(struct config_item *item) | 788 | int o2nm_depend_item(struct config_item *item) |
742 | { | 789 | { |
743 | return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); | 790 | return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); |
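The nodemanager.c hunks above stop caching the cluster pointer at the top of the attribute stores and instead resolve it only after taking the configfs subsystem mutex, failing with -EINVAL if the node's parent item has already been detached. A minimal user-space sketch of that lock-then-resolve-parent pattern follows; all names below are illustrative analogues, not the kernel API.

/*
 * Hedged sketch (user-space analogue): take the subsystem-wide mutex
 * before dereferencing a parent pointer that a concurrent teardown may
 * clear, and bail out cleanly if it is already gone.
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct cluster {
	pthread_rwlock_t nodes_lock;	/* cl_nodes_lock analogue */
};

struct node {
	struct cluster *parent;		/* may become NULL during teardown */
};

static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

static int node_store_attr(struct node *node)
{
	struct cluster *cluster;
	int ret = 0;

	pthread_mutex_lock(&subsys_mutex);	/* o2nm_lock_subsystem() analogue */
	cluster = node->parent;
	if (!cluster) {
		ret = -EINVAL;			/* item already unlinked */
		goto out;
	}

	pthread_rwlock_wrlock(&cluster->nodes_lock);
	/* ... update the cluster's view of this node ... */
	pthread_rwlock_unlock(&cluster->nodes_lock);
out:
	pthread_mutex_unlock(&subsys_mutex);
	return ret;
}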
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index a2b19fbdcf46..e1fea149f50b 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -394,7 +394,6 @@ int dlm_domain_fully_joined(struct dlm_ctxt *dlm) | |||
394 | static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm) | 394 | static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm) |
395 | { | 395 | { |
396 | if (dlm->dlm_worker) { | 396 | if (dlm->dlm_worker) { |
397 | flush_workqueue(dlm->dlm_worker); | ||
398 | destroy_workqueue(dlm->dlm_worker); | 397 | destroy_workqueue(dlm->dlm_worker); |
399 | dlm->dlm_worker = NULL; | 398 | dlm->dlm_worker = NULL; |
400 | } | 399 | } |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 3e04279446e8..9c3e0f13ca87 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2616,7 +2616,9 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, | |||
2616 | * otherwise the assert_master from the new | 2616 | * otherwise the assert_master from the new |
2617 | * master will destroy this. | 2617 | * master will destroy this. |
2618 | */ | 2618 | */ |
2619 | dlm_get_mle_inuse(mle); | 2619 | if (ret != -EEXIST) |
2620 | dlm_get_mle_inuse(mle); | ||
2621 | |||
2620 | spin_unlock(&dlm->master_lock); | 2622 | spin_unlock(&dlm->master_lock); |
2621 | spin_unlock(&dlm->spinlock); | 2623 | spin_unlock(&dlm->spinlock); |
2622 | 2624 | ||
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 74407c6dd592..ec8f75813beb 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -2419,6 +2419,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2419 | dlm_lockres_put(res); | 2419 | dlm_lockres_put(res); |
2420 | continue; | 2420 | continue; |
2421 | } | 2421 | } |
2422 | dlm_move_lockres_to_recovery_list(dlm, res); | ||
2422 | } else if (res->owner == dlm->node_num) { | 2423 | } else if (res->owner == dlm->node_num) { |
2423 | dlm_free_dead_locks(dlm, res, dead_node); | 2424 | dlm_free_dead_locks(dlm, res, dead_node); |
2424 | __dlm_lockres_calc_usage(dlm, res); | 2425 | __dlm_lockres_calc_usage(dlm, res); |
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 988137de08f5..9c7c18c0e129 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c | |||
@@ -670,7 +670,6 @@ static void __exit exit_dlmfs_fs(void) | |||
670 | { | 670 | { |
671 | unregister_filesystem(&dlmfs_fs_type); | 671 | unregister_filesystem(&dlmfs_fs_type); |
672 | 672 | ||
673 | flush_workqueue(user_dlm_worker); | ||
674 | destroy_workqueue(user_dlm_worker); | 673 | destroy_workqueue(user_dlm_worker); |
675 | 674 | ||
676 | /* | 675 | /* |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 6e41fc8fabbe..dc455d45a66a 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1161,6 +1161,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
1161 | } | 1161 | } |
1162 | size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; | 1162 | size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; |
1163 | if (size_change) { | 1163 | if (size_change) { |
1164 | /* | ||
1165 | * Here we should wait for dio to finish before taking the inode lock, | ||
1166 | * to avoid a deadlock between ocfs2_setattr() and | ||
1167 | * ocfs2_dio_end_io_write(). | ||
1168 | */ | ||
1169 | inode_dio_wait(inode); | ||
1170 | |||
1164 | status = ocfs2_rw_lock(inode, 1); | 1171 | status = ocfs2_rw_lock(inode, 1); |
1165 | if (status < 0) { | 1172 | if (status < 0) { |
1166 | mlog_errno(status); | 1173 | mlog_errno(status); |
@@ -1200,8 +1207,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
1200 | if (status) | 1207 | if (status) |
1201 | goto bail_unlock; | 1208 | goto bail_unlock; |
1202 | 1209 | ||
1203 | inode_dio_wait(inode); | ||
1204 | |||
1205 | if (i_size_read(inode) >= attr->ia_size) { | 1210 | if (i_size_read(inode) >= attr->ia_size) { |
1206 | if (ocfs2_should_order_data(inode)) { | 1211 | if (ocfs2_should_order_data(inode)) { |
1207 | status = ocfs2_begin_ordered_truncate(inode, | 1212 | status = ocfs2_begin_ordered_truncate(inode, |
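The file.c hunk above moves inode_dio_wait() in front of ocfs2_rw_lock(). The general rule it follows: drain in-flight I/O before taking a lock that the I/O completion handler also acquires, otherwise waiting with the lock held can deadlock against the completion path. A minimal user-space sketch of that ordering follows; the locks and helpers below are illustrative analogues, not the ocfs2 code.

/*
 * Hedged sketch (user-space analogue) of "wait for dio, then lock".
 */
#include <pthread.h>

static pthread_mutex_t rw_lock = PTHREAD_MUTEX_INITIALIZER;	/* ocfs2_rw_lock() analogue */
static pthread_mutex_t dio_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dio_done = PTHREAD_COND_INITIALIZER;
static int dio_in_flight;

static void dio_wait(void)			/* inode_dio_wait() analogue */
{
	pthread_mutex_lock(&dio_lock);
	while (dio_in_flight > 0)
		pthread_cond_wait(&dio_done, &dio_lock);
	pthread_mutex_unlock(&dio_lock);
}

static void dio_end_io_write(void)		/* completion path: needs rw_lock too */
{
	pthread_mutex_lock(&rw_lock);
	/* ... flush allocation changes ... */
	pthread_mutex_unlock(&rw_lock);

	pthread_mutex_lock(&dio_lock);
	if (--dio_in_flight == 0)
		pthread_cond_broadcast(&dio_done);
	pthread_mutex_unlock(&dio_lock);
}

static void setattr_truncate(void)
{
	dio_wait();			/* wait for in-flight I/O first ... */
	pthread_mutex_lock(&rw_lock);	/* ... then take the lock the completions need */
	/* ... perform the size change ... */
	pthread_mutex_unlock(&rw_lock);
}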
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 71f22c8fbffd..9f0b95abc09f 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -1147,12 +1147,9 @@ int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb, | |||
1147 | GLOBAL_BITMAP_SYSTEM_INODE, | 1147 | GLOBAL_BITMAP_SYSTEM_INODE, |
1148 | OCFS2_INVALID_SLOT, NULL, | 1148 | OCFS2_INVALID_SLOT, NULL, |
1149 | ALLOC_NEW_GROUP); | 1149 | ALLOC_NEW_GROUP); |
1150 | if (status < 0 && status != -ENOSPC) { | 1150 | if (status < 0 && status != -ENOSPC) |
1151 | mlog_errno(status); | 1151 | mlog_errno(status); |
1152 | goto bail; | ||
1153 | } | ||
1154 | 1152 | ||
1155 | bail: | ||
1156 | return status; | 1153 | return status; |
1157 | } | 1154 | } |
1158 | 1155 | ||
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 80733496b22a..040bbb6a6e4b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -2521,10 +2521,8 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) | |||
2521 | /* This function assumes that the caller has the main osb resource */ | 2521 | /* This function assumes that the caller has the main osb resource */ |
2522 | 2522 | ||
2523 | /* ocfs2_initialize_super() has already created this workqueue */ | 2523 | /* ocfs2_initialize_super() has already created this workqueue */ |
2524 | if (osb->ocfs2_wq) { | 2524 | if (osb->ocfs2_wq) |
2525 | flush_workqueue(osb->ocfs2_wq); | ||
2526 | destroy_workqueue(osb->ocfs2_wq); | 2525 | destroy_workqueue(osb->ocfs2_wq); |
2527 | } | ||
2528 | 2526 | ||
2529 | ocfs2_free_slot_info(osb); | 2527 | ocfs2_free_slot_info(osb); |
2530 | 2528 | ||
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h index b023e4f3d740..d4550c8bbc41 100644 --- a/fs/ocfs2/super.h +++ b/fs/ocfs2/super.h | |||
@@ -26,9 +26,6 @@ | |||
26 | #ifndef OCFS2_SUPER_H | 26 | #ifndef OCFS2_SUPER_H |
27 | #define OCFS2_SUPER_H | 27 | #define OCFS2_SUPER_H |
28 | 28 | ||
29 | int ocfs2_publish_get_mount_state(struct ocfs2_super *osb, | ||
30 | int node_num); | ||
31 | |||
32 | __printf(3, 4) | 29 | __printf(3, 4) |
33 | int __ocfs2_error(struct super_block *sb, const char *function, | 30 | int __ocfs2_error(struct super_block *sb, const char *function, |
34 | const char *fmt, ...); | 31 | const char *fmt, ...); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6744bd706ecf..875231c36cb3 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | void task_mem(struct seq_file *m, struct mm_struct *mm) | 27 | void task_mem(struct seq_file *m, struct mm_struct *mm) |
28 | { | 28 | { |
29 | unsigned long text, lib, swap, ptes, pmds, anon, file, shmem; | 29 | unsigned long text, lib, swap, anon, file, shmem; |
30 | unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; | 30 | unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; |
31 | 31 | ||
32 | anon = get_mm_counter(mm, MM_ANONPAGES); | 32 | anon = get_mm_counter(mm, MM_ANONPAGES); |
@@ -50,8 +50,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) | |||
50 | text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; | 50 | text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; |
51 | lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; | 51 | lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; |
52 | swap = get_mm_counter(mm, MM_SWAPENTS); | 52 | swap = get_mm_counter(mm, MM_SWAPENTS); |
53 | ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); | ||
54 | pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); | ||
55 | seq_printf(m, | 53 | seq_printf(m, |
56 | "VmPeak:\t%8lu kB\n" | 54 | "VmPeak:\t%8lu kB\n" |
57 | "VmSize:\t%8lu kB\n" | 55 | "VmSize:\t%8lu kB\n" |
@@ -67,7 +65,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) | |||
67 | "VmExe:\t%8lu kB\n" | 65 | "VmExe:\t%8lu kB\n" |
68 | "VmLib:\t%8lu kB\n" | 66 | "VmLib:\t%8lu kB\n" |
69 | "VmPTE:\t%8lu kB\n" | 67 | "VmPTE:\t%8lu kB\n" |
70 | "VmPMD:\t%8lu kB\n" | ||
71 | "VmSwap:\t%8lu kB\n", | 68 | "VmSwap:\t%8lu kB\n", |
72 | hiwater_vm << (PAGE_SHIFT-10), | 69 | hiwater_vm << (PAGE_SHIFT-10), |
73 | total_vm << (PAGE_SHIFT-10), | 70 | total_vm << (PAGE_SHIFT-10), |
@@ -80,8 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) | |||
80 | shmem << (PAGE_SHIFT-10), | 77 | shmem << (PAGE_SHIFT-10), |
81 | mm->data_vm << (PAGE_SHIFT-10), | 78 | mm->data_vm << (PAGE_SHIFT-10), |
82 | mm->stack_vm << (PAGE_SHIFT-10), text, lib, | 79 | mm->stack_vm << (PAGE_SHIFT-10), text, lib, |
83 | ptes >> 10, | 80 | mm_pgtables_bytes(mm) >> 10, |
84 | pmds >> 10, | ||
85 | swap << (PAGE_SHIFT-10)); | 81 | swap << (PAGE_SHIFT-10)); |
86 | hugetlb_report_usage(m, mm); | 82 | hugetlb_report_usage(m, mm); |
87 | } | 83 | } |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index f46d133c0949..ac9a4e65ca49 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -668,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) | |||
668 | ctx->features = octx->features; | 668 | ctx->features = octx->features; |
669 | ctx->released = false; | 669 | ctx->released = false; |
670 | ctx->mm = vma->vm_mm; | 670 | ctx->mm = vma->vm_mm; |
671 | atomic_inc(&ctx->mm->mm_count); | 671 | mmgrab(ctx->mm); |
672 | 672 | ||
673 | userfaultfd_ctx_get(octx); | 673 | userfaultfd_ctx_get(octx); |
674 | fctx->orig = octx; | 674 | fctx->orig = octx; |
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 758f37ac5ad3..4b87472f35bc 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h | |||
@@ -104,7 +104,7 @@ kmem_zone_init(int size, char *zone_name) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static inline kmem_zone_t * | 106 | static inline kmem_zone_t * |
107 | kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, | 107 | kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags, |
108 | void (*construct)(void *)) | 108 | void (*construct)(void *)) |
109 | { | 109 | { |
110 | return kmem_cache_create(zone_name, size, 0, flags, construct); | 110 | return kmem_cache_create(zone_name, size, 0, flags, construct); |