Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                  |   7
-rw-r--r--  fs/btrfs/ordered-data.c   |  11
-rw-r--r--  fs/btrfs/volumes.c        |   8
-rw-r--r--  fs/coredump.c             |   2
-rw-r--r--  fs/direct-io.c            |  14
-rw-r--r--  fs/fuse/dev.c             |  51
-rw-r--r--  fs/fuse/dir.c             |  41
-rw-r--r--  fs/fuse/file.c            |   8
-rw-r--r--  fs/fuse/inode.c           |  27
-rw-r--r--  fs/gfs2/file.c            |   4
-rw-r--r--  fs/gfs2/glock.c           |  14
-rw-r--r--  fs/gfs2/glops.c           |   4
-rw-r--r--  fs/gfs2/lock_dlm.c        |   4
-rw-r--r--  fs/gfs2/rgrp.c            |   4
-rw-r--r--  fs/namei.c                |   3
-rw-r--r--  fs/nfs/direct.c           |   2
-rw-r--r--  fs/nfs/internal.h         |   1
-rw-r--r--  fs/nfs/nfs3acl.c          |  43
-rw-r--r--  fs/nfs/nfs3proc.c         |   4
-rw-r--r--  fs/nfs/pagelist.c         |  20
-rw-r--r--  fs/nfs/write.c            | 335
-rw-r--r--  fs/nfsd/nfs4xdr.c         |   4
-rw-r--r--  fs/quota/dquot.c          |   2
-rw-r--r--  fs/xattr.c                |   2
-rw-r--r--  fs/xfs/xfs_bmap.c         |   7
-rw-r--r--  fs/xfs/xfs_bmap.h         |   4
-rw-r--r--  fs/xfs/xfs_bmap_util.c    |  53
-rw-r--r--  fs/xfs/xfs_bmap_util.h    |   4
-rw-r--r--  fs/xfs/xfs_btree.c        |  82
-rw-r--r--  fs/xfs/xfs_iomap.c        |   3
-rw-r--r--  fs/xfs/xfs_sb.c           |  25
31 files changed, 574 insertions, 219 deletions
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
         struct kioctx_cpu *kcpu;
+        unsigned long flags;
 
         preempt_disable();
         kcpu = this_cpu_ptr(ctx->cpu);
 
+        local_irq_save(flags);
         kcpu->reqs_available += nr;
+
         while (kcpu->reqs_available >= ctx->req_batch * 2) {
                 kcpu->reqs_available -= ctx->req_batch;
                 atomic_add(ctx->req_batch, &ctx->reqs_available);
         }
 
+        local_irq_restore(flags);
         preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
         struct kioctx_cpu *kcpu;
         bool ret = false;
+        unsigned long flags;
 
         preempt_disable();
         kcpu = this_cpu_ptr(ctx->cpu);
 
+        local_irq_save(flags);
         if (!kcpu->reqs_available) {
                 int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
         ret = true;
         kcpu->reqs_available--;
 out:
+        local_irq_restore(flags);
         preempt_enable();
         return ret;
 }
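The fs/aio.c hunks above make the per-cpu reqs_available batching safe against interrupt-context updates: preempt_disable() pins the task to a cpu but does not stop a local interrupt from touching the same counter mid-update. A schematic sketch of the resulting pattern, with a hypothetical helper name, not the exact fs/aio.c code:

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/irqflags.h>

    /*
     * Read-modify-write a per-cpu counter that irq handlers also touch:
     * pin the cpu, then mask local interrupts around the update.
     */
    static void percpu_ctr_add(unsigned int __percpu *ctr, unsigned int nr)
    {
        unsigned long flags;

        preempt_disable();          /* stay on this cpu */
        local_irq_save(flags);      /* exclude local irq handlers */
        *this_cpu_ptr(ctr) += nr;
        local_irq_restore(flags);
        preempt_enable();
    }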
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
                                      log_list);
                 list_del_init(&ordered->log_list);
                 spin_unlock_irq(&log->log_extents_lock[index]);
+
+                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+                        struct inode *inode = ordered->inode;
+                        u64 start = ordered->file_offset;
+                        u64 end = ordered->file_offset + ordered->len - 1;
+
+                        WARN_ON(!inode);
+                        filemap_fdatawrite_range(inode->i_mapping, start, end);
+                }
                 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                    &ordered->flags));
+
                 btrfs_put_ordered_extent(ordered);
                 spin_lock_irq(&log->log_extents_lock[index]);
         }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676857f5..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-        if (device->bdev)
+        if (device->bdev) {
                 device->fs_devices->open_devices--;
-
-        /* remove sysfs entry */
-        btrfs_kobj_rm_device(root->fs_info, device);
+                /* remove sysfs entry */
+                btrfs_kobj_rm_device(root->fs_info, device);
+        }
 
         call_rcu(&device->rcu, free_device);
 
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
         if (unlikely(nr < 0))
                 return nr;
 
-        tsk->flags = PF_DUMPCORE;
+        tsk->flags |= PF_DUMPCORE;
         if (atomic_read(&mm->mm_users) == nr + 1)
                 goto done;
         /*
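The one-character coredump fix above replaces a plain assignment, which silently clobbered every other PF_* bit in tsk->flags, with an OR that only sets the dumpcore bit. A compilable userspace illustration (the flag values here are illustrative, not authoritative):

    #include <assert.h>

    #define PF_EXITING  0x00000004u
    #define PF_DUMPCORE 0x00000200u

    int main(void)
    {
        unsigned flags = PF_EXITING;

        /* buggy form: flags = PF_DUMPCORE; would drop PF_EXITING */
        flags |= PF_DUMPCORE;       /* fixed form: existing bits preserved */
        assert(flags & PF_EXITING);
        assert(flags & PF_DUMPCORE);
        return 0;
    }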
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..194d0d122cae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-                struct dio_submit *sdio, size_t *from, size_t *to)
+                struct dio_submit *sdio)
 {
-        int n;
         if (dio_pages_present(sdio) == 0) {
                 int ret;
 
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                         return ERR_PTR(ret);
                 BUG_ON(dio_pages_present(sdio) == 0);
         }
-        n = sdio->head++;
-        *from = n ? 0 : sdio->from;
-        *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-        return dio->pages[n];
+        return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
         while (sdio->block_in_file < sdio->final_block_in_request) {
                 struct page *page;
                 size_t from, to;
-                page = dio_get_page(dio, sdio, &from, &to);
+
+                page = dio_get_page(dio, sdio);
                 if (IS_ERR(page)) {
                         ret = PTR_ERR(page);
                         goto out;
                 }
+                from = sdio->head ? 0 : sdio->from;
+                to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+                sdio->head++;
 
                 while (from < to) {
                         unsigned this_chunk_bytes;      /* # of bytes mapped */
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
         unsigned long seglen;
         unsigned long addr;
         struct page *pg;
-        void *mapaddr;
-        void *buf;
         unsigned len;
+        unsigned offset;
         unsigned move_pages:1;
 };
 
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
         if (cs->currbuf) {
                 struct pipe_buffer *buf = cs->currbuf;
 
-                if (!cs->write) {
-                        kunmap_atomic(cs->mapaddr);
-                } else {
-                        kunmap_atomic(cs->mapaddr);
+                if (cs->write)
                         buf->len = PAGE_SIZE - cs->len;
-                }
                 cs->currbuf = NULL;
-                cs->mapaddr = NULL;
-        } else if (cs->mapaddr) {
-                kunmap_atomic(cs->mapaddr);
+        } else if (cs->pg) {
                 if (cs->write) {
                         flush_dcache_page(cs->pg);
                         set_page_dirty_lock(cs->pg);
                 }
                 put_page(cs->pg);
-                cs->mapaddr = NULL;
         }
+        cs->pg = NULL;
 }
 
 /*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
  */
 static int fuse_copy_fill(struct fuse_copy_state *cs)
 {
-        unsigned long offset;
+        struct page *page;
         int err;
 
         unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                         BUG_ON(!cs->nr_segs);
                         cs->currbuf = buf;
-                        cs->mapaddr = kmap_atomic(buf->page);
+                        cs->pg = buf->page;
+                        cs->offset = buf->offset;
                         cs->len = buf->len;
-                        cs->buf = cs->mapaddr + buf->offset;
                         cs->pipebufs++;
                         cs->nr_segs--;
                 } else {
-                        struct page *page;
-
                         if (cs->nr_segs == cs->pipe->buffers)
                                 return -EIO;
 
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                         buf->len = 0;
 
                         cs->currbuf = buf;
-                        cs->mapaddr = kmap_atomic(page);
-                        cs->buf = cs->mapaddr;
+                        cs->pg = page;
+                        cs->offset = 0;
                         cs->len = PAGE_SIZE;
                         cs->pipebufs++;
                         cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                         cs->iov++;
                         cs->nr_segs--;
                 }
-                err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+                err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
                 if (err < 0)
                         return err;
                 BUG_ON(err != 1);
-                offset = cs->addr % PAGE_SIZE;
-                cs->mapaddr = kmap_atomic(cs->pg);
-                cs->buf = cs->mapaddr + offset;
-                cs->len = min(PAGE_SIZE - offset, cs->seglen);
+                cs->pg = page;
+                cs->offset = cs->addr % PAGE_SIZE;
+                cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
                 cs->seglen -= cs->len;
                 cs->addr += cs->len;
         }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
         unsigned ncpy = min(*size, cs->len);
         if (val) {
+                void *pgaddr = kmap_atomic(cs->pg);
+                void *buf = pgaddr + cs->offset;
+
                 if (cs->write)
-                        memcpy(cs->buf, *val, ncpy);
+                        memcpy(buf, *val, ncpy);
                 else
-                        memcpy(*val, cs->buf, ncpy);
+                        memcpy(*val, buf, ncpy);
+
+                kunmap_atomic(pgaddr);
                 *val += ncpy;
         }
         *size -= ncpy;
         cs->len -= ncpy;
-        cs->buf += ncpy;
+        cs->offset += ncpy;
         return ncpy;
 }
 
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
         unlock_page(newpage);
 out_fallback:
-        cs->mapaddr = kmap_atomic(buf->page);
-        cs->buf = cs->mapaddr + buf->offset;
+        cs->pg = buf->page;
+        cs->offset = buf->offset;
 
         err = lock_request(cs->fc, cs->req);
         if (err)
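The fuse/dev.c rework stops caching a kmap_atomic() address in the copy state, since an atomic mapping must not be held across code that can sleep or fault; the state now records page + offset, and fuse_copy_do() maps the page only for the duration of each copy. A minimal sketch of that map-copy-unmap window, using a hypothetical helper in kernel context:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Map the page only around the memcpy itself; the atomic mapping
     * is dropped before anything that might sleep or fault can run. */
    static void copy_to_page_chunk(struct page *pg, unsigned int offset,
                                   const void *src, unsigned int len)
    {
        void *pgaddr = kmap_atomic(pg);

        memcpy(pgaddr + offset, src, len);
        kunmap_atomic(pgaddr);
    }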
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
         inode = ACCESS_ONCE(entry->d_inode);
         if (inode && is_bad_inode(inode))
                 goto invalid;
-        else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+        else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+                 (flags & LOOKUP_REVAL)) {
                 int err;
                 struct fuse_entry_out outarg;
                 struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
         return err;
 }
 
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
-                       struct inode *newdir, struct dentry *newent)
-{
-        return fuse_rename_common(olddir, oldent, newdir, newent, 0,
-                                  FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
 static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
                         struct inode *newdir, struct dentry *newent,
                         unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
                 return -EINVAL;
 
-        if (fc->no_rename2 || fc->minor < 23)
-                return -EINVAL;
+        if (flags) {
+                if (fc->no_rename2 || fc->minor < 23)
+                        return -EINVAL;
 
-        err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
-                                 FUSE_RENAME2, sizeof(struct fuse_rename2_in));
-        if (err == -ENOSYS) {
-                fc->no_rename2 = 1;
-                err = -EINVAL;
+                err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+                                         FUSE_RENAME2,
+                                         sizeof(struct fuse_rename2_in));
+                if (err == -ENOSYS) {
+                        fc->no_rename2 = 1;
+                        err = -EINVAL;
+                }
+        } else {
+                err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+                                         FUSE_RENAME,
+                                         sizeof(struct fuse_rename_in));
         }
+
         return err;
+}
 
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+                       struct inode *newdir, struct dentry *newent)
+{
+        return fuse_rename2(olddir, oldent, newdir, newent, 0);
 }
 
 static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
         int err;
         bool r;
 
-        if (fi->i_time < get_jiffies_64()) {
+        if (time_before64(fi->i_time, get_jiffies_64())) {
                 r = true;
                 err = fuse_do_getattr(inode, stat, file);
         } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
             ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                 struct fuse_inode *fi = get_fuse_inode(inode);
 
-                if (fi->i_time < get_jiffies_64()) {
+                if (time_before64(fi->i_time, get_jiffies_64())) {
                         refreshed = true;
 
                         err = fuse_perm_getattr(inode, mask);
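The fuse/dir.c hunks switch raw `<` comparisons of 64-bit jiffies timestamps to time_before64(), which stays correct across counter wraparound by testing the sign of the difference. A small userspace demonstration of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    /* wraparound-safe "a is before b", the same idea as time_before64() */
    static int before64(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
        uint64_t near_wrap  = UINT64_MAX - 5;   /* timestamp just before wrap */
        uint64_t after_wrap = 3;                /* timestamp just after wrap */

        printf("naive <      : %d\n", near_wrap < after_wrap);          /* 0, wrong */
        printf("time_before64: %d\n", before64(near_wrap, after_wrap)); /* 1, right */
        return 0;
    }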
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
         error = -EIO;
         req->ff = fuse_write_file_get(fc, fi);
         if (!req->ff)
-                goto err_free;
+                goto err_nofile;
 
         fuse_write_fill(req, req->ff, page_offset(page), 0);
 
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
 
         return 0;
 
+err_nofile:
+        __free_page(tmp_page);
 err_free:
         fuse_request_free(req);
 err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
         data.ff = NULL;
 
         err = -ENOMEM;
-        data.orig_pages = kzalloc(sizeof(struct page *) *
-                                  FUSE_MAX_PAGES_PER_REQ,
-                                  GFP_NOFS);
+        data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+                                  sizeof(struct page *),
+                                  GFP_NOFS);
         if (!data.orig_pages)
                 goto out;
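The kzalloc(a * b) to kcalloc(n, size) conversion above (and the identical one in fs/gfs2/lock_dlm.c below) matters because kcalloc checks the multiplication for overflow instead of letting it wrap and under-allocate. The userspace analogue with calloc(), as a sketch:

    #include <stdlib.h>

    /* calloc(), like kcalloc(), fails cleanly if n * size overflows;
     * the open-coded multiply in malloc(n * size) would silently wrap. */
    static void **alloc_page_array(size_t n)
    {
        return calloc(n, sizeof(void *));
    }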
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
         {OPT_ERR, NULL}
 };
 
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+        int err = -ENOMEM;
+        char *buf = match_strdup(s);
+        if (buf) {
+                err = kstrtouint(buf, 10, res);
+                kfree(buf);
+        }
+        return err;
+}
+
 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
         char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
         while ((p = strsep(&opt, ",")) != NULL) {
                 int token;
                 int value;
+                unsigned uv;
                 substring_t args[MAX_OPT_ARGS];
                 if (!*p)
                         continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
                         break;
 
                 case OPT_USER_ID:
-                        if (match_int(&args[0], &value))
+                        if (fuse_match_uint(&args[0], &uv))
                                 return 0;
-                        d->user_id = make_kuid(current_user_ns(), value);
+                        d->user_id = make_kuid(current_user_ns(), uv);
                         if (!uid_valid(d->user_id))
                                 return 0;
                         d->user_id_present = 1;
                         break;
 
                 case OPT_GROUP_ID:
-                        if (match_int(&args[0], &value))
+                        if (fuse_match_uint(&args[0], &uv))
                                 return 0;
-                        d->group_id = make_kgid(current_user_ns(), value);
+                        d->group_id = make_kgid(current_user_ns(), uv);
                         if (!gid_valid(d->group_id))
                                 return 0;
                         d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                 fc->writeback_cache = 1;
                         if (arg->time_gran && arg->time_gran <= 1000000000)
                                 fc->sb->s_time_gran = arg->time_gran;
-                        else
-                                fc->sb->s_time_gran = 1000000000;
-
                 } else {
                         ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                         fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-                FUSE_WRITEBACK_CACHE;
+                FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
         req->in.h.opcode = FUSE_INIT;
         req->in.numargs = 1;
         req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
         sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 
-        if (!parse_fuse_opt((char *) data, &d, is_bdev))
+        if (!parse_fuse_opt(data, &d, is_bdev))
                 goto err;
 
         if (is_bdev) {
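The new fuse_match_uint() above parses user_id/group_id mount options through kstrtouint() rather than match_int(): match_int() goes through a signed int, so values that do not fit a signed int could not be parsed into a uid/gid, and negative strings are likewise rejected by the unsigned parse. A userspace sketch of the stricter parse, with a hypothetical helper name:

    #include <errno.h>
    #include <limits.h>
    #include <stdlib.h>

    /* Accept only a non-negative decimal that fits in unsigned int;
     * bare strtoul() would silently negate "-1" into a huge value. */
    static int parse_uint(const char *s, unsigned int *res)
    {
        char *end;
        unsigned long v;

        if (*s == '-' || *s == '\0')
            return -EINVAL;
        errno = 0;
        v = strtoul(s, &end, 10);
        if (errno || *end != '\0' || v > UINT_MAX)
            return -EINVAL;
        *res = (unsigned int)v;
        return 0;
    }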
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
         int error = 0;
 
         state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
 
         mutex_lock(&fp->f_fl_mutex);
 
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                         goto out;
                 flock_lock_file_wait(file,
                                      &(struct file_lock){.fl_type = F_UNLCK});
-                gfs2_glock_dq_wait(fl_gh);
+                gfs2_glock_dq(fl_gh);
                 gfs2_holder_reinit(state, flags, fl_gh);
         } else {
                 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 770e16716d81..7f513b1ceb2c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                 cachep = gfs2_glock_aspace_cachep;
         else
                 cachep = gfs2_glock_cachep;
-        gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+        gl = kmem_cache_alloc(cachep, GFP_NOFS);
         if (!gl)
                 return -ENOMEM;
 
         memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
         if (glops->go_flags & GLOF_LVB) {
-                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                 if (!gl->gl_lksb.sb_lvbptr) {
                         kmem_cache_free(cachep, gl);
                         return -ENOMEM;
@@ -1383,12 +1383,16 @@ __acquires(&lru_lock)
                 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                 list_del_init(&gl->gl_lru);
                 if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                         list_add(&gl->gl_lru, &lru_list);
                         atomic_inc(&lru_count);
                         continue;
                 }
+                if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                        spin_unlock(&gl->gl_spin);
+                        goto add_back_to_lru;
+                }
                 clear_bit(GLF_LRU, &gl->gl_flags);
-                spin_unlock(&lru_lock);
                 gl->gl_lockref.count++;
                 if (demote_ok(gl))
                         handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1396,7 +1400,7 @@ __acquires(&lru_lock)
                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
                 spin_unlock(&gl->gl_spin);
-                spin_lock(&lru_lock);
+                cond_resched_lock(&lru_lock);
         }
 }
 
@@ -1421,7 +1425,7 @@ static long gfs2_scan_glock_lru(int nr)
                 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
                 /* Test for being demotable */
-                if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                         list_move(&gl->gl_lru, &dispose);
                         atomic_dec(&lru_count);
                         freed++;
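The GFP_KERNEL to GFP_NOFS switch in gfs2_glock_get() above avoids a recursion hazard: a GFP_KERNEL allocation may enter direct reclaim, and reclaim can call back into the filesystem while the caller is in the middle of glock setup. A schematic sketch of the choice, kernel context assumed and the helper name hypothetical:

    #include <linux/slab.h>

    /* On a path where reclaim must not re-enter the filesystem,
     * GFP_NOFS still allows reclaim, just not fs-level reclaim. */
    static void *fs_safe_alloc(struct kmem_cache *cachep)
    {
        return kmem_cache_alloc(cachep, GFP_NOFS);
    }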
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
  *
- * Normally we invlidate everything, but if we are moving into
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 992ca5b1e045..641383a9c1bb 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1030,8 +1030,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
         new_size = old_size + RECOVER_SIZE_INC;
 
-        submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-        result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+        submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+        result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
         if (!submit || !result) {
                 kfree(submit);
                 kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f368485..9eb787e5c167 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
                 goto out;
         }
         path->dentry = dentry;
-        path->mnt = mntget(nd->path.mnt);
+        path->mnt = nd->path.mnt;
         if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                 return 1;
+        mntget(path->mnt);
         follow_mount(path);
         error = 0;
 out:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
         spin_unlock(&dreq->lock);
 
         while (!list_empty(&hdr->pages)) {
-                bool do_destroy = true;
 
                 req = nfs_list_entry(hdr->pages.next);
                 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                 case NFS_IOHDR_NEED_COMMIT:
                         kref_get(&req->wb_kref);
                         nfs_mark_request_commit(req, hdr->lseg, &cinfo);
-                        do_destroy = false;
                 }
                 nfs_unlock_and_release_request(req);
         }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e0193d63630c..617f36611d4a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
 int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
                       const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
 
 static inline void nfs_iocounter_init(struct nfs_io_counter *c)
 {
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
         &posix_acl_default_xattr_handler,
         NULL,
 };
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+                size_t size, ssize_t *result)
+{
+        struct posix_acl *acl;
+        char *p = data + *result;
+
+        acl = get_acl(inode, type);
+        if (!acl)
+                return 0;
+
+        posix_acl_release(acl);
+
+        *result += strlen(name);
+        *result += 1;
+        if (!size)
+                return 0;
+        if (*result > size)
+                return -ERANGE;
+
+        strcpy(p, name);
+        return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+        struct inode *inode = dentry->d_inode;
+        ssize_t result = 0;
+        int error;
+
+        error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+                        POSIX_ACL_XATTR_ACCESS, data, size, &result);
+        if (error)
+                return error;
+
+        error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+                        POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+        if (error)
+                return error;
+        return result;
+}
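The new nfs3_listxattr() follows the standard listxattr contract: with size == 0 it only accumulates the buffer length a caller must allocate; with a real buffer it fills NUL-terminated names, or fails with -ERANGE if the buffer is too small. The usual two-call pattern from userspace on Linux, as a sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : ".";
        ssize_t need = listxattr(path, NULL, 0);    /* probe required size */
        char *buf, *p;

        if (need <= 0)
            return 0;                               /* no attrs (or error) */
        buf = malloc(need);
        if (!buf || listxattr(path, buf, need) < 0)
            return 1;
        for (p = buf; p < buf + need; p += strlen(p) + 1)
            puts(p);                                /* one name per entry */
        free(buf);
        return 0;
    }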
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
         .getattr = nfs_getattr,
         .setattr = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-        .listxattr = generic_listxattr,
+        .listxattr = nfs3_listxattr,
         .getxattr = generic_getxattr,
         .setxattr = generic_setxattr,
         .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
         .getattr = nfs_getattr,
         .setattr = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-        .listxattr = generic_listxattr,
+        .listxattr = nfs3_listxattr,
         .getxattr = generic_getxattr,
         .setxattr = generic_setxattr,
         .removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 745a612dbe22..0be5050638f7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static void nfs_free_request(struct nfs_page *);
-
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
         p->npages = pagecount;
@@ -232,20 +230,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
         WARN_ON_ONCE(prev == req);
 
         if (!prev) {
+                /* a head request */
                 req->wb_head = req;
                 req->wb_this_page = req;
         } else {
+                /* a subrequest */
                 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                 req->wb_head = prev->wb_head;
                 req->wb_this_page = prev->wb_this_page;
                 prev->wb_this_page = req;
 
+                /* All subrequests take a ref on the head request until
+                 * nfs_page_group_destroy is called */
+                kref_get(&req->wb_head->wb_kref);
+
                 /* grab extra ref if head request has extra ref from
                  * the write/commit path to handle handoff between write
                  * and commit lists */
-                if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+                if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+                        set_bit(PG_INODE_REF, &req->wb_flags);
                         kref_get(&req->wb_kref);
+                }
         }
 }
 
@@ -262,6 +268,10 @@ nfs_page_group_destroy(struct kref *kref)
         struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
         struct nfs_page *tmp, *next;
 
+        /* subrequests must release the ref on the head request */
+        if (req->wb_head != req)
+                nfs_release_request(req->wb_head);
+
         if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                 return;
 
@@ -387,7 +397,7 @@ static void nfs_clear_request(struct nfs_page *req)
  *
  * Note: Should never be called with the spinlock held!
  */
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
 {
         WARN_ON_ONCE(req->wb_this_page != req);
 
@@ -917,7 +927,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                 nfs_pageio_doio(desc);
                 if (desc->pg_error < 0)
                         return 0;
-                desc->pg_moreio = 0;
                 if (desc->pg_recoalesce)
                         return 0;
                 /* retry add_request for this subreq */
@@ -964,6 +973,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
         desc->pg_count = 0;
         desc->pg_base = 0;
         desc->pg_recoalesce = 0;
+        desc->pg_moreio = 0;
 
         while (!list_empty(&head)) {
                 struct nfs_page *req;
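The page-group hunks above make every subrequest take a kref on its head request in nfs_page_group_init() and drop it in nfs_page_group_destroy(), so the head, and with it the group, cannot be freed while any member is still alive. The underlying kref idiom, as a schematic sketch with hypothetical names in kernel context:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct group_head {
        struct kref kref;
        /* ... shared group state ... */
    };

    static void group_head_release(struct kref *kref)
    {
        kfree(container_of(kref, struct group_head, kref));
    }

    /* each subrequest pins the head for its whole lifetime */
    static void subreq_attach(struct group_head *head)
    {
        kref_get(&head->kref);
    }

    static void subreq_detach(struct group_head *head)
    {
        kref_put(&head->kref, group_head_release);
    }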
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f05f321f9d3d..962c9ee758be 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops; | |||
46 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; | 46 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; |
47 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; | 47 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; |
48 | static const struct nfs_rw_ops nfs_rw_write_ops; | 48 | static const struct nfs_rw_ops nfs_rw_write_ops; |
49 | static void nfs_clear_request_commit(struct nfs_page *req); | ||
49 | 50 | ||
50 | static struct kmem_cache *nfs_wdata_cachep; | 51 | static struct kmem_cache *nfs_wdata_cachep; |
51 | static mempool_t *nfs_wdata_mempool; | 52 | static mempool_t *nfs_wdata_mempool; |
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) | |||
91 | set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); | 92 | set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); |
92 | } | 93 | } |
93 | 94 | ||
95 | /* | ||
96 | * nfs_page_find_head_request_locked - find head request associated with @page | ||
97 | * | ||
98 | * must be called while holding the inode lock. | ||
99 | * | ||
100 | * returns matching head request with reference held, or NULL if not found. | ||
101 | */ | ||
94 | static struct nfs_page * | 102 | static struct nfs_page * |
95 | nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) | 103 | nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) |
96 | { | 104 | { |
97 | struct nfs_page *req = NULL; | 105 | struct nfs_page *req = NULL; |
98 | 106 | ||
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) | |||
104 | /* Linearly search the commit list for the correct req */ | 112 | /* Linearly search the commit list for the correct req */ |
105 | list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { | 113 | list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { |
106 | if (freq->wb_page == page) { | 114 | if (freq->wb_page == page) { |
107 | req = freq; | 115 | req = freq->wb_head; |
108 | break; | 116 | break; |
109 | } | 117 | } |
110 | } | 118 | } |
111 | } | 119 | } |
112 | 120 | ||
113 | if (req) | 121 | if (req) { |
122 | WARN_ON_ONCE(req->wb_head != req); | ||
123 | |||
114 | kref_get(&req->wb_kref); | 124 | kref_get(&req->wb_kref); |
125 | } | ||
115 | 126 | ||
116 | return req; | 127 | return req; |
117 | } | 128 | } |
118 | 129 | ||
119 | static struct nfs_page *nfs_page_find_request(struct page *page) | 130 | /* |
131 | * nfs_page_find_head_request - find head request associated with @page | ||
132 | * | ||
133 | * returns matching head request with reference held, or NULL if not found. | ||
134 | */ | ||
135 | static struct nfs_page *nfs_page_find_head_request(struct page *page) | ||
120 | { | 136 | { |
121 | struct inode *inode = page_file_mapping(page)->host; | 137 | struct inode *inode = page_file_mapping(page)->host; |
122 | struct nfs_page *req = NULL; | 138 | struct nfs_page *req = NULL; |
123 | 139 | ||
124 | spin_lock(&inode->i_lock); | 140 | spin_lock(&inode->i_lock); |
125 | req = nfs_page_find_request_locked(NFS_I(inode), page); | 141 | req = nfs_page_find_head_request_locked(NFS_I(inode), page); |
126 | spin_unlock(&inode->i_lock); | 142 | spin_unlock(&inode->i_lock); |
127 | return req; | 143 | return req; |
128 | } | 144 | } |
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req) | |||
274 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); | 290 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); |
275 | } | 291 | } |
276 | 292 | ||
277 | static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) | 293 | |
294 | /* nfs_page_group_clear_bits | ||
295 | * @req - an nfs request | ||
296 | * clears all page group related bits from @req | ||
297 | */ | ||
298 | static void | ||
299 | nfs_page_group_clear_bits(struct nfs_page *req) | ||
300 | { | ||
301 | clear_bit(PG_TEARDOWN, &req->wb_flags); | ||
302 | clear_bit(PG_UNLOCKPAGE, &req->wb_flags); | ||
303 | clear_bit(PG_UPTODATE, &req->wb_flags); | ||
304 | clear_bit(PG_WB_END, &req->wb_flags); | ||
305 | clear_bit(PG_REMOVE, &req->wb_flags); | ||
306 | } | ||
307 | |||
308 | |||
309 | /* | ||
310 | * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req | ||
311 | * | ||
312 | * this is a helper function for nfs_lock_and_join_requests | ||
313 | * | ||
314 | * @inode - inode associated with request page group, must be holding inode lock | ||
315 | * @head - head request of page group, must be holding head lock | ||
316 | * @req - request that couldn't lock and needs to wait on the req bit lock | ||
317 | * @nonblock - if true, don't actually wait | ||
318 | * | ||
319 | * NOTE: this must be called holding page_group bit lock and inode spin lock | ||
320 | * and BOTH will be released before returning. | ||
321 | * | ||
322 | * returns 0 on success, < 0 on error. | ||
323 | */ | ||
324 | static int | ||
325 | nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, | ||
326 | struct nfs_page *req, bool nonblock) | ||
327 | __releases(&inode->i_lock) | ||
328 | { | ||
329 | struct nfs_page *tmp; | ||
330 | int ret; | ||
331 | |||
332 | /* relinquish all the locks successfully grabbed this run */ | ||
333 | for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) | ||
334 | nfs_unlock_request(tmp); | ||
335 | |||
336 | WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); | ||
337 | |||
338 | /* grab a ref on the request that will be waited on */ | ||
339 | kref_get(&req->wb_kref); | ||
340 | |||
341 | nfs_page_group_unlock(head); | ||
342 | spin_unlock(&inode->i_lock); | ||
343 | |||
344 | /* release ref from nfs_page_find_head_request_locked */ | ||
345 | nfs_release_request(head); | ||
346 | |||
347 | if (!nonblock) | ||
348 | ret = nfs_wait_on_request(req); | ||
349 | else | ||
350 | ret = -EAGAIN; | ||
351 | nfs_release_request(req); | ||
352 | |||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | /* | ||
357 | * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests | ||
358 | * | ||
359 | * @destroy_list - request list (using wb_this_page) terminated by @old_head | ||
360 | * @old_head - the old head of the list | ||
361 | * | ||
362 | * All subrequests must be locked and removed from all lists, so at this point | ||
363 | * they are only "active" in this function, and possibly in nfs_wait_on_request | ||
364 | * with a reference held by some other context. | ||
365 | */ | ||
366 | static void | ||
367 | nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, | ||
368 | struct nfs_page *old_head) | ||
369 | { | ||
370 | while (destroy_list) { | ||
371 | struct nfs_page *subreq = destroy_list; | ||
372 | |||
373 | destroy_list = (subreq->wb_this_page == old_head) ? | ||
374 | NULL : subreq->wb_this_page; | ||
375 | |||
376 | WARN_ON_ONCE(old_head != subreq->wb_head); | ||
377 | |||
378 | /* make sure old group is not used */ | ||
379 | subreq->wb_head = subreq; | ||
380 | subreq->wb_this_page = subreq; | ||
381 | |||
382 | nfs_clear_request_commit(subreq); | ||
383 | |||
384 | /* subreq is now totally disconnected from page group or any | ||
385 | * write / commit lists. last chance to wake any waiters */ | ||
386 | nfs_unlock_request(subreq); | ||
387 | |||
388 | if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) { | ||
389 | /* release ref on old head request */ | ||
390 | nfs_release_request(old_head); | ||
391 | |||
392 | nfs_page_group_clear_bits(subreq); | ||
393 | |||
394 | /* release the PG_INODE_REF reference */ | ||
395 | if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) | ||
396 | nfs_release_request(subreq); | ||
397 | else | ||
398 | WARN_ON_ONCE(1); | ||
399 | } else { | ||
400 | WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags)); | ||
401 | /* zombie requests have already released the last | ||
402 | * reference and were waiting on the rest of the | ||
403 | * group to complete. Since it's no longer part of a | ||
404 | * group, simply free the request */ | ||
405 | nfs_page_group_clear_bits(subreq); | ||
406 | nfs_free_request(subreq); | ||
407 | } | ||
408 | } | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * nfs_lock_and_join_requests - join all subreqs to the head req and return | ||
413 | * a locked reference, cancelling any pending | ||
414 | * operations for this page. | ||
415 | * | ||
416 | * @page - the page used to lookup the "page group" of nfs_page structures | ||
417 | * @nonblock - if true, don't block waiting for request locks | ||
418 | * | ||
419 | * This function joins all sub requests to the head request by first | ||
420 | * locking all requests in the group, cancelling any pending operations | ||
421 | * and finally updating the head request to cover the whole range covered by | ||
422 | * the (former) group. All subrequests are removed from any write or commit | ||
423 | * lists, unlinked from the group and destroyed. | ||
424 | * | ||
425 | * Returns a locked, referenced pointer to the head request - which after | ||
426 | * this call is guaranteed to be the only request associated with the page. | ||
427 | * Returns NULL if no requests are found for @page, or a ERR_PTR if an | ||
428 | * error was encountered. | ||
429 | */ | ||
430 | static struct nfs_page * | ||
431 | nfs_lock_and_join_requests(struct page *page, bool nonblock) | ||
278 | { | 432 | { |
279 | struct inode *inode = page_file_mapping(page)->host; | 433 | struct inode *inode = page_file_mapping(page)->host; |
280 | struct nfs_page *req; | 434 | struct nfs_page *head, *subreq; |
435 | struct nfs_page *destroy_list = NULL; | ||
436 | unsigned int total_bytes; | ||
281 | int ret; | 437 | int ret; |
282 | 438 | ||
439 | try_again: | ||
440 | total_bytes = 0; | ||
441 | |||
442 | WARN_ON_ONCE(destroy_list); | ||
443 | |||
283 | spin_lock(&inode->i_lock); | 444 | spin_lock(&inode->i_lock); |
284 | for (;;) { | 445 | |
285 | req = nfs_page_find_request_locked(NFS_I(inode), page); | 446 | /* |
286 | if (req == NULL) | 447 | * A reference is taken only on the head request which acts as a |
287 | break; | 448 | * reference to the whole page group - the group will not be destroyed |
288 | if (nfs_lock_request(req)) | 449 | * until the head reference is released. |
289 | break; | 450 | */ |
290 | /* Note: If we hold the page lock, as is the case in nfs_writepage, | 451 | head = nfs_page_find_head_request_locked(NFS_I(inode), page); |
291 | * then the call to nfs_lock_request() will always | 452 | |
292 | * succeed provided that someone hasn't already marked the | 453 | if (!head) { |
293 | * request as dirty (in which case we don't care). | ||
294 | */ | ||
295 | spin_unlock(&inode->i_lock); | 454 | spin_unlock(&inode->i_lock); |
296 | if (!nonblock) | 455 | return NULL; |
297 | ret = nfs_wait_on_request(req); | 456 | } |
298 | else | 457 | |
299 | ret = -EAGAIN; | 458 | /* lock each request in the page group */ |
300 | nfs_release_request(req); | 459 | nfs_page_group_lock(head); |
301 | if (ret != 0) | 460 | subreq = head; |
461 | do { | ||
462 | /* | ||
463 | * Subrequests are always contiguous, non overlapping | ||
464 | * and in order. If not, it's a programming error. | ||
465 | */ | ||
466 | WARN_ON_ONCE(subreq->wb_offset != | ||
467 | (head->wb_offset + total_bytes)); | ||
468 | |||
469 | /* keep track of how many bytes this group covers */ | ||
470 | total_bytes += subreq->wb_bytes; | ||
471 | |||
472 | if (!nfs_lock_request(subreq)) { | ||
473 | /* releases page group bit lock and | ||
474 | * inode spin lock and all references */ | ||
475 | ret = nfs_unroll_locks_and_wait(inode, head, | ||
476 | subreq, nonblock); | ||
477 | |||
478 | if (ret == 0) | ||
479 | goto try_again; | ||
480 | |||
302 | return ERR_PTR(ret); | 481 | return ERR_PTR(ret); |
303 | spin_lock(&inode->i_lock); | 482 | } |
483 | |||
484 | subreq = subreq->wb_this_page; | ||
485 | } while (subreq != head); | ||
486 | |||
487 | /* Now that all requests are locked, make sure they aren't on any list. | ||
488 | * Commit list removal accounting is done after locks are dropped */ | ||
489 | subreq = head; | ||
490 | do { | ||
491 | nfs_list_remove_request(subreq); | ||
492 | subreq = subreq->wb_this_page; | ||
493 | } while (subreq != head); | ||
494 | |||
495 | /* unlink subrequests from head, destroy them later */ | ||
496 | if (head->wb_this_page != head) { | ||
497 | /* destroy list will be terminated by head */ | ||
498 | destroy_list = head->wb_this_page; | ||
499 | head->wb_this_page = head; | ||
500 | |||
501 | /* change head request to cover whole range that | ||
502 | * the former page group covered */ | ||
503 | head->wb_bytes = total_bytes; | ||
304 | } | 504 | } |
505 | |||
506 | /* | ||
507 | * prepare head request to be added to new pgio descriptor | ||
508 | */ | ||
509 | nfs_page_group_clear_bits(head); | ||
510 | |||
511 | /* | ||
512 | * some part of the group was still on the inode list - otherwise | ||
513 | * the group wouldn't be involved in async write. | ||
514 | * grab a reference for the head request, iff it needs one. | ||
515 | */ | ||
516 | if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags)) | ||
517 | kref_get(&head->wb_kref); | ||
518 | |||
519 | nfs_page_group_unlock(head); | ||
520 | |||
521 | /* drop lock to clear_request_commit the head req and clean up | ||
522 | * requests on destroy list */ | ||
305 | spin_unlock(&inode->i_lock); | 523 | spin_unlock(&inode->i_lock); |
306 | return req; | 524 | |
525 | nfs_destroy_unlinked_subrequests(destroy_list, head); | ||
526 | |||
527 | /* clean up commit list state */ | ||
528 | nfs_clear_request_commit(head); | ||
529 | |||
530 | /* still holds ref on head from nfs_page_find_head_request_locked | ||
531 | * and still has lock on head from lock loop */ | ||
532 | return head; | ||
307 | } | 533 | } |
308 | 534 | ||
309 | /* | 535 | /* |
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | |||
316 | struct nfs_page *req; | 542 | struct nfs_page *req; |
317 | int ret = 0; | 543 | int ret = 0; |
318 | 544 | ||
319 | req = nfs_find_and_lock_request(page, nonblock); | 545 | req = nfs_lock_and_join_requests(page, nonblock); |
320 | if (!req) | 546 | if (!req) |
321 | goto out; | 547 | goto out; |
322 | ret = PTR_ERR(req); | 548 | ret = PTR_ERR(req); |
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | |||
448 | set_page_private(req->wb_page, (unsigned long)req); | 674 | set_page_private(req->wb_page, (unsigned long)req); |
449 | } | 675 | } |
450 | nfsi->npages++; | 676 | nfsi->npages++; |
451 | set_bit(PG_INODE_REF, &req->wb_flags); | 677 | /* this a head request for a page group - mark it as having an |
678 | * extra reference so sub groups can follow suit */ | ||
679 | WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); | ||
452 | kref_get(&req->wb_kref); | 680 | kref_get(&req->wb_kref); |
453 | spin_unlock(&inode->i_lock); | 681 | spin_unlock(&inode->i_lock); |
454 | } | 682 | } |
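[Editor's note] The nfs_inode_add_request() hunk above, together with the nfs_inode_remove_request() hunk just below it, pairs an atomic test-and-set/test-and-clear of PG_INODE_REF with the kref get/put, so the "inode holds a reference" state can never be taken or dropped twice. A userspace sketch of that idiom with C11 atomics standing in for test_and_set_bit()/kref_get(); all names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct req {
        atomic_flag inode_ref;          /* plays the role of PG_INODE_REF */
        atomic_int  kref;               /* plays the role of wb_kref */
};

static void take_inode_ref(struct req *r)
{
        /* only the first caller sees the flag clear and bumps the count */
        if (!atomic_flag_test_and_set(&r->inode_ref))
                atomic_fetch_add(&r->kref, 1);
}

int main(void)
{
        struct req r = { ATOMIC_FLAG_INIT, 1 };

        take_inode_ref(&r);
        take_inode_ref(&r);             /* second call is a no-op */
        printf("kref = %d\n", atomic_load(&r.kref));    /* prints 2, not 3 */
        return 0;
}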
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
474 | nfsi->npages--; | 702 | nfsi->npages--; |
475 | spin_unlock(&inode->i_lock); | 703 | spin_unlock(&inode->i_lock); |
476 | } | 704 | } |
477 | nfs_release_request(req); | 705 | |
706 | if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) | ||
707 | nfs_release_request(req); | ||
478 | } | 708 | } |
479 | 709 | ||
480 | static void | 710 | static void |
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
638 | { | 868 | { |
639 | struct nfs_commit_info cinfo; | 869 | struct nfs_commit_info cinfo; |
640 | unsigned long bytes = 0; | 870 | unsigned long bytes = 0; |
641 | bool do_destroy; | ||
642 | 871 | ||
643 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) | 872 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) |
644 | goto out; | 873 | goto out; |
@@ -668,7 +897,6 @@ remove_req: | |||
668 | next: | 897 | next: |
669 | nfs_unlock_request(req); | 898 | nfs_unlock_request(req); |
670 | nfs_end_page_writeback(req); | 899 | nfs_end_page_writeback(req); |
671 | do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags); | ||
672 | nfs_release_request(req); | 900 | nfs_release_request(req); |
673 | } | 901 | } |
674 | out: | 902 | out: |
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, | |||
769 | spin_lock(&inode->i_lock); | 997 | spin_lock(&inode->i_lock); |
770 | 998 | ||
771 | for (;;) { | 999 | for (;;) { |
772 | req = nfs_page_find_request_locked(NFS_I(inode), page); | 1000 | req = nfs_page_find_head_request_locked(NFS_I(inode), page); |
773 | if (req == NULL) | 1001 | if (req == NULL) |
774 | goto out_unlock; | 1002 | goto out_unlock; |
775 | 1003 | ||
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page) | |||
877 | * dropped page. | 1105 | * dropped page. |
878 | */ | 1106 | */ |
879 | do { | 1107 | do { |
880 | req = nfs_page_find_request(page); | 1108 | req = nfs_page_find_head_request(page); |
881 | if (req == NULL) | 1109 | if (req == NULL) |
882 | return 0; | 1110 | return 0; |
883 | l_ctx = req->wb_lock_context; | 1111 | l_ctx = req->wb_lock_context; |
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) | |||
1569 | struct nfs_page *req; | 1797 | struct nfs_page *req; |
1570 | int ret = 0; | 1798 | int ret = 0; |
1571 | 1799 | ||
1572 | for (;;) { | 1800 | wait_on_page_writeback(page); |
1573 | wait_on_page_writeback(page); | 1801 | |
1574 | req = nfs_page_find_request(page); | 1802 | /* blocking call to cancel all requests and join to a single (head) |
1575 | if (req == NULL) | 1803 | * request */ |
1576 | break; | 1804 | req = nfs_lock_and_join_requests(page, false); |
1577 | if (nfs_lock_request(req)) { | 1805 | |
1578 | nfs_clear_request_commit(req); | 1806 | if (IS_ERR(req)) { |
1579 | nfs_inode_remove_request(req); | 1807 | ret = PTR_ERR(req); |
1580 | /* | 1808 | } else if (req) { |
1581 | * In case nfs_inode_remove_request has marked the | 1809 | /* all requests from this page have been cancelled by |
1582 | * page as being dirty | 1810 | * nfs_lock_and_join_requests, so just remove the head |
1583 | */ | 1811 | * request from the inode / page_private pointer and |
1584 | cancel_dirty_page(page, PAGE_CACHE_SIZE); | 1812 | * release it */ |
1585 | nfs_unlock_and_release_request(req); | 1813 | nfs_inode_remove_request(req); |
1586 | break; | 1814 | /* |
1587 | } | 1815 | * In case nfs_inode_remove_request has marked the |
1588 | ret = nfs_wait_on_request(req); | 1816 | * page as being dirty |
1589 | nfs_release_request(req); | 1817 | */ |
1590 | if (ret < 0) | 1818 | cancel_dirty_page(page, PAGE_CACHE_SIZE); |
1591 | break; | 1819 | nfs_unlock_and_release_request(req); |
1592 | } | 1820 | } |
1821 | |||
1593 | return ret; | 1822 | return ret; |
1594 | } | 1823 | } |
1595 | 1824 | ||
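[Editor's note] The rewritten nfs_wb_page_cancel() above depends on the kernel's three-way pointer-return convention: nfs_lock_and_join_requests() can hand back NULL ("no request"), an ERR_PTR-encoded errno, or a live head request. The following small userspace re-implementation of the ERR_PTR/IS_ERR/PTR_ERR helpers is shown only to make that calling pattern concrete; lookup() is a made-up stand-in.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* errnos live in the top page of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int key)
{
        static int obj;                 /* stands in for a real request */

        if (key == 0)
                return NULL;            /* nothing to do */
        if (key < 0)
                return ERR_PTR(-EAGAIN);/* failure, errno encoded in pointer */
        return &obj;
}

int main(void)
{
        void *p = lookup(-1);

        if (!p)
                puts("no request");
        else if (IS_ERR(p))
                printf("error %ld\n", PTR_ERR(p));
        else
                puts("got a request");
        return 0;
}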
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b56b1cc02718..944275c8f56d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -2879,6 +2879,7 @@ again: | |||
2879 | * return the conflicting open: | 2879 | * return the conflicting open: |
2880 | */ | 2880 | */ |
2881 | if (conf->len) { | 2881 | if (conf->len) { |
2882 | kfree(conf->data); | ||
2882 | conf->len = 0; | 2883 | conf->len = 0; |
2883 | conf->data = NULL; | 2884 | conf->data = NULL; |
2884 | goto again; | 2885 | goto again; |
@@ -2891,6 +2892,7 @@ again: | |||
2891 | if (conf->len) { | 2892 | if (conf->len) { |
2892 | p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); | 2893 | p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); |
2893 | p = xdr_encode_opaque(p, conf->data, conf->len); | 2894 | p = xdr_encode_opaque(p, conf->data, conf->len); |
2895 | kfree(conf->data); | ||
2894 | } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ | 2896 | } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ |
2895 | p = xdr_encode_hyper(p, (u64)0); /* clientid */ | 2897 | p = xdr_encode_hyper(p, (u64)0); /* clientid */ |
2896 | *p++ = cpu_to_be32(0); /* length of owner name */ | 2898 | *p++ = cpu_to_be32(0); /* length of owner name */ |
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo | |||
2907 | nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); | 2909 | nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); |
2908 | else if (nfserr == nfserr_denied) | 2910 | else if (nfserr == nfserr_denied) |
2909 | nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); | 2911 | nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); |
2910 | kfree(lock->lk_denied.ld_owner.data); | 2912 | |
2911 | return nfserr; | 2913 | return nfserr; |
2912 | } | 2914 | } |
2913 | 2915 | ||
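[Editor's note] The nfs4xdr.c hunks above move the kfree() of the conflicting-owner buffer into nfsd4_encode_lock_denied() itself, so every path that consumes conf->data also releases it (including the "goto again" retry, which re-fills the buffer), and the unconditional kfree() at the end of nfsd4_encode_lock() goes away. A standalone sketch of the underlying "free where consumed, then poison" pattern; struct and names are illustrative.

#include <stdlib.h>
#include <string.h>

struct conflict {
        char   *data;
        size_t  len;
};

static void consume_and_reset(struct conflict *conf)
{
        free(conf->data);       /* consumed here: release here... */
        conf->data = NULL;      /* ...and poison so a retry path is safe */
        conf->len = 0;
}

int main(void)
{
        struct conflict conf = { strdup("owner"), 5 };

        consume_and_reset(&conf);
        consume_and_reset(&conf);       /* safe: free(NULL) is a no-op */
        return 0;
}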
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 9cd5f63715c0..7f30bdc57d13 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |||
702 | struct dquot *dquot; | 702 | struct dquot *dquot; |
703 | unsigned long freed = 0; | 703 | unsigned long freed = 0; |
704 | 704 | ||
705 | spin_lock(&dq_list_lock); | ||
705 | head = free_dquots.prev; | 706 | head = free_dquots.prev; |
706 | while (head != &free_dquots && sc->nr_to_scan) { | 707 | while (head != &free_dquots && sc->nr_to_scan) { |
707 | dquot = list_entry(head, struct dquot, dq_free); | 708 | dquot = list_entry(head, struct dquot, dq_free); |
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |||
713 | freed++; | 714 | freed++; |
714 | head = free_dquots.prev; | 715 | head = free_dquots.prev; |
715 | } | 716 | } |
717 | spin_unlock(&dq_list_lock); | ||
716 | return freed; | 718 | return freed; |
717 | } | 719 | } |
718 | 720 | ||
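[Editor's note] The dquot.c hunk brackets the free-list walk in dqcache_shrink_scan() with dq_list_lock, since other contexts add and remove dquots from that list concurrently. A userspace sketch of the same shape, with a pthread mutex standing in for the spinlock; structure and names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;

static unsigned long shrink_scan(unsigned long nr_to_scan)
{
        unsigned long freed = 0;

        pthread_mutex_lock(&list_lock); /* the fix: walk under the lock */
        while (free_list && nr_to_scan--) {
                struct node *n = free_list;

                free_list = n->next;
                free(n);
                freed++;
        }
        pthread_mutex_unlock(&list_lock);
        return freed;
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct node *n = malloc(sizeof(*n));

                n->next = free_list;
                free_list = n;
        }
        printf("freed %lu\n", shrink_scan(3));
        return 0;
}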
diff --git a/fs/xattr.c b/fs/xattr.c index 3377dff18404..c69e6d43a0d2 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size) | |||
843 | 843 | ||
844 | /* wrap around? */ | 844 | /* wrap around? */ |
845 | len = sizeof(*new_xattr) + size; | 845 | len = sizeof(*new_xattr) + size; |
846 | if (len <= sizeof(*new_xattr)) | 846 | if (len < sizeof(*new_xattr)) |
847 | return NULL; | 847 | return NULL; |
848 | 848 | ||
849 | new_xattr = kmalloc(len, GFP_KERNEL); | 849 | new_xattr = kmalloc(len, GFP_KERNEL); |
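[Editor's note] The xattr.c check above guards a classic unsigned "header + payload" overflow: if sizeof(*new_xattr) + size wraps around, the sum comes out smaller than the header alone. Relaxing <= to < keeps the wraparound test while permitting a zero-length value (len == sizeof(*new_xattr)). A standalone sketch of the same check; the struct is a made-up stand-in.

#include <stddef.h>
#include <stdio.h>

struct xattr_hdr {
        char   name[8];
        size_t size;
};

static int alloc_len(size_t payload, size_t *out)
{
        size_t len = sizeof(struct xattr_hdr) + payload;

        if (len < sizeof(struct xattr_hdr))
                return -1;              /* sum wrapped around */
        *out = len;
        return 0;
}

int main(void)
{
        size_t len;

        printf("size 0  -> %d\n", alloc_len(0, &len));          /* now ok */
        printf("size -1 -> %d\n", alloc_len((size_t)-1, &len)); /* wraps */
        return 0;
}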
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 96175df211b1..75c3fe5f3d9d 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay( | |||
4298 | } | 4298 | } |
4299 | 4299 | ||
4300 | 4300 | ||
4301 | int | 4301 | static int |
4302 | __xfs_bmapi_allocate( | 4302 | xfs_bmapi_allocate( |
4303 | struct xfs_bmalloca *bma) | 4303 | struct xfs_bmalloca *bma) |
4304 | { | 4304 | { |
4305 | struct xfs_mount *mp = bma->ip->i_mount; | 4305 | struct xfs_mount *mp = bma->ip->i_mount; |
@@ -4578,9 +4578,6 @@ xfs_bmapi_write( | |||
4578 | bma.flist = flist; | 4578 | bma.flist = flist; |
4579 | bma.firstblock = firstblock; | 4579 | bma.firstblock = firstblock; |
4580 | 4580 | ||
4581 | if (flags & XFS_BMAPI_STACK_SWITCH) | ||
4582 | bma.stack_switch = 1; | ||
4583 | |||
4584 | while (bno < end && n < *nmap) { | 4581 | while (bno < end && n < *nmap) { |
4585 | inhole = eof || bma.got.br_startoff > bno; | 4582 | inhole = eof || bma.got.br_startoff > bno; |
4586 | wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); | 4583 | wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); |
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 38ba36e9b2f0..b879ca56a64c 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free | |||
77 | * from written to unwritten, otherwise convert from unwritten to written. | 77 | * from written to unwritten, otherwise convert from unwritten to written. |
78 | */ | 78 | */ |
79 | #define XFS_BMAPI_CONVERT 0x040 | 79 | #define XFS_BMAPI_CONVERT 0x040 |
80 | #define XFS_BMAPI_STACK_SWITCH 0x080 | ||
81 | 80 | ||
82 | #define XFS_BMAPI_FLAGS \ | 81 | #define XFS_BMAPI_FLAGS \ |
83 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ | 82 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ |
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free | |||
86 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ | 85 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ |
87 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ | 86 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ |
88 | { XFS_BMAPI_CONTIG, "CONTIG" }, \ | 87 | { XFS_BMAPI_CONTIG, "CONTIG" }, \ |
89 | { XFS_BMAPI_CONVERT, "CONVERT" }, \ | 88 | { XFS_BMAPI_CONVERT, "CONVERT" } |
90 | { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" } | ||
91 | 89 | ||
92 | 90 | ||
93 | static inline int xfs_bmapi_aflag(int w) | 91 | static inline int xfs_bmapi_aflag(int w) |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 703b3ec1796c..64731ef3324d 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc( | |||
249 | } | 249 | } |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * Stack switching interfaces for allocation | ||
253 | */ | ||
254 | static void | ||
255 | xfs_bmapi_allocate_worker( | ||
256 | struct work_struct *work) | ||
257 | { | ||
258 | struct xfs_bmalloca *args = container_of(work, | ||
259 | struct xfs_bmalloca, work); | ||
260 | unsigned long pflags; | ||
261 | unsigned long new_pflags = PF_FSTRANS; | ||
262 | |||
263 | /* | ||
264 | * we are in a transaction context here, but may also be doing work | ||
265 | * in kswapd context, and hence we may need to inherit that state | ||
266 | * temporarily to ensure that we don't block waiting for memory reclaim | ||
267 | * in any way. | ||
268 | */ | ||
269 | if (args->kswapd) | ||
270 | new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; | ||
271 | |||
272 | current_set_flags_nested(&pflags, new_pflags); | ||
273 | |||
274 | args->result = __xfs_bmapi_allocate(args); | ||
275 | complete(args->done); | ||
276 | |||
277 | current_restore_flags_nested(&pflags, new_pflags); | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * Some allocation requests often come in with little stack to work on. Push | ||
282 | * them off to a worker thread so there is lots of stack to use. Otherwise just | ||
283 | * call directly to avoid the context switch overhead here. | ||
284 | */ | ||
285 | int | ||
286 | xfs_bmapi_allocate( | ||
287 | struct xfs_bmalloca *args) | ||
288 | { | ||
289 | DECLARE_COMPLETION_ONSTACK(done); | ||
290 | |||
291 | if (!args->stack_switch) | ||
292 | return __xfs_bmapi_allocate(args); | ||
293 | |||
294 | |||
295 | args->done = &done; | ||
296 | args->kswapd = current_is_kswapd(); | ||
297 | INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); | ||
298 | queue_work(xfs_alloc_wq, &args->work); | ||
299 | wait_for_completion(&done); | ||
300 | destroy_work_on_stack(&args->work); | ||
301 | return args->result; | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * Check if the endoff is outside the last extent. If so the caller will grow | 252 | * Check if the endoff is outside the last extent. If so the caller will grow |
306 | * the allocation to a stripe unit boundary. All offsets are considered outside | 253 | * the allocation to a stripe unit boundary. All offsets are considered outside |
307 | * the end of file for an empty fork, so 1 is returned in *eof in that case. | 254 | * the end of file for an empty fork, so 1 is returned in *eof in that case. |
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 075f72232a64..2fdb72d2c908 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h | |||
@@ -55,8 +55,6 @@ struct xfs_bmalloca { | |||
55 | bool userdata;/* set if is user data */ | 55 | bool userdata;/* set if is user data */ |
56 | bool aeof; /* allocated space at eof */ | 56 | bool aeof; /* allocated space at eof */ |
57 | bool conv; /* overwriting unwritten extents */ | 57 | bool conv; /* overwriting unwritten extents */ |
58 | bool stack_switch; | ||
59 | bool kswapd; /* allocation in kswapd context */ | ||
60 | int flags; | 58 | int flags; |
61 | struct completion *done; | 59 | struct completion *done; |
62 | struct work_struct work; | 60 | struct work_struct work; |
@@ -66,8 +64,6 @@ struct xfs_bmalloca { | |||
66 | int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, | 64 | int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, |
67 | int *committed); | 65 | int *committed); |
68 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); | 66 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); |
69 | int xfs_bmapi_allocate(struct xfs_bmalloca *args); | ||
70 | int __xfs_bmapi_allocate(struct xfs_bmalloca *args); | ||
71 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, | 67 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, |
72 | int whichfork, int *eof); | 68 | int whichfork, int *eof); |
73 | int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, | 69 | int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, |
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index bf810c6baf2b..cf893bc1e373 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
34 | #include "xfs_trace.h" | 34 | #include "xfs_trace.h" |
35 | #include "xfs_cksum.h" | 35 | #include "xfs_cksum.h" |
36 | #include "xfs_alloc.h" | ||
36 | 37 | ||
37 | /* | 38 | /* |
38 | * Cursor allocation zone. | 39 | * Cursor allocation zone. |
@@ -2323,7 +2324,7 @@ error1: | |||
2323 | * record (to be inserted into parent). | 2324 | * record (to be inserted into parent). |
2324 | */ | 2325 | */ |
2325 | STATIC int /* error */ | 2326 | STATIC int /* error */ |
2326 | xfs_btree_split( | 2327 | __xfs_btree_split( |
2327 | struct xfs_btree_cur *cur, | 2328 | struct xfs_btree_cur *cur, |
2328 | int level, | 2329 | int level, |
2329 | union xfs_btree_ptr *ptrp, | 2330 | union xfs_btree_ptr *ptrp, |
@@ -2503,6 +2504,85 @@ error0: | |||
2503 | return error; | 2504 | return error; |
2504 | } | 2505 | } |
2505 | 2506 | ||
2507 | struct xfs_btree_split_args { | ||
2508 | struct xfs_btree_cur *cur; | ||
2509 | int level; | ||
2510 | union xfs_btree_ptr *ptrp; | ||
2511 | union xfs_btree_key *key; | ||
2512 | struct xfs_btree_cur **curp; | ||
2513 | int *stat; /* success/failure */ | ||
2514 | int result; | ||
2515 | bool kswapd; /* allocation in kswapd context */ | ||
2516 | struct completion *done; | ||
2517 | struct work_struct work; | ||
2518 | }; | ||
2519 | |||
2520 | /* | ||
2521 | * Stack switching interfaces for allocation | ||
2522 | */ | ||
2523 | static void | ||
2524 | xfs_btree_split_worker( | ||
2525 | struct work_struct *work) | ||
2526 | { | ||
2527 | struct xfs_btree_split_args *args = container_of(work, | ||
2528 | struct xfs_btree_split_args, work); | ||
2529 | unsigned long pflags; | ||
2530 | unsigned long new_pflags = PF_FSTRANS; | ||
2531 | |||
2532 | /* | ||
2533 | * we are in a transaction context here, but may also be doing work | ||
2534 | * in kswapd context, and hence we may need to inherit that state | ||
2535 | * temporarily to ensure that we don't block waiting for memory reclaim | ||
2536 | * in any way. | ||
2537 | */ | ||
2538 | if (args->kswapd) | ||
2539 | new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; | ||
2540 | |||
2541 | current_set_flags_nested(&pflags, new_pflags); | ||
2542 | |||
2543 | args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, | ||
2544 | args->key, args->curp, args->stat); | ||
2545 | complete(args->done); | ||
2546 | |||
2547 | current_restore_flags_nested(&pflags, new_pflags); | ||
2548 | } | ||
2549 | |||
2550 | /* | ||
2551 | * BMBT split requests often come in with little stack to work on. Push | ||
2552 | * them off to a worker thread so there is lots of stack to use. For the other | ||
2553 | * btree types, just call directly to avoid the context switch overhead here. | ||
2554 | */ | ||
2555 | STATIC int /* error */ | ||
2556 | xfs_btree_split( | ||
2557 | struct xfs_btree_cur *cur, | ||
2558 | int level, | ||
2559 | union xfs_btree_ptr *ptrp, | ||
2560 | union xfs_btree_key *key, | ||
2561 | struct xfs_btree_cur **curp, | ||
2562 | int *stat) /* success/failure */ | ||
2563 | { | ||
2564 | struct xfs_btree_split_args args; | ||
2565 | DECLARE_COMPLETION_ONSTACK(done); | ||
2566 | |||
2567 | if (cur->bc_btnum != XFS_BTNUM_BMAP) | ||
2568 | return __xfs_btree_split(cur, level, ptrp, key, curp, stat); | ||
2569 | |||
2570 | args.cur = cur; | ||
2571 | args.level = level; | ||
2572 | args.ptrp = ptrp; | ||
2573 | args.key = key; | ||
2574 | args.curp = curp; | ||
2575 | args.stat = stat; | ||
2576 | args.done = &done; | ||
2577 | args.kswapd = current_is_kswapd(); | ||
2578 | INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker); | ||
2579 | queue_work(xfs_alloc_wq, &args.work); | ||
2580 | wait_for_completion(&done); | ||
2581 | destroy_work_on_stack(&args.work); | ||
2582 | return args.result; | ||
2583 | } | ||
2584 | |||
2585 | |||
2506 | /* | 2586 | /* |
2507 | * Copy the old inode root contents into a real block and make the | 2587 | * Copy the old inode root contents into a real block and make the |
2508 | * broot point to it. | 2588 | * broot point to it. |
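[Editor's note] This xfs_btree.c hunk is the heart of the series: the old per-allocation stack switch (removed from xfs_bmap_util.c above) is reimplemented one level down, so only BMBT splits pay for the handoff. The pattern is: pack the arguments into an on-stack struct, queue it to a worker that runs with a fresh, deep stack, and sleep on a completion until the worker deposits the result. A miniature userspace model follows, with a pthread standing in for the kernel workqueue/completion machinery; names and the "work" done are illustrative.

#include <pthread.h>
#include <stdio.h>

struct split_args {
        int level;                      /* inputs packed on the caller's stack */
        int result;                     /* output written by the worker */
};

static void *split_worker(void *arg)
{
        struct split_args *args = arg;

        /* plenty of stack available here for the deep call chain */
        args->result = args->level * 2;
        return NULL;
}

static int do_split(int level)
{
        struct split_args args = { .level = level };
        pthread_t worker;

        pthread_create(&worker, NULL, split_worker, &args);
        pthread_join(&worker, NULL);    /* wait_for_completion() analogue */
        return args.result;
}

int main(void)
{
        printf("result = %d\n", do_split(21));
        return 0;
}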
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 6c5eb4c551e3..6d3ec2b6ee29 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate( | |||
749 | * pointer that the caller gave to us. | 749 | * pointer that the caller gave to us. |
750 | */ | 750 | */ |
751 | error = xfs_bmapi_write(tp, ip, map_start_fsb, | 751 | error = xfs_bmapi_write(tp, ip, map_start_fsb, |
752 | count_fsb, | 752 | count_fsb, 0, |
753 | XFS_BMAPI_STACK_SWITCH, | ||
754 | &first_block, 1, | 753 | &first_block, 1, |
755 | imap, &nimaps, &free_list); | 754 | imap, &nimaps, &free_list); |
756 | if (error) | 755 | if (error) |
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c index c3453b11f563..7703fa6770ff 100644 --- a/fs/xfs/xfs_sb.c +++ b/fs/xfs/xfs_sb.c | |||
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk( | |||
483 | } | 483 | } |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * GQUOTINO and PQUOTINO cannot be used together in versions | 486 | * GQUOTINO and PQUOTINO cannot be used together in versions of |
487 | * of superblock that do not have pquotino. from->sb_flags | 487 | * the superblock that do not have pquotino. from->sb_flags tells us which |
488 | * tells us which quota is active and should be copied to | 488 | * quota is active and should be copied to disk. If neither is active, |
489 | * disk. | 489 | * make sure we write NULLFSINO to the sb_gquotino field as a quota |
490 | * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature | ||
491 | * bit is set. | ||
492 | * | ||
493 | * Note that we don't need to handle the sb_uquotino or sb_pquotino here | ||
494 | * as they do not require any translation. Hence the main sb field loop | ||
495 | * will write them appropriately from the in-core superblock. | ||
490 | */ | 496 | */ |
491 | if ((*fields & XFS_SB_GQUOTINO) && | 497 | if ((*fields & XFS_SB_GQUOTINO) && |
492 | (from->sb_qflags & XFS_GQUOTA_ACCT)) | 498 | (from->sb_qflags & XFS_GQUOTA_ACCT)) |
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk( | |||
494 | else if ((*fields & XFS_SB_PQUOTINO) && | 500 | else if ((*fields & XFS_SB_PQUOTINO) && |
495 | (from->sb_qflags & XFS_PQUOTA_ACCT)) | 501 | (from->sb_qflags & XFS_PQUOTA_ACCT)) |
496 | to->sb_gquotino = cpu_to_be64(from->sb_pquotino); | 502 | to->sb_gquotino = cpu_to_be64(from->sb_pquotino); |
503 | else { | ||
504 | /* | ||
505 | * We can't rely on just the fields being logged to tell us | ||
506 | * that it is safe to write NULLFSINO - we should only do that | ||
507 | * if quotas are not actually enabled. Hence only write | ||
508 | * NULLFSINO if both in-core quota inodes are NULL. | ||
509 | */ | ||
510 | if (from->sb_gquotino == NULLFSINO && | ||
511 | from->sb_pquotino == NULLFSINO) | ||
512 | to->sb_gquotino = cpu_to_be64(NULLFSINO); | ||
513 | } | ||
497 | 514 | ||
498 | *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); | 515 | *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); |
499 | } | 516 | } |
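[Editor's note] A condensed restatement of the decision table the xfs_sb.c hunk implements for pre-pquotino superblocks, which have only one on-disk slot (sb_gquotino): write the active quota inode when group or project accounting is on, and write NULLFSINO only when both in-core inodes are already NULL, since a raw 0 is an invalid quota inode once the quota version bit is set. This sketch drops the *fields logging checks for brevity; values and names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NULLFSINO       ((uint64_t)-1)

/* what the single on-disk sb_gquotino slot should hold */
static uint64_t disk_gquotino(int gquota_acct, int pquota_acct,
                              uint64_t gquotino, uint64_t pquotino)
{
        if (gquota_acct)
                return gquotino;        /* group quota owns the slot */
        if (pquota_acct)
                return pquotino;        /* project quota owns the slot */
        if (gquotino == NULLFSINO && pquotino == NULLFSINO)
                return NULLFSINO;       /* quotas truly off: safe to NULL */
        return gquotino;                /* otherwise leave the slot as-is */
}

int main(void)
{
        printf("%llx\n", (unsigned long long)
               disk_gquotino(0, 0, NULLFSINO, NULLFSINO));
        return 0;
}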