Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                  |   7
-rw-r--r--  fs/autofs4/inode.c        |   2
-rw-r--r--  fs/btrfs/compression.c    |   2
-rw-r--r--  fs/btrfs/dev-replace.c    |   5
-rw-r--r--  fs/btrfs/disk-io.c        |   5
-rw-r--r--  fs/btrfs/extent-tree.c    |   5
-rw-r--r--  fs/btrfs/ioctl.c          |  37
-rw-r--r--  fs/btrfs/ordered-data.c   |  11
-rw-r--r--  fs/btrfs/print-tree.c     |   9
-rw-r--r--  fs/btrfs/raid56.c         |   5
-rw-r--r--  fs/btrfs/super.c          |   7
-rw-r--r--  fs/btrfs/sysfs.c          |  32
-rw-r--r--  fs/btrfs/sysfs.h          |   4
-rw-r--r--  fs/btrfs/transaction.c    |  12
-rw-r--r--  fs/btrfs/volumes.c        |  32
-rw-r--r--  fs/btrfs/zlib.c           |   2
-rw-r--r--  fs/ext4/balloc.c          |  16
-rw-r--r--  fs/ext4/extents_status.c  |   4
-rw-r--r--  fs/ext4/ialloc.c          |  37
-rw-r--r--  fs/ext4/indirect.c        |  24
-rw-r--r--  fs/ext4/mballoc.c         |  12
-rw-r--r--  fs/ext4/super.c           |  60
-rw-r--r--  fs/f2fs/data.c            |  23
-rw-r--r--  fs/f2fs/dir.c             |   2
-rw-r--r--  fs/f2fs/f2fs.h            |   6
-rw-r--r--  fs/f2fs/file.c            |  12
-rw-r--r--  fs/f2fs/inode.c           |   1
-rw-r--r--  fs/f2fs/namei.c           |  13
-rw-r--r--  fs/f2fs/node.c            |   2
-rw-r--r--  fs/f2fs/segment.c         |   5
-rw-r--r--  fs/f2fs/super.c           |   4
-rw-r--r--  fs/fuse/dev.c             |  51
-rw-r--r--  fs/fuse/dir.c             |  41
-rw-r--r--  fs/fuse/file.c            |   8
-rw-r--r--  fs/fuse/inode.c           |  22
-rw-r--r--  fs/gfs2/file.c            |   4
-rw-r--r--  fs/gfs2/glock.c           |  14
-rw-r--r--  fs/gfs2/glops.c           |   4
-rw-r--r--  fs/gfs2/lock_dlm.c        |   4
-rw-r--r--  fs/gfs2/rgrp.c            |   4
-rw-r--r--  fs/jbd2/transaction.c     |   5
-rw-r--r--  fs/kernfs/file.c          |  69
-rw-r--r--  fs/kernfs/mount.c         |  30
-rw-r--r--  fs/mbcache.c              |   3
-rw-r--r--  fs/nfs/direct.c           |   2
-rw-r--r--  fs/nfs/internal.h         |   1
-rw-r--r--  fs/nfs/nfs3acl.c          |  43
-rw-r--r--  fs/nfs/nfs3proc.c         |   4
-rw-r--r--  fs/nfs/pagelist.c         |  20
-rw-r--r--  fs/nfs/write.c            | 335
-rw-r--r--  fs/nfsd/nfs4proc.c        |   9
-rw-r--r--  fs/nfsd/nfs4xdr.c         |  17
-rw-r--r--  fs/proc/stat.c            |  22
-rw-r--r--  fs/quota/dquot.c          |   2
-rw-r--r--  fs/seq_file.c             |  30
-rw-r--r--  fs/xfs/xfs_bmap.c         |   7
-rw-r--r--  fs/xfs/xfs_bmap.h         |   4
-rw-r--r--  fs/xfs/xfs_bmap_util.c    |  53
-rw-r--r--  fs/xfs/xfs_bmap_util.h    |   4
-rw-r--r--  fs/xfs/xfs_btree.c        |  82
-rw-r--r--  fs/xfs/xfs_iomap.c        |   3
-rw-r--r--  fs/xfs/xfs_sb.c           |  25
62 files changed, 939 insertions, 381 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 955947ef3e02..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
         struct kioctx_cpu *kcpu;
+        unsigned long flags;
 
         preempt_disable();
         kcpu = this_cpu_ptr(ctx->cpu);
 
+        local_irq_save(flags);
         kcpu->reqs_available += nr;
+
         while (kcpu->reqs_available >= ctx->req_batch * 2) {
                 kcpu->reqs_available -= ctx->req_batch;
                 atomic_add(ctx->req_batch, &ctx->reqs_available);
         }
 
+        local_irq_restore(flags);
         preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
         struct kioctx_cpu *kcpu;
         bool ret = false;
+        unsigned long flags;
 
         preempt_disable();
         kcpu = this_cpu_ptr(ctx->cpu);
 
+        local_irq_save(flags);
         if (!kcpu->reqs_available) {
                 int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
         ret = true;
         kcpu->reqs_available--;
 out:
+        local_irq_restore(flags);
         preempt_enable();
         return ret;
 }
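The aio.c hunks above close a race: put_reqs_available() can be called from interrupt context, so pinning the CPU with preempt_disable() alone does not serialize the plain read-modify-write of kcpu->reqs_available against an interrupt arriving on the same CPU; wrapping it in local_irq_save()/local_irq_restore() does. A rough userspace analogue of the same idea, with a signal handler standing in for the interrupt and sigprocmask() for local_irq_save() (a sketch with illustrative names, not code from the patch):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t counter;

static void handler(int sig)
{
        (void)sig;
        counter++;                /* the "interrupt" path also updates it */
}

int main(void)
{
        struct sigaction sa = { .sa_handler = handler };
        sigset_t block, old;
        int i;

        sigaction(SIGALRM, &sa, NULL);
        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        ualarm(1000, 1000);       /* fire an "interrupt" every millisecond */

        for (i = 0; i < 1000000; i++) {
                sigprocmask(SIG_BLOCK, &block, &old);   /* local_irq_save()    */
                counter++;        /* read-modify-write is now safe            */
                sigprocmask(SIG_SETMASK, &old, NULL);   /* local_irq_restore() */
        }
        printf("counter = %d\n", counter);
        return 0;
}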
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d7bd395ab586..1c55388ae633 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 	int pipefd;
 	struct autofs_sb_info *sbi;
 	struct autofs_info *ino;
-	int pgrp;
+	int pgrp = 0;
 	bool pgrp_set = false;
 	int ret = -EINVAL;
 
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 92371c414228..1daea0b47187 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -821,7 +821,7 @@ static void free_workspace(int type, struct list_head *workspace)
 
 	spin_lock(workspace_lock);
 	if (*num_workspace < num_online_cpus()) {
-		list_add_tail(workspace, idle_workspace);
+		list_add(workspace, idle_workspace);
 		(*num_workspace)++;
 		spin_unlock(workspace_lock);
 		goto wake;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 2af6e66fe788..eea26e1b2fda 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -36,6 +36,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret);
@@ -562,6 +563,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	fs_info->fs_devices->latest_bdev = tgt_device->bdev;
 	list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
+	/* replace the sysfs entry */
+	btrfs_kobj_rm_device(fs_info, src_device);
+	btrfs_kobj_add_device(fs_info, tgt_device);
+
 	btrfs_rm_dev_replace_blocked(fs_info);
 
 	btrfs_rm_dev_replace_srcdev(fs_info, src_device);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8bb4aa19898f..08e65e9cf2aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -369,7 +369,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state, GFP_NOFS);
-	btrfs_tree_read_unlock_blocking(eb);
+	if (need_lock)
+		btrfs_tree_read_unlock_blocking(eb);
 	return ret;
 }
 
@@ -2904,7 +2905,9 @@ retry_root_backup:
 	if (ret)
 		goto fail_qgroup;
 
+	mutex_lock(&fs_info->cleaner_mutex);
 	ret = btrfs_recover_relocation(tree_root);
+	mutex_unlock(&fs_info->cleaner_mutex);
 	if (ret < 0) {
 		printk(KERN_WARNING
 		       "BTRFS: failed to recover relocation\n");
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 99c253918208..813537f362f9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5678,7 +5678,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	struct btrfs_caching_control *next;
 	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_block_group_cache *cache;
-	struct btrfs_space_info *space_info;
 
 	down_write(&fs_info->commit_root_sem);
 
@@ -5701,9 +5700,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 
 	up_write(&fs_info->commit_root_sem);
 
-	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
-		percpu_counter_set(&space_info->total_bytes_pinned, 0);
-
 	update_global_block_rsv(fs_info);
 }
 
@@ -5741,6 +5737,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 	spin_lock(&cache->lock);
 	cache->pinned -= len;
 	space_info->bytes_pinned -= len;
+	percpu_counter_add(&space_info->total_bytes_pinned, -len);
 	if (cache->ro) {
 		space_info->bytes_readonly += len;
 		readonly = true;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0d321c23069a..47aceb494d1d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -136,19 +136,22 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
 void btrfs_update_iflags(struct inode *inode)
 {
 	struct btrfs_inode *ip = BTRFS_I(inode);
-
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	unsigned int new_fl = 0;
 
 	if (ip->flags & BTRFS_INODE_SYNC)
-		inode->i_flags |= S_SYNC;
+		new_fl |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_IMMUTABLE)
-		inode->i_flags |= S_IMMUTABLE;
+		new_fl |= S_IMMUTABLE;
 	if (ip->flags & BTRFS_INODE_APPEND)
-		inode->i_flags |= S_APPEND;
+		new_fl |= S_APPEND;
 	if (ip->flags & BTRFS_INODE_NOATIME)
-		inode->i_flags |= S_NOATIME;
+		new_fl |= S_NOATIME;
 	if (ip->flags & BTRFS_INODE_DIRSYNC)
-		inode->i_flags |= S_DIRSYNC;
+		new_fl |= S_DIRSYNC;
+
+	set_mask_bits(&inode->i_flags,
+		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
+		      new_fl);
 }
 
 /*
@@ -3139,7 +3142,6 @@ out:
 static void clone_update_extent_map(struct inode *inode,
 				    const struct btrfs_trans_handle *trans,
 				    const struct btrfs_path *path,
-				    struct btrfs_file_extent_item *fi,
 				    const u64 hole_offset,
 				    const u64 hole_len)
 {
@@ -3154,7 +3156,11 @@ static void clone_update_extent_map(struct inode *inode,
 		return;
 	}
 
-	if (fi) {
+	if (path) {
+		struct btrfs_file_extent_item *fi;
+
+		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				    struct btrfs_file_extent_item);
 		btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
 		em->generation = -1;
 		if (btrfs_file_extent_type(path->nodes[0], fi) ==
@@ -3508,18 +3514,15 @@ process_slot:
 					   btrfs_item_ptr_offset(leaf, slot),
 					   size);
 			inode_add_bytes(inode, datal);
-			extent = btrfs_item_ptr(leaf, slot,
-					struct btrfs_file_extent_item);
 		}
 
 		/* If we have an implicit hole (NO_HOLES feature). */
 		if (drop_start < new_key.offset)
 			clone_update_extent_map(inode, trans,
-					path, NULL, drop_start,
+					NULL, drop_start,
 					new_key.offset - drop_start);
 
-		clone_update_extent_map(inode, trans, path,
-					extent, 0, 0);
+		clone_update_extent_map(inode, trans, path, 0, 0);
 
 		btrfs_mark_buffer_dirty(leaf);
 		btrfs_release_path(path);
@@ -3562,12 +3565,10 @@ process_slot:
 			btrfs_end_transaction(trans, root);
 			goto out;
 		}
+		clone_update_extent_map(inode, trans, NULL, last_dest_end,
+					destoff + len - last_dest_end);
 		ret = clone_finish_inode_update(trans, inode, destoff + len,
 						destoff, olen);
-		if (ret)
-			goto out;
-		clone_update_extent_map(inode, trans, path, NULL, last_dest_end,
-					destoff + len - last_dest_end);
 	}
 
 out:
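The btrfs_update_iflags() hunk above replaces a clear-then-set sequence that briefly exposed inode->i_flags with all five bits cleared; set_mask_bits() swaps the masked bits in a single atomic compare-and-swap loop, so concurrent updaters of other i_flags bits can never observe or clobber the intermediate state. A minimal userspace sketch of what such a helper does, assuming GCC/Clang __atomic builtins (illustrative, not the kernel's implementation):

#include <stdio.h>

static unsigned long set_mask_bits(unsigned long *addr,
                                   unsigned long mask, unsigned long bits)
{
        unsigned long old = __atomic_load_n(addr, __ATOMIC_RELAXED);
        unsigned long new;

        do {
                new = (old & ~mask) | bits;     /* replace only the masked bits */
        } while (!__atomic_compare_exchange_n(addr, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));
        return old;
}

int main(void)
{
        unsigned long flags = 0xf0;

        set_mask_bits(&flags, 0xff, 0x0a);
        printf("flags = %#lx\n", flags);        /* prints 0xa */
        return 0;
}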
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
 					 log_list);
 		list_del_init(&ordered->log_list);
 		spin_unlock_irq(&log->log_extents_lock[index]);
+
+		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+			struct inode *inode = ordered->inode;
+			u64 start = ordered->file_offset;
+			u64 end = ordered->file_offset + ordered->len - 1;
+
+			WARN_ON(!inode);
+			filemap_fdatawrite_range(inode->i_mapping, start, end);
+		}
 		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
 						   &ordered->flags));
+
 		btrfs_put_ordered_extent(ordered);
 		spin_lock_irq(&log->log_extents_lock[index]);
 	}
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 6efd70d3b64f..9626b4ad3b9a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -54,7 +54,7 @@ static void print_extent_data_ref(struct extent_buffer *eb,
 	       btrfs_extent_data_ref_count(eb, ref));
 }
 
-static void print_extent_item(struct extent_buffer *eb, int slot)
+static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 {
 	struct btrfs_extent_item *ei;
 	struct btrfs_extent_inline_ref *iref;
@@ -63,7 +63,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
 	struct btrfs_disk_key key;
 	unsigned long end;
 	unsigned long ptr;
-	int type;
 	u32 item_size = btrfs_item_size_nr(eb, slot);
 	u64 flags;
 	u64 offset;
@@ -88,7 +87,8 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
 	       btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
 	       flags);
 
-	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+	if ((type == BTRFS_EXTENT_ITEM_KEY) &&
+	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		struct btrfs_tree_block_info *info;
 		info = (struct btrfs_tree_block_info *)(ei + 1);
 		btrfs_tree_block_key(eb, info, &key);
@@ -223,7 +223,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 			       btrfs_disk_root_refs(l, ri));
 			break;
 		case BTRFS_EXTENT_ITEM_KEY:
-			print_extent_item(l, i);
+		case BTRFS_METADATA_ITEM_KEY:
+			print_extent_item(l, i, type);
 			break;
 		case BTRFS_TREE_BLOCK_REF_KEY:
 			printk(KERN_INFO "\t\ttree block backref\n");
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 4055291a523e..4a88f073fdd7 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1956,9 +1956,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 	 * pages are going to be uptodate.
 	 */
 	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
-		if (rbio->faila == stripe ||
-		    rbio->failb == stripe)
+		if (rbio->faila == stripe || rbio->failb == stripe) {
+			atomic_inc(&rbio->bbio->error);
 			continue;
+		}
 
 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
 			struct page *p;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4662d92a4b73..8e16bca69c56 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -522,9 +522,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 		case Opt_ssd_spread:
 			btrfs_set_and_info(root, SSD_SPREAD,
 					   "use spread ssd allocation scheme");
+			btrfs_set_opt(info->mount_opt, SSD);
 			break;
 		case Opt_nossd:
-			btrfs_clear_and_info(root, NOSSD,
+			btrfs_set_and_info(root, NOSSD,
 					     "not using ssd allocation scheme");
 			btrfs_clear_opt(info->mount_opt, SSD);
 			break;
@@ -1467,7 +1468,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 			goto restore;
 
 		/* recover relocation */
+		mutex_lock(&fs_info->cleaner_mutex);
 		ret = btrfs_recover_relocation(root);
+		mutex_unlock(&fs_info->cleaner_mutex);
 		if (ret)
 			goto restore;
 
@@ -1808,6 +1811,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 	list_for_each_entry(dev, head, dev_list) {
 		if (dev->missing)
 			continue;
+		if (!dev->name)
+			continue;
 		if (!first_dev || dev->devid < first_dev->devid)
 			first_dev = dev;
 	}
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index df39458f1487..78699364f537 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -605,14 +605,37 @@ static void init_feature_attrs(void)
 	}
 }
 
-static int add_device_membership(struct btrfs_fs_info *fs_info)
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device)
+{
+	struct hd_struct *disk;
+	struct kobject *disk_kobj;
+
+	if (!fs_info->device_dir_kobj)
+		return -EINVAL;
+
+	if (one_device) {
+		disk = one_device->bdev->bd_part;
+		disk_kobj = &part_to_dev(disk)->kobj;
+
+		sysfs_remove_link(fs_info->device_dir_kobj,
+				  disk_kobj->name);
+	}
+
+	return 0;
+}
+
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device)
 {
 	int error = 0;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *dev;
 
-	fs_info->device_dir_kobj = kobject_create_and_add("devices",
+	if (!fs_info->device_dir_kobj)
+		fs_info->device_dir_kobj = kobject_create_and_add("devices",
 						&fs_info->super_kobj);
+
 	if (!fs_info->device_dir_kobj)
 		return -ENOMEM;
 
@@ -623,6 +646,9 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
 		if (!dev->bdev)
 			continue;
 
+		if (one_device && one_device != dev)
+			continue;
+
 		disk = dev->bdev->bd_part;
 		disk_kobj = &part_to_dev(disk)->kobj;
 
@@ -666,7 +692,7 @@ int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
 	if (error)
 		goto failure;
 
-	error = add_device_membership(fs_info);
+	error = btrfs_kobj_add_device(fs_info, NULL);
 	if (error)
 		goto failure;
 
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index 9ab576318a84..ac46df37504c 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -66,4 +66,8 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
 extern struct kobj_type btrfs_raid_ktype;
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device);
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device);
 #endif /* _BTRFS_SYSFS_H_ */
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 511839c04f11..5f379affdf23 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -386,11 +386,13 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
 	bool reloc_reserved = false;
 	int ret;
 
+	/* Send isn't supposed to start transactions. */
+	ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);
+
 	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 		return ERR_PTR(-EROFS);
 
-	if (current->journal_info &&
-	    current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
+	if (current->journal_info) {
 		WARN_ON(type & TRANS_EXTWRITERS);
 		h = current->journal_info;
 		h->use_count++;
@@ -491,6 +493,7 @@ again:
 	smp_mb();
 	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
 	    may_wait_transaction(root, type)) {
+		current->journal_info = h;
 		btrfs_commit_transaction(h, root);
 		goto again;
 	}
@@ -1615,11 +1618,6 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
 	int ret;
 
 	ret = btrfs_run_delayed_items(trans, root);
-	/*
-	 * running the delayed items may have added new refs. account
-	 * them now so that they hinder processing of more delayed refs
-	 * as little as possible.
-	 */
 	if (ret)
 		return ret;
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c83b24251e53..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -40,6 +40,7 @@
 #include "rcu-string.h"
 #include "math.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
@@ -554,12 +555,14 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 	 * This is ok to do without rcu read locked because we hold the
 	 * uuid mutex so nothing we touch in here is going to disappear.
 	 */
-	name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
-	if (!name) {
-		kfree(device);
-		goto error;
+	if (orig_dev->name) {
+		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+		if (!name) {
+			kfree(device);
+			goto error;
+		}
+		rcu_assign_pointer(device->name, name);
 	}
-	rcu_assign_pointer(device->name, name);
 
 	list_add(&device->dev_list, &fs_devices->devices);
 	device->fs_devices = fs_devices;
@@ -1677,8 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-	if (device->bdev)
+	if (device->bdev) {
 		device->fs_devices->open_devices--;
+		/* remove sysfs entry */
+		btrfs_kobj_rm_device(root->fs_info, device);
+	}
 
 	call_rcu(&device->rcu, free_device);
 
@@ -2143,9 +2149,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
 	btrfs_set_super_num_devices(root->fs_info->super_copy,
 				    total_bytes + 1);
+
+	/* add sysfs device entry */
+	btrfs_kobj_add_device(root->fs_info, device);
+
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
 	if (seeding_dev) {
+		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
 		ret = init_first_rw_device(trans, root, device);
 		if (ret) {
 			btrfs_abort_transaction(trans, root, ret);
@@ -2156,6 +2167,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 			btrfs_abort_transaction(trans, root, ret);
 			goto error_trans;
 		}
+
+		/* Sprouting would change fsid of the mounted root,
+		 * so rename the fsid on the sysfs
+		 */
+		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
+			 root->fs_info->fsid);
+		if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
+			goto error_trans;
 	} else {
 		ret = btrfs_add_device(trans, root, device);
 		if (ret) {
@@ -2205,6 +2224,7 @@ error_trans:
 	unlock_chunks(root);
 	btrfs_end_transaction(trans, root);
 	rcu_string_free(device->name);
+	btrfs_kobj_rm_device(root->fs_info, device);
 	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 4f196314c0c1..b67d8fc81277 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -136,7 +136,7 @@ static int zlib_compress_pages(struct list_head *ws,
 		if (workspace->def_strm.total_in > 8192 &&
 		    workspace->def_strm.total_in <
 		    workspace->def_strm.total_out) {
-			ret = -EIO;
+			ret = -E2BIG;
 			goto out;
 		}
 		/* we need another page for writing out. Test this
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 0762d143e252..fca382037ddd 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -194,7 +194,16 @@ static void ext4_init_block_bitmap(struct super_block *sb,
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
@@ -359,6 +368,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 {
 	ext4_fsblk_t blk;
 	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	if (buffer_verified(bh))
 		return;
@@ -369,6 +379,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
 			   block_group, blk);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
@@ -376,6 +389,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
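All three balloc.c hunks above repeat one idiom: when a group is found corrupt, its free count is subtracted from the filesystem-wide counter only if the corrupt bit is not already set, so repeated detections of the same bad group discount it exactly once. A toy single-threaded sketch of that "discount once" guard (hypothetical names, not ext4 code):

#include <stdbool.h>
#include <stdio.h>

struct group {
        long free_clusters;
        bool corrupt;           /* stands in for the BBITMAP_CORRUPT bit */
};

static long global_free = 1000; /* stands in for sbi->s_freeclusters_counter */

static void mark_corrupt(struct group *grp)
{
        if (!grp->corrupt)                         /* EXT4_MB_GRP_BBITMAP_CORRUPT() test */
                global_free -= grp->free_clusters; /* percpu_counter_sub() */
        grp->corrupt = true;                       /* set_bit(...) */
}

int main(void)
{
        struct group g = { .free_clusters = 100 };

        mark_corrupt(&g);
        mark_corrupt(&g);       /* second detection must not double-subtract */
        printf("global_free = %ld\n", global_free);     /* prints 900 */
        return 0;
}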
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 3f5c188953a4..0b7e28e7eaa4 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -966,10 +966,10 @@ retry:
 			continue;
 		}
 
-		if (ei->i_es_lru_nr == 0 || ei == locked_ei)
+		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+		    !write_trylock(&ei->i_es_lock))
 			continue;
 
-		write_lock(&ei->i_es_lock);
 		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
 		if (ei->i_es_lru_nr == 0)
 			list_del_init(&ei->i_es_lru);
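The extents_status.c hunk folds lock acquisition into the skip condition: a shrinker running under memory pressure should not sleep on a contended i_es_lock, so write_trylock() lets it skip busy inodes and move on. A small sketch of the same shape with a pthread rwlock standing in for the kernel rwlock (illustrative names; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct item {
        pthread_rwlock_t lock;
        int reclaimable;
};

static int try_reclaim(struct item *it)
{
        if (it->reclaimable == 0 ||
            pthread_rwlock_trywrlock(&it->lock) != 0)
                return 0;               /* empty or contended: skip it */
        it->reclaimable = 0;            /* the actual reclaim work */
        pthread_rwlock_unlock(&it->lock);
        return 1;
}

int main(void)
{
        struct item it = { PTHREAD_RWLOCK_INITIALIZER, 5 };

        printf("reclaimed: %d\n", try_reclaim(&it));
        return 0;
}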
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 0ee59a6644e2..5b87fc36aab8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -71,6 +71,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 			       struct ext4_group_desc *gdp)
 {
 	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If checksum is bad mark all blocks and inodes use to prevent
@@ -78,7 +79,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return 0;
 	}
@@ -116,6 +126,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	struct buffer_head *bh = NULL;
 	ext4_fsblk_t bitmap_blk;
 	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	desc = ext4_get_group_desc(sb, block_group, NULL);
 	if (!desc)
@@ -185,6 +196,12 @@ verify:
 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 			   "inode_bitmap = %llu", block_group, bitmap_blk);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, desc);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return NULL;
 	}
@@ -321,6 +338,12 @@ out:
 			fatal = err;
 	} else {
 		ext4_error(sb, "bit already cleared for inode %lu", ino);
+		if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 	}
 
@@ -851,6 +874,13 @@ got:
 		goto out;
 	}
 
+	BUFFER_TRACE(group_desc_bh, "get_write_access");
+	err = ext4_journal_get_write_access(handle, group_desc_bh);
+	if (err) {
+		ext4_std_error(sb, err);
+		goto out;
+	}
+
 	/* We may have to initialize the block bitmap if it isn't already */
 	if (ext4_has_group_desc_csum(sb) &&
 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -887,13 +917,6 @@ got:
 		}
 	}
 
-	BUFFER_TRACE(group_desc_bh, "get_write_access");
-	err = ext4_journal_get_write_access(handle, group_desc_bh);
-	if (err) {
-		ext4_std_error(sb, err);
-		goto out;
-	}
-
 	/* Update the relevant bg descriptor fields */
 	if (ext4_has_group_desc_csum(sb)) {
 		int free;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 8a57e9fcd1b9..fd69da194826 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 	return 0;
 failed:
 	for (; i >= 0; i--) {
-		if (i != indirect_blks && branch[i].bh)
+		/*
+		 * We want to ext4_forget() only freshly allocated indirect
+		 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
+		 * buffer at branch[0].bh is indirect block / inode already
+		 * existing before ext4_alloc_branch() was called.
+		 */
+		if (i > 0 && i != indirect_blks && branch[i].bh)
 			ext4_forget(handle, 1, inode, branch[i].bh,
 				    branch[i].bh->b_blocknr);
 		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
@@ -1310,16 +1316,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
 		blk = *i_data;
 		if (level > 0) {
 			ext4_lblk_t first2;
+			ext4_lblk_t count2;
+
 			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
 			if (!bh) {
 				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
 						       "Read failure");
 				return -EIO;
 			}
-			first2 = (first > offset) ? first - offset : 0;
+			if (first > offset) {
+				first2 = first - offset;
+				count2 = count;
+			} else {
+				first2 = 0;
+				count2 = count - (offset - first);
+			}
 			ret = free_hole_blocks(handle, inode, bh,
 					       (__le32 *)bh->b_data, level - 1,
-					       first2, count - offset,
+					       first2, count2,
 					       inode->i_sb->s_blocksize >> 2);
 			if (ret) {
 				brelse(bh);
@@ -1329,8 +1343,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
 		if (level == 0 ||
 		    (bh && all_zeroes((__le32 *)bh->b_data,
 				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
-			*i_data = 0;
+			ext4_free_data(handle, inode, parent_bh,
+				       i_data, i_data + 1);
 		}
 		brelse(bh);
 		bh = NULL;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 59e31622cc6e..2dcb936be90e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -722,6 +722,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
 			  void *buddy, void *bitmap, ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_grpblk_t i = 0;
 	ext4_grpblk_t first;
@@ -751,14 +752,17 @@
 
 	if (free != grp->bb_free) {
 		ext4_grp_locked_error(sb, group, 0, 0,
-				      "%u clusters in bitmap, %u in gd; "
-				      "block bitmap corrupt.",
+				      "block bitmap and bg descriptor "
+				      "inconsistent: %u vs %u free clusters",
 				      free, grp->bb_free);
 		/*
 		 * If we intend to continue, we consider group descriptor
 		 * corrupt and update bb_free using bitmap value
 		 */
 		grp->bb_free = free;
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 	}
 	mb_set_largest_free_order(sb, grp);
@@ -1431,6 +1435,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 	right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
 
 	if (unlikely(block != -1)) {
+		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		ext4_fsblk_t blocknr;
 
 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
@@ -1441,6 +1446,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 				      "freeing already freed block "
 				      "(bit %u); block bitmap corrupt.",
 				      block);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   e4b->bd_info->bb_free);
 		/* Mark the block group as corrupt. */
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
 			&e4b->bd_info->bb_state);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b9b9aabfb4d2..6df7bc611dbd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
 		sbi->s_commit_interval = HZ * arg;
 	} else if (token == Opt_max_batch_time) {
-		if (arg == 0)
-			arg = EXT4_DEF_MAX_BATCH_TIME;
 		sbi->s_max_batch_time = arg;
 	} else if (token == Opt_min_batch_time) {
 		sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
 	es = sbi->s_es;
 
 	if (es->s_error_count)
-		ext4_msg(sb, KERN_NOTICE, "error count: %u",
+		/* fsck newer than v1.41.13 is needed to clean this condition. */
+		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
 			 le32_to_cpu(es->s_error_count));
 	if (es->s_first_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
 		       (int) sizeof(es->s_first_error_func),
 		       es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
 		printk("\n");
 	}
 	if (es->s_last_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
 		       (int) sizeof(es->s_last_error_func),
 		       es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
-
-	/*
-	 * set up enough so that it can read an inode,
-	 * and create new inode for buddy allocator
-	 */
-	sbi->s_gdb_count = db_count;
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-		sb->s_op = &ext4_sops;
-	else
-		sb->s_op = &ext4_nojournal_sops;
-
-	ext4_ext_init(sb);
-	err = ext4_mb_init(sb);
-	if (err) {
-		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-			 err);
-		goto failed_mount2;
-	}
-
 	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-		goto failed_mount2a;
+		goto failed_mount2;
 	}
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
 		if (!ext4_fill_flex_info(sb)) {
 			ext4_msg(sb, KERN_ERR,
 				 "unable to initialize "
 				 "flex_bg meta info!");
-			goto failed_mount2a;
+			goto failed_mount2;
 		}
 
+	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_stripe = ext4_get_stripe_size(sbi);
 	sbi->s_extent_max_zeroout_kb = 32;
 
+	/*
+	 * set up enough so that it can read an inode
+	 */
+	if (!test_opt(sb, NOLOAD) &&
+	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+		sb->s_op = &ext4_sops;
+	else
+		sb->s_op = &ext4_nojournal_sops;
 	sb->s_export_op = &ext4_export_ops;
 	sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
 			 "reserved pool", ext4_calculate_resv_clusters(sb));
-		goto failed_mount5;
+		goto failed_mount4a;
 	}
 
 	err = ext4_setup_system_zone(sb);
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to initialize system "
 			 "zone (%d)", err);
+		goto failed_mount4a;
+	}
+
+	ext4_ext_init(sb);
+	err = ext4_mb_init(sb);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+			 err);
 		goto failed_mount5;
 	}
 
@@ -4218,8 +4214,11 @@ failed_mount8:
 failed_mount7:
 	ext4_unregister_li_request(sb);
 failed_mount6:
-	ext4_release_system_zone(sb);
+	ext4_mb_release(sb);
 failed_mount5:
+	ext4_ext_release(sb);
+	ext4_release_system_zone(sb);
+failed_mount4a:
 	dput(sb->s_root);
 	sb->s_root = NULL;
 failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
 	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
-failed_mount2a:
-	ext4_mb_release(sb);
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
 	ext4_kvfree(sbi->s_group_desc);
 failed_mount:
-	ext4_ext_release(sb);
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
 	if (sbi->s_proc) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0924521306b4..f8cf619edb5f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
  * b. do not use extent cache for better performance
  * c. give the block addresses to blockdev
  */
-static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create, bool fiemap)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
 		err = 0;
 		goto unlock_out;
 	}
-	if (dn.data_blkaddr == NEW_ADDR)
+	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
 		goto put_out;
 
 	if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
 			err = 0;
 			goto unlock_out;
 		}
-		if (dn.data_blkaddr == NEW_ADDR)
+		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
 			goto put_out;
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
 	return err;
 }
 
+static int get_data_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create, false);
+}
+
+static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create, true);
+}
+
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len)
 {
-	return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
+	return generic_block_fiemap(inode, fieinfo,
+				start, len, get_data_block_fiemap);
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
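The f2fs/data.c change keeps the fixed get_block_t callback signature used by generic_block_fiemap() while threading one extra behaviour flag through a shared helper: fiemap should report NEW_ADDR blocks (allocated but not yet written), ordinary block mapping should not. A compact sketch of that wrapper shape (hypothetical names and block check, not f2fs code):

#include <stdbool.h>
#include <stdio.h>

static int __lookup_block(long iblock, bool fiemap)
{
        bool is_new = (iblock == 42);   /* pretend block 42 is preallocated */

        if (is_new && !fiemap)
                return -1;              /* hidden from normal I/O mapping */
        return 0;                       /* mapped (or reported to fiemap) */
}

static int lookup_block(long iblock)
{
        return __lookup_block(iblock, false);
}

static int lookup_block_fiemap(long iblock)
{
        return __lookup_block(iblock, true);
}

int main(void)
{
        printf("normal: %d, fiemap: %d\n",
               lookup_block(42), lookup_block_fiemap(42));
        return 0;
}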
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 966acb039e3b..a4addd72ebbd 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
 
 put_error:
 	f2fs_put_page(page, 1);
+error:
 	/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
 	truncate_inode_pages(&inode->i_data, 0);
 	truncate_blocks(inode, 0);
 	remove_dirty_dir_inode(inode);
-error:
 	remove_inode_page(inode);
 	return ERR_PTR(err);
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e51c732b0dd9..58df97e174d0 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
 	struct curseg_info *curseg_array;	/* active segment information */
 
-	struct list_head wblist_head;	/* list of under-writeback pages */
-	spinlock_t wblist_lock;	/* lock for checkpoint */
-
 	block_t seg0_blkaddr;	/* block address of 0'th segment */
 	block_t main_blkaddr;	/* start block address of main area */
 	block_t ssa_blkaddr;	/* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
  */
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	WARN_ON((nid >= NM_I(sbi)->max_nid));
+	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+		return -EINVAL;
 	if (unlikely(nid >= NM_I(sbi)->max_nid))
 		return -EINVAL;
 	return 0;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c58e33075719..7d8b96275092 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	off_start = offset & (PAGE_CACHE_SIZE - 1);
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
+	f2fs_lock_op(sbi);
+
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
 
-		f2fs_lock_op(sbi);
+		if (index == pg_end && !off_end)
+			goto noalloc;
+
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = f2fs_reserve_block(&dn, index);
-		f2fs_unlock_op(sbi);
 		if (ret)
 			break;
-
+noalloc:
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 		i_size_read(inode) < new_size) {
 		i_size_write(inode, new_size);
 		mark_inode_dirty(inode);
-		f2fs_write_inode(inode, NULL);
+		update_inode_page(inode);
 	}
+	f2fs_unlock_op(sbi);
 
 	return ret;
 }
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index adc622c6bdce..2cf6962f6cc8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
 	if (check_nid_range(sbi, inode->i_ino)) {
 		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
 			 (unsigned long) inode->i_ino);
+		WARN_ON(1);
 		return -EINVAL;
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9138c32aa698..a6bdddc33ce2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	}
 
 	f2fs_set_link(new_dir, new_entry, new_page, old_inode);
-	down_write(&F2FS_I(old_inode)->i_sem);
-	F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-	up_write(&F2FS_I(old_inode)->i_sem);
 
 	new_inode->i_ctime = CURRENT_TIME;
 	down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
+	down_write(&F2FS_I(old_inode)->i_sem);
+	file_lost_pino(old_inode);
+	up_write(&F2FS_I(old_inode)->i_sem);
+
 	old_inode->i_ctime = CURRENT_TIME;
 	mark_inode_dirty(old_inode);
 
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (old_dir != new_dir) {
 		f2fs_set_link(old_inode, old_dir_entry,
 					old_dir_page, new_dir);
-		down_write(&F2FS_I(old_inode)->i_sem);
-		F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-		up_write(&F2FS_I(old_inode)->i_sem);
 		update_inode_page(old_inode);
 	} else {
 		kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	return 0;
 
 put_out_dir:
-	f2fs_put_page(new_page, 1);
+	kunmap(new_page);
+	f2fs_put_page(new_page, 0);
 out_dir:
 	if (old_dir_entry) {
 		kunmap(old_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9dfb9a042fd2..4b697ccc9b0c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; 42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); 43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
44 } else if (type == DIRTY_DENTS) { 44 } else if (type == DIRTY_DENTS) {
45 if (sbi->sb->s_bdi->dirty_exceeded)
46 return false;
45 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 47 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
46 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); 48 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
47 } 49 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f25f0e07e26f..d04613df710a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
272 return -ENOMEM; 272 return -ENOMEM;
273 spin_lock_init(&fcc->issue_lock); 273 spin_lock_init(&fcc->issue_lock);
274 init_waitqueue_head(&fcc->flush_wait_queue); 274 init_waitqueue_head(&fcc->flush_wait_queue);
275 sbi->sm_info->cmd_control_info = fcc;
275 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 276 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
276 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 277 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
277 if (IS_ERR(fcc->f2fs_issue_flush)) { 278 if (IS_ERR(fcc->f2fs_issue_flush)) {
278 err = PTR_ERR(fcc->f2fs_issue_flush); 279 err = PTR_ERR(fcc->f2fs_issue_flush);
279 kfree(fcc); 280 kfree(fcc);
281 sbi->sm_info->cmd_control_info = NULL;
280 return err; 282 return err;
281 } 283 }
282 sbi->sm_info->cmd_control_info = fcc;
283 284
284 return err; 285 return err;
285} 286}
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
1885 1886
1886 /* init sm info */ 1887 /* init sm info */
1887 sbi->sm_info = sm_info; 1888 sbi->sm_info = sm_info;
1888 INIT_LIST_HEAD(&sm_info->wblist_head);
1889 spin_lock_init(&sm_info->wblist_lock);
1890 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1889 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1891 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1890 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1892 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 1891 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
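
A note on the create_flush_cmd_control() reorder above: kthread_run() can start executing issue_flush_thread() before the next statement in the parent runs, and that thread reads cmd_control_info, so the pointer must be published before the thread is created and unpublished again on the error path. A minimal sketch of the publish-before-start pattern, with hypothetical names (struct flusher, flusher_fn, start_flusher):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/err.h>

struct flusher {
        struct task_struct *task;
};

static struct flusher *global_flusher;  /* what the thread would read */

static int flusher_fn(void *data)
{
        /* may run before kthread_run() has returned in the parent */
        while (!kthread_should_stop())
                schedule();
        return 0;
}

static int start_flusher(void)
{
        struct flusher *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return -ENOMEM;

        global_flusher = f;                     /* publish first */
        f->task = kthread_run(flusher_fn, f, "flusher");
        if (IS_ERR(f->task)) {
                int err = PTR_ERR(f->task);

                global_flusher = NULL;          /* unpublish on failure */
                kfree(f);
                return err;
        }
        return 0;
}
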
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b2b18637cb9e..8f96d9372ade 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
689 struct f2fs_sb_info *sbi = F2FS_SB(sb); 689 struct f2fs_sb_info *sbi = F2FS_SB(sb);
690 struct inode *inode; 690 struct inode *inode;
691 691
692 if (unlikely(ino < F2FS_ROOT_INO(sbi))) 692 if (check_nid_range(sbi, ino))
693 return ERR_PTR(-ESTALE);
694 if (unlikely(ino >= NM_I(sbi)->max_nid))
695 return ERR_PTR(-ESTALE); 693 return ERR_PTR(-ESTALE);
696 694
697 /* 695 /*
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
643 unsigned long seglen; 643 unsigned long seglen;
644 unsigned long addr; 644 unsigned long addr;
645 struct page *pg; 645 struct page *pg;
646 void *mapaddr;
647 void *buf;
648 unsigned len; 646 unsigned len;
647 unsigned offset;
649 unsigned move_pages:1; 648 unsigned move_pages:1;
650}; 649};
651 650
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
666 if (cs->currbuf) { 665 if (cs->currbuf) {
667 struct pipe_buffer *buf = cs->currbuf; 666 struct pipe_buffer *buf = cs->currbuf;
668 667
669 if (!cs->write) { 668 if (cs->write)
670 kunmap_atomic(cs->mapaddr);
671 } else {
672 kunmap_atomic(cs->mapaddr);
673 buf->len = PAGE_SIZE - cs->len; 669 buf->len = PAGE_SIZE - cs->len;
674 }
675 cs->currbuf = NULL; 670 cs->currbuf = NULL;
676 cs->mapaddr = NULL; 671 } else if (cs->pg) {
677 } else if (cs->mapaddr) {
678 kunmap_atomic(cs->mapaddr);
679 if (cs->write) { 672 if (cs->write) {
680 flush_dcache_page(cs->pg); 673 flush_dcache_page(cs->pg);
681 set_page_dirty_lock(cs->pg); 674 set_page_dirty_lock(cs->pg);
682 } 675 }
683 put_page(cs->pg); 676 put_page(cs->pg);
684 cs->mapaddr = NULL;
685 } 677 }
678 cs->pg = NULL;
686} 679}
687 680
688/* 681/*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
691 */ 684 */
692static int fuse_copy_fill(struct fuse_copy_state *cs) 685static int fuse_copy_fill(struct fuse_copy_state *cs)
693{ 686{
694 unsigned long offset; 687 struct page *page;
695 int err; 688 int err;
696 689
697 unlock_request(cs->fc, cs->req); 690 unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
706 699
707 BUG_ON(!cs->nr_segs); 700 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf; 701 cs->currbuf = buf;
709 cs->mapaddr = kmap_atomic(buf->page); 702 cs->pg = buf->page;
703 cs->offset = buf->offset;
710 cs->len = buf->len; 704 cs->len = buf->len;
711 cs->buf = cs->mapaddr + buf->offset;
712 cs->pipebufs++; 705 cs->pipebufs++;
713 cs->nr_segs--; 706 cs->nr_segs--;
714 } else { 707 } else {
715 struct page *page;
716
717 if (cs->nr_segs == cs->pipe->buffers) 708 if (cs->nr_segs == cs->pipe->buffers)
718 return -EIO; 709 return -EIO;
719 710
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
726 buf->len = 0; 717 buf->len = 0;
727 718
728 cs->currbuf = buf; 719 cs->currbuf = buf;
729 cs->mapaddr = kmap_atomic(page); 720 cs->pg = page;
730 cs->buf = cs->mapaddr; 721 cs->offset = 0;
731 cs->len = PAGE_SIZE; 722 cs->len = PAGE_SIZE;
732 cs->pipebufs++; 723 cs->pipebufs++;
733 cs->nr_segs++; 724 cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
740 cs->iov++; 731 cs->iov++;
741 cs->nr_segs--; 732 cs->nr_segs--;
742 } 733 }
743 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); 734 err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
744 if (err < 0) 735 if (err < 0)
745 return err; 736 return err;
746 BUG_ON(err != 1); 737 BUG_ON(err != 1);
747 offset = cs->addr % PAGE_SIZE; 738 cs->pg = page;
748 cs->mapaddr = kmap_atomic(cs->pg); 739 cs->offset = cs->addr % PAGE_SIZE;
749 cs->buf = cs->mapaddr + offset; 740 cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
750 cs->len = min(PAGE_SIZE - offset, cs->seglen);
751 cs->seglen -= cs->len; 741 cs->seglen -= cs->len;
752 cs->addr += cs->len; 742 cs->addr += cs->len;
753 } 743 }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
760{ 750{
761 unsigned ncpy = min(*size, cs->len); 751 unsigned ncpy = min(*size, cs->len);
762 if (val) { 752 if (val) {
753 void *pgaddr = kmap_atomic(cs->pg);
754 void *buf = pgaddr + cs->offset;
755
763 if (cs->write) 756 if (cs->write)
764 memcpy(cs->buf, *val, ncpy); 757 memcpy(buf, *val, ncpy);
765 else 758 else
766 memcpy(*val, cs->buf, ncpy); 759 memcpy(*val, buf, ncpy);
760
761 kunmap_atomic(pgaddr);
767 *val += ncpy; 762 *val += ncpy;
768 } 763 }
769 *size -= ncpy; 764 *size -= ncpy;
770 cs->len -= ncpy; 765 cs->len -= ncpy;
771 cs->buf += ncpy; 766 cs->offset += ncpy;
772 return ncpy; 767 return ncpy;
773} 768}
774 769
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
874out_fallback_unlock: 869out_fallback_unlock:
875 unlock_page(newpage); 870 unlock_page(newpage);
876out_fallback: 871out_fallback:
877 cs->mapaddr = kmap_atomic(buf->page); 872 cs->pg = buf->page;
878 cs->buf = cs->mapaddr + buf->offset; 873 cs->offset = buf->offset;
879 874
880 err = lock_request(cs->fc, cs->req); 875 err = lock_request(cs->fc, cs->req);
881 if (err) 876 if (err)
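
The fuse_copy_state rework above trades a long-lived kmap_atomic() mapping (mapaddr/buf) for a (page, offset) pair that is mapped only for the duration of each memcpy(). An atomic kmap disables pagefaults and preemption, so holding one across code that may sleep, such as get_user_pages_fast() or request locking, is not allowed; mapping around just the copy keeps the atomic section minimal. A reduced sketch of the new shape (struct copy_state and copy_to_page are hypothetical stand-ins):

#include <linux/highmem.h>
#include <linux/string.h>

struct copy_state {
        struct page *pg;        /* which page, not where it is mapped */
        unsigned int offset;
        unsigned int len;
};

static void copy_to_page(struct copy_state *cs, const void *src, unsigned int n)
{
        void *pgaddr = kmap_atomic(cs->pg);     /* short atomic mapping */

        memcpy(pgaddr + cs->offset, src, n);
        kunmap_atomic(pgaddr);                  /* unmap before anything sleeps */

        cs->offset += n;
        cs->len -= n;
}
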
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
198 inode = ACCESS_ONCE(entry->d_inode); 198 inode = ACCESS_ONCE(entry->d_inode);
199 if (inode && is_bad_inode(inode)) 199 if (inode && is_bad_inode(inode))
200 goto invalid; 200 goto invalid;
201 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 201 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
202 (flags & LOOKUP_REVAL)) {
202 int err; 203 int err;
203 struct fuse_entry_out outarg; 204 struct fuse_entry_out outarg;
204 struct fuse_req *req; 205 struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
814 return err; 815 return err;
815} 816}
816 817
817static int fuse_rename(struct inode *olddir, struct dentry *oldent,
818 struct inode *newdir, struct dentry *newent)
819{
820 return fuse_rename_common(olddir, oldent, newdir, newent, 0,
821 FUSE_RENAME, sizeof(struct fuse_rename_in));
822}
823
824static int fuse_rename2(struct inode *olddir, struct dentry *oldent, 818static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
825 struct inode *newdir, struct dentry *newent, 819 struct inode *newdir, struct dentry *newent,
826 unsigned int flags) 820 unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
831 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 825 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
832 return -EINVAL; 826 return -EINVAL;
833 827
834 if (fc->no_rename2 || fc->minor < 23) 828 if (flags) {
835 return -EINVAL; 829 if (fc->no_rename2 || fc->minor < 23)
830 return -EINVAL;
836 831
837 err = fuse_rename_common(olddir, oldent, newdir, newent, flags, 832 err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
838 FUSE_RENAME2, sizeof(struct fuse_rename2_in)); 833 FUSE_RENAME2,
839 if (err == -ENOSYS) { 834 sizeof(struct fuse_rename2_in));
840 fc->no_rename2 = 1; 835 if (err == -ENOSYS) {
841 err = -EINVAL; 836 fc->no_rename2 = 1;
837 err = -EINVAL;
838 }
839 } else {
840 err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
841 FUSE_RENAME,
842 sizeof(struct fuse_rename_in));
842 } 843 }
844
843 return err; 845 return err;
846}
844 847
848static int fuse_rename(struct inode *olddir, struct dentry *oldent,
849 struct inode *newdir, struct dentry *newent)
850{
851 return fuse_rename2(olddir, oldent, newdir, newent, 0);
845} 852}
846 853
847static int fuse_link(struct dentry *entry, struct inode *newdir, 854static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
985 int err; 992 int err;
986 bool r; 993 bool r;
987 994
988 if (fi->i_time < get_jiffies_64()) { 995 if (time_before64(fi->i_time, get_jiffies_64())) {
989 r = true; 996 r = true;
990 err = fuse_do_getattr(inode, stat, file); 997 err = fuse_do_getattr(inode, stat, file);
991 } else { 998 } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
1171 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1178 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1172 struct fuse_inode *fi = get_fuse_inode(inode); 1179 struct fuse_inode *fi = get_fuse_inode(inode);
1173 1180
1174 if (fi->i_time < get_jiffies_64()) { 1181 if (time_before64(fi->i_time, get_jiffies_64())) {
1175 refreshed = true; 1182 refreshed = true;
1176 1183
1177 err = fuse_perm_getattr(inode, mask); 1184 err = fuse_perm_getattr(inode, mask);
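
The three hunks above replace raw '<' comparisons of 64-bit jiffies values with time_before64(). A plain comparison gives the wrong answer once the counter wraps; the jiffies helpers compare via signed subtraction, so the ordering stays correct across the wrap. A minimal illustration (entry_expired is a hypothetical wrapper):

#include <linux/jiffies.h>
#include <linux/types.h>

static bool entry_expired(u64 expiry)
{
        /*
         * Correct even after get_jiffies_64() wraps, unlike the
         * old 'expiry < get_jiffies_64()'.
         */
        return time_before64(expiry, get_jiffies_64());
}
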
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
1687 error = -EIO; 1687 error = -EIO;
1688 req->ff = fuse_write_file_get(fc, fi); 1688 req->ff = fuse_write_file_get(fc, fi);
1689 if (!req->ff) 1689 if (!req->ff)
1690 goto err_free; 1690 goto err_nofile;
1691 1691
1692 fuse_write_fill(req, req->ff, page_offset(page), 0); 1692 fuse_write_fill(req, req->ff, page_offset(page), 0);
1693 1693
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
1715 1715
1716 return 0; 1716 return 0;
1717 1717
1718err_nofile:
1719 __free_page(tmp_page);
1718err_free: 1720err_free:
1719 fuse_request_free(req); 1721 fuse_request_free(req);
1720err: 1722err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
1955 data.ff = NULL; 1957 data.ff = NULL;
1956 1958
1957 err = -ENOMEM; 1959 err = -ENOMEM;
1958 data.orig_pages = kzalloc(sizeof(struct page *) * 1960 data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
1959 FUSE_MAX_PAGES_PER_REQ, 1961 sizeof(struct page *),
1960 GFP_NOFS); 1962 GFP_NOFS);
1961 if (!data.orig_pages) 1963 if (!data.orig_pages)
1962 goto out; 1964 goto out;
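
The kcalloc() conversion above is an overflow guard rather than a style change: kzalloc(n * size, ...) can wrap the multiplication and return a buffer shorter than intended, while kcalloc(n, size, ...) fails with NULL when the product would overflow. Sketch:

#include <linux/slab.h>

static struct page **alloc_page_array(unsigned int n, gfp_t gfp)
{
        /* checked n * sizeof(struct page *), NULL if it would wrap */
        return kcalloc(n, sizeof(struct page *), gfp);
}

The same reasoning applies to the gfs2 lock_dlm hunk further down, where two kzalloc(new_size * sizeof(uint32_t)) calls become kcalloc().
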
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..8474028d7848 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
478 {OPT_ERR, NULL} 478 {OPT_ERR, NULL}
479}; 479};
480 480
481static int fuse_match_uint(substring_t *s, unsigned int *res)
482{
483 int err = -ENOMEM;
484 char *buf = match_strdup(s);
485 if (buf) {
486 err = kstrtouint(buf, 10, res);
487 kfree(buf);
488 }
489 return err;
490}
491
481static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) 492static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
482{ 493{
483 char *p; 494 char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
488 while ((p = strsep(&opt, ",")) != NULL) { 499 while ((p = strsep(&opt, ",")) != NULL) {
489 int token; 500 int token;
490 int value; 501 int value;
502 unsigned uv;
491 substring_t args[MAX_OPT_ARGS]; 503 substring_t args[MAX_OPT_ARGS];
492 if (!*p) 504 if (!*p)
493 continue; 505 continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
511 break; 523 break;
512 524
513 case OPT_USER_ID: 525 case OPT_USER_ID:
514 if (match_int(&args[0], &value)) 526 if (fuse_match_uint(&args[0], &uv))
515 return 0; 527 return 0;
516 d->user_id = make_kuid(current_user_ns(), value); 528 d->user_id = make_kuid(current_user_ns(), uv);
517 if (!uid_valid(d->user_id)) 529 if (!uid_valid(d->user_id))
518 return 0; 530 return 0;
519 d->user_id_present = 1; 531 d->user_id_present = 1;
520 break; 532 break;
521 533
522 case OPT_GROUP_ID: 534 case OPT_GROUP_ID:
523 if (match_int(&args[0], &value)) 535 if (fuse_match_uint(&args[0], &uv))
524 return 0; 536 return 0;
525 d->group_id = make_kgid(current_user_ns(), value); 537 d->group_id = make_kgid(current_user_ns(), uv);
526 if (!gid_valid(d->group_id)) 538 if (!gid_valid(d->group_id))
527 return 0; 539 return 0;
528 d->group_id_present = 1; 540 d->group_id_present = 1;
@@ -1006,7 +1018,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1006 1018
1007 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1019 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
1008 1020
1009 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 1021 if (!parse_fuse_opt(data, &d, is_bdev))
1010 goto err; 1022 goto err;
1011 1023
1012 if (is_bdev) { 1024 if (is_bdev) {
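
fuse_match_uint() above exists because match_int() parses through a signed int: user and group IDs above INT_MAX, which are perfectly valid 32-bit values, could not be given at mount time. Going through kstrtouint() accepts the full unsigned range while still rejecting garbage and negative input. A hypothetical usage sketch:

#include <linux/parser.h>
#include <linux/printk.h>

static void demo(void)
{
        static char opt[] = "3000000000";       /* > INT_MAX, still a valid uid */
        substring_t arg = { .from = opt, .to = opt + sizeof(opt) - 1 };
        unsigned int uv;

        if (fuse_match_uint(&arg, &uv) == 0)    /* match_int() would reject this */
                pr_info("parsed uid %u\n", uv);
}
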
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
981 int error = 0; 981 int error = 0;
982 982
983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; 984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
985 985
986 mutex_lock(&fp->f_fl_mutex); 986 mutex_lock(&fp->f_fl_mutex);
987 987
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
991 goto out; 991 goto out;
992 flock_lock_file_wait(file, 992 flock_lock_file_wait(file,
993 &(struct file_lock){.fl_type = F_UNLCK}); 993 &(struct file_lock){.fl_type = F_UNLCK});
994 gfs2_glock_dq_wait(fl_gh); 994 gfs2_glock_dq(fl_gh);
995 gfs2_holder_reinit(state, flags, fl_gh); 995 gfs2_holder_reinit(state, flags, fl_gh);
996 } else { 996 } else {
997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, 997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
731 cachep = gfs2_glock_aspace_cachep; 731 cachep = gfs2_glock_aspace_cachep;
732 else 732 else
733 cachep = gfs2_glock_cachep; 733 cachep = gfs2_glock_cachep;
734 gl = kmem_cache_alloc(cachep, GFP_KERNEL); 734 gl = kmem_cache_alloc(cachep, GFP_NOFS);
735 if (!gl) 735 if (!gl)
736 return -ENOMEM; 736 return -ENOMEM;
737 737
738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
739 739
740 if (glops->go_flags & GLOF_LVB) { 740 if (glops->go_flags & GLOF_LVB) {
741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); 741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
742 if (!gl->gl_lksb.sb_lvbptr) { 742 if (!gl->gl_lksb.sb_lvbptr) {
743 kmem_cache_free(cachep, gl); 743 kmem_cache_free(cachep, gl);
744 return -ENOMEM; 744 return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1405 list_del_init(&gl->gl_lru); 1405 list_del_init(&gl->gl_lru);
1406 if (!spin_trylock(&gl->gl_spin)) { 1406 if (!spin_trylock(&gl->gl_spin)) {
1407add_back_to_lru:
1407 list_add(&gl->gl_lru, &lru_list); 1408 list_add(&gl->gl_lru, &lru_list);
1408 atomic_inc(&lru_count); 1409 atomic_inc(&lru_count);
1409 continue; 1410 continue;
1410 } 1411 }
1412 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1413 spin_unlock(&gl->gl_spin);
1414 goto add_back_to_lru;
1415 }
1411 clear_bit(GLF_LRU, &gl->gl_flags); 1416 clear_bit(GLF_LRU, &gl->gl_flags);
1412 spin_unlock(&lru_lock);
1413 gl->gl_lockref.count++; 1417 gl->gl_lockref.count++;
1414 if (demote_ok(gl)) 1418 if (demote_ok(gl))
1415 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1419 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
1417 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1421 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1418 gl->gl_lockref.count--; 1422 gl->gl_lockref.count--;
1419 spin_unlock(&gl->gl_spin); 1423 spin_unlock(&gl->gl_spin);
1420 spin_lock(&lru_lock); 1424 cond_resched_lock(&lru_lock);
1421 } 1425 }
1422} 1426}
1423 1427
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
1442 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1446 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1443 1447
1444 /* Test for being demotable */ 1448 /* Test for being demotable */
1445 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1449 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1446 list_move(&gl->gl_lru, &dispose); 1450 list_move(&gl->gl_lru, &dispose);
1447 atomic_dec(&lru_count); 1451 atomic_dec(&lru_count);
1448 freed++; 1452 freed++;
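
Two separable fixes sit in the glock hunks above. The GFP_KERNEL to GFP_NOFS switch keeps the allocator's direct reclaim from re-entering the filesystem that already holds glock state, and cond_resched_lock() lets the LRU disposer briefly drop lru_lock between items instead of monopolizing the CPU. A generic sketch of both idioms (more_work() and do_one() are hypothetical stubs):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static bool more_work(void);    /* hypothetical: work still pending? */
static void do_one(void);       /* hypothetical: one unit of work */

/* reclaim must not recurse into the fs that holds our locks */
static void *alloc_under_fs_lock(size_t size)
{
        return kzalloc(size, GFP_NOFS);
}

/* batch under a spinlock, but yield when a reschedule is due */
static void drain(spinlock_t *lock)
{
        spin_lock(lock);
        while (more_work()) {
                do_one();
                cond_resched_lock(lock);        /* drops/retakes if needed */
        }
        spin_unlock(lock);
}
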
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
234 * inode_go_inval - prepare a inode glock to be released 234 * inode_go_inval - prepare a inode glock to be released
235 * @gl: the glock 235 * @gl: the glock
236 * @flags: 236 * @flags:
237 * 237 *
238 * Normally we invlidate everything, but if we are moving into 238 * Normally we invalidate everything, but if we are moving into
239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
240 * can keep hold of the metadata, since it won't have changed. 240 * can keep hold of the metadata, since it won't have changed.
241 * 241 *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1036 1036
1037 new_size = old_size + RECOVER_SIZE_INC; 1037 new_size = old_size + RECOVER_SIZE_INC;
1038 1038
1039 submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1039 submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1040 result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1040 result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1041 if (!submit || !result) { 1041 if (!submit || !result) {
1042 kfree(submit); 1042 kfree(submit);
1043 kfree(result); 1043 kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
337 337
338/** 338/**
339 * gfs2_free_extlen - Return extent length of free blocks 339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rbm: Starting position 340 * @rrbm: Starting position
341 * @len: Max length to check 341 * @len: Max length to check
342 * 342 *
343 * Starting at the block specified by the rbm, see how many free blocks 343 * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2522 2522
2523/** 2523/**
2524 * gfs2_rlist_free - free a resource group list 2524 * gfs2_rlist_free - free a resource group list
2525 * @list: the list of resource groups 2525 * @rlist: the list of resource groups
2526 * 2526 *
2527 */ 2527 */
2528 2528
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 38cfcf5f6fce..6f0f590cc5a3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle)
1588 * to perform a synchronous write. We do this to detect the 1588 * to perform a synchronous write. We do this to detect the
1589 * case where a single process is doing a stream of sync 1589 * case where a single process is doing a stream of sync
1590 * writes. No point in waiting for joiners in that case. 1590 * writes. No point in waiting for joiners in that case.
1591 *
1592 * Setting max_batch_time to 0 disables this completely.
1591 */ 1593 */
1592 pid = current->pid; 1594 pid = current->pid;
1593 if (handle->h_sync && journal->j_last_sync_writer != pid) { 1595 if (handle->h_sync && journal->j_last_sync_writer != pid &&
1596 journal->j_max_batch_time) {
1594 u64 commit_time, trans_time; 1597 u64 commit_time, trans_time;
1595 1598
1596 journal->j_last_sync_writer = pid; 1599 journal->j_last_sync_writer = pid;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e3d37f607f97..d895b4b7b661 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -39,6 +39,19 @@ struct kernfs_open_node {
39 struct list_head files; /* goes through kernfs_open_file.list */ 39 struct list_head files; /* goes through kernfs_open_file.list */
40}; 40};
41 41
42/*
43 * kernfs_notify() may be called from any context and bounces notifications
44 * through a work item. To minimize space overhead in kernfs_node, the
45 * pending queue is implemented as a singly linked list of kernfs_nodes.
46 * The list is terminated with the self pointer so that whether a
47 * kernfs_node is on the list or not can be determined by testing the next
48 * pointer for NULL.
49 */
50#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
51
52static DEFINE_SPINLOCK(kernfs_notify_lock);
53static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
54
42static struct kernfs_open_file *kernfs_of(struct file *file) 55static struct kernfs_open_file *kernfs_of(struct file *file)
43{ 56{
44 return ((struct seq_file *)file->private_data)->private; 57 return ((struct seq_file *)file->private_data)->private;
@@ -783,24 +796,25 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
783 return DEFAULT_POLLMASK|POLLERR|POLLPRI; 796 return DEFAULT_POLLMASK|POLLERR|POLLPRI;
784} 797}
785 798
786/** 799static void kernfs_notify_workfn(struct work_struct *work)
787 * kernfs_notify - notify a kernfs file
788 * @kn: file to notify
789 *
790 * Notify @kn such that poll(2) on @kn wakes up.
791 */
792void kernfs_notify(struct kernfs_node *kn)
793{ 800{
794 struct kernfs_root *root = kernfs_root(kn); 801 struct kernfs_node *kn;
795 struct kernfs_open_node *on; 802 struct kernfs_open_node *on;
796 struct kernfs_super_info *info; 803 struct kernfs_super_info *info;
797 unsigned long flags; 804repeat:
798 805 /* pop one off the notify_list */
799 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) 806 spin_lock_irq(&kernfs_notify_lock);
807 kn = kernfs_notify_list;
808 if (kn == KERNFS_NOTIFY_EOL) {
809 spin_unlock_irq(&kernfs_notify_lock);
800 return; 810 return;
811 }
812 kernfs_notify_list = kn->attr.notify_next;
813 kn->attr.notify_next = NULL;
814 spin_unlock_irq(&kernfs_notify_lock);
801 815
802 /* kick poll */ 816 /* kick poll */
803 spin_lock_irqsave(&kernfs_open_node_lock, flags); 817 spin_lock_irq(&kernfs_open_node_lock);
804 818
805 on = kn->attr.open; 819 on = kn->attr.open;
806 if (on) { 820 if (on) {
@@ -808,12 +822,12 @@ void kernfs_notify(struct kernfs_node *kn)
808 wake_up_interruptible(&on->poll); 822 wake_up_interruptible(&on->poll);
809 } 823 }
810 824
811 spin_unlock_irqrestore(&kernfs_open_node_lock, flags); 825 spin_unlock_irq(&kernfs_open_node_lock);
812 826
813 /* kick fsnotify */ 827 /* kick fsnotify */
814 mutex_lock(&kernfs_mutex); 828 mutex_lock(&kernfs_mutex);
815 829
816 list_for_each_entry(info, &root->supers, node) { 830 list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
817 struct inode *inode; 831 struct inode *inode;
818 struct dentry *dentry; 832 struct dentry *dentry;
819 833
@@ -833,6 +847,33 @@ void kernfs_notify(struct kernfs_node *kn)
833 } 847 }
834 848
835 mutex_unlock(&kernfs_mutex); 849 mutex_unlock(&kernfs_mutex);
850 kernfs_put(kn);
851 goto repeat;
852}
853
854/**
855 * kernfs_notify - notify a kernfs file
856 * @kn: file to notify
857 *
 858 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
859 * context.
860 */
861void kernfs_notify(struct kernfs_node *kn)
862{
863 static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
864 unsigned long flags;
865
866 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
867 return;
868
869 spin_lock_irqsave(&kernfs_notify_lock, flags);
870 if (!kn->attr.notify_next) {
871 kernfs_get(kn);
872 kn->attr.notify_next = kernfs_notify_list;
873 kernfs_notify_list = kn;
874 schedule_work(&kernfs_notify_work);
875 }
876 spin_unlock_irqrestore(&kernfs_notify_lock, flags);
836} 877}
837EXPORT_SYMBOL_GPL(kernfs_notify); 878EXPORT_SYMBOL_GPL(kernfs_notify);
838 879
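
The kernfs_notify() rework above is a compact pattern worth isolating: a singly linked pending list terminated by a self-referential sentinel, so that "next == NULL" doubles as the not-queued test, with a spinlock guarding the list and a work item draining it. A generic sketch using hypothetical names (struct node, node_enqueue, drain_fn):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct node {
        struct node *next;      /* NULL means "not queued" */
};

static DEFINE_SPINLOCK(pending_lock);
#define LIST_EOL ((struct node *)&pending)      /* end-of-list sentinel */
static struct node *pending = LIST_EOL;

static void drain_fn(struct work_struct *work);
static DECLARE_WORK(drain_work, drain_fn);

static void node_enqueue(struct node *n)
{
        unsigned long flags;

        spin_lock_irqsave(&pending_lock, flags);
        if (!n->next) {                 /* not already on the list */
                n->next = pending;
                pending = n;
                schedule_work(&drain_work);
        }
        spin_unlock_irqrestore(&pending_lock, flags);
}

static void drain_fn(struct work_struct *work)
{
        struct node *n;

        for (;;) {
                spin_lock_irq(&pending_lock);
                n = pending;
                if (n == LIST_EOL) {
                        spin_unlock_irq(&pending_lock);
                        return;
                }
                pending = n->next;
                n->next = NULL;         /* eligible to be queued again */
                spin_unlock_irq(&pending_lock);

                /* ... process n, mirroring the poll/fsnotify kicks ... */
        }
}
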
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index d171b98a6cdd..f973ae9b05f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
211 kernfs_put(root_kn); 211 kernfs_put(root_kn);
212} 212}
213 213
214/**
215 * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
 216 * @root: the kernfs_root in question
217 * @ns: the namespace tag
218 *
219 * Pin the superblock so the superblock won't be destroyed in subsequent
220 * operations. This can be used to block ->kill_sb() which may be useful
221 * for kernfs users which dynamically manage superblocks.
222 *
 223 * Returns NULL if there's no superblock associated with this kernfs_root, or
224 * -EINVAL if the superblock is being freed.
225 */
226struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
227{
228 struct kernfs_super_info *info;
229 struct super_block *sb = NULL;
230
231 mutex_lock(&kernfs_mutex);
232 list_for_each_entry(info, &root->supers, node) {
233 if (info->ns == ns) {
234 sb = info->sb;
235 if (!atomic_inc_not_zero(&info->sb->s_active))
236 sb = ERR_PTR(-EINVAL);
237 break;
238 }
239 }
240 mutex_unlock(&kernfs_mutex);
241 return sb;
242}
243
214void __init kernfs_init(void) 244void __init kernfs_init(void)
215{ 245{
216 kernfs_node_cache = kmem_cache_create("kernfs_node_cache", 246 kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
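
kernfs_pin_sb() above relies on atomic_inc_not_zero(): take a reference only if the count has not already dropped to zero, refusing an object that is mid-teardown instead of resurrecting it. Reduced to the bare idiom (try_pin is hypothetical; a caller that gets true must drop the reference with the matching put, deactivate_super() in the kernfs case):

#include <linux/atomic.h>
#include <linux/types.h>

static bool try_pin(atomic_t *refcount)
{
        /* fails (returns 0) iff *refcount was already 0 */
        return atomic_inc_not_zero(refcount) != 0;
}
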
diff --git a/fs/mbcache.c b/fs/mbcache.c
index bf166e388f0d..187477ded6b3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -73,6 +73,7 @@
73#include <linux/mbcache.h> 73#include <linux/mbcache.h>
74#include <linux/init.h> 74#include <linux/init.h>
75#include <linux/blockgroup_lock.h> 75#include <linux/blockgroup_lock.h>
76#include <linux/log2.h>
76 77
77#ifdef MB_CACHE_DEBUG 78#ifdef MB_CACHE_DEBUG
78# define mb_debug(f...) do { \ 79# define mb_debug(f...) do { \
@@ -93,7 +94,7 @@
93 94
94#define MB_CACHE_WRITER ((unsigned short)~0U >> 1) 95#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
95 96
96#define MB_CACHE_ENTRY_LOCK_BITS __builtin_log2(NR_BG_LOCKS) 97#define MB_CACHE_ENTRY_LOCK_BITS ilog2(NR_BG_LOCKS)
97#define MB_CACHE_ENTRY_LOCK_INDEX(ce) \ 98#define MB_CACHE_ENTRY_LOCK_INDEX(ce) \
98 (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS)) 99 (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
99 100
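
The mbcache hunk above matters because __builtin_log2() is the floating-point log2 builtin and does not belong in kernel integer code; ilog2() from <linux/log2.h> is the integer base-2 logarithm and folds to a constant when its argument is a compile-time constant, so it is safe in contexts like array sizing. Illustration:

#include <linux/log2.h>

#define NR_LOCKS        128
#define LOCK_BITS       ilog2(NR_LOCKS)         /* folds to 7 at compile time */
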
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
756 spin_unlock(&dreq->lock); 756 spin_unlock(&dreq->lock);
757 757
758 while (!list_empty(&hdr->pages)) { 758 while (!list_empty(&hdr->pages)) {
759 bool do_destroy = true;
760 759
761 req = nfs_list_entry(hdr->pages.next); 760 req = nfs_list_entry(hdr->pages.next);
762 nfs_list_remove_request(req); 761 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
765 case NFS_IOHDR_NEED_COMMIT: 764 case NFS_IOHDR_NEED_COMMIT:
766 kref_get(&req->wb_kref); 765 kref_get(&req->wb_kref);
767 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 766 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
768 do_destroy = false;
769 } 767 }
770 nfs_unlock_and_release_request(req); 768 nfs_unlock_and_release_request(req);
771 } 769 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); 244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, 245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
246 const struct rpc_call_ops *, int, int); 246 const struct rpc_call_ops *, int, int);
247void nfs_free_request(struct nfs_page *req);
247 248
248static inline void nfs_iocounter_init(struct nfs_io_counter *c) 249static inline void nfs_iocounter_init(struct nfs_io_counter *c)
249{ 250{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
247 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
248 NULL, 248 NULL,
249}; 249};
250
251static int
252nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
253 size_t size, ssize_t *result)
254{
255 struct posix_acl *acl;
256 char *p = data + *result;
257
258 acl = get_acl(inode, type);
259 if (!acl)
260 return 0;
261
262 posix_acl_release(acl);
263
264 *result += strlen(name);
265 *result += 1;
266 if (!size)
267 return 0;
268 if (*result > size)
269 return -ERANGE;
270
271 strcpy(p, name);
272 return 0;
273}
274
275ssize_t
276nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
277{
278 struct inode *inode = dentry->d_inode;
279 ssize_t result = 0;
280 int error;
281
282 error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
283 POSIX_ACL_XATTR_ACCESS, data, size, &result);
284 if (error)
285 return error;
286
287 error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
288 POSIX_ACL_XATTR_DEFAULT, data, size, &result);
289 if (error)
290 return error;
291 return result;
292}
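
nfs3_listxattr() above implements the standard listxattr sizing protocol: with size == 0 it only accumulates the length needed, and once the running total would exceed a non-zero size it returns -ERANGE. Userspace can therefore use the usual two-call pattern, sketched here as a hypothetical standalone program:

#include <sys/xattr.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
        ssize_t len;
        char *buf, *p;

        if (argc < 2)
                return 1;

        len = listxattr(argv[1], NULL, 0);      /* probe: size == 0 */
        if (len <= 0)
                return 1;

        buf = malloc(len);
        if (!buf || listxattr(argv[1], buf, len) < 0)
                return 1;

        /* names are NUL-separated, e.g. "system.posix_acl_access" */
        for (p = buf; p < buf + len; p += strlen(p) + 1)
                puts(p);

        free(buf);
        return 0;
}
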
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
885 .getattr = nfs_getattr, 885 .getattr = nfs_getattr,
886 .setattr = nfs_setattr, 886 .setattr = nfs_setattr,
887#ifdef CONFIG_NFS_V3_ACL 887#ifdef CONFIG_NFS_V3_ACL
888 .listxattr = generic_listxattr, 888 .listxattr = nfs3_listxattr,
889 .getxattr = generic_getxattr, 889 .getxattr = generic_getxattr,
890 .setxattr = generic_setxattr, 890 .setxattr = generic_setxattr,
891 .removexattr = generic_removexattr, 891 .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
899 .getattr = nfs_getattr, 899 .getattr = nfs_getattr,
900 .setattr = nfs_setattr, 900 .setattr = nfs_setattr,
901#ifdef CONFIG_NFS_V3_ACL 901#ifdef CONFIG_NFS_V3_ACL
902 .listxattr = generic_listxattr, 902 .listxattr = nfs3_listxattr,
903 .getxattr = generic_getxattr, 903 .getxattr = generic_getxattr,
904 .setxattr = generic_setxattr, 904 .setxattr = generic_setxattr,
905 .removexattr = generic_removexattr, 905 .removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
29static struct kmem_cache *nfs_page_cachep; 29static struct kmem_cache *nfs_page_cachep;
30static const struct rpc_call_ops nfs_pgio_common_ops; 30static const struct rpc_call_ops nfs_pgio_common_ops;
31 31
32static void nfs_free_request(struct nfs_page *);
33
34static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) 32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
35{ 33{
36 p->npages = pagecount; 34 p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
239 WARN_ON_ONCE(prev == req); 237 WARN_ON_ONCE(prev == req);
240 238
241 if (!prev) { 239 if (!prev) {
240 /* a head request */
242 req->wb_head = req; 241 req->wb_head = req;
243 req->wb_this_page = req; 242 req->wb_this_page = req;
244 } else { 243 } else {
244 /* a subrequest */
245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); 245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); 246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
247 req->wb_head = prev->wb_head; 247 req->wb_head = prev->wb_head;
248 req->wb_this_page = prev->wb_this_page; 248 req->wb_this_page = prev->wb_this_page;
249 prev->wb_this_page = req; 249 prev->wb_this_page = req;
250 250
251 /* All subrequests take a ref on the head request until
252 * nfs_page_group_destroy is called */
253 kref_get(&req->wb_head->wb_kref);
254
251 /* grab extra ref if head request has extra ref from 255 /* grab extra ref if head request has extra ref from
252 * the write/commit path to handle handoff between write 256 * the write/commit path to handle handoff between write
253 * and commit lists */ 257 * and commit lists */
254 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) 258 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
259 set_bit(PG_INODE_REF, &req->wb_flags);
255 kref_get(&req->wb_kref); 260 kref_get(&req->wb_kref);
261 }
256 } 262 }
257} 263}
258 264
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 275 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
270 struct nfs_page *tmp, *next; 276 struct nfs_page *tmp, *next;
271 277
278 /* subrequests must release the ref on the head request */
279 if (req->wb_head != req)
280 nfs_release_request(req->wb_head);
281
272 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) 282 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
273 return; 283 return;
274 284
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
394 * 404 *
395 * Note: Should never be called with the spinlock held! 405 * Note: Should never be called with the spinlock held!
396 */ 406 */
397static void nfs_free_request(struct nfs_page *req) 407void nfs_free_request(struct nfs_page *req)
398{ 408{
399 WARN_ON_ONCE(req->wb_this_page != req); 409 WARN_ON_ONCE(req->wb_this_page != req);
400 410
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
925 nfs_pageio_doio(desc); 935 nfs_pageio_doio(desc);
926 if (desc->pg_error < 0) 936 if (desc->pg_error < 0)
927 return 0; 937 return 0;
928 desc->pg_moreio = 0;
929 if (desc->pg_recoalesce) 938 if (desc->pg_recoalesce)
930 return 0; 939 return 0;
931 /* retry add_request for this subreq */ 940 /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
972 desc->pg_count = 0; 981 desc->pg_count = 0;
973 desc->pg_base = 0; 982 desc->pg_base = 0;
974 desc->pg_recoalesce = 0; 983 desc->pg_recoalesce = 0;
984 desc->pg_moreio = 0;
975 985
976 while (!list_empty(&head)) { 986 while (!list_empty(&head)) {
977 struct nfs_page *req; 987 struct nfs_page *req;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 47static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48static const struct nfs_rw_ops nfs_rw_write_ops; 48static const struct nfs_rw_ops nfs_rw_write_ops;
49static void nfs_clear_request_commit(struct nfs_page *req);
49 50
50static struct kmem_cache *nfs_wdata_cachep; 51static struct kmem_cache *nfs_wdata_cachep;
51static mempool_t *nfs_wdata_mempool; 52static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
91 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 92 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
92} 93}
93 94
95/*
96 * nfs_page_find_head_request_locked - find head request associated with @page
97 *
98 * must be called while holding the inode lock.
99 *
100 * returns matching head request with reference held, or NULL if not found.
101 */
94static struct nfs_page * 102static struct nfs_page *
95nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 103nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
96{ 104{
97 struct nfs_page *req = NULL; 105 struct nfs_page *req = NULL;
98 106
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
104 /* Linearly search the commit list for the correct req */ 112 /* Linearly search the commit list for the correct req */
105 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 113 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
106 if (freq->wb_page == page) { 114 if (freq->wb_page == page) {
107 req = freq; 115 req = freq->wb_head;
108 break; 116 break;
109 } 117 }
110 } 118 }
111 } 119 }
112 120
113 if (req) 121 if (req) {
122 WARN_ON_ONCE(req->wb_head != req);
123
114 kref_get(&req->wb_kref); 124 kref_get(&req->wb_kref);
125 }
115 126
116 return req; 127 return req;
117} 128}
118 129
119static struct nfs_page *nfs_page_find_request(struct page *page) 130/*
131 * nfs_page_find_head_request - find head request associated with @page
132 *
133 * returns matching head request with reference held, or NULL if not found.
134 */
135static struct nfs_page *nfs_page_find_head_request(struct page *page)
120{ 136{
121 struct inode *inode = page_file_mapping(page)->host; 137 struct inode *inode = page_file_mapping(page)->host;
122 struct nfs_page *req = NULL; 138 struct nfs_page *req = NULL;
123 139
124 spin_lock(&inode->i_lock); 140 spin_lock(&inode->i_lock);
125 req = nfs_page_find_request_locked(NFS_I(inode), page); 141 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
126 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
127 return req; 143 return req;
128} 144}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
274 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 290 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
275} 291}
276 292
277static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 293
294/* nfs_page_group_clear_bits
295 * @req - an nfs request
296 * clears all page group related bits from @req
297 */
298static void
299nfs_page_group_clear_bits(struct nfs_page *req)
300{
301 clear_bit(PG_TEARDOWN, &req->wb_flags);
302 clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
303 clear_bit(PG_UPTODATE, &req->wb_flags);
304 clear_bit(PG_WB_END, &req->wb_flags);
305 clear_bit(PG_REMOVE, &req->wb_flags);
306}
307
308
309/*
310 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
311 *
312 * this is a helper function for nfs_lock_and_join_requests
313 *
314 * @inode - inode associated with request page group, must be holding inode lock
315 * @head - head request of page group, must be holding head lock
316 * @req - request that couldn't lock and needs to wait on the req bit lock
317 * @nonblock - if true, don't actually wait
318 *
319 * NOTE: this must be called holding page_group bit lock and inode spin lock
320 * and BOTH will be released before returning.
321 *
322 * returns 0 on success, < 0 on error.
323 */
324static int
325nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
326 struct nfs_page *req, bool nonblock)
327 __releases(&inode->i_lock)
328{
329 struct nfs_page *tmp;
330 int ret;
331
332 /* relinquish all the locks successfully grabbed this run */
333 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
334 nfs_unlock_request(tmp);
335
336 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
337
338 /* grab a ref on the request that will be waited on */
339 kref_get(&req->wb_kref);
340
341 nfs_page_group_unlock(head);
342 spin_unlock(&inode->i_lock);
343
344 /* release ref from nfs_page_find_head_request_locked */
345 nfs_release_request(head);
346
347 if (!nonblock)
348 ret = nfs_wait_on_request(req);
349 else
350 ret = -EAGAIN;
351 nfs_release_request(req);
352
353 return ret;
354}
355
356/*
357 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
358 *
359 * @destroy_list - request list (using wb_this_page) terminated by @old_head
360 * @old_head - the old head of the list
361 *
362 * All subrequests must be locked and removed from all lists, so at this point
363 * they are only "active" in this function, and possibly in nfs_wait_on_request
364 * with a reference held by some other context.
365 */
366static void
367nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
368 struct nfs_page *old_head)
369{
370 while (destroy_list) {
371 struct nfs_page *subreq = destroy_list;
372
373 destroy_list = (subreq->wb_this_page == old_head) ?
374 NULL : subreq->wb_this_page;
375
376 WARN_ON_ONCE(old_head != subreq->wb_head);
377
378 /* make sure old group is not used */
379 subreq->wb_head = subreq;
380 subreq->wb_this_page = subreq;
381
382 nfs_clear_request_commit(subreq);
383
384 /* subreq is now totally disconnected from page group or any
385 * write / commit lists. last chance to wake any waiters */
386 nfs_unlock_request(subreq);
387
388 if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
389 /* release ref on old head request */
390 nfs_release_request(old_head);
391
392 nfs_page_group_clear_bits(subreq);
393
394 /* release the PG_INODE_REF reference */
395 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
396 nfs_release_request(subreq);
397 else
398 WARN_ON_ONCE(1);
399 } else {
400 WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
401 /* zombie requests have already released the last
402 * reference and were waiting on the rest of the
403 * group to complete. Since it's no longer part of a
404 * group, simply free the request */
405 nfs_page_group_clear_bits(subreq);
406 nfs_free_request(subreq);
407 }
408 }
409}
410
411/*
412 * nfs_lock_and_join_requests - join all subreqs to the head req and return
413 * a locked reference, cancelling any pending
414 * operations for this page.
415 *
416 * @page - the page used to lookup the "page group" of nfs_page structures
417 * @nonblock - if true, don't block waiting for request locks
418 *
419 * This function joins all sub requests to the head request by first
420 * locking all requests in the group, cancelling any pending operations
421 * and finally updating the head request to cover the whole range covered by
422 * the (former) group. All subrequests are removed from any write or commit
423 * lists, unlinked from the group and destroyed.
424 *
425 * Returns a locked, referenced pointer to the head request - which after
426 * this call is guaranteed to be the only request associated with the page.
 427 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
428 * error was encountered.
429 */
430static struct nfs_page *
431nfs_lock_and_join_requests(struct page *page, bool nonblock)
278{ 432{
279 struct inode *inode = page_file_mapping(page)->host; 433 struct inode *inode = page_file_mapping(page)->host;
280 struct nfs_page *req; 434 struct nfs_page *head, *subreq;
435 struct nfs_page *destroy_list = NULL;
436 unsigned int total_bytes;
281 int ret; 437 int ret;
282 438
439try_again:
440 total_bytes = 0;
441
442 WARN_ON_ONCE(destroy_list);
443
283 spin_lock(&inode->i_lock); 444 spin_lock(&inode->i_lock);
284 for (;;) { 445
285 req = nfs_page_find_request_locked(NFS_I(inode), page); 446 /*
286 if (req == NULL) 447 * A reference is taken only on the head request which acts as a
287 break; 448 * reference to the whole page group - the group will not be destroyed
288 if (nfs_lock_request(req)) 449 * until the head reference is released.
289 break; 450 */
290 /* Note: If we hold the page lock, as is the case in nfs_writepage, 451 head = nfs_page_find_head_request_locked(NFS_I(inode), page);
291 * then the call to nfs_lock_request() will always 452
292 * succeed provided that someone hasn't already marked the 453 if (!head) {
293 * request as dirty (in which case we don't care).
294 */
295 spin_unlock(&inode->i_lock); 454 spin_unlock(&inode->i_lock);
296 if (!nonblock) 455 return NULL;
297 ret = nfs_wait_on_request(req); 456 }
298 else 457
299 ret = -EAGAIN; 458 /* lock each request in the page group */
300 nfs_release_request(req); 459 nfs_page_group_lock(head);
301 if (ret != 0) 460 subreq = head;
461 do {
462 /*
463 * Subrequests are always contiguous, non overlapping
464 * and in order. If not, it's a programming error.
465 */
466 WARN_ON_ONCE(subreq->wb_offset !=
467 (head->wb_offset + total_bytes));
468
469 /* keep track of how many bytes this group covers */
470 total_bytes += subreq->wb_bytes;
471
472 if (!nfs_lock_request(subreq)) {
473 /* releases page group bit lock and
474 * inode spin lock and all references */
475 ret = nfs_unroll_locks_and_wait(inode, head,
476 subreq, nonblock);
477
478 if (ret == 0)
479 goto try_again;
480
302 return ERR_PTR(ret); 481 return ERR_PTR(ret);
303 spin_lock(&inode->i_lock); 482 }
483
484 subreq = subreq->wb_this_page;
485 } while (subreq != head);
486
487 /* Now that all requests are locked, make sure they aren't on any list.
488 * Commit list removal accounting is done after locks are dropped */
489 subreq = head;
490 do {
491 nfs_list_remove_request(subreq);
492 subreq = subreq->wb_this_page;
493 } while (subreq != head);
494
495 /* unlink subrequests from head, destroy them later */
496 if (head->wb_this_page != head) {
497 /* destroy list will be terminated by head */
498 destroy_list = head->wb_this_page;
499 head->wb_this_page = head;
500
501 /* change head request to cover whole range that
502 * the former page group covered */
503 head->wb_bytes = total_bytes;
304 } 504 }
505
506 /*
507 * prepare head request to be added to new pgio descriptor
508 */
509 nfs_page_group_clear_bits(head);
510
511 /*
512 * some part of the group was still on the inode list - otherwise
513 * the group wouldn't be involved in async write.
514 * grab a reference for the head request, iff it needs one.
515 */
516 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
517 kref_get(&head->wb_kref);
518
519 nfs_page_group_unlock(head);
520
521 /* drop lock to clear_request_commit the head req and clean up
522 * requests on destroy list */
305 spin_unlock(&inode->i_lock); 523 spin_unlock(&inode->i_lock);
306 return req; 524
525 nfs_destroy_unlinked_subrequests(destroy_list, head);
526
527 /* clean up commit list state */
528 nfs_clear_request_commit(head);
529
530 /* still holds ref on head from nfs_page_find_head_request_locked
531 * and still has lock on head from lock loop */
532 return head;
307} 533}
308 534
309/* 535/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
316 struct nfs_page *req; 542 struct nfs_page *req;
317 int ret = 0; 543 int ret = 0;
318 544
319 req = nfs_find_and_lock_request(page, nonblock); 545 req = nfs_lock_and_join_requests(page, nonblock);
320 if (!req) 546 if (!req)
321 goto out; 547 goto out;
322 ret = PTR_ERR(req); 548 ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
448 set_page_private(req->wb_page, (unsigned long)req); 674 set_page_private(req->wb_page, (unsigned long)req);
449 } 675 }
450 nfsi->npages++; 676 nfsi->npages++;
 451 set_bit(PG_INODE_REF, &req->wb_flags); 677 /* this is a head request for a page group - mark it as having an
678 * extra reference so sub groups can follow suit */
679 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
452 kref_get(&req->wb_kref); 680 kref_get(&req->wb_kref);
453 spin_unlock(&inode->i_lock); 681 spin_unlock(&inode->i_lock);
454} 682}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
474 nfsi->npages--; 702 nfsi->npages--;
475 spin_unlock(&inode->i_lock); 703 spin_unlock(&inode->i_lock);
476 } 704 }
477 nfs_release_request(req); 705
706 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
707 nfs_release_request(req);
478} 708}
479 709
480static void 710static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
638{ 868{
639 struct nfs_commit_info cinfo; 869 struct nfs_commit_info cinfo;
640 unsigned long bytes = 0; 870 unsigned long bytes = 0;
641 bool do_destroy;
642 871
643 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 872 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
644 goto out; 873 goto out;
@@ -668,7 +897,6 @@ remove_req:
668next: 897next:
669 nfs_unlock_request(req); 898 nfs_unlock_request(req);
670 nfs_end_page_writeback(req); 899 nfs_end_page_writeback(req);
671 do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
672 nfs_release_request(req); 900 nfs_release_request(req);
673 } 901 }
674out: 902out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
769 spin_lock(&inode->i_lock); 997 spin_lock(&inode->i_lock);
770 998
771 for (;;) { 999 for (;;) {
772 req = nfs_page_find_request_locked(NFS_I(inode), page); 1000 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
773 if (req == NULL) 1001 if (req == NULL)
774 goto out_unlock; 1002 goto out_unlock;
775 1003
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
877 * dropped page. 1105 * dropped page.
878 */ 1106 */
879 do { 1107 do {
880 req = nfs_page_find_request(page); 1108 req = nfs_page_find_head_request(page);
881 if (req == NULL) 1109 if (req == NULL)
882 return 0; 1110 return 0;
883 l_ctx = req->wb_lock_context; 1111 l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1569 struct nfs_page *req; 1797 struct nfs_page *req;
1570 int ret = 0; 1798 int ret = 0;
1571 1799
1572 for (;;) { 1800 wait_on_page_writeback(page);
1573 wait_on_page_writeback(page); 1801
1574 req = nfs_page_find_request(page); 1802 /* blocking call to cancel all requests and join to a single (head)
1575 if (req == NULL) 1803 * request */
1576 break; 1804 req = nfs_lock_and_join_requests(page, false);
1577 if (nfs_lock_request(req)) { 1805
1578 nfs_clear_request_commit(req); 1806 if (IS_ERR(req)) {
1579 nfs_inode_remove_request(req); 1807 ret = PTR_ERR(req);
1580 /* 1808 } else if (req) {
1581 * In case nfs_inode_remove_request has marked the 1809 /* all requests from this page have been cancelled by
1582 * page as being dirty 1810 * nfs_lock_and_join_requests, so just remove the head
1583 */ 1811 * request from the inode / page_private pointer and
1584 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1812 * release it */
1585 nfs_unlock_and_release_request(req); 1813 nfs_inode_remove_request(req);
1586 break; 1814 /*
1587 } 1815 * In case nfs_inode_remove_request has marked the
1588 ret = nfs_wait_on_request(req); 1816 * page as being dirty
1589 nfs_release_request(req); 1817 */
1590 if (ret < 0) 1818 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1591 break; 1819 nfs_unlock_and_release_request(req);
1592 } 1820 }
1821
1593 return ret; 1822 return ret;
1594} 1823}
1595 1824
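
The core of the nfs_lock_and_join_requests() rewrite above is a lock-all-or-back-off loop over a circular list: try-lock each member in order; on the first failure, unlock everything taken this pass, sleep until the blocker is free, then restart from scratch, so a partial lock set is never held across a sleep. Stripped of the NFS specifics (req_trylock, req_unlock and req_wait are hypothetical):

#include <linux/types.h>

struct req {
        struct req *next;       /* circular: last->next == head */
};

static bool req_trylock(struct req *r);        /* hypothetical */
static void req_unlock(struct req *r);         /* hypothetical */
static void req_wait(struct req *r);           /* hypothetical: sleep until free */

static void lock_group(struct req *head)
{
        struct req *r, *tmp;

retry:
        for (r = head; ; r = r->next) {
                if (!req_trylock(r)) {
                        /* unroll: release everything taken this pass */
                        for (tmp = head; tmp != r; tmp = tmp->next)
                                req_unlock(tmp);
                        req_wait(r);
                        goto retry;
                }
                if (r->next == head)
                        break;          /* whole ring is locked */
        }
}
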
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 6851b003f2a4..8f029db5d271 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -617,15 +617,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
617 617
618 switch (create->cr_type) { 618 switch (create->cr_type) {
619 case NF4LNK: 619 case NF4LNK:
620 /* ugh! we have to null-terminate the linktext, or
621 * vfs_symlink() will choke. it is always safe to
622 * null-terminate by brute force, since at worst we
623 * will overwrite the first byte of the create namelen
624 * in the XDR buffer, which has already been extracted
625 * during XDR decode.
626 */
627 create->cr_linkname[create->cr_linklen] = 0;
628
629 status = nfsd_symlink(rqstp, &cstate->current_fh, 620 status = nfsd_symlink(rqstp, &cstate->current_fh,
630 create->cr_name, create->cr_namelen, 621 create->cr_name, create->cr_namelen,
631 create->cr_linkname, create->cr_linklen, 622 create->cr_linkname, create->cr_linklen,
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 83baf2bfe9e9..b56b1cc02718 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -600,7 +600,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
600 READ_BUF(4); 600 READ_BUF(4);
601 create->cr_linklen = be32_to_cpup(p++); 601 create->cr_linklen = be32_to_cpup(p++);
602 READ_BUF(create->cr_linklen); 602 READ_BUF(create->cr_linklen);
603 SAVEMEM(create->cr_linkname, create->cr_linklen); 603 /*
604 * The VFS will want a null-terminated string, and
605 * null-terminating in place isn't safe since this might
606 * end on a page boundary:
607 */
608 create->cr_linkname =
609 kmalloc(create->cr_linklen + 1, GFP_KERNEL);
610 if (!create->cr_linkname)
611 return nfserr_jukebox;
612 memcpy(create->cr_linkname, p, create->cr_linklen);
613 create->cr_linkname[create->cr_linklen] = '\0';
614 defer_free(argp, kfree, create->cr_linkname);
604 break; 615 break;
605 case NF4BLK: 616 case NF4BLK:
606 case NF4CHR: 617 case NF4CHR:
@@ -2630,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
2630{ 2641{
2631 __be32 *p; 2642 __be32 *p;
2632 2643
2633 p = xdr_reserve_space(xdr, 6); 2644 p = xdr_reserve_space(xdr, 20);
2634 if (!p) 2645 if (!p)
2635 return NULL; 2646 return NULL;
2636 *p++ = htonl(2); 2647 *p++ = htonl(2);
@@ -3267,7 +3278,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
3267 3278
3268 wire_count = htonl(maxcount); 3279 wire_count = htonl(maxcount);
3269 write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4); 3280 write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
3270 xdr_truncate_encode(xdr, length_offset + 4 + maxcount); 3281 xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
3271 if (maxcount & 3) 3282 if (maxcount & 3)
3272 write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, 3283 write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
3273 &zero, 4 - (maxcount&3)); 3284 &zero, 4 - (maxcount&3));
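The decode-side change above copies the link text out of the XDR buffer rather than NUL-terminating it in place: the opaque can end exactly on a page boundary, so writing one byte past it is no longer safe now that the buffer may span pages. The same pattern as a hypothetical helper; xdr_opaque_to_cstring() is not in the tree, but defer_free() is the nfs4xdr.c-local helper the patch itself uses to free the copy once the compound completes:

	static char *
	xdr_opaque_to_cstring(struct nfsd4_compoundargs *argp, void *data, u32 len)
	{
		char *s = kmalloc(len + 1, GFP_KERNEL);

		if (!s)
			return NULL;	/* caller maps this to nfserr_jukebox */
		memcpy(s, data, len);
		s[len] = '\0';
		defer_free(argp, kfree, s);
		return s;
	}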
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 9d231e9e5f0e..bf2d03f8fd3e 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
184 184
185static int stat_open(struct inode *inode, struct file *file) 185static int stat_open(struct inode *inode, struct file *file)
186{ 186{
187 size_t size = 1024 + 128 * num_possible_cpus(); 187 size_t size = 1024 + 128 * num_online_cpus();
188 char *buf;
189 struct seq_file *m;
190 int res;
191 188
192 /* minimum size to display an interrupt count : 2 bytes */ 189 /* minimum size to display an interrupt count : 2 bytes */
193 size += 2 * nr_irqs; 190 size += 2 * nr_irqs;
194 191 return single_open_size(file, show_stat, NULL, size);
195 /* don't ask for more than the kmalloc() max size */
196 if (size > KMALLOC_MAX_SIZE)
197 size = KMALLOC_MAX_SIZE;
198 buf = kmalloc(size, GFP_KERNEL);
199 if (!buf)
200 return -ENOMEM;
201
202 res = single_open(file, show_stat, NULL);
203 if (!res) {
204 m = file->private_data;
205 m->buf = buf;
206 m->size = ksize(buf);
207 } else
208 kfree(buf);
209 return res;
210} 192}
211 193
212static const struct file_operations proc_stat_operations = { 194static const struct file_operations proc_stat_operations = {
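After the conversion above, the open routine no longer juggles its own kmalloc()/single_open() pairing and error unwinding; single_open_size() preallocates a seq_file buffer large enough for one show() pass. A hedged sketch of the resulting shape for some other proc file (foo_show() and the size heuristic are placeholders):

	static int foo_open(struct inode *inode, struct file *file)
	{
		/* rough worst-case output estimate, allocated up front */
		size_t size = 1024 + 64 * num_online_cpus();

		return single_open_size(file, foo_show, NULL, size);
	}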
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63715c0..7f30bdc57d13 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
702 struct dquot *dquot; 702 struct dquot *dquot;
703 unsigned long freed = 0; 703 unsigned long freed = 0;
704 704
705 spin_lock(&dq_list_lock);
705 head = free_dquots.prev; 706 head = free_dquots.prev;
706 while (head != &free_dquots && sc->nr_to_scan) { 707 while (head != &free_dquots && sc->nr_to_scan) {
707 dquot = list_entry(head, struct dquot, dq_free); 708 dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
713 freed++; 714 freed++;
714 head = free_dquots.prev; 715 head = free_dquots.prev;
715 } 716 }
717 spin_unlock(&dq_list_lock);
716 return freed; 718 return freed;
717} 719}
718 720
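The two added lines close a straightforward race: the scan walked free_dquots without dq_list_lock, so a concurrent dqget()/dqput() could modify the list mid-traversal. The general shape, bracketing the whole walk with the same lock the list writers take, in a sketch with illustrative names (foo_lock, foo_free_list and foo_free() are placeholders):

	static unsigned long
	foo_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	{
		unsigned long freed = 0;

		spin_lock(&foo_lock);
		while (sc->nr_to_scan && !list_empty(&foo_free_list)) {
			struct foo *f = list_first_entry(&foo_free_list,
							 struct foo, free_node);

			list_del_init(&f->free_node);
			foo_free(f);
			freed++;
			sc->nr_to_scan--;
		}
		spin_unlock(&foo_lock);
		return freed;
	}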
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1d641bb108d2..3857b720cb1b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -8,8 +8,10 @@
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/seq_file.h> 10#include <linux/seq_file.h>
11#include <linux/vmalloc.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12#include <linux/cred.h> 13#include <linux/cred.h>
14#include <linux/mm.h>
13 15
14#include <asm/uaccess.h> 16#include <asm/uaccess.h>
15#include <asm/page.h> 17#include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
30 m->count = m->size; 32 m->count = m->size;
31} 33}
32 34
35static void *seq_buf_alloc(unsigned long size)
36{
37 void *buf;
38
39 buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
40 if (!buf && size > PAGE_SIZE)
41 buf = vmalloc(size);
42 return buf;
43}
44
33/** 45/**
34 * seq_open - initialize sequential file 46 * seq_open - initialize sequential file
35 * @file: file we initialize 47 * @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
96 return 0; 108 return 0;
97 } 109 }
98 if (!m->buf) { 110 if (!m->buf) {
99 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); 111 m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
100 if (!m->buf) 112 if (!m->buf)
101 return -ENOMEM; 113 return -ENOMEM;
102 } 114 }
@@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
135 147
136Eoverflow: 148Eoverflow:
137 m->op->stop(m, p); 149 m->op->stop(m, p);
138 kfree(m->buf); 150 kvfree(m->buf);
139 m->count = 0; 151 m->count = 0;
140 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); 152 m->buf = seq_buf_alloc(m->size <<= 1);
141 return !m->buf ? -ENOMEM : -EAGAIN; 153 return !m->buf ? -ENOMEM : -EAGAIN;
142} 154}
143 155
@@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
192 204
193 /* grab buffer if we didn't have one */ 205 /* grab buffer if we didn't have one */
194 if (!m->buf) { 206 if (!m->buf) {
195 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); 207 m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
196 if (!m->buf) 208 if (!m->buf)
197 goto Enomem; 209 goto Enomem;
198 } 210 }
@@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
232 if (m->count < m->size) 244 if (m->count < m->size)
233 goto Fill; 245 goto Fill;
234 m->op->stop(m, p); 246 m->op->stop(m, p);
235 kfree(m->buf); 247 kvfree(m->buf);
236 m->count = 0; 248 m->count = 0;
237 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); 249 m->buf = seq_buf_alloc(m->size <<= 1);
238 if (!m->buf) 250 if (!m->buf)
239 goto Enomem; 251 goto Enomem;
240 m->version = 0; 252 m->version = 0;
@@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
350int seq_release(struct inode *inode, struct file *file) 362int seq_release(struct inode *inode, struct file *file)
351{ 363{
352 struct seq_file *m = file->private_data; 364 struct seq_file *m = file->private_data;
353 kfree(m->buf); 365 kvfree(m->buf);
354 kfree(m); 366 kfree(m);
355 return 0; 367 return 0;
356} 368}
@@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open);
605int single_open_size(struct file *file, int (*show)(struct seq_file *, void *), 617int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
606 void *data, size_t size) 618 void *data, size_t size)
607{ 619{
608 char *buf = kmalloc(size, GFP_KERNEL); 620 char *buf = seq_buf_alloc(size);
609 int ret; 621 int ret;
610 if (!buf) 622 if (!buf)
611 return -ENOMEM; 623 return -ENOMEM;
612 ret = single_open(file, show, data); 624 ret = single_open(file, show, data);
613 if (ret) { 625 if (ret) {
614 kfree(buf); 626 kvfree(buf);
615 return ret; 627 return ret;
616 } 628 }
617 ((struct seq_file *)file->private_data)->buf = buf; 629 ((struct seq_file *)file->private_data)->buf = buf;
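seq_buf_alloc() above tries kmalloc() first (with __GFP_NOWARN so a failed large allocation stays quiet) and falls back to vmalloc(), while every free site becomes kvfree() so either kind of allocation is released correctly. Later kernels wrap exactly this try-kmalloc-then-vmalloc pattern in kvmalloc(); assuming such a kernel, the helper collapses to:

	static void *seq_buf_alloc(unsigned long size)
	{
		/* kvmalloc() attempts kmalloc() and falls back to vmalloc() */
		return kvmalloc(size, GFP_KERNEL);
	}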
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
4298} 4298}
4299 4299
4300 4300
4301int 4301static int
4302__xfs_bmapi_allocate( 4302xfs_bmapi_allocate(
4303 struct xfs_bmalloca *bma) 4303 struct xfs_bmalloca *bma)
4304{ 4304{
4305 struct xfs_mount *mp = bma->ip->i_mount; 4305 struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
4578 bma.flist = flist; 4578 bma.flist = flist;
4579 bma.firstblock = firstblock; 4579 bma.firstblock = firstblock;
4580 4580
4581 if (flags & XFS_BMAPI_STACK_SWITCH)
4582 bma.stack_switch = 1;
4583
4584 while (bno < end && n < *nmap) { 4581 while (bno < end && n < *nmap) {
4585 inhole = eof || bma.got.br_startoff > bno; 4582 inhole = eof || bma.got.br_startoff > bno;
4586 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4583 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
77 * from written to unwritten, otherwise convert from unwritten to written. 77 * from written to unwritten, otherwise convert from unwritten to written.
78 */ 78 */
79#define XFS_BMAPI_CONVERT 0x040 79#define XFS_BMAPI_CONVERT 0x040
80#define XFS_BMAPI_STACK_SWITCH 0x080
81 80
82#define XFS_BMAPI_FLAGS \ 81#define XFS_BMAPI_FLAGS \
83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 82 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
86 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
87 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
88 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \
89 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 88 { XFS_BMAPI_CONVERT, "CONVERT" }
90 { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
91 89
92 90
93static inline int xfs_bmapi_aflag(int w) 91static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
249} 249}
250 250
251/* 251/*
252 * Stack switching interfaces for allocation
253 */
254static void
255xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257{
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261 unsigned long new_pflags = PF_FSTRANS;
262
263 /*
264 * we are in a transaction context here, but may also be doing work
265 * in kswapd context, and hence we may need to inherit that state
266 * temporarily to ensure that we don't block waiting for memory reclaim
267 * in any way.
268 */
269 if (args->kswapd)
270 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
271
272 current_set_flags_nested(&pflags, new_pflags);
273
274 args->result = __xfs_bmapi_allocate(args);
275 complete(args->done);
276
277 current_restore_flags_nested(&pflags, new_pflags);
278}
279
280/*
281 * Some allocation requests often come in with little stack to work on. Push
282 * them off to a worker thread so there is lots of stack to use. Otherwise just
283 * call directly to avoid the context switch overhead here.
284 */
285int
286xfs_bmapi_allocate(
287 struct xfs_bmalloca *args)
288{
289 DECLARE_COMPLETION_ONSTACK(done);
290
291 if (!args->stack_switch)
292 return __xfs_bmapi_allocate(args);
293
294
295 args->done = &done;
296 args->kswapd = current_is_kswapd();
297 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
298 queue_work(xfs_alloc_wq, &args->work);
299 wait_for_completion(&done);
300 destroy_work_on_stack(&args->work);
301 return args->result;
302}
303
304/*
305 * Check if the endoff is outside the last extent. If so the caller will grow 252 * Check if the endoff is outside the last extent. If so the caller will grow
306 * the allocation to a stripe unit boundary. All offsets are considered outside 253 * the allocation to a stripe unit boundary. All offsets are considered outside
307 * the end of file for an empty fork, so 1 is returned in *eof in that case. 254 * the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
55 bool userdata;/* set if is user data */ 55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */ 56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */ 57 bool conv; /* overwriting unwritten extents */
58 bool stack_switch;
59 bool kswapd; /* allocation in kswapd context */
60 int flags; 58 int flags;
61 struct completion *done; 59 struct completion *done;
62 struct work_struct work; 60 struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
66int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, 64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
67 int *committed); 65 int *committed);
68int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
69int xfs_bmapi_allocate(struct xfs_bmalloca *args);
70int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
71int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
72 int whichfork, int *eof); 68 int whichfork, int *eof);
73int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, 69int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
33#include "xfs_error.h" 33#include "xfs_error.h"
34#include "xfs_trace.h" 34#include "xfs_trace.h"
35#include "xfs_cksum.h" 35#include "xfs_cksum.h"
36#include "xfs_alloc.h"
36 37
37/* 38/*
38 * Cursor allocation zone. 39 * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
2323 * record (to be inserted into parent). 2324 * record (to be inserted into parent).
2324 */ 2325 */
2325STATIC int /* error */ 2326STATIC int /* error */
2326xfs_btree_split( 2327__xfs_btree_split(
2327 struct xfs_btree_cur *cur, 2328 struct xfs_btree_cur *cur,
2328 int level, 2329 int level,
2329 union xfs_btree_ptr *ptrp, 2330 union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
2503 return error; 2504 return error;
2504} 2505}
2505 2506
2507struct xfs_btree_split_args {
2508 struct xfs_btree_cur *cur;
2509 int level;
2510 union xfs_btree_ptr *ptrp;
2511 union xfs_btree_key *key;
2512 struct xfs_btree_cur **curp;
2513 int *stat; /* success/failure */
2514 int result;
2515 bool kswapd; /* allocation in kswapd context */
2516 struct completion *done;
2517 struct work_struct work;
2518};
2519
2520/*
2521 * Stack switching interfaces for allocation
2522 */
2523static void
2524xfs_btree_split_worker(
2525 struct work_struct *work)
2526{
2527 struct xfs_btree_split_args *args = container_of(work,
2528 struct xfs_btree_split_args, work);
2529 unsigned long pflags;
2530 unsigned long new_pflags = PF_FSTRANS;
2531
2532 /*
2533 * we are in a transaction context here, but may also be doing work
2534 * in kswapd context, and hence we may need to inherit that state
2535 * temporarily to ensure that we don't block waiting for memory reclaim
2536 * in any way.
2537 */
2538 if (args->kswapd)
2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2540
2541 current_set_flags_nested(&pflags, new_pflags);
2542
2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2544 args->key, args->curp, args->stat);
2545 complete(args->done);
2546
2547 current_restore_flags_nested(&pflags, new_pflags);
2548}
2549
2550/*
2551 * BMBT split requests often come in with little stack to work on. Push
2552 * them off to a worker thread so there is lots of stack to use. For the other
2553 * btree types, just call directly to avoid the context switch overhead here.
2554 */
2555STATIC int /* error */
2556xfs_btree_split(
2557 struct xfs_btree_cur *cur,
2558 int level,
2559 union xfs_btree_ptr *ptrp,
2560 union xfs_btree_key *key,
2561 struct xfs_btree_cur **curp,
2562 int *stat) /* success/failure */
2563{
2564 struct xfs_btree_split_args args;
2565 DECLARE_COMPLETION_ONSTACK(done);
2566
2567 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2569
2570 args.cur = cur;
2571 args.level = level;
2572 args.ptrp = ptrp;
2573 args.key = key;
2574 args.curp = curp;
2575 args.stat = stat;
2576 args.done = &done;
2577 args.kswapd = current_is_kswapd();
2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2579 queue_work(xfs_alloc_wq, &args.work);
2580 wait_for_completion(&done);
2581 destroy_work_on_stack(&args.work);
2582 return args.result;
2583}
2584
2585
2506/* 2586/*
2507 * Copy the old inode root contents into a real block and make the 2587 * Copy the old inode root contents into a real block and make the
2508 * broot point to it. 2588 * broot point to it.
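This is the same worker trampoline the series removes from fs/xfs/xfs_bmap_util.c, relocated so that only BMBT splits, the genuinely stack-hungry case, pay for the context switch. Stripped of the btree specifics, the idiom is: pack the arguments and an on-stack completion into a work item, queue it to run on a fresh worker stack, and sleep until the result is posted. A self-contained sketch with illustrative names (run_with_fresh_stack() is not a kernel API):

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct defer_args {
		int			(*fn)(void *);
		void			*arg;
		int			result;
		struct completion	*done;
		struct work_struct	work;
	};

	static void defer_worker(struct work_struct *work)
	{
		struct defer_args *a = container_of(work, struct defer_args, work);

		a->result = a->fn(a->arg);
		complete(a->done);	/* wakes the submitter */
	}

	static int run_with_fresh_stack(struct workqueue_struct *wq,
					int (*fn)(void *), void *arg)
	{
		struct defer_args a = { .fn = fn, .arg = arg };
		DECLARE_COMPLETION_ONSTACK(done);

		a.done = &done;
		INIT_WORK_ONSTACK(&a.work, defer_worker);
		queue_work(wq, &a.work);
		wait_for_completion(&done);
		destroy_work_on_stack(&a.work);
		return a.result;
	}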
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
749 * pointer that the caller gave to us. 749 * pointer that the caller gave to us.
750 */ 750 */
751 error = xfs_bmapi_write(tp, ip, map_start_fsb, 751 error = xfs_bmapi_write(tp, ip, map_start_fsb,
752 count_fsb, 752 count_fsb, 0,
753 XFS_BMAPI_STACK_SWITCH,
754 &first_block, 1, 753 &first_block, 1,
755 imap, &nimaps, &free_list); 754 imap, &nimaps, &free_list);
756 if (error) 755 if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
483 } 483 }
484 484
485 /* 485 /*
486 * GQUOTINO and PQUOTINO cannot be used together in versions 486 * GQUOTINO and PQUOTINO cannot be used together in versions of
487 * of superblock that do not have pquotino. from->sb_flags 487 * superblock that do not have pquotino. from->sb_flags tells us which
488 * tells us which quota is active and should be copied to 488 * quota is active and should be copied to disk. If neither are active,
489 * disk. 489 * make sure we write NULLFSINO to the sb_gquotino field as a quota
490 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
491 * bit is set.
492 *
493 * Note that we don't need to handle the sb_uquotino or sb_pquotino here
494 * as they do not require any translation. Hence the main sb field loop
495 * will write them appropriately from the in-core superblock.
490 */ 496 */
491 if ((*fields & XFS_SB_GQUOTINO) && 497 if ((*fields & XFS_SB_GQUOTINO) &&
492 (from->sb_qflags & XFS_GQUOTA_ACCT)) 498 (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
494 else if ((*fields & XFS_SB_PQUOTINO) && 500 else if ((*fields & XFS_SB_PQUOTINO) &&
495 (from->sb_qflags & XFS_PQUOTA_ACCT)) 501 (from->sb_qflags & XFS_PQUOTA_ACCT))
496 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 502 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
503 else {
504 /*
505 * We can't rely on just the fields being logged to tell us
506 * that it is safe to write NULLFSINO - we should only do that
507 * if quotas are not actually enabled. Hence only write
508 * NULLFSINO if both in-core quota inodes are NULL.
509 */
510 if (from->sb_gquotino == NULLFSINO &&
511 from->sb_pquotino == NULLFSINO)
512 to->sb_gquotino = cpu_to_be64(NULLFSINO);
513 }
497 514
498 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 515 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
499} 516}
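The new else branch writes NULLFSINO to the shared on-disk slot only when both in-core quota inodes are NULL, i.e. when quotas are genuinely off, since pre-pquotino superblocks store the group and project quota inodes in the single sb_gquotino field and "0" there is invalid once the quota version bit is set. A simplified model of the selection (illustrative only; the real code also consults the XFS_SB_GQUOTINO/XFS_SB_PQUOTINO bits in *fields before touching the slot):

	static void
	sb_pick_disk_gquotino(struct xfs_dsb *to, const struct xfs_sb *from)
	{
		if (from->sb_qflags & XFS_GQUOTA_ACCT)
			to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
		else if (from->sb_qflags & XFS_PQUOTA_ACCT)
			to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
		else if (from->sb_gquotino == NULLFSINO &&
			 from->sb_pquotino == NULLFSINO)
			to->sb_gquotino = cpu_to_be64(NULLFSINO);
		/* otherwise leave the on-disk value untouched */
	}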