Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                          |  8
-rw-r--r--  fs/btrfs/check-integrity.c        | 32
-rw-r--r--  fs/btrfs/check-integrity.h        |  2
-rw-r--r--  fs/btrfs/extent-tree.c            | 22
-rw-r--r--  fs/btrfs/extent_io.c              | 12
-rw-r--r--  fs/btrfs/ioctl.c                  |  3
-rw-r--r--  fs/btrfs/relocation.c             | 81
-rw-r--r--  fs/btrfs/scrub.c                  | 33
-rw-r--r--  fs/btrfs/send.c                   |  4
-rw-r--r--  fs/btrfs/super.c                  |  5
-rw-r--r--  fs/dcache.c                       |  2
-rw-r--r--  fs/eventpoll.c                    |  3
-rw-r--r--  fs/hfsplus/wrapper.c              | 17
-rw-r--r--  fs/logfs/dev_bdev.c               | 13
-rw-r--r--  fs/namei.c                        |  7
-rw-r--r--  fs/nfs/blocklayout/blocklayout.h  |  1
-rw-r--r--  fs/nfs/blocklayout/extents.c      |  2
-rw-r--r--  fs/nfs/dns_resolve.c              |  2
-rw-r--r--  fs/nfs/inode.c                    |  2
-rw-r--r--  fs/nfs/internal.h                 | 15
-rw-r--r--  fs/nfs/nfs4_fs.h                  |  8
-rw-r--r--  fs/nfs/nfs4proc.c                 | 30
-rw-r--r--  fs/nfsd/nfscache.c                |  9
-rw-r--r--  fs/proc/inode.c                   | 14
-rw-r--r--  fs/squashfs/file_direct.c         |  5
-rw-r--r--  fs/xfs/xfs_discard.c              |  5
-rw-r--r--  fs/xfs/xfs_fsops.c                |  6
-rw-r--r--  fs/xfs/xfs_ioctl.c                |  3
-rw-r--r--  fs/xfs/xfs_ioctl32.c              |  3
29 files changed, 188 insertions, 161 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 08159ed13649..6efb7f6cb22e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -367,8 +367,10 @@ static int aio_setup_ring(struct kioctx *ctx)
 	if (nr_pages > AIO_RING_PAGES) {
 		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
 					  GFP_KERNEL);
-		if (!ctx->ring_pages)
+		if (!ctx->ring_pages) {
+			put_aio_ring_file(ctx);
 			return -ENOMEM;
+		}
 	}
 
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -645,7 +647,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	    aio_nr + nr_events < aio_nr) {
 		spin_unlock(&aio_nr_lock);
 		err = -EAGAIN;
-		goto err;
+		goto err_ctx;
 	}
 	aio_nr += ctx->max_reqs;
 	spin_unlock(&aio_nr_lock);
@@ -662,6 +664,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 err_cleanup:
 	aio_nr_sub(ctx->max_reqs);
+err_ctx:
+	aio_free_ring(ctx);
 err:
 	free_percpu(ctx->cpu);
 	free_percpu(ctx->reqs.pcpu_count);
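
The two aio hunks above tighten ioctx_alloc()'s unwind path: a failed ring-page allocation now drops the ring file, and the EAGAIN exit jumps to a new err_ctx label that frees the ring before the generic err cleanup runs. A minimal user-space sketch of that layered goto-cleanup pattern (ctx_alloc, percpu and ring are illustrative names, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *percpu;
	void *ring;
};

static struct ctx *ctx_alloc(int too_many_events)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;

	ctx->percpu = malloc(64);
	if (!ctx->percpu)
		goto err;

	ctx->ring = malloc(4096);
	if (!ctx->ring)
		goto err;		/* nothing ring-related to undo yet */

	if (too_many_events)		/* stands in for the aio_nr limit check */
		goto err_ctx;

	return ctx;

err_ctx:
	free(ctx->ring);		/* like aio_free_ring(): undo the ring setup */
err:
	free(ctx->percpu);		/* free(NULL) is a no-op, so partial failures are fine */
	free(ctx);
	return NULL;
}

int main(void)
{
	struct ctx *ok = ctx_alloc(0);
	struct ctx *rejected = ctx_alloc(1);

	printf("ok=%p rejected=%p\n", (void *)ok, (void *)rejected);
	if (ok) {
		free(ok->ring);
		free(ok->percpu);
		free(ok);
	}
	return 0;
}

Each label releases only what was acquired after the previous label's resources, so every failure point has exactly one place to jump to.
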
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b50764bef141..131d82800b3a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -333,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
 static int btrfsic_read_block(struct btrfsic_state *state,
 			      struct btrfsic_block_data_ctx *block_ctx);
 static void btrfsic_dump_database(struct btrfsic_state *state);
-static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 				     char **datav, unsigned int num_pages);
 static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
@@ -1687,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 	for (i = 0; i < num_pages;) {
 		struct bio *bio;
 		unsigned int j;
-		DECLARE_COMPLETION_ONSTACK(complete);
 
 		bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
 		if (!bio) {
@@ -1698,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 		}
 		bio->bi_bdev = block_ctx->dev->bdev;
 		bio->bi_sector = dev_bytenr >> 9;
-		bio->bi_end_io = btrfsic_complete_bio_end_io;
-		bio->bi_private = &complete;
 
 		for (j = i; j < num_pages; j++) {
 			ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1712,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 			       "btrfsic: error, failed to add a single page!\n");
 			return -1;
 		}
-		submit_bio(READ, bio);
-
-		/* this will also unplug the queue */
-		wait_for_completion(&complete);
-
-		if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+		if (submit_bio_wait(READ, bio)) {
 			printk(KERN_INFO
 			       "btrfsic: read error at logical %llu dev %s!\n",
 			       block_ctx->start, block_ctx->dev->name);
@@ -1740,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 	return block_ctx->len;
 }
 
-static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
 static void btrfsic_dump_database(struct btrfsic_state *state)
 {
 	struct list_head *elem_all;
@@ -3008,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 	return submit_bh(rw, bh);
 }
 
-void btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(int rw, struct bio *bio)
 {
 	struct btrfsic_dev_state *dev_state;
 
-	if (!btrfsic_is_initialized) {
-		submit_bio(rw, bio);
+	if (!btrfsic_is_initialized)
 		return;
-	}
 
 	mutex_lock(&btrfsic_mutex);
 	/* since btrfsic_submit_bio() is also called before
@@ -3106,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
 	}
 leave:
 	mutex_unlock(&btrfsic_mutex);
+}
 
+void btrfsic_submit_bio(int rw, struct bio *bio)
+{
+	__btrfsic_submit_bio(rw, bio);
 	submit_bio(rw, bio);
 }
 
+int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+{
+	__btrfsic_submit_bio(rw, bio);
+	return submit_bio_wait(rw, bio);
+}
+
 int btrfsic_mount(struct btrfs_root *root,
 		  struct btrfs_fs_devices *fs_devices,
 		  int including_extent_data, u32 print_mask)
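
Several hunks in this series (check-integrity.c above, and extent_io.c, scrub.c, hfsplus and logfs below) delete the same boilerplate: an on-stack completion, a bi_end_io callback that fires it, and a wait_for_completion() after submit_bio(), all replaced by a single submit_bio_wait() call that returns the I/O status. A user-space analogue of what such a helper wraps, assuming a pthread-based stand-in for the block layer (none of these names are kernel APIs):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int status;
};

static void complete(struct completion *c, int status)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	c->status = status;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

struct request {
	void (*end_io)(struct completion *c, int status);
	struct completion *private;
};

static void *io_thread(void *arg)
{
	struct request *rq = arg;

	rq->end_io(rq->private, 0);	/* pretend the I/O succeeded */
	return NULL;
}

/* async primitive: start the "I/O" and return immediately */
static void submit(struct request *rq, pthread_t *t)
{
	pthread_create(t, NULL, io_thread, rq);
}

/* the helper: hide the completion, the callback wiring and the wait */
static int submit_and_wait(struct request *rq)
{
	struct completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
	};
	pthread_t t;

	rq->end_io = complete;
	rq->private = &c;
	submit(rq, &t);

	pthread_mutex_lock(&c.lock);
	while (!c.done)
		pthread_cond_wait(&c.cond, &c.lock);
	pthread_mutex_unlock(&c.lock);
	pthread_join(t, NULL);

	return c.status;	/* 0 on success, like submit_bio_wait() */
}

int main(void)
{
	struct request rq = { 0 };

	printf("I/O status: %d\n", submit_and_wait(&rq));
	return 0;
}

The btrfsic_submit_bio_wait() wrapper added above keeps the integrity-checker bookkeeping and then defers to the generic helper, which is why callers can test its return value directly instead of poking at BIO_UPTODATE.
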
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index 8b59175cc502..13b8566c97ab 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -22,9 +22,11 @@
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 int btrfsic_submit_bh(int rw, struct buffer_head *bh);
 void btrfsic_submit_bio(int rw, struct bio *bio);
+int btrfsic_submit_bio_wait(int rw, struct bio *bio);
 #else
 #define btrfsic_submit_bh submit_bh
 #define btrfsic_submit_bio submit_bio
+#define btrfsic_submit_bio_wait submit_bio_wait
 #endif
 
 int btrfsic_mount(struct btrfs_root *root,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 45d98d01028f..9c01509dd8ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	if (metadata) {
-		key.objectid = bytenr;
-		key.type = BTRFS_METADATA_ITEM_KEY;
-		key.offset = offset;
-	} else {
-		key.objectid = bytenr;
-		key.type = BTRFS_EXTENT_ITEM_KEY;
-		key.offset = offset;
-	}
-
 	if (!trans) {
 		path->skip_locking = 1;
 		path->search_commit_root = 1;
 	}
+
+search_again:
+	key.objectid = bytenr;
+	key.offset = offset;
+	if (metadata)
+		key.type = BTRFS_METADATA_ITEM_KEY;
+	else
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+
 again:
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
 				&key, path, 0, 0);
@@ -788,7 +787,6 @@ again:
 		goto out_free;
 
 	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
-		metadata = 0;
 		if (path->slots[0]) {
 			path->slots[0]--;
 			btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -855,7 +853,7 @@ again:
 			mutex_lock(&head->mutex);
 			mutex_unlock(&head->mutex);
 			btrfs_put_delayed_ref(&head->node);
-			goto again;
+			goto search_again;
 		}
 		if (head->extent_op && head->extent_op->update_flags)
 			extent_flags |= head->extent_op->flags_to_set;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8e457fca0a0b..ff43802a7c88 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
 	return err;
 }
 
-static void repair_io_failure_callback(struct bio *bio, int err)
-{
-	complete(bio->bi_private);
-}
-
 /*
  * this bypasses the standard btrfs submit functions deliberately, as
  * the standard behavior is to write all copies in a raid setup. here we only
@@ -1973,7 +1968,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 {
 	struct bio *bio;
 	struct btrfs_device *dev;
-	DECLARE_COMPLETION_ONSTACK(compl);
 	u64 map_length = 0;
 	u64 sector;
 	struct btrfs_bio *bbio = NULL;
@@ -1990,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
-	bio->bi_private = &compl;
-	bio->bi_end_io = repair_io_failure_callback;
 	bio->bi_size = 0;
 	map_length = length;
 
@@ -2012,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	}
 	bio->bi_bdev = dev->bdev;
 	bio_add_page(bio, page, length, start - page_offset(page));
-	btrfsic_submit_bio(WRITE_SYNC, bio);
-	wait_for_completion(&compl);
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
 		/* try to remap that extent elsewhere? */
 		bio_put(bio);
 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a111622598b0..21da5762b0b1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 
 	err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
 	if (err == -EINTR)
-		goto out;
+		goto out_drop_write;
 	dentry = lookup_one_len(vol_args->name, parent, namelen);
 	if (IS_ERR(dentry)) {
 		err = PTR_ERR(dentry);
@@ -2284,6 +2284,7 @@ out_dput:
 	dput(dentry);
 out_unlock_dir:
 	mutex_unlock(&dir->i_mutex);
+out_drop_write:
 	mnt_drop_write_file(file);
 out:
 	kfree(vol_args);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ce459a7cb16d..429c73c374b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid)
 	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
 	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
 	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
-	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
+	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
+	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
+	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
 		return 1;
 	return 0;
 }
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
 }
 
 /*
- * helper to update/delete the 'address of tree root -> reloc tree'
+ * helper to delete the 'address of tree root -> reloc tree'
  * mapping
  */
-static int __update_reloc_root(struct btrfs_root *root, int del)
+static void __del_reloc_root(struct btrfs_root *root)
 {
 	struct rb_node *rb_node;
 	struct mapping_node *node = NULL;
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
 
 	spin_lock(&rc->reloc_root_tree.lock);
 	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-			      root->commit_root->start);
+			      root->node->start);
 	if (rb_node) {
 		node = rb_entry(rb_node, struct mapping_node, rb_node);
 		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
 	spin_unlock(&rc->reloc_root_tree.lock);
 
 	if (!node)
-		return 0;
+		return;
 	BUG_ON((struct btrfs_root *)node->data != root);
 
-	if (!del) {
-		spin_lock(&rc->reloc_root_tree.lock);
-		node->bytenr = root->node->start;
-		rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
-				      node->bytenr, &node->rb_node);
-		spin_unlock(&rc->reloc_root_tree.lock);
-		if (rb_node)
-			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
-	} else {
-		spin_lock(&root->fs_info->trans_lock);
-		list_del_init(&root->root_list);
-		spin_unlock(&root->fs_info->trans_lock);
-		kfree(node);
+	spin_lock(&root->fs_info->trans_lock);
+	list_del_init(&root->root_list);
+	spin_unlock(&root->fs_info->trans_lock);
+	kfree(node);
+}
+
+/*
+ * helper to update the 'address of tree root -> reloc tree'
+ * mapping
+ */
+static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
+{
+	struct rb_node *rb_node;
+	struct mapping_node *node = NULL;
+	struct reloc_control *rc = root->fs_info->reloc_ctl;
+
+	spin_lock(&rc->reloc_root_tree.lock);
+	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+			      root->node->start);
+	if (rb_node) {
+		node = rb_entry(rb_node, struct mapping_node, rb_node);
+		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
 	}
+	spin_unlock(&rc->reloc_root_tree.lock);
+
+	if (!node)
+		return 0;
+	BUG_ON((struct btrfs_root *)node->data != root);
+
+	spin_lock(&rc->reloc_root_tree.lock);
+	node->bytenr = new_bytenr;
+	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
+			      node->bytenr, &node->rb_node);
+	spin_unlock(&rc->reloc_root_tree.lock);
+	if (rb_node)
+		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 	return 0;
 }
 
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_root *reloc_root;
 	struct btrfs_root_item *root_item;
-	int del = 0;
 	int ret;
 
 	if (!root->reloc_root)
@@ -1432,11 +1455,9 @@
 	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
 	    btrfs_root_refs(root_item) == 0) {
 		root->reloc_root = NULL;
-		del = 1;
+		__del_reloc_root(reloc_root);
 	}
 
-	__update_reloc_root(reloc_root, del);
-
 	if (reloc_root->commit_root != reloc_root->node) {
 		btrfs_set_root_node(root_item, reloc_root->node);
 		free_extent_buffer(reloc_root->commit_root);
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list)
 	while (!list_empty(list)) {
 		reloc_root = list_entry(list->next, struct btrfs_root,
 					root_list);
-		__update_reloc_root(reloc_root, 1);
+		__del_reloc_root(reloc_root);
 		free_extent_buffer(reloc_root->node);
 		free_extent_buffer(reloc_root->commit_root);
 		kfree(reloc_root);
@@ -2332,7 +2353,7 @@ again:
 
 		ret = merge_reloc_root(rc, root);
 		if (ret) {
-			__update_reloc_root(reloc_root, 1);
+			__del_reloc_root(reloc_root);
 			free_extent_buffer(reloc_root->node);
 			free_extent_buffer(reloc_root->commit_root);
 			kfree(reloc_root);
@@ -2388,6 +2409,13 @@ out:
 		btrfs_std_error(root->fs_info, ret);
 		if (!list_empty(&reloc_roots))
 			free_reloc_roots(&reloc_roots);
+
+		/* new reloc root may be added */
+		mutex_lock(&root->fs_info->reloc_mutex);
+		list_splice_init(&rc->reloc_roots, &reloc_roots);
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		if (!list_empty(&reloc_roots))
+			free_reloc_roots(&reloc_roots);
 	}
 
 	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
 	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
 
+	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+		if (buf == root->node)
+			__update_reloc_root(root, cow->start);
+	}
+
 	level = btrfs_header_level(buf);
 	if (btrfs_header_generation(buf) <=
 	    btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 561e2f16ba3e..1fd3f33c330a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 				    int is_metadata, int have_csum,
 				    const u8 *csum, u64 generation,
 				    u16 csum_size);
-static void scrub_complete_bio_end_io(struct bio *bio, int err);
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 					     struct scrub_block *sblock_good,
 					     int force_write);
@@ -1294,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
 		struct bio *bio;
 		struct scrub_page *page = sblock->pagev[page_num];
-		DECLARE_COMPLETION_ONSTACK(complete);
 
 		if (page->dev->bdev == NULL) {
 			page->io_error = 1;
@@ -1311,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 		}
 		bio->bi_bdev = page->dev->bdev;
 		bio->bi_sector = page->physical >> 9;
-		bio->bi_end_io = scrub_complete_bio_end_io;
-		bio->bi_private = &complete;
 
 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
-		btrfsic_submit_bio(READ, bio);
-
-		/* this will also unplug the queue */
-		wait_for_completion(&complete);
-
-		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
-		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		if (btrfsic_submit_bio_wait(READ, bio))
 			sblock->no_io_error_seen = 0;
+
 		bio_put(bio);
 	}
 
@@ -1391,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 		sblock->checksum_error = 1;
 }
 
-static void scrub_complete_bio_end_io(struct bio *bio, int err)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 					     struct scrub_block *sblock_good,
 					     int force_write)
@@ -1430,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 	    sblock_bad->checksum_error || page_bad->io_error) {
 		struct bio *bio;
 		int ret;
-		DECLARE_COMPLETION_ONSTACK(complete);
 
 		if (!page_bad->dev->bdev) {
 			printk_ratelimited(KERN_WARNING
@@ -1443,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 			return -EIO;
 		bio->bi_bdev = page_bad->dev->bdev;
 		bio->bi_sector = page_bad->physical >> 9;
-		bio->bi_end_io = scrub_complete_bio_end_io;
-		bio->bi_private = &complete;
 
 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 		if (PAGE_SIZE != ret) {
 			bio_put(bio);
 			return -EIO;
 		}
-		btrfsic_submit_bio(WRITE, bio);
 
-		/* this will also unplug the queue */
-		wait_for_completion(&complete);
-		if (!bio_flagged(bio, BIO_UPTODATE)) {
+		if (btrfsic_submit_bio_wait(WRITE, bio)) {
 			btrfs_dev_stat_inc_and_print(page_bad->dev,
 				BTRFS_DEV_STAT_WRITE_ERRS);
 			btrfs_dev_replace_stats_inc(
@@ -3375,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	struct bio *bio;
 	struct btrfs_device *dev;
 	int ret;
-	DECLARE_COMPLETION_ONSTACK(compl);
 
 	dev = sctx->wr_ctx.tgtdev;
 	if (!dev)
@@ -3392,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 		spin_unlock(&sctx->stat_lock);
 		return -ENOMEM;
 	}
-	bio->bi_private = &compl;
-	bio->bi_end_io = scrub_complete_bio_end_io;
 	bio->bi_size = 0;
 	bio->bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
@@ -3404,10 +3381,8 @@ leave_with_eio:
 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 		return -EIO;
 	}
-	btrfsic_submit_bio(WRITE_SYNC, bio);
-	wait_for_completion(&compl);
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
 		goto leave_with_eio;
 
 	bio_put(bio);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6837fe87f3a6..945d1db98f26 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 	}
 
 	if (!access_ok(VERIFY_READ, arg->clone_sources,
-			sizeof(*arg->clone_sources *
-			arg->clone_sources_count))) {
+			sizeof(*arg->clone_sources) *
+			arg->clone_sources_count)) {
 		ret = -EFAULT;
 		goto out;
 	}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2d8ac1bf0cf9..d71a11d13dfa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 			} else {
 				printk(KERN_INFO "btrfs: setting nodatacow\n");
 			}
-			info->compress_type = BTRFS_COMPRESS_NONE;
 			btrfs_clear_opt(info->mount_opt, COMPRESS);
 			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 			btrfs_set_opt(info->mount_opt, NODATACOW);
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 			} else if (strncmp(args[0].from, "no", 2) == 0) {
 				compress_type = "no";
-				info->compress_type = BTRFS_COMPRESS_NONE;
 				btrfs_clear_opt(info->mount_opt, COMPRESS);
 				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 				compress_force = false;
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 				pr_info("btrfs: force %s compression\n",
 					compress_type);
-			} else
+			} else if (btrfs_test_opt(root, COMPRESS)) {
 				pr_info("btrfs: use %s compression\n",
 					compress_type);
+			}
 			break;
 		case Opt_ssd:
 			printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
diff --git a/fs/dcache.c b/fs/dcache.c
index 4bdb300b16e2..6055d61811d3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
 		if (!tcount)
 			return 0;
 	}
-	mask = ~(~0ul << tcount*8);
+	mask = bytemask_from_count(tcount);
 	return unlikely(!!((a ^ b) & mask));
 }
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 79b65c3b9e87..8b5e2584c840 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1852,8 +1852,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		goto error_tgt_fput;
 
 	/* Check if EPOLLWAKEUP is allowed */
-	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
-		epds.events &= ~EPOLLWAKEUP;
+	ep_take_care_of_epollwakeup(&epds);
 
 	/*
 	 * We have to check that the file structure underneath the file descriptor
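
The helper called above is not defined in this diff; judging by the code it replaces, it presumably keeps the CAP_BLOCK_SUSPEND check when PM sleep support is configured and strips EPOLLWAKEUP unconditionally otherwise. A hedged user-space sketch of that assumed behaviour (capable() and the epoll_event layout are stubbed, the names are illustrative):

#include <stdio.h>

#define EPOLLWAKEUP	(1u << 29)
#define CONFIG_PM_SLEEP	1	/* comment out to see the !PM_SLEEP branch */

struct epoll_event {
	unsigned int events;
	unsigned long long data;
};

static int capable_block_suspend(void)
{
	return 0;			/* stub for capable(CAP_BLOCK_SUSPEND) */
}

static void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
#ifdef CONFIG_PM_SLEEP
	if ((epev->events & EPOLLWAKEUP) && !capable_block_suspend())
		epev->events &= ~EPOLLWAKEUP;
#else
	epev->events &= ~EPOLLWAKEUP;	/* no suspend support: the flag is meaningless */
#endif
}

int main(void)
{
	struct epoll_event ev = { .events = EPOLLWAKEUP };

	ep_take_care_of_epollwakeup(&ev);
	printf("EPOLLWAKEUP %s\n", (ev.events & EPOLLWAKEUP) ? "kept" : "dropped");
	return 0;
}
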
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index b51a6079108d..e9a97a0d4314 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -24,13 +24,6 @@ struct hfsplus_wd {
 	u16 embed_count;
 };
 
-static void hfsplus_end_io_sync(struct bio *bio, int err)
-{
-	if (err)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	complete(bio->bi_private);
-}
-
 /*
  * hfsplus_submit_bio - Perfrom block I/O
  * @sb: super block of volume for I/O
@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
 int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 		void *buf, void **data, int rw)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct bio *bio;
 	int ret = 0;
 	u64 io_size;
@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_sector = sector;
 	bio->bi_bdev = sb->s_bdev;
-	bio->bi_end_io = hfsplus_end_io_sync;
-	bio->bi_private = &wait;
 
 	if (!(rw & WRITE) && data)
 		*data = (u8 *)buf + offset;
@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 		buf = (u8 *)buf + len;
 	}
 
-	submit_bio(rw, bio);
-	wait_for_completion(&wait);
-
-	if (!bio_flagged(bio, BIO_UPTODATE))
-		ret = -EIO;
-
+	ret = submit_bio_wait(rw, bio);
 out:
 	bio_put(bio);
 	return ret < 0 ? ret : 0;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 550475ca6a0e..0f95f0d0b313 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -14,16 +14,10 @@
 
 #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
 
-static void request_complete(struct bio *bio, int err)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
 static int sync_request(struct page *page, struct block_device *bdev, int rw)
 {
 	struct bio bio;
 	struct bio_vec bio_vec;
-	struct completion complete;
 
 	bio_init(&bio);
 	bio.bi_max_vecs = 1;
@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
 	bio.bi_size = PAGE_SIZE;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
-	init_completion(&complete);
-	bio.bi_private = &complete;
-	bio.bi_end_io = request_complete;
 
-	submit_bio(rw, &bio);
-	wait_for_completion(&complete);
-	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
+	return submit_bio_wait(rw, &bio);
 }
 
 static int bdev_readpage(void *_sb, struct page *page)
diff --git a/fs/namei.c b/fs/namei.c
index c53d3a9547f9..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
  * do a "get_unaligned()" if this helps and is sufficiently
  * fast.
  *
- * - Little-endian machines (so that we can generate the mask
- *   of low bytes efficiently). Again, we *could* do a byte
- *   swapping load on big-endian architectures if that is not
- *   expensive enough to make the optimization worthless.
- *
  * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
  *   do not trap on the (extremely unlikely) case of a page
  *   crossing operation.
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 		if (!len)
 			goto done;
 	}
-	mask = ~(~0ul << len*8);
+	mask = bytemask_from_count(len);
 	hash += mask & a;
 done:
 	return fold_hash(hash);
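
Both word-at-a-time hunks (dcache.c earlier and namei.c above) swap the open-coded ~(~0ul << n*8) for bytemask_from_count(), whose little-endian form is the same expression; the helper exists so big-endian builds can supply their own mask, which is also why the little-endian-only comment block is dropped. A small demo of what the mask computes, valid only for counts smaller than sizeof(unsigned long), which is how the loops above use it (the macro body here mirrors the expression being replaced, not necessarily the header's exact definition):

#include <stdio.h>

/* little-endian form: keep only the low 'cnt' bytes of a word */
#define bytemask_from_count(cnt)	(~(~0ul << (cnt) * 8))

int main(void)
{
	unsigned long word = 0;
	unsigned int i, cnt;

	/* build a recognisable byte pattern: 0x...0x44 0x33 0x22 0x11 */
	for (i = 0; i < sizeof(word); i++)
		word |= (unsigned long)(0x11 * (i + 1)) << (i * 8);

	for (cnt = 1; cnt < sizeof(unsigned long); cnt++)
		printf("cnt=%u mask=%016lx masked=%016lx\n",
		       cnt, bytemask_from_count(cnt),
		       word & bytemask_from_count(cnt));
	return 0;
}
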
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 8485978993e8..9838fb020473 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -36,6 +36,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 
+#include "../nfs4_fs.h"
 #include "../pnfs.h"
 #include "../netns.h"
 
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 9c3e117c3ed1..4d0161442565 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -44,7 +44,7 @@
 static inline sector_t normalize(sector_t s, int base)
 {
 	sector_t tmp = s; /* Since do_div modifies its argument */
-	return s - do_div(tmp, base);
+	return s - sector_div(tmp, base);
 }
 
 static inline sector_t normalize_up(sector_t s, int base)
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index fc0f95ec7358..d25f10fb4926 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -46,7 +46,9 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/svcauth.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/nfs_fs.h>
 
+#include "nfs4_fs.h"
 #include "dns_resolve.h"
 #include "cache_lib.h"
 #include "netns.h"
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 18ab2da4eeb6..00ad1c2b217d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -312,7 +312,7 @@ struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
 }
 EXPORT_SYMBOL_GPL(nfs4_label_alloc);
 #else
-void inline nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
 		struct nfs4_label *label)
 {
 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bca6a3e3c49c..8b5cc04a8611 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -269,6 +269,21 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
 extern struct rpc_procinfo nfs4_procedures[];
 #endif
 
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+	if (label) {
+		kfree(label->label);
+		kfree(label);
+	}
+	return;
+}
+#else
+static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
+static inline void nfs4_label_free(void *label) {}
+#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
+
 /* proc.c */
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3ce79b04522e..5609edc742a0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -9,6 +9,14 @@
 #ifndef __LINUX_FS_NFS_NFS4_FS_H
 #define __LINUX_FS_NFS_NFS4_FS_H
 
+#if defined(CONFIG_NFS_V4_2)
+#define NFS4_MAX_MINOR_VERSION 2
+#elif defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif
+
 #if IS_ENABLED(CONFIG_NFS_V4)
 
 #define NFS4_MAX_LOOP_ON_RECOVER (10)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 659990c0109e..15052b81df42 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2518,9 +2518,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
 					calldata->roc_barrier);
 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
-			nfs4_close_clear_stateid_flags(state,
-					calldata->arg.fmode);
 			break;
+		case -NFS4ERR_ADMIN_REVOKED:
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_OLD_STATEID:
 		case -NFS4ERR_BAD_STATEID:
@@ -2528,9 +2527,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
 			if (calldata->arg.fmode == 0)
 				break;
 		default:
-			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
+			if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
 				rpc_restart_call_prepare(task);
+				goto out_release;
+			}
 	}
+	nfs4_close_clear_stateid_flags(state, calldata->arg.fmode);
+out_release:
 	nfs_release_seqid(calldata->arg.seqid);
 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
 	dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
@@ -4802,7 +4805,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
 			dprintk("%s ERROR %d, Reset session\n", __func__,
 				task->tk_status);
 			nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
-			goto restart_call;
+			goto wait_on_recovery;
 #endif /* CONFIG_NFS_V4_1 */
 		case -NFS4ERR_DELAY:
 			nfs_inc_server_stats(server, NFSIOS_DELAY);
@@ -4987,11 +4990,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 
 	trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
 	switch (task->tk_status) {
-	case -NFS4ERR_STALE_STATEID:
-	case -NFS4ERR_EXPIRED:
 	case 0:
 		renew_lease(data->res.server, data->timestamp);
 		break;
+	case -NFS4ERR_ADMIN_REVOKED:
+	case -NFS4ERR_DELEG_REVOKED:
+	case -NFS4ERR_BAD_STATEID:
+	case -NFS4ERR_OLD_STATEID:
+	case -NFS4ERR_STALE_STATEID:
+	case -NFS4ERR_EXPIRED:
+		task->tk_status = 0;
+		break;
 	default:
 		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
 			-EAGAIN) {
@@ -7589,7 +7598,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 		return;
 
 	server = NFS_SERVER(lrp->args.inode);
-	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+	switch (task->tk_status) {
+	default:
+		task->tk_status = 0;
+	case 0:
+		break;
+	case -NFS4ERR_DELAY:
+		if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
+			break;
 		rpc_restart_call_prepare(task);
 		return;
 	}
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 9186c7ce0b14..b6af150c96b8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
 }
 
 static void
+nfsd_reply_cache_unhash(struct svc_cacherep *rp)
+{
+	hlist_del_init(&rp->c_hash);
+	list_del_init(&rp->c_lru);
+}
+
+static void
 nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
 	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
 		    num_drc_entries >= max_drc_entries) {
-			lru_put_end(rp);
+			nfsd_reply_cache_unhash(rp);
 			prune_cache_entries();
 			goto search_cache;
 		}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 28955d4b7218..124fc43c7090 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
 {
 	struct proc_dir_entry *pde = PDE(file_inode(file));
 	unsigned long rv = -EIO;
-	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
-			unsigned long, unsigned long) = NULL;
+
 	if (use_pde(pde)) {
+		typeof(proc_reg_get_unmapped_area) *get_area;
+
+		get_area = pde->proc_fops->get_unmapped_area;
 #ifdef CONFIG_MMU
-		get_area = current->mm->get_unmapped_area;
+		if (!get_area)
+			get_area = current->mm->get_unmapped_area;
 #endif
-		if (pde->proc_fops->get_unmapped_area)
-			get_area = pde->proc_fops->get_unmapped_area;
+
 		if (get_area)
 			rv = get_area(file, orig_addr, len, pgoff, flags);
+		else
+			rv = orig_addr;
 		unuse_pde(pde);
 	}
 	return rv;
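
The proc hunk above declares the fallback handler as typeof(proc_reg_get_unmapped_area) *get_area, so the pointer type is derived from the function's own prototype instead of being spelled out (and kept in sync) by hand. A short sketch of that GNU C idiom, with an illustrative stand-in function rather than the real proc hook:

#include <stdio.h>

/* illustrative stand-in for a real ->get_unmapped_area() implementation */
static unsigned long pick_area(void *file, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags)
{
	return addr + len + pgoff + flags;
}

int main(void)
{
	/* pointer type derived from the function's prototype (GNU C typeof) */
	typeof(pick_area) *get_area = NULL;

	get_area = pick_area;
	if (get_area)
		printf("area = %lu\n", get_area(NULL, 4096, 16, 1, 0));
	return 0;
}
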
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 2943b2bfae48..62a0de6632e1 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -84,6 +84,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
 		 */
 		res = squashfs_read_cache(target_page, block, bsize, pages,
 								page);
+		if (res < 0)
+			goto mark_errored;
+
 		goto out;
 	}
 
@@ -119,7 +122,7 @@ mark_errored:
 	 * dealt with by the caller
 	 */
 	for (i = 0; i < pages; i++) {
-		if (page[i] == target_page)
+		if (page[i] == NULL || page[i] == target_page)
 			continue;
 		flush_dcache_page(page[i]);
 		SetPageError(page[i]);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8367d6dc18c9..4f11ef011139 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -157,7 +157,7 @@ xfs_ioc_trim(
 	struct xfs_mount		*mp,
 	struct fstrim_range __user	*urange)
 {
-	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+	struct request_queue	*q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
 	unsigned int		granularity = q->limits.discard_granularity;
 	struct fstrim_range	range;
 	xfs_daddr_t		start, end, minlen;
@@ -180,7 +180,8 @@ xfs_ioc_trim(
 	 * matter as trimming blocks is an advisory interface.
 	 */
 	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
-	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
+	    range.len < mp->m_sb.sb_blocksize)
 		return -XFS_ERROR(EINVAL);
 
 	start = BTOBB(range.start);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a6e54b3319bd..02fb943cbf22 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -220,6 +220,8 @@ xfs_growfs_data_private(
 	 */
 	nfree = 0;
 	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
+		__be32	*agfl_bno;
+
 		/*
 		 * AG freespace header block
 		 */
@@ -279,8 +281,10 @@ xfs_growfs_data_private(
 			agfl->agfl_seqno = cpu_to_be32(agno);
 			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
 		}
+
+		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
 		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
-			agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
+			agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
 
 		error = xfs_bwrite(bp);
 		xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4d613401a5e0..33ad9a77791f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle(
 		return -XFS_ERROR(EPERM);
 	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
 		return -XFS_ERROR(EFAULT);
-	if (al_hreq.buflen > XATTR_LIST_MAX)
+	if (al_hreq.buflen < sizeof(struct attrlist) ||
+	    al_hreq.buflen > XATTR_LIST_MAX)
 		return -XFS_ERROR(EINVAL);
 
 	/*
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e8fb1231db81..a7992f8de9d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle(
 	if (copy_from_user(&al_hreq, arg,
 			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
 		return -XFS_ERROR(EFAULT);
-	if (al_hreq.buflen > XATTR_LIST_MAX)
+	if (al_hreq.buflen < sizeof(struct attrlist) ||
+	    al_hreq.buflen > XATTR_LIST_MAX)
 		return -XFS_ERROR(EINVAL);
 
 	/*