author	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 16:50:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 16:50:21 -0500
commit	6bec0035286119eefc32a5b1102127e6a4032cb2 (patch)
tree	440fab001b4c877b0b0c9fd62d8392e956e387e3
parent	5d8e7fb6916556e9b476de33404e8c9e2c9aee61 (diff)
parent	15d0f5ea348b9c4e6d41df294dde38a56a39c7bf (diff)
Merge branch 'for-3.20/bdi' of git://git.kernel.dk/linux-block
Pull backing device changes from Jens Axboe:
 "This contains a cleanup of how the backing device is handled, in
  preparation for a rework of the life time rules.  In this part, the
  most important change is to split the unrelated nommu mmap flags from
  it, but also removing a backing_dev_info pointer from the
  address_space (and inode), and a cleanup of other various minor bits.

  Christoph did all the work here, I just fixed an oops with pages that
  have a swap backing.  Arnd fixed a missing export, and Oleg killed the
  lustre backing_dev_info from staging.  Last patch was from Al,
  unexporting parts that are now no longer needed outside"

* 'for-3.20/bdi' of git://git.kernel.dk/linux-block:
  Make super_blocks and sb_lock static
  mtd: export new mtd_mmap_capabilities
  fs: make inode_to_bdi() handle NULL inode
  staging/lustre/llite: get rid of backing_dev_info
  fs: remove default_backing_dev_info
  fs: don't reassign dirty inodes to default_backing_dev_info
  nfs: don't call bdi_unregister
  ceph: remove call to bdi_unregister
  fs: remove mapping->backing_dev_info
  fs: export inode_to_bdi and use it in favor of mapping->backing_dev_info
  nilfs2: set up s_bdi like the generic mount_bdev code
  block_dev: get bdev inode bdi directly from the block device
  block_dev: only write bdev inode on close
  fs: introduce f_op->mmap_capabilities for nommu mmap support
  fs: kill BDI_CAP_SWAP_BACKED
  fs: deduplicate noop_backing_dev_info
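As background for the diff below: with this series a no-MMU driver no longer advertises its mapping abilities through BDI_CAP_* flags on a backing_dev_info; it reports them per open file through the new f_op->mmap_capabilities hook using the NOMMU_MAP_* flags. A minimal sketch, assuming the post-series API shown in the patches (the example_* names are placeholders, not code from this merge):

#include <linux/fs.h>
#include <linux/module.h>

#ifndef CONFIG_MMU
/* Sketch only: advertise direct shared mappings for read, write and
 * exec, the way drivers/char/mem.c does for /dev/mem below.  A device
 * that can only be copy-mapped would return NOMMU_MAP_COPY instead. */
static unsigned example_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}
#endif

static const struct file_operations example_fops = {
	.owner			= THIS_MODULE,
#ifndef CONFIG_MMU
	.mmap_capabilities	= example_mmap_capabilities,
#endif
};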
-rw-r--r--  Documentation/nommu-mmap.txt  8
-rw-r--r--  block/blk-core.c  2
-rw-r--r--  drivers/char/mem.c  64
-rw-r--r--  drivers/char/raw.c  4
-rw-r--r--  drivers/mtd/mtdchar.c  72
-rw-r--r--  drivers/mtd/mtdconcat.c  10
-rw-r--r--  drivers/mtd/mtdcore.c  81
-rw-r--r--  drivers/mtd/mtdpart.c  1
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_lib.c  6
-rw-r--r--  fs/9p/v9fs.c  2
-rw-r--r--  fs/afs/volume.c  2
-rw-r--r--  fs/aio.c  13
-rw-r--r--  fs/block_dev.c  37
-rw-r--r--  fs/btrfs/disk-io.c  6
-rw-r--r--  fs/btrfs/file.c  2
-rw-r--r--  fs/btrfs/inode.c  6
-rw-r--r--  fs/ceph/file.c  2
-rw-r--r--  fs/ceph/inode.c  2
-rw-r--r--  fs/ceph/super.c  20
-rw-r--r--  fs/char_dev.c  24
-rw-r--r--  fs/cifs/connect.c  2
-rw-r--r--  fs/cifs/inode.c  2
-rw-r--r--  fs/coda/inode.c  2
-rw-r--r--  fs/configfs/configfs_internal.h  2
-rw-r--r--  fs/configfs/inode.c  17
-rw-r--r--  fs/configfs/mount.c  11
-rw-r--r--  fs/ecryptfs/inode.c  1
-rw-r--r--  fs/ecryptfs/main.c  2
-rw-r--r--  fs/exofs/inode.c  2
-rw-r--r--  fs/exofs/super.c  2
-rw-r--r--  fs/ext2/ialloc.c  2
-rw-r--r--  fs/ext4/super.c  2
-rw-r--r--  fs/fs-writeback.c  14
-rw-r--r--  fs/fuse/file.c  10
-rw-r--r--  fs/fuse/inode.c  1
-rw-r--r--  fs/gfs2/aops.c  2
-rw-r--r--  fs/gfs2/glock.c  1
-rw-r--r--  fs/gfs2/ops_fstype.c  1
-rw-r--r--  fs/gfs2/super.c  2
-rw-r--r--  fs/hugetlbfs/inode.c  13
-rw-r--r--  fs/inode.c  13
-rw-r--r--  fs/kernfs/inode.c  13
-rw-r--r--  fs/kernfs/kernfs-internal.h  1
-rw-r--r--  fs/kernfs/mount.c  1
-rw-r--r--  fs/ncpfs/inode.c  3
-rw-r--r--  fs/nfs/filelayout/filelayout.c  2
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c  2
-rw-r--r--  fs/nfs/inode.c  1
-rw-r--r--  fs/nfs/internal.h  1
-rw-r--r--  fs/nfs/nfs4super.c  1
-rw-r--r--  fs/nfs/super.c  24
-rw-r--r--  fs/nfs/write.c  6
-rw-r--r--  fs/nilfs2/gcinode.c  1
-rw-r--r--  fs/nilfs2/mdt.c  6
-rw-r--r--  fs/nilfs2/page.c  4
-rw-r--r--  fs/nilfs2/page.h  3
-rw-r--r--  fs/nilfs2/super.c  6
-rw-r--r--  fs/ntfs/file.c  3
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c  14
-rw-r--r--  fs/ocfs2/file.c  2
-rw-r--r--  fs/ramfs/file-nommu.c  7
-rw-r--r--  fs/ramfs/inode.c  21
-rw-r--r--  fs/romfs/mmap-nommu.c  10
-rw-r--r--  fs/romfs/super.c  3
-rw-r--r--  fs/super.c  12
-rw-r--r--  fs/ubifs/dir.c  2
-rw-r--r--  fs/ubifs/super.c  5
-rw-r--r--  fs/xfs/xfs_file.c  2
-rw-r--r--  include/linux/backing-dev.h  53
-rw-r--r--  include/linux/cdev.h  2
-rw-r--r--  include/linux/fs.h  28
-rw-r--r--  include/linux/mtd/mtd.h  2
-rw-r--r--  include/trace/events/writeback.h  12
-rw-r--r--  mm/backing-dev.c  107
-rw-r--r--  mm/fadvise.c  4
-rw-r--r--  mm/filemap.c  4
-rw-r--r--  mm/filemap_xip.c  3
-rw-r--r--  mm/madvise.c  17
-rw-r--r--  mm/nommu.c  69
-rw-r--r--  mm/page-writeback.c  29
-rw-r--r--  mm/readahead.c  4
-rw-r--r--  mm/shmem.c  24
-rw-r--r--  mm/swap.c  2
-rw-r--r--  mm/swap_state.c  6
-rw-r--r--  mm/truncate.c  2
-rw-r--r--  mm/vmscan.c  4
-rw-r--r--  security/security.c  13
87 files changed, 305 insertions, 704 deletions
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index 8e1ddec2c78a..ae57b9ea0d41 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -43,12 +43,12 @@ and it's also much more restricted in the latter case:
      even if this was created by another process.
 
  - If possible, the file mapping will be directly on the backing device
-    if the backing device has the BDI_CAP_MAP_DIRECT capability and
+    if the backing device has the NOMMU_MAP_DIRECT capability and
     appropriate mapping protection capabilities. Ramfs, romfs, cramfs
     and mtd might all permit this.
 
  - If the backing device device can't or won't permit direct sharing,
-    but does have the BDI_CAP_MAP_COPY capability, then a copy of the
+    but does have the NOMMU_MAP_COPY capability, then a copy of the
     appropriate bit of the file will be read into a contiguous bit of
     memory and any extraneous space beyond the EOF will be cleared
 
@@ -220,7 +220,7 @@ directly (can't be copied).
 
 The file->f_op->mmap() operation will be called to actually inaugurate the
 mapping. It can be rejected at that point. Returning the ENOSYS error will
-cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified.
+cause the mapping to be copied instead if NOMMU_MAP_COPY is specified.
 
 The vm_ops->close() routine will be invoked when the last mapping on a chardev
 is removed. An existing mapping will be shared, partially or not, if possible
@@ -232,7 +232,7 @@ want to handle it, despite the fact it's got an operation. For instance, it
 might try directing the call to a secondary driver which turns out not to
 implement it. Such is the case for the framebuffer driver which attempts to
 direct the call to the device-specific driver. Under such circumstances, the
-mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a
+mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a
 copy mapped otherwise.
 
 IMPORTANT NOTE:
diff --git a/block/blk-core.c b/block/blk-core.c
index 3ad405571dcc..928aac29bccd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -607,7 +607,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
-	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+	q->backing_dev_info.capabilities = 0;
 	q->backing_dev_info.name = "block";
 	q->node = node_id;
 
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 4c58333b4257..9a6b63783a94 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -287,13 +287,24 @@ static unsigned long get_unmapped_area_mem(struct file *file,
 	return pgoff << PAGE_SHIFT;
 }
 
+/* permit direct mmap, for read, write or exec */
+static unsigned memory_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_DIRECT |
+		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
+static unsigned zero_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_COPY;
+}
+
 /* can't do an in-place private mapping if there's no MMU */
 static inline int private_mapping_ok(struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_MAYSHARE;
 }
 #else
-#define get_unmapped_area_mem	NULL
 
 static inline int private_mapping_ok(struct vm_area_struct *vma)
 {
@@ -721,7 +732,10 @@ static const struct file_operations mem_fops = {
 	.write		= write_mem,
 	.mmap		= mmap_mem,
 	.open		= open_mem,
+#ifndef CONFIG_MMU
 	.get_unmapped_area = get_unmapped_area_mem,
+	.mmap_capabilities = memory_mmap_capabilities,
+#endif
 };
 
 #ifdef CONFIG_DEVKMEM
@@ -731,7 +745,10 @@ static const struct file_operations kmem_fops = {
 	.write		= write_kmem,
 	.mmap		= mmap_kmem,
 	.open		= open_kmem,
+#ifndef CONFIG_MMU
 	.get_unmapped_area = get_unmapped_area_mem,
+	.mmap_capabilities = memory_mmap_capabilities,
+#endif
 };
 #endif
 
@@ -760,16 +777,9 @@ static const struct file_operations zero_fops = {
 	.read_iter	= read_iter_zero,
 	.aio_write	= aio_write_zero,
 	.mmap		= mmap_zero,
-};
-
-/*
- * capabilities for /dev/zero
- * - permits private mappings, "copies" are taken of the source of zeros
- * - no writeback happens
- */
-static struct backing_dev_info zero_bdi = {
-	.name		= "char/mem",
-	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+#ifndef CONFIG_MMU
+	.mmap_capabilities = zero_mmap_capabilities,
+#endif
 };
 
 static const struct file_operations full_fops = {
@@ -783,22 +793,22 @@ static const struct memdev {
 	const char *name;
 	umode_t mode;
 	const struct file_operations *fops;
-	struct backing_dev_info *dev_info;
+	fmode_t fmode;
 } devlist[] = {
-	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
 #ifdef CONFIG_DEVKMEM
-	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
+	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
 #endif
-	 [3] = { "null", 0666, &null_fops, NULL },
+	 [3] = { "null", 0666, &null_fops, 0 },
 #ifdef CONFIG_DEVPORT
-	 [4] = { "port", 0, &port_fops, NULL },
+	 [4] = { "port", 0, &port_fops, 0 },
 #endif
-	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
-	 [7] = { "full", 0666, &full_fops, NULL },
-	 [8] = { "random", 0666, &random_fops, NULL },
-	 [9] = { "urandom", 0666, &urandom_fops, NULL },
+	 [5] = { "zero", 0666, &zero_fops, 0 },
+	 [7] = { "full", 0666, &full_fops, 0 },
+	 [8] = { "random", 0666, &random_fops, 0 },
+	 [9] = { "urandom", 0666, &urandom_fops, 0 },
 #ifdef CONFIG_PRINTK
-	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
+	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
 #endif
 };
 
@@ -816,12 +826,7 @@ static int memory_open(struct inode *inode, struct file *filp)
 		return -ENXIO;
 
 	filp->f_op = dev->fops;
-	if (dev->dev_info)
-		filp->f_mapping->backing_dev_info = dev->dev_info;
-
-	/* Is /dev/mem or /dev/kmem ? */
-	if (dev->dev_info == &directly_mappable_cdev_bdi)
-		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+	filp->f_mode |= dev->fmode;
 
 	if (dev->fops->open)
 		return dev->fops->open(inode, filp);
@@ -846,11 +851,6 @@ static struct class *mem_class;
 static int __init chr_dev_init(void)
 {
 	int minor;
-	int err;
-
-	err = bdi_init(&zero_bdi);
-	if (err)
-		return err;
 
 	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
 		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index a24891b97547..6e29bf2db536 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -104,11 +104,9 @@ static int raw_release(struct inode *inode, struct file *filp)
 
 	mutex_lock(&raw_mutex);
 	bdev = raw_devices[minor].binding;
-	if (--raw_devices[minor].inuse == 0) {
+	if (--raw_devices[minor].inuse == 0)
 		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
 		inode->i_mapping = &inode->i_data;
-		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
-	}
 	mutex_unlock(&raw_mutex);
 
 	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 53563955931b..55fa27ecf4e1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -49,7 +49,6 @@ static DEFINE_MUTEX(mtd_mutex);
49 */ 49 */
50struct mtd_file_info { 50struct mtd_file_info {
51 struct mtd_info *mtd; 51 struct mtd_info *mtd;
52 struct inode *ino;
53 enum mtd_file_modes mode; 52 enum mtd_file_modes mode;
54}; 53};
55 54
@@ -59,10 +58,6 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
59 return fixed_size_llseek(file, offset, orig, mfi->mtd->size); 58 return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
60} 59}
61 60
62static int count;
63static struct vfsmount *mnt;
64static struct file_system_type mtd_inodefs_type;
65
66static int mtdchar_open(struct inode *inode, struct file *file) 61static int mtdchar_open(struct inode *inode, struct file *file)
67{ 62{
68 int minor = iminor(inode); 63 int minor = iminor(inode);
@@ -70,7 +65,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
70 int ret = 0; 65 int ret = 0;
71 struct mtd_info *mtd; 66 struct mtd_info *mtd;
72 struct mtd_file_info *mfi; 67 struct mtd_file_info *mfi;
73 struct inode *mtd_ino;
74 68
75 pr_debug("MTD_open\n"); 69 pr_debug("MTD_open\n");
76 70
@@ -78,10 +72,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
78 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 72 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
79 return -EACCES; 73 return -EACCES;
80 74
81 ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
82 if (ret)
83 return ret;
84
85 mutex_lock(&mtd_mutex); 75 mutex_lock(&mtd_mutex);
86 mtd = get_mtd_device(NULL, devnum); 76 mtd = get_mtd_device(NULL, devnum);
87 77
@@ -95,43 +85,26 @@ static int mtdchar_open(struct inode *inode, struct file *file)
95 goto out1; 85 goto out1;
96 } 86 }
97 87
98 mtd_ino = iget_locked(mnt->mnt_sb, devnum);
99 if (!mtd_ino) {
100 ret = -ENOMEM;
101 goto out1;
102 }
103 if (mtd_ino->i_state & I_NEW) {
104 mtd_ino->i_private = mtd;
105 mtd_ino->i_mode = S_IFCHR;
106 mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
107 unlock_new_inode(mtd_ino);
108 }
109 file->f_mapping = mtd_ino->i_mapping;
110
111 /* You can't open it RW if it's not a writeable device */ 88 /* You can't open it RW if it's not a writeable device */
112 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { 89 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
113 ret = -EACCES; 90 ret = -EACCES;
114 goto out2; 91 goto out1;
115 } 92 }
116 93
117 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 94 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
118 if (!mfi) { 95 if (!mfi) {
119 ret = -ENOMEM; 96 ret = -ENOMEM;
120 goto out2; 97 goto out1;
121 } 98 }
122 mfi->ino = mtd_ino;
123 mfi->mtd = mtd; 99 mfi->mtd = mtd;
124 file->private_data = mfi; 100 file->private_data = mfi;
125 mutex_unlock(&mtd_mutex); 101 mutex_unlock(&mtd_mutex);
126 return 0; 102 return 0;
127 103
128out2:
129 iput(mtd_ino);
130out1: 104out1:
131 put_mtd_device(mtd); 105 put_mtd_device(mtd);
132out: 106out:
133 mutex_unlock(&mtd_mutex); 107 mutex_unlock(&mtd_mutex);
134 simple_release_fs(&mnt, &count);
135 return ret; 108 return ret;
136} /* mtdchar_open */ 109} /* mtdchar_open */
137 110
@@ -148,12 +121,9 @@ static int mtdchar_close(struct inode *inode, struct file *file)
148 if ((file->f_mode & FMODE_WRITE)) 121 if ((file->f_mode & FMODE_WRITE))
149 mtd_sync(mtd); 122 mtd_sync(mtd);
150 123
151 iput(mfi->ino);
152
153 put_mtd_device(mtd); 124 put_mtd_device(mtd);
154 file->private_data = NULL; 125 file->private_data = NULL;
155 kfree(mfi); 126 kfree(mfi);
156 simple_release_fs(&mnt, &count);
157 127
158 return 0; 128 return 0;
159} /* mtdchar_close */ 129} /* mtdchar_close */
@@ -1117,6 +1087,13 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
1117 ret = mtd_get_unmapped_area(mtd, len, offset, flags); 1087 ret = mtd_get_unmapped_area(mtd, len, offset, flags);
1118 return ret == -EOPNOTSUPP ? -ENODEV : ret; 1088 return ret == -EOPNOTSUPP ? -ENODEV : ret;
1119} 1089}
1090
1091static unsigned mtdchar_mmap_capabilities(struct file *file)
1092{
1093 struct mtd_file_info *mfi = file->private_data;
1094
1095 return mtd_mmap_capabilities(mfi->mtd);
1096}
1120#endif 1097#endif
1121 1098
1122/* 1099/*
@@ -1160,27 +1137,10 @@ static const struct file_operations mtd_fops = {
1160 .mmap = mtdchar_mmap, 1137 .mmap = mtdchar_mmap,
1161#ifndef CONFIG_MMU 1138#ifndef CONFIG_MMU
1162 .get_unmapped_area = mtdchar_get_unmapped_area, 1139 .get_unmapped_area = mtdchar_get_unmapped_area,
1140 .mmap_capabilities = mtdchar_mmap_capabilities,
1163#endif 1141#endif
1164}; 1142};
1165 1143
1166static const struct super_operations mtd_ops = {
1167 .drop_inode = generic_delete_inode,
1168 .statfs = simple_statfs,
1169};
1170
1171static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1172 int flags, const char *dev_name, void *data)
1173{
1174 return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
1175}
1176
1177static struct file_system_type mtd_inodefs_type = {
1178 .name = "mtd_inodefs",
1179 .mount = mtd_inodefs_mount,
1180 .kill_sb = kill_anon_super,
1181};
1182MODULE_ALIAS_FS("mtd_inodefs");
1183
1184int __init init_mtdchar(void) 1144int __init init_mtdchar(void)
1185{ 1145{
1186 int ret; 1146 int ret;
@@ -1193,23 +1153,11 @@ int __init init_mtdchar(void)
1193 return ret; 1153 return ret;
1194 } 1154 }
1195 1155
1196 ret = register_filesystem(&mtd_inodefs_type);
1197 if (ret) {
1198 pr_err("Can't register mtd_inodefs filesystem, error %d\n",
1199 ret);
1200 goto err_unregister_chdev;
1201 }
1202
1203 return ret;
1204
1205err_unregister_chdev:
1206 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1207 return ret; 1156 return ret;
1208} 1157}
1209 1158
1210void __exit cleanup_mtdchar(void) 1159void __exit cleanup_mtdchar(void)
1211{ 1160{
1212 unregister_filesystem(&mtd_inodefs_type);
1213 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1161 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1214} 1162}
1215 1163
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index b9000563b9f4..eacc3aac7327 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -732,8 +732,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
732 732
733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
734 734
735 concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
736
737 concat->subdev[0] = subdev[0]; 735 concat->subdev[0] = subdev[0];
738 736
739 for (i = 1; i < num_devs; i++) { 737 for (i = 1; i < num_devs; i++) {
@@ -761,14 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
761 subdev[i]->flags & MTD_WRITEABLE; 759 subdev[i]->flags & MTD_WRITEABLE;
762 } 760 }
763 761
764 /* only permit direct mapping if the BDIs are all the same
765 * - copy-mapping is still permitted
766 */
767 if (concat->mtd.backing_dev_info !=
768 subdev[i]->backing_dev_info)
769 concat->mtd.backing_dev_info =
770 &default_backing_dev_info;
771
772 concat->mtd.size += subdev[i]->size; 762 concat->mtd.size += subdev[i]->size;
773 concat->mtd.ecc_stats.badblocks += 763 concat->mtd.ecc_stats.badblocks +=
774 subdev[i]->ecc_stats.badblocks; 764 subdev[i]->ecc_stats.badblocks;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 4c611871d7e6..0ec4d6ea1e4b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -43,33 +43,7 @@
43 43
44#include "mtdcore.h" 44#include "mtdcore.h"
45 45
46/* 46static struct backing_dev_info mtd_bdi = {
47 * backing device capabilities for non-mappable devices (such as NAND flash)
48 * - permits private mappings, copies are taken of the data
49 */
50static struct backing_dev_info mtd_bdi_unmappable = {
51 .capabilities = BDI_CAP_MAP_COPY,
52};
53
54/*
55 * backing device capabilities for R/O mappable devices (such as ROM)
56 * - permits private mappings, copies are taken of the data
57 * - permits non-writable shared mappings
58 */
59static struct backing_dev_info mtd_bdi_ro_mappable = {
60 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
61 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
62};
63
64/*
65 * backing device capabilities for writable mappable devices (such as RAM)
66 * - permits private mappings, copies are taken of the data
67 * - permits non-writable shared mappings
68 */
69static struct backing_dev_info mtd_bdi_rw_mappable = {
70 .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
71 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
72 BDI_CAP_WRITE_MAP),
73}; 47};
74 48
75static int mtd_cls_suspend(struct device *dev, pm_message_t state); 49static int mtd_cls_suspend(struct device *dev, pm_message_t state);
@@ -365,6 +339,23 @@ static struct device_type mtd_devtype = {
365 .release = mtd_release, 339 .release = mtd_release,
366}; 340};
367 341
342#ifndef CONFIG_MMU
343unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
344{
345 switch (mtd->type) {
346 case MTD_RAM:
347 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
348 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
349 case MTD_ROM:
350 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
351 NOMMU_MAP_READ;
352 default:
353 return NOMMU_MAP_COPY;
354 }
355}
356EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
357#endif
358
368/** 359/**
369 * add_mtd_device - register an MTD device 360 * add_mtd_device - register an MTD device
370 * @mtd: pointer to new MTD device info structure 361 * @mtd: pointer to new MTD device info structure
@@ -380,19 +371,7 @@ int add_mtd_device(struct mtd_info *mtd)
380 struct mtd_notifier *not; 371 struct mtd_notifier *not;
381 int i, error; 372 int i, error;
382 373
383 if (!mtd->backing_dev_info) { 374 mtd->backing_dev_info = &mtd_bdi;
384 switch (mtd->type) {
385 case MTD_RAM:
386 mtd->backing_dev_info = &mtd_bdi_rw_mappable;
387 break;
388 case MTD_ROM:
389 mtd->backing_dev_info = &mtd_bdi_ro_mappable;
390 break;
391 default:
392 mtd->backing_dev_info = &mtd_bdi_unmappable;
393 break;
394 }
395 }
396 375
397 BUG_ON(mtd->writesize == 0); 376 BUG_ON(mtd->writesize == 0);
398 mutex_lock(&mtd_table_mutex); 377 mutex_lock(&mtd_table_mutex);
@@ -1237,17 +1216,9 @@ static int __init init_mtd(void)
1237 if (ret) 1216 if (ret)
1238 goto err_reg; 1217 goto err_reg;
1239 1218
1240 ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); 1219 ret = mtd_bdi_init(&mtd_bdi, "mtd");
1241 if (ret)
1242 goto err_bdi1;
1243
1244 ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
1245 if (ret)
1246 goto err_bdi2;
1247
1248 ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
1249 if (ret) 1220 if (ret)
1250 goto err_bdi3; 1221 goto err_bdi;
1251 1222
1252 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); 1223 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
1253 1224
@@ -1260,11 +1231,7 @@ static int __init init_mtd(void)
1260out_procfs: 1231out_procfs:
1261 if (proc_mtd) 1232 if (proc_mtd)
1262 remove_proc_entry("mtd", NULL); 1233 remove_proc_entry("mtd", NULL);
1263err_bdi3: 1234err_bdi:
1264 bdi_destroy(&mtd_bdi_ro_mappable);
1265err_bdi2:
1266 bdi_destroy(&mtd_bdi_unmappable);
1267err_bdi1:
1268 class_unregister(&mtd_class); 1235 class_unregister(&mtd_class);
1269err_reg: 1236err_reg:
1270 pr_err("Error registering mtd class or bdi: %d\n", ret); 1237 pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -1277,9 +1244,7 @@ static void __exit cleanup_mtd(void)
1277 if (proc_mtd) 1244 if (proc_mtd)
1278 remove_proc_entry("mtd", NULL); 1245 remove_proc_entry("mtd", NULL);
1279 class_unregister(&mtd_class); 1246 class_unregister(&mtd_class);
1280 bdi_destroy(&mtd_bdi_unmappable); 1247 bdi_destroy(&mtd_bdi);
1281 bdi_destroy(&mtd_bdi_ro_mappable);
1282 bdi_destroy(&mtd_bdi_rw_mappable);
1283} 1248}
1284 1249
1285module_init(init_mtd); 1250module_init(init_mtd);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3e3a7d074d5..e779de315ade 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -378,7 +378,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
378 378
379 slave->mtd.name = name; 379 slave->mtd.name = name;
380 slave->mtd.owner = master->owner; 380 slave->mtd.owner = master->owner;
381 slave->mtd.backing_dev_info = master->backing_dev_info;
382 381
383 /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone 382 /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
384 * to have the same data be in two different partitions. 383 * to have the same data be in two different partitions.
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index a3367bfb1456..45aaa1cc56bc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -987,7 +987,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
987 if (err) 987 if (err)
988 goto out_free; 988 goto out_free;
989 lsi->lsi_flags |= LSI_BDI_INITIALIZED; 989 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
990 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY; 990 lsi->lsi_bdi.capabilities = 0;
991 err = ll_bdi_register(&lsi->lsi_bdi); 991 err = ll_bdi_register(&lsi->lsi_bdi);
992 if (err) 992 if (err)
993 goto out_free; 993 goto out_free;
@@ -1812,10 +1812,6 @@ void ll_read_inode2(struct inode *inode, void *opaque)
1812 1812
1813 /* OIDEBUG(inode); */ 1813 /* OIDEBUG(inode); */
1814 1814
1815 /* initializing backing dev info. */
1816 inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
1817
1818
1819 if (S_ISREG(inode->i_mode)) { 1815 if (S_ISREG(inode->i_mode)) {
1820 struct ll_sb_info *sbi = ll_i2sbi(inode); 1816 struct ll_sb_info *sbi = ll_i2sbi(inode);
1821 1817
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 6894b085f0ee..620d93489539 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -335,7 +335,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
335 } 335 }
336 init_rwsem(&v9ses->rename_sem); 336 init_rwsem(&v9ses->rename_sem);
337 337
338 rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); 338 rc = bdi_setup_and_register(&v9ses->bdi, "9p");
339 if (rc) { 339 if (rc) {
340 kfree(v9ses->aname); 340 kfree(v9ses->aname);
341 kfree(v9ses->uname); 341 kfree(v9ses->uname);
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 2b607257820c..d142a2449e65 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -106,7 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
106 volume->cell = params->cell; 106 volume->cell = params->cell;
107 volume->vid = vlocation->vldb.vid[params->type]; 107 volume->vid = vlocation->vldb.vid[params->type];
108 108
109 ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY); 109 ret = bdi_setup_and_register(&volume->bdi, "afs");
110 if (ret) 110 if (ret)
111 goto error_bdi; 111 goto error_bdi;
112 112
diff --git a/fs/aio.c b/fs/aio.c
index c428871f1093..118a2e0088d8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt;
165static const struct file_operations aio_ring_fops; 165static const struct file_operations aio_ring_fops;
166static const struct address_space_operations aio_ctx_aops; 166static const struct address_space_operations aio_ctx_aops;
167 167
168/* Backing dev info for aio fs.
169 * -no dirty page accounting or writeback happens
170 */
171static struct backing_dev_info aio_fs_backing_dev_info = {
172 .name = "aiofs",
173 .state = 0,
174 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
175};
176
177static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 168static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
178{ 169{
179 struct qstr this = QSTR_INIT("[aio]", 5); 170 struct qstr this = QSTR_INIT("[aio]", 5);
@@ -185,7 +176,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
185 176
186 inode->i_mapping->a_ops = &aio_ctx_aops; 177 inode->i_mapping->a_ops = &aio_ctx_aops;
187 inode->i_mapping->private_data = ctx; 178 inode->i_mapping->private_data = ctx;
188 inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
189 inode->i_size = PAGE_SIZE * nr_pages; 179 inode->i_size = PAGE_SIZE * nr_pages;
190 180
191 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); 181 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -230,9 +220,6 @@ static int __init aio_setup(void)
230 if (IS_ERR(aio_mnt)) 220 if (IS_ERR(aio_mnt))
231 panic("Failed to create aio fs mount."); 221 panic("Failed to create aio fs mount.");
232 222
233 if (bdi_init(&aio_fs_backing_dev_info))
234 panic("Failed to init aio fs backing dev info.");
235
236 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 223 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
237 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 224 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
238 225
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b48c41bf0f86..a9f92794d7a0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -49,23 +49,15 @@ inline struct block_device *I_BDEV(struct inode *inode)
49} 49}
50EXPORT_SYMBOL(I_BDEV); 50EXPORT_SYMBOL(I_BDEV);
51 51
52/* 52static void bdev_write_inode(struct inode *inode)
53 * Move the inode from its current bdi to a new bdi. Make sure the inode
54 * is clean before moving so that it doesn't linger on the old bdi.
55 */
56static void bdev_inode_switch_bdi(struct inode *inode,
57 struct backing_dev_info *dst)
58{ 53{
59 while (true) { 54 spin_lock(&inode->i_lock);
60 spin_lock(&inode->i_lock); 55 while (inode->i_state & I_DIRTY) {
61 if (!(inode->i_state & I_DIRTY)) {
62 inode->i_data.backing_dev_info = dst;
63 spin_unlock(&inode->i_lock);
64 return;
65 }
66 spin_unlock(&inode->i_lock); 56 spin_unlock(&inode->i_lock);
67 WARN_ON_ONCE(write_inode_now(inode, true)); 57 WARN_ON_ONCE(write_inode_now(inode, true));
58 spin_lock(&inode->i_lock);
68 } 59 }
60 spin_unlock(&inode->i_lock);
69} 61}
70 62
71/* Kill _all_ buffers and pagecache , dirty or not.. */ 63/* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -584,7 +576,6 @@ struct block_device *bdget(dev_t dev)
584 inode->i_bdev = bdev; 576 inode->i_bdev = bdev;
585 inode->i_data.a_ops = &def_blk_aops; 577 inode->i_data.a_ops = &def_blk_aops;
586 mapping_set_gfp_mask(&inode->i_data, GFP_USER); 578 mapping_set_gfp_mask(&inode->i_data, GFP_USER);
587 inode->i_data.backing_dev_info = &default_backing_dev_info;
588 spin_lock(&bdev_lock); 579 spin_lock(&bdev_lock);
589 list_add(&bdev->bd_list, &all_bdevs); 580 list_add(&bdev->bd_list, &all_bdevs);
590 spin_unlock(&bdev_lock); 581 spin_unlock(&bdev_lock);
@@ -1145,8 +1136,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1145 bdev->bd_queue = disk->queue; 1136 bdev->bd_queue = disk->queue;
1146 bdev->bd_contains = bdev; 1137 bdev->bd_contains = bdev;
1147 if (!partno) { 1138 if (!partno) {
1148 struct backing_dev_info *bdi;
1149
1150 ret = -ENXIO; 1139 ret = -ENXIO;
1151 bdev->bd_part = disk_get_part(disk, partno); 1140 bdev->bd_part = disk_get_part(disk, partno);
1152 if (!bdev->bd_part) 1141 if (!bdev->bd_part)
@@ -1172,11 +1161,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1172 } 1161 }
1173 } 1162 }
1174 1163
1175 if (!ret) { 1164 if (!ret)
1176 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1165 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1177 bdi = blk_get_backing_dev_info(bdev);
1178 bdev_inode_switch_bdi(bdev->bd_inode, bdi);
1179 }
1180 1166
1181 /* 1167 /*
1182 * If the device is invalidated, rescan partition 1168 * If the device is invalidated, rescan partition
@@ -1203,8 +1189,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1203 if (ret) 1189 if (ret)
1204 goto out_clear; 1190 goto out_clear;
1205 bdev->bd_contains = whole; 1191 bdev->bd_contains = whole;
1206 bdev_inode_switch_bdi(bdev->bd_inode,
1207 whole->bd_inode->i_data.backing_dev_info);
1208 bdev->bd_part = disk_get_part(disk, partno); 1192 bdev->bd_part = disk_get_part(disk, partno);
1209 if (!(disk->flags & GENHD_FL_UP) || 1193 if (!(disk->flags & GENHD_FL_UP) ||
1210 !bdev->bd_part || !bdev->bd_part->nr_sects) { 1194 !bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1244,7 +1228,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1244 bdev->bd_disk = NULL; 1228 bdev->bd_disk = NULL;
1245 bdev->bd_part = NULL; 1229 bdev->bd_part = NULL;
1246 bdev->bd_queue = NULL; 1230 bdev->bd_queue = NULL;
1247 bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
1248 if (bdev != bdev->bd_contains) 1231 if (bdev != bdev->bd_contains)
1249 __blkdev_put(bdev->bd_contains, mode, 1); 1232 __blkdev_put(bdev->bd_contains, mode, 1);
1250 bdev->bd_contains = NULL; 1233 bdev->bd_contains = NULL;
@@ -1464,11 +1447,11 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1464 WARN_ON_ONCE(bdev->bd_holders); 1447 WARN_ON_ONCE(bdev->bd_holders);
1465 sync_blockdev(bdev); 1448 sync_blockdev(bdev);
1466 kill_bdev(bdev); 1449 kill_bdev(bdev);
1467 /* ->release can cause the old bdi to disappear, 1450 /*
1468 * so must switch it out first 1451 * ->release can cause the queue to disappear, so flush all
1452 * dirty data before.
1469 */ 1453 */
1470 bdev_inode_switch_bdi(bdev->bd_inode, 1454 bdev_write_inode(bdev->bd_inode);
1471 &default_backing_dev_info);
1472 } 1455 }
1473 if (bdev->bd_contains == bdev) { 1456 if (bdev->bd_contains == bdev) {
1474 if (disk->fops->release) 1457 if (disk->fops->release)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8c63419a7f70..1afb18226da8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1715,12 +1715,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1715{ 1715{
1716 int err; 1716 int err;
1717 1717
1718 bdi->capabilities = BDI_CAP_MAP_COPY; 1718 err = bdi_setup_and_register(bdi, "btrfs");
1719 err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1720 if (err) 1719 if (err)
1721 return err; 1720 return err;
1722 1721
1723 bdi->ra_pages = default_backing_dev_info.ra_pages; 1722 bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
1724 bdi->congested_fn = btrfs_congested_fn; 1723 bdi->congested_fn = btrfs_congested_fn;
1725 bdi->congested_data = info; 1724 bdi->congested_data = info;
1726 return 0; 1725 return 0;
@@ -2319,7 +2318,6 @@ int open_ctree(struct super_block *sb,
2319 */ 2318 */
2320 fs_info->btree_inode->i_size = OFFSET_MAX; 2319 fs_info->btree_inode->i_size = OFFSET_MAX;
2321 fs_info->btree_inode->i_mapping->a_ops = &btree_aops; 2320 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2322 fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2323 2321
2324 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); 2322 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2325 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, 2323 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a606ab551296..b78bbbac900d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1746,7 +1746,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1746 1746
1747 mutex_lock(&inode->i_mutex); 1747 mutex_lock(&inode->i_mutex);
1748 1748
1749 current->backing_dev_info = inode->i_mapping->backing_dev_info; 1749 current->backing_dev_info = inode_to_bdi(inode);
1750 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 1750 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1751 if (err) { 1751 if (err) {
1752 mutex_unlock(&inode->i_mutex); 1752 mutex_unlock(&inode->i_mutex);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8bf326affb94..54bcf639d1cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3608,7 +3608,6 @@ cache_acl:
3608 switch (inode->i_mode & S_IFMT) { 3608 switch (inode->i_mode & S_IFMT) {
3609 case S_IFREG: 3609 case S_IFREG:
3610 inode->i_mapping->a_ops = &btrfs_aops; 3610 inode->i_mapping->a_ops = &btrfs_aops;
3611 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3612 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 3611 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3613 inode->i_fop = &btrfs_file_operations; 3612 inode->i_fop = &btrfs_file_operations;
3614 inode->i_op = &btrfs_file_inode_operations; 3613 inode->i_op = &btrfs_file_inode_operations;
@@ -3623,7 +3622,6 @@ cache_acl:
3623 case S_IFLNK: 3622 case S_IFLNK:
3624 inode->i_op = &btrfs_symlink_inode_operations; 3623 inode->i_op = &btrfs_symlink_inode_operations;
3625 inode->i_mapping->a_ops = &btrfs_symlink_aops; 3624 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3626 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3627 break; 3625 break;
3628 default: 3626 default:
3629 inode->i_op = &btrfs_special_inode_operations; 3627 inode->i_op = &btrfs_special_inode_operations;
@@ -6088,7 +6086,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
6088 inode->i_fop = &btrfs_file_operations; 6086 inode->i_fop = &btrfs_file_operations;
6089 inode->i_op = &btrfs_file_inode_operations; 6087 inode->i_op = &btrfs_file_inode_operations;
6090 inode->i_mapping->a_ops = &btrfs_aops; 6088 inode->i_mapping->a_ops = &btrfs_aops;
6091 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
6092 6089
6093 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6090 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6094 if (err) 6091 if (err)
@@ -9203,7 +9200,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9203 inode->i_fop = &btrfs_file_operations; 9200 inode->i_fop = &btrfs_file_operations;
9204 inode->i_op = &btrfs_file_inode_operations; 9201 inode->i_op = &btrfs_file_inode_operations;
9205 inode->i_mapping->a_ops = &btrfs_aops; 9202 inode->i_mapping->a_ops = &btrfs_aops;
9206 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
9207 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9203 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9208 9204
9209 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 9205 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
@@ -9247,7 +9243,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9247 9243
9248 inode->i_op = &btrfs_symlink_inode_operations; 9244 inode->i_op = &btrfs_symlink_inode_operations;
9249 inode->i_mapping->a_ops = &btrfs_symlink_aops; 9245 inode->i_mapping->a_ops = &btrfs_symlink_aops;
9250 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
9251 inode_set_bytes(inode, name_len); 9246 inode_set_bytes(inode, name_len);
9252 btrfs_i_size_write(inode, name_len); 9247 btrfs_i_size_write(inode, name_len);
9253 err = btrfs_update_inode(trans, root, inode); 9248 err = btrfs_update_inode(trans, root, inode);
@@ -9459,7 +9454,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
9459 inode->i_op = &btrfs_file_inode_operations; 9454 inode->i_op = &btrfs_file_inode_operations;
9460 9455
9461 inode->i_mapping->a_ops = &btrfs_aops; 9456 inode->i_mapping->a_ops = &btrfs_aops;
9462 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
9463 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9457 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9464 9458
9465 ret = btrfs_init_inode_security(trans, inode, dir, NULL); 9459 ret = btrfs_init_inode_security(trans, inode, dir, NULL);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce74b394b49d..905986dd4c3c 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -945,7 +945,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
945 mutex_lock(&inode->i_mutex); 945 mutex_lock(&inode->i_mutex);
946 946
947 /* We can write back this queue in page reclaim */ 947 /* We can write back this queue in page reclaim */
948 current->backing_dev_info = file->f_mapping->backing_dev_info; 948 current->backing_dev_info = inode_to_bdi(inode);
949 949
950 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 950 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
951 if (err) 951 if (err)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index f61a74115beb..6b5173605154 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -783,8 +783,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
783 } 783 }
784 784
785 inode->i_mapping->a_ops = &ceph_aops; 785 inode->i_mapping->a_ops = &ceph_aops;
786 inode->i_mapping->backing_dev_info =
787 &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
788 786
789 switch (inode->i_mode & S_IFMT) { 787 switch (inode->i_mode & S_IFMT) {
790 case S_IFIFO: 788 case S_IFIFO:
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 50f06cddc94b..5ae62587a71d 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -40,17 +40,6 @@ static void ceph_put_super(struct super_block *s)
40 40
41 dout("put_super\n"); 41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc); 42 ceph_mdsc_close_sessions(fsc->mdsc);
43
44 /*
45 * ensure we release the bdi before put_anon_super releases
46 * the device name.
47 */
48 if (s->s_bdi == &fsc->backing_dev_info) {
49 bdi_unregister(&fsc->backing_dev_info);
50 s->s_bdi = NULL;
51 }
52
53 return;
54} 43}
55 44
56static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) 45static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -910,7 +899,7 @@ static int ceph_register_bdi(struct super_block *sb,
910 >> PAGE_SHIFT; 899 >> PAGE_SHIFT;
911 else 900 else
912 fsc->backing_dev_info.ra_pages = 901 fsc->backing_dev_info.ra_pages =
913 default_backing_dev_info.ra_pages; 902 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
914 903
915 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", 904 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
916 atomic_long_inc_return(&bdi_seq)); 905 atomic_long_inc_return(&bdi_seq));
@@ -1002,11 +991,16 @@ out_final:
1002static void ceph_kill_sb(struct super_block *s) 991static void ceph_kill_sb(struct super_block *s)
1003{ 992{
1004 struct ceph_fs_client *fsc = ceph_sb_to_client(s); 993 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
994 dev_t dev = s->s_dev;
995
1005 dout("kill_sb %p\n", s); 996 dout("kill_sb %p\n", s);
997
1006 ceph_mdsc_pre_umount(fsc->mdsc); 998 ceph_mdsc_pre_umount(fsc->mdsc);
1007 kill_anon_super(s); /* will call put_super after sb is r/o */ 999 generic_shutdown_super(s);
1008 ceph_mdsc_destroy(fsc); 1000 ceph_mdsc_destroy(fsc);
1001
1009 destroy_fs_client(fsc); 1002 destroy_fs_client(fsc);
1003 free_anon_bdev(dev);
1010} 1004}
1011 1005
1012static struct file_system_type ceph_fs_type = { 1006static struct file_system_type ceph_fs_type = {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 67b2007f10fe..ea06a3d0364c 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -24,27 +24,6 @@
24 24
25#include "internal.h" 25#include "internal.h"
26 26
27/*
28 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
29 * devices
30 * - permits shared-mmap for read, write and/or exec
31 * - does not permit private mmap in NOMMU mode (can't do COW)
32 * - no readahead or I/O queue unplugging required
33 */
34struct backing_dev_info directly_mappable_cdev_bdi = {
35 .name = "char",
36 .capabilities = (
37#ifdef CONFIG_MMU
38 /* permit private copies of the data to be taken */
39 BDI_CAP_MAP_COPY |
40#endif
41 /* permit direct mmap, for read, write or exec */
42 BDI_CAP_MAP_DIRECT |
43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
44 /* no writeback happens */
45 BDI_CAP_NO_ACCT_AND_WRITEBACK),
46};
47
48static struct kobj_map *cdev_map; 27static struct kobj_map *cdev_map;
49 28
50static DEFINE_MUTEX(chrdevs_lock); 29static DEFINE_MUTEX(chrdevs_lock);
@@ -575,8 +554,6 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
575void __init chrdev_init(void) 554void __init chrdev_init(void)
576{ 555{
577 cdev_map = kobj_map_init(base_probe, &chrdevs_lock); 556 cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
578 if (bdi_init(&directly_mappable_cdev_bdi))
579 panic("Failed to init directly mappable cdev bdi");
580} 557}
581 558
582 559
@@ -590,4 +567,3 @@ EXPORT_SYMBOL(cdev_del);
590EXPORT_SYMBOL(cdev_add); 567EXPORT_SYMBOL(cdev_add);
591EXPORT_SYMBOL(__register_chrdev); 568EXPORT_SYMBOL(__register_chrdev);
592EXPORT_SYMBOL(__unregister_chrdev); 569EXPORT_SYMBOL(__unregister_chrdev);
593EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2a772da16b83..d3aa999ab785 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3446,7 +3446,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
3446 int referral_walks_count = 0; 3446 int referral_walks_count = 0;
3447#endif 3447#endif
3448 3448
3449 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 3449 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs");
3450 if (rc) 3450 if (rc)
3451 return rc; 3451 return rc;
3452 3452
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0c3ce464cae4..2d4f37235ed0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -937,8 +937,6 @@ retry_iget5_locked:
937 inode->i_flags |= S_NOATIME | S_NOCMTIME; 937 inode->i_flags |= S_NOATIME | S_NOCMTIME;
938 if (inode->i_state & I_NEW) { 938 if (inode->i_state & I_NEW) {
939 inode->i_ino = hash; 939 inode->i_ino = hash;
940 if (S_ISREG(inode->i_mode))
941 inode->i_data.backing_dev_info = sb->s_bdi;
942#ifdef CONFIG_CIFS_FSCACHE 940#ifdef CONFIG_CIFS_FSCACHE
943 /* initialize per-inode cache cookie pointer */ 941 /* initialize per-inode cache cookie pointer */
944 CIFS_I(inode)->fscache = NULL; 942 CIFS_I(inode)->fscache = NULL;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index b945410bfcd5..82ec68b59208 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -183,7 +183,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
183 goto unlock_out; 183 goto unlock_out;
184 } 184 }
185 185
186 error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY); 186 error = bdi_setup_and_register(&vc->bdi, "coda");
187 if (error) 187 if (error)
188 goto unlock_out; 188 goto unlock_out;
189 189
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index bd4a3c167091..a315677e44d3 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -70,8 +70,6 @@ extern int configfs_is_root(struct config_item *item);
70 70
71extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); 71extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
72extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); 72extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
73extern int configfs_inode_init(void);
74extern void configfs_inode_exit(void);
75 73
76extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 74extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
77extern int configfs_make_dirent(struct configfs_dirent *, 75extern int configfs_make_dirent(struct configfs_dirent *,
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 5946ad98053f..65af86147154 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -50,12 +50,6 @@ static const struct address_space_operations configfs_aops = {
50 .write_end = simple_write_end, 50 .write_end = simple_write_end,
51}; 51};
52 52
53static struct backing_dev_info configfs_backing_dev_info = {
54 .name = "configfs",
55 .ra_pages = 0, /* No readahead */
56 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
57};
58
59static const struct inode_operations configfs_inode_operations ={ 53static const struct inode_operations configfs_inode_operations ={
60 .setattr = configfs_setattr, 54 .setattr = configfs_setattr,
61}; 55};
@@ -137,7 +131,6 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
137 if (inode) { 131 if (inode) {
138 inode->i_ino = get_next_ino(); 132 inode->i_ino = get_next_ino();
139 inode->i_mapping->a_ops = &configfs_aops; 133 inode->i_mapping->a_ops = &configfs_aops;
140 inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
141 inode->i_op = &configfs_inode_operations; 134 inode->i_op = &configfs_inode_operations;
142 135
143 if (sd->s_iattr) { 136 if (sd->s_iattr) {
@@ -283,13 +276,3 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
283 } 276 }
284 mutex_unlock(&dir->d_inode->i_mutex); 277 mutex_unlock(&dir->d_inode->i_mutex);
285} 278}
286
287int __init configfs_inode_init(void)
288{
289 return bdi_init(&configfs_backing_dev_info);
290}
291
292void configfs_inode_exit(void)
293{
294 bdi_destroy(&configfs_backing_dev_info);
295}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index f6c285833390..da94e41bdbf6 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -145,19 +145,13 @@ static int __init configfs_init(void)
145 if (!config_kobj) 145 if (!config_kobj)
146 goto out2; 146 goto out2;
147 147
148 err = configfs_inode_init();
149 if (err)
150 goto out3;
151
152 err = register_filesystem(&configfs_fs_type); 148 err = register_filesystem(&configfs_fs_type);
153 if (err) 149 if (err)
154 goto out4; 150 goto out3;
155 151
156 return 0; 152 return 0;
157out4:
158 pr_err("Unable to register filesystem!\n");
159 configfs_inode_exit();
160out3: 153out3:
154 pr_err("Unable to register filesystem!\n");
161 kobject_put(config_kobj); 155 kobject_put(config_kobj);
162out2: 156out2:
163 kmem_cache_destroy(configfs_dir_cachep); 157 kmem_cache_destroy(configfs_dir_cachep);
@@ -172,7 +166,6 @@ static void __exit configfs_exit(void)
172 kobject_put(config_kobj); 166 kobject_put(config_kobj);
173 kmem_cache_destroy(configfs_dir_cachep); 167 kmem_cache_destroy(configfs_dir_cachep);
174 configfs_dir_cachep = NULL; 168 configfs_dir_cachep = NULL;
175 configfs_inode_exit();
176} 169}
177 170
178MODULE_AUTHOR("Oracle"); 171MODULE_AUTHOR("Oracle");
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 1686dc2da9fd..34b36a504059 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -67,7 +67,6 @@ static int ecryptfs_inode_set(struct inode *inode, void *opaque)
67 inode->i_ino = lower_inode->i_ino; 67 inode->i_ino = lower_inode->i_ino;
68 inode->i_version++; 68 inode->i_version++;
69 inode->i_mapping->a_ops = &ecryptfs_aops; 69 inode->i_mapping->a_ops = &ecryptfs_aops;
70 inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
71 70
72 if (S_ISLNK(inode->i_mode)) 71 if (S_ISLNK(inode->i_mode))
73 inode->i_op = &ecryptfs_symlink_iops; 72 inode->i_op = &ecryptfs_symlink_iops;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d9eb84bda559..1895d60f4122 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -520,7 +520,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
520 goto out; 520 goto out;
521 } 521 }
522 522
523 rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); 523 rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs");
524 if (rc) 524 if (rc)
525 goto out1; 525 goto out1;
526 526
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index f1d3d4eb8c4f..6fc91df99ff8 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1214,7 +1214,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
1214 memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); 1214 memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
1215 } 1215 }
1216 1216
1217 inode->i_mapping->backing_dev_info = sb->s_bdi;
1218 if (S_ISREG(inode->i_mode)) { 1217 if (S_ISREG(inode->i_mode)) {
1219 inode->i_op = &exofs_file_inode_operations; 1218 inode->i_op = &exofs_file_inode_operations;
1220 inode->i_fop = &exofs_file_operations; 1219 inode->i_fop = &exofs_file_operations;
@@ -1314,7 +1313,6 @@ struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
1314 1313
1315 set_obj_2bcreated(oi); 1314 set_obj_2bcreated(oi);
1316 1315
1317 inode->i_mapping->backing_dev_info = sb->s_bdi;
1318 inode_init_owner(inode, dir, mode); 1316 inode_init_owner(inode, dir, mode);
1319 inode->i_ino = sbi->s_nextid++; 1317 inode->i_ino = sbi->s_nextid++;
1320 inode->i_blkbits = EXOFS_BLKSHIFT; 1318 inode->i_blkbits = EXOFS_BLKSHIFT;
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 95965503afcb..fcc2e565f540 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -836,7 +836,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
836 goto free_sbi; 836 goto free_sbi;
837 } 837 }
838 838
839 ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY); 839 ret = bdi_setup_and_register(&sbi->bdi, "exofs");
840 if (ret) { 840 if (ret) {
841 EXOFS_DBGMSG("Failed to bdi_setup_and_register\n"); 841 EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
842 dput(sb->s_root); 842 dput(sb->s_root);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 7d66fb0e4cca..6c14bb8322fa 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -170,7 +170,7 @@ static void ext2_preread_inode(struct inode *inode)
170 struct ext2_group_desc * gdp; 170 struct ext2_group_desc * gdp;
171 struct backing_dev_info *bdi; 171 struct backing_dev_info *bdi;
172 172
173 bdi = inode->i_mapping->backing_dev_info; 173 bdi = inode_to_bdi(inode);
174 if (bdi_read_congested(bdi)) 174 if (bdi_read_congested(bdi))
175 return; 175 return;
176 if (bdi_write_congested(bdi)) 176 if (bdi_write_congested(bdi))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ac64edbe501d..64c39c7c594f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -334,7 +334,7 @@ static void save_error_info(struct super_block *sb, const char *func,
 static int block_device_ejected(struct super_block *sb)
 {
 	struct inode *bd_inode = sb->s_bdev->bd_inode;
-	struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
 
 	return bdi->dev == NULL;
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 2d609a5fbfea..c399152de397 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -66,15 +66,21 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(writeback_in_progress);
 
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
-	struct super_block *sb = inode->i_sb;
+	struct super_block *sb;
 
-	if (sb_is_blkdev_sb(sb))
-		return inode->i_mapping->backing_dev_info;
+	if (!inode)
+		return &noop_backing_dev_info;
 
+	sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+	if (sb_is_blkdev_sb(sb))
+		return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
 	return sb->s_bdi;
 }
+EXPORT_SYMBOL_GPL(inode_to_bdi);
 
 static inline struct inode *wb_inode(struct list_head *head)
 {
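Note: with inode_to_bdi() exported here, callers elsewhere in this series stop dereferencing mapping->backing_dev_info and resolve the backing device through the inode instead. A minimal sketch of the call pattern, assuming a hypothetical out-of-tree helper (my_fs_congested() is illustrative and not part of this series):

#include <linux/backing-dev.h>
#include <linux/fs.h>

/*
 * Illustrative only: look up the bdi via the inode, since
 * mapping->backing_dev_info no longer exists after this series.
 */
static bool my_fs_congested(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_read_congested(bdi) || bdi_write_congested(bdi);
}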
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d769e594855b..c01ec3bdcfd8 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1159,7 +1159,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	mutex_lock(&inode->i_mutex);
 
 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
@@ -1464,7 +1464,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 {
 	struct inode *inode = req->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);
-	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	int i;
 
 	list_del(&req->writepages_entry);
@@ -1658,7 +1658,7 @@ static int fuse_writepage_locked(struct page *page)
 	req->end = fuse_writepage_end;
 	req->inode = inode;
 
-	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	spin_lock(&fc->lock);
@@ -1768,7 +1768,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
 
 	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
 					old_req->state == FUSE_REQ_PENDING)) {
-		struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
 
 		copy_highpage(old_req->pages[0], page);
 		spin_unlock(&fc->lock);
@@ -1872,7 +1872,7 @@ static int fuse_writepages_fill(struct page *page,
 	req->page_descs[req->num_pages].offset = 0;
 	req->page_descs[req->num_pages].length = PAGE_SIZE;
 
-	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	err = 0;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f38256e4476e..e8799c11424b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -308,7 +308,6 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
 		if (!fc->writeback_cache || !S_ISREG(attr->mode))
 			inode->i_flags |= S_NOCMTIME;
 		inode->i_generation = generation;
-		inode->i_data.backing_dev_info = &fc->bdi;
 		fuse_init_inode(inode, attr);
 		unlock_new_inode(inode);
 	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 805b37fed638..4ad4f94edebe 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -289,7 +289,7 @@ continue_unlock:
 		if (!clear_page_dirty_for_io(page))
 			goto continue_unlock;
 
-		trace_wbc_writepage(wbc, mapping->backing_dev_info);
+		trace_wbc_writepage(wbc, inode_to_bdi(inode));
 
 		ret = __gfs2_jdata_writepage(page, wbc);
 		if (unlikely(ret)) {
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index aeb7bc958a18..f42dffba056a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -768,7 +768,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		mapping->flags = 0;
 		mapping_set_gfp_mask(mapping, GFP_NOFS);
 		mapping->private_data = NULL;
-		mapping->backing_dev_info = s->s_bdi;
 		mapping->writeback_index = 0;
 	}
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8633ad328ee2..efc8e254787c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -112,7 +112,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	mapping->private_data = NULL;
-	mapping->backing_dev_info = sb->s_bdi;
 	mapping->writeback_index = 0;
 
 	spin_lock_init(&sdp->sd_log_lock);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 5b327f837de7..1666382b198d 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -743,7 +743,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
-	struct backing_dev_info *bdi = metamapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
 	int ret = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 5eba47f593f8..c274aca8e8dc 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,12 +62,6 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
 	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
 }
 
-static struct backing_dev_info hugetlbfs_backing_dev_info = {
-	.name		= "hugetlbfs",
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 int sysctl_hugetlb_shm_group;
 
 enum {
@@ -498,7 +492,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
 				&hugetlbfs_i_mmap_rwsem_key);
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
-		inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inode->i_mapping->private_data = resv_map;
 		info = HUGETLBFS_I(inode);
@@ -1032,10 +1025,6 @@ static int __init init_hugetlbfs_fs(void)
 		return -ENOTSUPP;
 	}
 
-	error = bdi_init(&hugetlbfs_backing_dev_info);
-	if (error)
-		return error;
-
 	error = -ENOMEM;
 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
 					sizeof(struct hugetlbfs_inode_info),
@@ -1071,7 +1060,6 @@ static int __init init_hugetlbfs_fs(void)
  out:
 	kmem_cache_destroy(hugetlbfs_inode_cachep);
  out2:
-	bdi_destroy(&hugetlbfs_backing_dev_info);
 	return error;
 }
 
@@ -1091,7 +1079,6 @@ static void __exit exit_hugetlbfs_fs(void)
 	for_each_hstate(h)
 		kern_unmount(hugetlbfs_vfsmount[i++]);
 	unregister_filesystem(&hugetlbfs_fs_type);
-	bdi_destroy(&hugetlbfs_backing_dev_info);
 }
 
 module_init(init_hugetlbfs_fs)
diff --git a/fs/inode.c b/fs/inode.c
index 3a53b1da3fb8..b7871577571d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -170,20 +170,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	atomic_set(&mapping->i_mmap_writable, 0);
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;
-	mapping->backing_dev_info = &default_backing_dev_info;
 	mapping->writeback_index = 0;
-
-	/*
-	 * If the block_device provides a backing_dev_info for client
-	 * inodes then use that. Otherwise the inode share the bdev's
-	 * backing_dev_info.
-	 */
-	if (sb->s_bdev) {
-		struct backing_dev_info *bdi;
-
-		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-		mapping->backing_dev_info = bdi;
-	}
 	inode->i_private = NULL;
 	inode->i_mapping = mapping;
 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 985217626e66..9000874a945b 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -24,12 +24,6 @@ static const struct address_space_operations kernfs_aops = {
 	.write_end	= simple_write_end,
 };
 
-static struct backing_dev_info kernfs_bdi = {
-	.name		= "kernfs",
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static const struct inode_operations kernfs_iops = {
 	.permission	= kernfs_iop_permission,
 	.setattr	= kernfs_iop_setattr,
@@ -40,12 +34,6 @@ static const struct inode_operations kernfs_iops = {
 	.listxattr	= kernfs_iop_listxattr,
 };
 
-void __init kernfs_inode_init(void)
-{
-	if (bdi_init(&kernfs_bdi))
-		panic("failed to init kernfs_bdi");
-}
-
 static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
 {
 	static DEFINE_MUTEX(iattr_mutex);
@@ -298,7 +286,6 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
 	kernfs_get(kn);
 	inode->i_private = kn;
 	inode->i_mapping->a_ops = &kernfs_aops;
-	inode->i_mapping->backing_dev_info = &kernfs_bdi;
 	inode->i_op = &kernfs_iops;
 
 	set_default_inode_attr(inode, kn->mode);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index dc84a3ef9ca2..af9fa7499919 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -88,7 +88,6 @@ int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
 ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
 			    size_t size);
 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
-void kernfs_inode_init(void);
 
 /*
  * dir.c
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index f973ae9b05f1..8eaf417187f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -246,5 +246,4 @@ void __init kernfs_init(void)
 	kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
 					      sizeof(struct kernfs_node),
 					      0, SLAB_PANIC, NULL);
-	kernfs_inode_init();
 }
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index e31e589369a4..01a9e16e9782 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -267,7 +267,6 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
 	if (inode) {
 		atomic_set(&NCP_FINFO(inode)->opened, info->opened);
 
-		inode->i_mapping->backing_dev_info = sb->s_bdi;
 		inode->i_ino = info->ino;
 		ncp_set_attr(inode, info);
 		if (S_ISREG(inode->i_mode)) {
@@ -560,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 	server = NCP_SBP(sb);
 	memset(server, 0, sizeof(*server));
 
-	error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
+	error = bdi_setup_and_register(&server->bdi, "ncpfs");
 	if (error)
 		goto out_fput;
 
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 3c9769441f36..7ae1c263c5cf 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -1002,7 +1002,7 @@ mds_commit:
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f29fb7d7e8f8..c22ecaa86c1c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1366,7 +1366,7 @@ ff_layout_mark_request_commit(struct nfs_page *req,
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d2398c193bda..e4f0dcef8f54 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -388,7 +388,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 		if (S_ISREG(inode->i_mode)) {
 			inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
 			inode->i_data.a_ops = &nfs_file_aops;
-			inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
 		} else if (S_ISDIR(inode->i_mode)) {
 			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
 			inode->i_fop = &nfs_dir_operations;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 21469e6e3834..212b8c883d22 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -430,7 +430,6 @@ int nfs_show_options(struct seq_file *, struct dentry *);
 int nfs_show_devname(struct seq_file *, struct dentry *);
 int nfs_show_path(struct seq_file *, struct dentry *);
 int nfs_show_stats(struct seq_file *, struct dentry *);
-void nfs_put_super(struct super_block *);
 int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
 
 /* write.c */
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 48cea3c30e5d..75090feeafad 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -53,7 +53,6 @@ static const struct super_operations nfs4_sops = {
 	.destroy_inode	= nfs_destroy_inode,
 	.write_inode	= nfs4_write_inode,
 	.drop_inode	= nfs_drop_inode,
-	.put_super	= nfs_put_super,
 	.statfs		= nfs_statfs,
 	.evict_inode	= nfs4_evict_inode,
 	.umount_begin	= nfs_umount_begin,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 368d9395d2e7..322b2de02988 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -311,7 +311,6 @@ const struct super_operations nfs_sops = {
 	.destroy_inode	= nfs_destroy_inode,
 	.write_inode	= nfs_write_inode,
 	.drop_inode	= nfs_drop_inode,
-	.put_super	= nfs_put_super,
 	.statfs		= nfs_statfs,
 	.evict_inode	= nfs_evict_inode,
 	.umount_begin	= nfs_umount_begin,
@@ -2572,7 +2571,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 		error = nfs_bdi_register(server);
 		if (error) {
 			mntroot = ERR_PTR(error);
-			goto error_splat_bdi;
+			goto error_splat_super;
 		}
 		server->super = s;
 	}
@@ -2604,9 +2603,6 @@ error_splat_root:
 	dput(mntroot);
 	mntroot = ERR_PTR(error);
 error_splat_super:
-	if (server && !s->s_root)
-		bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
 	deactivate_locked_super(s);
 	goto out;
 }
@@ -2654,27 +2650,19 @@ out:
 EXPORT_SYMBOL_GPL(nfs_fs_mount);
 
 /*
- * Ensure that we unregister the bdi before kill_anon_super
- * releases the device name
- */
-void nfs_put_super(struct super_block *s)
-{
-	struct nfs_server *server = NFS_SB(s);
-
-	bdi_unregister(&server->backing_dev_info);
-}
-EXPORT_SYMBOL_GPL(nfs_put_super);
-
-/*
  * Destroy an NFS2/3 superblock
  */
 void nfs_kill_super(struct super_block *s)
 {
 	struct nfs_server *server = NFS_SB(s);
+	dev_t dev = s->s_dev;
+
+	generic_shutdown_super(s);
 
-	kill_anon_super(s);
 	nfs_fscache_release_super_cookie(s);
+
 	nfs_free_server(server);
+	free_anon_bdev(dev);
 }
 EXPORT_SYMBOL_GPL(nfs_kill_super);
 
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bcf83e535f29..88a6d2196ece 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -791,7 +791,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
@@ -858,7 +858,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
 }
 
 /* Called holding inode (/cinfo) lock */
@@ -1607,7 +1607,7 @@ void nfs_retry_commit(struct list_head *page_list,
 		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
 		if (!cinfo->dreq) {
 			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+			dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 				     BDI_RECLAIMABLE);
 		}
 		nfs_unlock_and_release_request(req);
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 57ceaf33d177..748ca238915a 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -172,7 +172,6 @@ int nilfs_init_gcinode(struct inode *inode)
 	inode->i_mode = S_IFREG;
 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 	inode->i_mapping->a_ops = &empty_aops;
-	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
 
 	ii->i_flags = 0;
 	nilfs_bmap_init_gc(ii->i_bmap);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c4dcd1db57ee..892cf5ffdb8e 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -429,7 +429,6 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 
 	inode->i_mode = S_IFREG;
 	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
-	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
 
 	inode->i_op = &def_mdt_iops;
 	inode->i_fop = &def_mdt_fops;
@@ -457,13 +456,12 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
 			       struct nilfs_shadow_map *shadow)
 {
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
-	struct backing_dev_info *bdi = inode->i_sb->s_bdi;
 
 	INIT_LIST_HEAD(&shadow->frozen_buffers);
 	address_space_init_once(&shadow->frozen_data);
-	nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
+	nilfs_mapping_init(&shadow->frozen_data, inode);
 	address_space_init_once(&shadow->frozen_btnodes);
-	nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
+	nilfs_mapping_init(&shadow->frozen_btnodes, inode);
 	mi->mi_shadow = shadow;
 	return 0;
 }
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index da276640f776..700ecbcca55d 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -461,14 +461,12 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
 	return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
-			struct backing_dev_info *bdi)
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
 {
 	mapping->host = inode;
 	mapping->flags = 0;
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	mapping->private_data = NULL;
-	mapping->backing_dev_info = bdi;
 	mapping->a_ops = &empty_aops;
 }
 
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index ef30c5c2426f..a43b8287d012 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -57,8 +57,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
-			struct backing_dev_info *bdi);
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 					    sector_t start_blk,
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 2e5b3ec85b8f..5bc2a1cf73c3 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
 	ii->i_state = 0;
 	ii->i_cno = 0;
 	ii->vfs_inode.i_version = 1;
-	nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
+	nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
 	return &ii->vfs_inode;
 }
 
@@ -1057,7 +1057,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct the_nilfs *nilfs;
 	struct nilfs_root *fsroot;
-	struct backing_dev_info *bdi;
 	__u64 cno;
 	int err;
 
@@ -1077,8 +1076,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_time_gran = 1;
 	sb->s_max_links = NILFS_LINK_MAX;
 
-	bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-	sb->s_bdi = bdi ? : &default_backing_dev_info;
+	sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
 
 	err = load_nilfs(nilfs, sb);
 	if (err)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 643faa44f22b..1da9b2d184dc 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -19,6 +19,7 @@
  * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/gfp.h>
 #include <linux/pagemap.h>
@@ -2091,7 +2092,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
 	count = iov_length(iov, nr_segs);
 	pos = *ppos;
 	/* We can write back this queue in page reclaim. */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 	written = 0;
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 57c40e34f56f..061ba6a91bf2 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -390,12 +390,6 @@ clear_fields:
 	ip->ip_conn = NULL;
 }
 
-static struct backing_dev_info dlmfs_backing_dev_info = {
-	.name		= "ocfs2-dlmfs",
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static struct inode *dlmfs_get_root_inode(struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
@@ -404,7 +398,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
 	if (inode) {
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, NULL, mode);
-		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inc_nlink(inode);
 
@@ -428,7 +421,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
 
 	inode->i_ino = get_next_ino();
 	inode_init_owner(inode, parent, mode);
-	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
 	ip = DLMFS_I(inode);
@@ -643,10 +635,6 @@ static int __init init_dlmfs_fs(void)
 	int status;
 	int cleanup_inode = 0, cleanup_worker = 0;
 
-	status = bdi_init(&dlmfs_backing_dev_info);
-	if (status)
-		return status;
-
 	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
 				sizeof(struct dlmfs_inode_private),
 				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
@@ -673,7 +661,6 @@ bail:
 		kmem_cache_destroy(dlmfs_inode_cache);
 		if (cleanup_worker)
 			destroy_workqueue(user_dlm_worker);
-		bdi_destroy(&dlmfs_backing_dev_info);
 	} else
 		printk("OCFS2 User DLM kernel interface loaded\n");
 	return status;
@@ -693,7 +680,6 @@ static void __exit exit_dlmfs_fs(void)
 	rcu_barrier();
 	kmem_cache_destroy(dlmfs_inode_cache);
 
-	bdi_destroy(&dlmfs_backing_dev_info);
 }
 
 MODULE_AUTHOR("Oracle");
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 245db4f504da..e0f04d55fd05 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2363,7 +2363,7 @@ relock:
 			goto out_dio;
 		}
 	} else {
-		current->backing_dev_info = file->f_mapping->backing_dev_info;
+		current->backing_dev_info = inode_to_bdi(inode);
 		written = generic_perform_write(file, from, *ppos);
 		if (likely(written >= 0))
 			iocb->ki_pos = *ppos + written;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index bbafbde3471a..f6ab41b39612 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -34,7 +34,14 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
 						   unsigned long flags);
 static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
 
+static unsigned ramfs_mmap_capabilities(struct file *file)
+{
+	return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
+		NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
 const struct file_operations ramfs_file_operations = {
+	.mmap_capabilities	= ramfs_mmap_capabilities,
 	.mmap			= ramfs_nommu_mmap,
 	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
 	.read			= new_sync_read,
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d365b1c4eb3c..889d558b4e05 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -50,14 +50,6 @@ static const struct address_space_operations ramfs_aops = {
 	.set_page_dirty	= __set_page_dirty_no_writeback,
 };
 
-static struct backing_dev_info ramfs_backing_dev_info = {
-	.name		= "ramfs",
-	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
-			  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
-			  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
-};
-
 struct inode *ramfs_get_inode(struct super_block *sb,
 				const struct inode *dir, umode_t mode, dev_t dev)
 {
@@ -67,7 +59,6 @@ struct inode *ramfs_get_inode(struct super_block *sb,
 	inode->i_ino = get_next_ino();
 	inode_init_owner(inode, dir, mode);
 	inode->i_mapping->a_ops = &ramfs_aops;
-	inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
 	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 	mapping_set_unevictable(inode->i_mapping);
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -267,19 +258,9 @@ static struct file_system_type ramfs_fs_type = {
 int __init init_ramfs_fs(void)
 {
 	static unsigned long once;
-	int err;
 
 	if (test_and_set_bit(0, &once))
 		return 0;
-
-	err = bdi_init(&ramfs_backing_dev_info);
-	if (err)
-		return err;
-
-	err = register_filesystem(&ramfs_fs_type);
-	if (err)
-		bdi_destroy(&ramfs_backing_dev_info);
-
-	return err;
+	return register_filesystem(&ramfs_fs_type);
 }
 fs_initcall(init_ramfs_fs);
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index ea06c7554860..7da9e2153953 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -70,6 +70,15 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
 }
 
+static unsigned romfs_mmap_capabilities(struct file *file)
+{
+	struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd;
+
+	if (!mtd)
+		return NOMMU_MAP_COPY;
+	return mtd_mmap_capabilities(mtd);
+}
+
 const struct file_operations romfs_ro_fops = {
 	.llseek			= generic_file_llseek,
 	.read			= new_sync_read,
@@ -77,4 +86,5 @@ const struct file_operations romfs_ro_fops = {
 	.splice_read		= generic_file_splice_read,
 	.mmap			= romfs_mmap,
 	.get_unmapped_area	= romfs_get_unmapped_area,
+	.mmap_capabilities	= romfs_mmap_capabilities,
 };
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index e98dd88197d5..268733cda397 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -355,9 +355,6 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
 	case ROMFH_REG:
 		i->i_fop = &romfs_ro_fops;
 		i->i_data.a_ops = &romfs_aops;
-		if (i->i_sb->s_mtd)
-			i->i_data.backing_dev_info =
-				i->i_sb->s_mtd->backing_dev_info;
 		if (nextfh & ROMFH_EXEC)
 			mode |= S_IXUGO;
 		break;
diff --git a/fs/super.c b/fs/super.c
index eae088f6aaae..05a021638b11 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -36,8 +36,8 @@
 #include "internal.h"
 
 
-LIST_HEAD(super_blocks);
-DEFINE_SPINLOCK(sb_lock);
+static LIST_HEAD(super_blocks);
+static DEFINE_SPINLOCK(sb_lock);
 
 static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 	"sb_writers",
@@ -185,8 +185,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	}
 	init_waitqueue_head(&s->s_writers.wait);
 	init_waitqueue_head(&s->s_writers.wait_unfrozen);
+	s->s_bdi = &noop_backing_dev_info;
 	s->s_flags = flags;
-	s->s_bdi = &default_backing_dev_info;
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
@@ -863,10 +863,7 @@ EXPORT_SYMBOL(free_anon_bdev);
 
 int set_anon_super(struct super_block *s, void *data)
 {
-	int error = get_anon_bdev(&s->s_dev);
-	if (!error)
-		s->s_bdi = &noop_backing_dev_info;
-	return error;
+	return get_anon_bdev(&s->s_dev);
 }
 
 EXPORT_SYMBOL(set_anon_super);
@@ -1111,7 +1108,6 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
 	sb = root->d_sb;
 	BUG_ON(!sb);
 	WARN_ON(!sb->s_bdi);
-	WARN_ON(sb->s_bdi == &default_backing_dev_info);
 	sb->s_flags |= MS_BORN;
 
 	error = security_sb_kern_mount(sb, flags, secdata);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ea41649e4ca5..c49b1981ac95 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -108,8 +108,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
 	inode->i_mtime = inode->i_atime = inode->i_ctime =
 			 ubifs_current_time(inode);
 	inode->i_mapping->nrpages = 0;
-	/* Disable readahead */
-	inode->i_mapping->backing_dev_info = &c->bdi;
 
 	switch (mode & S_IFMT) {
 	case S_IFREG:
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 106bf20629ce..6197154f36ca 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -156,9 +156,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
 	if (err)
 		goto out_invalid;
 
-	/* Disable read-ahead */
-	inode->i_mapping->backing_dev_info = &c->bdi;
-
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
 		inode->i_mapping->a_ops = &ubifs_file_address_operations;
@@ -2017,7 +2014,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	 * Read-ahead will be disabled because @c->bdi.ra_pages is 0.
 	 */
 	c->bdi.name = "ubifs",
-	c->bdi.capabilities = BDI_CAP_MAP_COPY;
+	c->bdi.capabilities = 0;
 	err = bdi_init(&c->bdi);
 	if (err)
 		goto out_close;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f2d05a19d68c..1cdba95c78cb 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -735,7 +735,7 @@ xfs_file_buffered_aio_write(
 
 	iov_iter_truncate(from, count);
 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 
 write_retry:
 	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5da6012b7a14..d94077fea1f8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -106,6 +106,8 @@ struct backing_dev_info {
 #endif
 };
 
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
+
 int __must_check bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);
 
@@ -114,7 +116,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 			enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
@@ -228,46 +230,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
  * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
  * BDI_CAP_NO_WRITEBACK:   Don't write pages back
  * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
- *
- * These flags let !MMU mmap() govern direct device mapping vs immediate
- * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
- *
- * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
- * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
- * BDI_CAP_READ_MAP:       Can be mapped for reading
- * BDI_CAP_WRITE_MAP:      Can be mapped for writing
- * BDI_CAP_EXEC_MAP:       Can be mapped for execution
- *
- * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
- *
  * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
  */
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
-#define BDI_CAP_MAP_COPY	0x00000004
-#define BDI_CAP_MAP_DIRECT	0x00000008
-#define BDI_CAP_READ_MAP	0x00000010
-#define BDI_CAP_WRITE_MAP	0x00000020
-#define BDI_CAP_EXEC_MAP	0x00000040
-#define BDI_CAP_NO_ACCT_WB	0x00000080
-#define BDI_CAP_SWAP_BACKED	0x00000100
-#define BDI_CAP_STABLE_WRITES	0x00000200
-#define BDI_CAP_STRICTLIMIT	0x00000400
-
-#define BDI_CAP_VMFLAGS \
-	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
+#define BDI_CAP_NO_ACCT_WB	0x00000004
+#define BDI_CAP_STABLE_WRITES	0x00000008
+#define BDI_CAP_STRICTLIMIT	0x00000010
 
 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
 	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
 
-#if defined(VM_MAYREAD) && \
-	(BDI_CAP_READ_MAP != VM_MAYREAD || \
-	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
-	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
-#error please change backing_dev_info::capabilities flags
-#endif
-
-extern struct backing_dev_info default_backing_dev_info;
 extern struct backing_dev_info noop_backing_dev_info;
 
 int writeback_in_progress(struct backing_dev_info *bdi);
@@ -329,24 +302,14 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
 				  BDI_CAP_NO_WRITEBACK));
 }
 
-static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
-{
-	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
-}
-
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
-	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
+	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
 }
 
 static inline bool mapping_cap_account_dirty(struct address_space *mapping)
 {
-	return bdi_cap_account_dirty(mapping->backing_dev_info);
-}
-
-static inline bool mapping_cap_swap_backed(struct address_space *mapping)
-{
-	return bdi_cap_swap_backed(mapping->backing_dev_info);
+	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
 }
 
 static inline int bdi_sched_wait(void *word)
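Note: with BDI_CAP_MAP_COPY gone, bdi_setup_and_register() loses its capabilities argument. A minimal sketch of the updated call for a filesystem that embeds its own backing_dev_info (my_fill_super, struct my_sb_info and the "myfs" name are illustrative, following the ecryptfs/exofs/ncpfs hunks above):

#include <linux/backing-dev.h>
#include <linux/fs.h>

struct my_sb_info {
	struct backing_dev_info bdi;	/* hypothetical per-sb bdi */
};

static int my_fill_super(struct super_block *sb, struct my_sb_info *sbi)
{
	int rc;

	/* The third (capabilities) argument is gone after this series. */
	rc = bdi_setup_and_register(&sbi->bdi, "myfs");
	if (rc)
		return rc;

	sb->s_bdi = &sbi->bdi;
	return 0;
}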
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b03..f8763615a5f2 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -30,6 +30,4 @@ void cdev_del(struct cdev *);
 
 void cd_forget(struct inode *);
 
-extern struct backing_dev_info directly_mappable_cdev_bdi;
-
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cdcb1e9d9613..ec0f1dc66b9b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,6 +34,7 @@
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
 
+struct backing_dev_info;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
@@ -394,7 +395,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata);
 
-struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
@@ -408,7 +408,6 @@ struct address_space {
 	pgoff_t			writeback_index;/* writeback starts here */
 	const struct address_space_operations *a_ops;	/* methods */
 	unsigned long		flags;		/* error bits/gfp mask */
-	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	void			*private_data;	/* ditto */
@@ -1201,8 +1200,6 @@ struct mm_struct;
 #define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
 #define UMOUNT_UNUSED	0x80000000	/* Flag guaranteed to be unused */
 
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
 
 /* Possible states of 'frozen' field */
 enum {
@@ -1519,6 +1516,26 @@ struct block_device_operations;
 #define HAVE_COMPAT_IOCTL 1
 #define HAVE_UNLOCKED_IOCTL 1
 
+/*
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * NOMMU_MAP_COPY:	Copy can be mapped (MAP_PRIVATE)
+ * NOMMU_MAP_DIRECT:	Can be mapped directly (MAP_SHARED)
+ * NOMMU_MAP_READ:	Can be mapped for reading
+ * NOMMU_MAP_WRITE:	Can be mapped for writing
+ * NOMMU_MAP_EXEC:	Can be mapped for execution
+ */
+#define NOMMU_MAP_COPY		0x00000001
+#define NOMMU_MAP_DIRECT	0x00000008
+#define NOMMU_MAP_READ		VM_MAYREAD
+#define NOMMU_MAP_WRITE		VM_MAYWRITE
+#define NOMMU_MAP_EXEC		VM_MAYEXEC
+
+#define NOMMU_VMFLAGS \
+	(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+
+
 struct iov_iter;
 
 struct file_operations {
@@ -1553,6 +1570,9 @@ struct file_operations {
 	long (*fallocate)(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 	void (*show_fdinfo)(struct seq_file *m, struct file *f);
+#ifndef CONFIG_MMU
+	unsigned (*mmap_capabilities)(struct file *);
+#endif
 };
 
 struct inode_operations {
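Note: the old nommu BDI_CAP_MAP_* flags are replaced by the per-file f_op->mmap_capabilities hook and the NOMMU_MAP_* flags above. A sketch of how a read-only ROM-style filesystem might wire this up (myfs_mmap_capabilities, myfs_ro_fops and the capability mix are illustrative assumptions; compare the ramfs and romfs hunks earlier in this diff):

#include <linux/fs.h>

#ifndef CONFIG_MMU
static unsigned myfs_mmap_capabilities(struct file *file)
{
	/* Direct, read-only mappings plus MAP_PRIVATE copies. */
	return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}
#endif

static const struct file_operations myfs_ro_fops = {
	.llseek			= generic_file_llseek,
	.read			= new_sync_read,
#ifndef CONFIG_MMU
	.mmap_capabilities	= myfs_mmap_capabilities,
#endif
};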
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 031ff3a9a0bd..3301c4c289d6 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
 	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
 }
 
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
 #endif /* __MTD_MTD_H__ */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index cee02d65ab3f..0e9310905413 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -47,7 +47,7 @@ TRACE_EVENT(writeback_dirty_page,
 
 	TP_fast_assign(
 		strncpy(__entry->name,
-			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
 		__entry->ino = mapping ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 	),
 
 	TP_fast_assign(
-		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(inode);
 
 		/* may be called for files on pseudo FSes w/ unregistered bdi */
 		strncpy(__entry->name,
@@ -116,7 +116,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
 
 	TP_fast_assign(
 		strncpy(__entry->name,
-			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+			dev_name(inode_to_bdi(inode)->dev), 32);
 		__entry->ino = inode->i_ino;
 		__entry->sync_mode = wbc->sync_mode;
 	),
@@ -156,10 +156,8 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, reason)
 	),
 	TP_fast_assign(
-		struct device *dev = bdi->dev;
-		if (!dev)
-			dev = default_backing_dev_info.dev;
-		strncpy(__entry->name, dev_name(dev), 32);
+		strncpy(__entry->name,
+			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
 		__entry->nr_pages = work->nr_pages;
 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 		__entry->sync_mode = work->sync_mode;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0ae0df55000b..7690ec77c722 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,19 +14,10 @@
14 14
15static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 15static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
16 16
17struct backing_dev_info default_backing_dev_info = {
18 .name = "default",
19 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
20 .state = 0,
21 .capabilities = BDI_CAP_MAP_COPY,
22};
23EXPORT_SYMBOL_GPL(default_backing_dev_info);
24
25struct backing_dev_info noop_backing_dev_info = { 17struct backing_dev_info noop_backing_dev_info = {
26 .name = "noop", 18 .name = "noop",
27 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 19 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
28}; 20};
29EXPORT_SYMBOL_GPL(noop_backing_dev_info);
30 21
31static struct class *bdi_class; 22static struct class *bdi_class;
32 23
@@ -40,17 +31,6 @@ LIST_HEAD(bdi_list);
40/* bdi_wq serves all asynchronous writeback tasks */ 31/* bdi_wq serves all asynchronous writeback tasks */
41struct workqueue_struct *bdi_wq; 32struct workqueue_struct *bdi_wq;
42 33
43static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
44{
45 if (wb1 < wb2) {
46 spin_lock(&wb1->list_lock);
47 spin_lock_nested(&wb2->list_lock, 1);
48 } else {
49 spin_lock(&wb2->list_lock);
50 spin_lock_nested(&wb1->list_lock, 1);
51 }
52}
53
54#ifdef CONFIG_DEBUG_FS 34#ifdef CONFIG_DEBUG_FS
55#include <linux/debugfs.h> 35#include <linux/debugfs.h>
56#include <linux/seq_file.h> 36#include <linux/seq_file.h>
@@ -264,9 +244,6 @@ static int __init default_bdi_init(void)
264 if (!bdi_wq) 244 if (!bdi_wq)
265 return -ENOMEM; 245 return -ENOMEM;
266 246
267 err = bdi_init(&default_backing_dev_info);
268 if (!err)
269 bdi_register(&default_backing_dev_info, NULL, "default");
270 err = bdi_init(&noop_backing_dev_info); 247 err = bdi_init(&noop_backing_dev_info);
271 248
272 return err; 249 return err;
@@ -355,19 +332,19 @@ EXPORT_SYMBOL(bdi_register_dev);
355 */ 332 */
356static void bdi_wb_shutdown(struct backing_dev_info *bdi) 333static void bdi_wb_shutdown(struct backing_dev_info *bdi)
357{ 334{
358 if (!bdi_cap_writeback_dirty(bdi)) 335 /* Make sure nobody queues further work */
336 spin_lock_bh(&bdi->wb_lock);
337 if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
338 spin_unlock_bh(&bdi->wb_lock);
359 return; 339 return;
340 }
341 spin_unlock_bh(&bdi->wb_lock);
360 342
361 /* 343 /*
362 * Make sure nobody finds us on the bdi_list anymore 344 * Make sure nobody finds us on the bdi_list anymore
363 */ 345 */
364 bdi_remove_from_list(bdi); 346 bdi_remove_from_list(bdi);
365 347
366 /* Make sure nobody queues further work */
367 spin_lock_bh(&bdi->wb_lock);
368 clear_bit(BDI_registered, &bdi->state);
369 spin_unlock_bh(&bdi->wb_lock);
370
371 /* 348 /*
372 * Drain work list and shutdown the delayed_work. At this point, 349 * Drain work list and shutdown the delayed_work. At this point,
373 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi 350 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
@@ -375,37 +352,22 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
375 */ 352 */
376 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); 353 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
377 flush_delayed_work(&bdi->wb.dwork); 354 flush_delayed_work(&bdi->wb.dwork);
378 WARN_ON(!list_empty(&bdi->work_list));
379 WARN_ON(delayed_work_pending(&bdi->wb.dwork));
380} 355}
381 356
382/* 357/*
383 * This bdi is going away now, make sure that no super_blocks point to it 358 * Called when the device behind @bdi has been removed or ejected.
359 *
360 * We can't really do much here except for reducing the dirty ratio at
361 * the moment. In the future we should be able to set a flag so that
362 * the filesystem can handle errors at mark_inode_dirty time instead
363 * of only at writeback time.
384 */ 364 */
385static void bdi_prune_sb(struct backing_dev_info *bdi)
386{
387 struct super_block *sb;
388
389 spin_lock(&sb_lock);
390 list_for_each_entry(sb, &super_blocks, s_list) {
391 if (sb->s_bdi == bdi)
392 sb->s_bdi = &default_backing_dev_info;
393 }
394 spin_unlock(&sb_lock);
395}
396
397void bdi_unregister(struct backing_dev_info *bdi) 365void bdi_unregister(struct backing_dev_info *bdi)
398{ 366{
399 if (bdi->dev) { 367 if (WARN_ON_ONCE(!bdi->dev))
400 bdi_set_min_ratio(bdi, 0); 368 return;
401 trace_writeback_bdi_unregister(bdi);
402 bdi_prune_sb(bdi);
403 369
404 bdi_wb_shutdown(bdi); 370 bdi_set_min_ratio(bdi, 0);
405 bdi_debug_unregister(bdi);
406 device_unregister(bdi->dev);
407 bdi->dev = NULL;
408 }
409} 371}
410EXPORT_SYMBOL(bdi_unregister); 372EXPORT_SYMBOL(bdi_unregister);
411 373
@@ -474,37 +436,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
474{ 436{
475 int i; 437 int i;
476 438
477 /* 439 bdi_wb_shutdown(bdi);
478 * Splice our entries to the default_backing_dev_info. This
479 * condition shouldn't happen. @wb must be empty at this point and
480 * dirty inodes on it might cause other issues. This workaround is
481 * added by ce5f8e779519 ("writeback: splice dirty inode entries to
482 * default bdi on bdi_destroy()") without root-causing the issue.
483 *
484 * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
485 * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
486 *
487 * We should probably add WARN_ON() to find out whether it still
488 * happens and track it down if so.
489 */
490 if (bdi_has_dirty_io(bdi)) {
491 struct bdi_writeback *dst = &default_backing_dev_info.wb;
492
493 bdi_lock_two(&bdi->wb, dst);
494 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
495 list_splice(&bdi->wb.b_io, &dst->b_io);
496 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
497 spin_unlock(&bdi->wb.list_lock);
498 spin_unlock(&dst->list_lock);
499 }
500
501 bdi_unregister(bdi);
502 440
441 WARN_ON(!list_empty(&bdi->work_list));
503 WARN_ON(delayed_work_pending(&bdi->wb.dwork)); 442 WARN_ON(delayed_work_pending(&bdi->wb.dwork));
504 443
444 if (bdi->dev) {
445 bdi_debug_unregister(bdi);
446 device_unregister(bdi->dev);
447 bdi->dev = NULL;
448 }
449
505 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 450 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
506 percpu_counter_destroy(&bdi->bdi_stat[i]); 451 percpu_counter_destroy(&bdi->bdi_stat[i]);
507
508 fprop_local_destroy_percpu(&bdi->completions); 452 fprop_local_destroy_percpu(&bdi->completions);
509} 453}
510EXPORT_SYMBOL(bdi_destroy); 454EXPORT_SYMBOL(bdi_destroy);
@@ -513,13 +457,12 @@ EXPORT_SYMBOL(bdi_destroy);
513 * For use from filesystems to quickly init and register a bdi associated 457 * For use from filesystems to quickly init and register a bdi associated
514 * with dirty writeback 458 * with dirty writeback
515 */ 459 */
516int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, 460int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
517 unsigned int cap)
518{ 461{
519 int err; 462 int err;
520 463
521 bdi->name = name; 464 bdi->name = name;
522 bdi->capabilities = cap; 465 bdi->capabilities = 0;
523 err = bdi_init(bdi); 466 err = bdi_init(bdi);
524 if (err) 467 if (err)
525 return err; 468 return err;
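
Two caller-visible points in the mm/backing-dev.c hunks above are worth spelling out: bdi_setup_and_register() loses its capabilities argument, and the reworked bdi_wb_shutdown() keys the whole teardown on atomically clearing BDI_registered under wb_lock, so a caller racing with (or repeating) the shutdown backs off instead of draining twice. A rough userspace model of that "shut down exactly once" shape follows, using a pthread mutex and a plain flag in place of the kernel's spinlock and bit operations; fake_bdi and shutdown_once() are invented names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bdi {
    pthread_mutex_t wb_lock;
    bool registered;
};

static bool shutdown_once(struct fake_bdi *bdi)
{
    pthread_mutex_lock(&bdi->wb_lock);
    if (!bdi->registered) {
        /* Never registered, or already shut down: nothing to drain. */
        pthread_mutex_unlock(&bdi->wb_lock);
        return false;
    }
    bdi->registered = false;    /* nobody may queue further work */
    pthread_mutex_unlock(&bdi->wb_lock);

    /* ... drain pending writeback work here ... */
    return true;
}

int main(void)
{
    struct fake_bdi bdi = {
        .wb_lock = PTHREAD_MUTEX_INITIALIZER,
        .registered = true,
    };

    printf("first call drains:  %d\n", shutdown_once(&bdi));   /* 1 */
    printf("second call drains: %d\n", shutdown_once(&bdi));   /* 0 */
    return 0;
}

Build with -pthread. The flag test makes the teardown safe to reach on a bdi that was never registered or that has already been shut down, which is what lets bdi_destroy() above call bdi_wb_shutdown() unconditionally.
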
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2ad7adf4f0a4..fac23ecf8d72 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
73 else 73 else
74 endbyte--; /* inclusive */ 74 endbyte--; /* inclusive */
75 75
76 bdi = mapping->backing_dev_info; 76 bdi = inode_to_bdi(mapping->host);
77 77
78 switch (advice) { 78 switch (advice) {
79 case POSIX_FADV_NORMAL: 79 case POSIX_FADV_NORMAL:
@@ -113,7 +113,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
113 case POSIX_FADV_NOREUSE: 113 case POSIX_FADV_NOREUSE:
114 break; 114 break;
115 case POSIX_FADV_DONTNEED: 115 case POSIX_FADV_DONTNEED:
116 if (!bdi_write_congested(mapping->backing_dev_info)) 116 if (!bdi_write_congested(bdi))
117 __filemap_fdatawrite_range(mapping, offset, endbyte, 117 __filemap_fdatawrite_range(mapping, offset, endbyte,
118 WB_SYNC_NONE); 118 WB_SYNC_NONE);
119 119
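
This fadvise change, like most of the call sites below, resolves the bdi with inode_to_bdi(mapping->host) instead of reading the now-removed mapping->backing_dev_info. The helper's definition is not part of these hunks; as a rough stand-alone model, assume it walks from the inode to its superblock's bdi and falls back to noop_backing_dev_info when there is no inode. Apart from those two names taken from the patch, everything below is a stand-in.

#include <stddef.h>
#include <stdio.h>

struct backing_dev_info { const char *name; };

static struct backing_dev_info noop_backing_dev_info = { .name = "noop" };

struct super_block { struct backing_dev_info *s_bdi; };
struct inode { struct super_block *i_sb; };
struct address_space { struct inode *host; };

/* Assumed shape only: resolve through the inode's superblock, fall back
 * to the noop bdi when there is no inode at all. */
static struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
    if (!inode)
        return &noop_backing_dev_info;
    return inode->i_sb->s_bdi;
}

int main(void)
{
    struct backing_dev_info disk_bdi = { .name = "8:0" };
    struct super_block sb = { .s_bdi = &disk_bdi };
    struct inode ino = { .i_sb = &sb };
    struct address_space mapping = { .host = &ino };

    printf("%s\n", inode_to_bdi(mapping.host)->name);   /* "8:0"  */
    printf("%s\n", inode_to_bdi(NULL)->name);            /* "noop" */
    return 0;
}
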
diff --git a/mm/filemap.c b/mm/filemap.c
index bf7a27142704..d9f5336552d7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
211 */ 211 */
212 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { 212 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
213 dec_zone_page_state(page, NR_FILE_DIRTY); 213 dec_zone_page_state(page, NR_FILE_DIRTY);
214 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); 214 dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
215 } 215 }
216} 216}
217 217
@@ -2564,7 +2564,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2564 size_t count = iov_iter_count(from); 2564 size_t count = iov_iter_count(from);
2565 2565
2566 /* We can write back this queue in page reclaim */ 2566 /* We can write back this queue in page reclaim */
2567 current->backing_dev_info = mapping->backing_dev_info; 2567 current->backing_dev_info = inode_to_bdi(inode);
2568 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 2568 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2569 if (err) 2569 if (err)
2570 goto out; 2570 goto out;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 70c09da1a419..c175f9f25210 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/backing-dev.h>
12#include <linux/pagemap.h> 13#include <linux/pagemap.h>
13#include <linux/export.h> 14#include <linux/export.h>
14#include <linux/uio.h> 15#include <linux/uio.h>
@@ -409,7 +410,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
409 count = len; 410 count = len;
410 411
411 /* We can write back this queue in page reclaim */ 412 /* We can write back this queue in page reclaim */
412 current->backing_dev_info = mapping->backing_dev_info; 413 current->backing_dev_info = inode_to_bdi(inode);
413 414
414 ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode)); 415 ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
415 if (ret) 416 if (ret)
diff --git a/mm/madvise.c b/mm/madvise.c
index d79fb5e8f80a..1077cbdc8b52 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -222,19 +222,22 @@ static long madvise_willneed(struct vm_area_struct *vma,
222 struct file *file = vma->vm_file; 222 struct file *file = vma->vm_file;
223 223
224#ifdef CONFIG_SWAP 224#ifdef CONFIG_SWAP
225 if (!file || mapping_cap_swap_backed(file->f_mapping)) { 225 if (!file) {
226 *prev = vma; 226 *prev = vma;
227 if (!file) 227 force_swapin_readahead(vma, start, end);
228 force_swapin_readahead(vma, start, end);
229 else
230 force_shm_swapin_readahead(vma, start, end,
231 file->f_mapping);
232 return 0; 228 return 0;
233 } 229 }
234#endif
235 230
231 if (shmem_mapping(file->f_mapping)) {
232 *prev = vma;
233 force_shm_swapin_readahead(vma, start, end,
234 file->f_mapping);
235 return 0;
236 }
237#else
236 if (!file) 238 if (!file)
237 return -EBADF; 239 return -EBADF;
240#endif
238 241
239 if (file->f_mapping->a_ops->get_xip_mem) { 242 if (file->f_mapping->a_ops->get_xip_mem) {
240 /* no bad return value, but ignore advice */ 243 /* no bad return value, but ignore advice */
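
The MADV_WILLNEED rework above is easy to misread because the #ifdef moved: with CONFIG_SWAP, anonymous ranges get force_swapin_readahead() and shmem-backed files get force_shm_swapin_readahead(); only the !CONFIG_SWAP build keeps the early -EBADF for a missing file, and everything else falls through to normal page-cache readahead. A small userspace model with a compile-time toggle makes that branching explicit; the helper names echo the kernel ones but are stubs here.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SWAP 1    /* flip to 0 to see the !CONFIG_SWAP path */

/* Stubs standing in for the real readahead helpers. */
static void force_swapin_readahead(void)     { puts("anon swapin readahead"); }
static void force_shm_swapin_readahead(void) { puts("shmem swapin readahead"); }

/* Model of the new madvise_willneed() dispatch for a WILLNEED hint. */
static int willneed(bool has_file, bool file_is_shmem)
{
#if CONFIG_SWAP
    if (!has_file) {
        force_swapin_readahead();
        return 0;
    }
    if (file_is_shmem) {
        force_shm_swapin_readahead();
        return 0;
    }
#else
    if (!has_file)
        return -EBADF;
#endif
    puts("fall through to page-cache readahead");
    return 0;
}

int main(void)
{
    willneed(false, false);    /* anonymous mapping */
    willneed(true, true);      /* tmpfs/shmem file  */
    willneed(true, false);     /* regular file      */
    return 0;
}
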
diff --git a/mm/nommu.c b/mm/nommu.c
index 1a19fb3b0463..7296360fc057 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -980,9 +980,6 @@ static int validate_mmap_request(struct file *file,
980 return -EOVERFLOW; 980 return -EOVERFLOW;
981 981
982 if (file) { 982 if (file) {
983 /* validate file mapping requests */
984 struct address_space *mapping;
985
986 /* files must support mmap */ 983 /* files must support mmap */
987 if (!file->f_op->mmap) 984 if (!file->f_op->mmap)
988 return -ENODEV; 985 return -ENODEV;
@@ -991,28 +988,22 @@ static int validate_mmap_request(struct file *file,
991 * - we support chardevs that provide their own "memory" 988 * - we support chardevs that provide their own "memory"
992 * - we support files/blockdevs that are memory backed 989 * - we support files/blockdevs that are memory backed
993 */ 990 */
994 mapping = file->f_mapping; 991 if (file->f_op->mmap_capabilities) {
995 if (!mapping) 992 capabilities = file->f_op->mmap_capabilities(file);
996 mapping = file_inode(file)->i_mapping; 993 } else {
997
998 capabilities = 0;
999 if (mapping && mapping->backing_dev_info)
1000 capabilities = mapping->backing_dev_info->capabilities;
1001
1002 if (!capabilities) {
1003 /* no explicit capabilities set, so assume some 994 /* no explicit capabilities set, so assume some
1004 * defaults */ 995 * defaults */
1005 switch (file_inode(file)->i_mode & S_IFMT) { 996 switch (file_inode(file)->i_mode & S_IFMT) {
1006 case S_IFREG: 997 case S_IFREG:
1007 case S_IFBLK: 998 case S_IFBLK:
1008 capabilities = BDI_CAP_MAP_COPY; 999 capabilities = NOMMU_MAP_COPY;
1009 break; 1000 break;
1010 1001
1011 case S_IFCHR: 1002 case S_IFCHR:
1012 capabilities = 1003 capabilities =
1013 BDI_CAP_MAP_DIRECT | 1004 NOMMU_MAP_DIRECT |
1014 BDI_CAP_READ_MAP | 1005 NOMMU_MAP_READ |
1015 BDI_CAP_WRITE_MAP; 1006 NOMMU_MAP_WRITE;
1016 break; 1007 break;
1017 1008
1018 default: 1009 default:
@@ -1023,9 +1014,9 @@ static int validate_mmap_request(struct file *file,
1023 /* eliminate any capabilities that we can't support on this 1014 /* eliminate any capabilities that we can't support on this
1024 * device */ 1015 * device */
1025 if (!file->f_op->get_unmapped_area) 1016 if (!file->f_op->get_unmapped_area)
1026 capabilities &= ~BDI_CAP_MAP_DIRECT; 1017 capabilities &= ~NOMMU_MAP_DIRECT;
1027 if (!file->f_op->read) 1018 if (!file->f_op->read)
1028 capabilities &= ~BDI_CAP_MAP_COPY; 1019 capabilities &= ~NOMMU_MAP_COPY;
1029 1020
1030 /* The file shall have been opened with read permission. */ 1021 /* The file shall have been opened with read permission. */
1031 if (!(file->f_mode & FMODE_READ)) 1022 if (!(file->f_mode & FMODE_READ))
@@ -1044,29 +1035,29 @@ static int validate_mmap_request(struct file *file,
1044 if (locks_verify_locked(file)) 1035 if (locks_verify_locked(file))
1045 return -EAGAIN; 1036 return -EAGAIN;
1046 1037
1047 if (!(capabilities & BDI_CAP_MAP_DIRECT)) 1038 if (!(capabilities & NOMMU_MAP_DIRECT))
1048 return -ENODEV; 1039 return -ENODEV;
1049 1040
1050 /* we mustn't privatise shared mappings */ 1041 /* we mustn't privatise shared mappings */
1051 capabilities &= ~BDI_CAP_MAP_COPY; 1042 capabilities &= ~NOMMU_MAP_COPY;
1052 } else { 1043 } else {
1053 /* we're going to read the file into private memory we 1044 /* we're going to read the file into private memory we
1054 * allocate */ 1045 * allocate */
1055 if (!(capabilities & BDI_CAP_MAP_COPY)) 1046 if (!(capabilities & NOMMU_MAP_COPY))
1056 return -ENODEV; 1047 return -ENODEV;
1057 1048
1058 /* we don't permit a private writable mapping to be 1049 /* we don't permit a private writable mapping to be
1059 * shared with the backing device */ 1050 * shared with the backing device */
1060 if (prot & PROT_WRITE) 1051 if (prot & PROT_WRITE)
1061 capabilities &= ~BDI_CAP_MAP_DIRECT; 1052 capabilities &= ~NOMMU_MAP_DIRECT;
1062 } 1053 }
1063 1054
1064 if (capabilities & BDI_CAP_MAP_DIRECT) { 1055 if (capabilities & NOMMU_MAP_DIRECT) {
1065 if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) || 1056 if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
1066 ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || 1057 ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
1067 ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP)) 1058 ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
1068 ) { 1059 ) {
1069 capabilities &= ~BDI_CAP_MAP_DIRECT; 1060 capabilities &= ~NOMMU_MAP_DIRECT;
1070 if (flags & MAP_SHARED) { 1061 if (flags & MAP_SHARED) {
1071 printk(KERN_WARNING 1062 printk(KERN_WARNING
1072 "MAP_SHARED not completely supported on !MMU\n"); 1063 "MAP_SHARED not completely supported on !MMU\n");
@@ -1083,21 +1074,21 @@ static int validate_mmap_request(struct file *file,
1083 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { 1074 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1084 /* handle implication of PROT_EXEC by PROT_READ */ 1075 /* handle implication of PROT_EXEC by PROT_READ */
1085 if (current->personality & READ_IMPLIES_EXEC) { 1076 if (current->personality & READ_IMPLIES_EXEC) {
1086 if (capabilities & BDI_CAP_EXEC_MAP) 1077 if (capabilities & NOMMU_MAP_EXEC)
1087 prot |= PROT_EXEC; 1078 prot |= PROT_EXEC;
1088 } 1079 }
1089 } else if ((prot & PROT_READ) && 1080 } else if ((prot & PROT_READ) &&
1090 (prot & PROT_EXEC) && 1081 (prot & PROT_EXEC) &&
1091 !(capabilities & BDI_CAP_EXEC_MAP) 1082 !(capabilities & NOMMU_MAP_EXEC)
1092 ) { 1083 ) {
1093 /* backing file is not executable, try to copy */ 1084 /* backing file is not executable, try to copy */
1094 capabilities &= ~BDI_CAP_MAP_DIRECT; 1085 capabilities &= ~NOMMU_MAP_DIRECT;
1095 } 1086 }
1096 } else { 1087 } else {
1097 /* anonymous mappings are always memory backed and can be 1088 /* anonymous mappings are always memory backed and can be
1098 * privately mapped 1089 * privately mapped
1099 */ 1090 */
1100 capabilities = BDI_CAP_MAP_COPY; 1091 capabilities = NOMMU_MAP_COPY;
1101 1092
1102 /* handle PROT_EXEC implication by PROT_READ */ 1093 /* handle PROT_EXEC implication by PROT_READ */
1103 if ((prot & PROT_READ) && 1094 if ((prot & PROT_READ) &&
@@ -1129,7 +1120,7 @@ static unsigned long determine_vm_flags(struct file *file,
1129 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); 1120 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1130 /* vm_flags |= mm->def_flags; */ 1121 /* vm_flags |= mm->def_flags; */
1131 1122
1132 if (!(capabilities & BDI_CAP_MAP_DIRECT)) { 1123 if (!(capabilities & NOMMU_MAP_DIRECT)) {
1133 /* attempt to share read-only copies of mapped file chunks */ 1124 /* attempt to share read-only copies of mapped file chunks */
1134 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 1125 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1135 if (file && !(prot & PROT_WRITE)) 1126 if (file && !(prot & PROT_WRITE))
@@ -1138,7 +1129,7 @@ static unsigned long determine_vm_flags(struct file *file,
1138 /* overlay a shareable mapping on the backing device or inode 1129 /* overlay a shareable mapping on the backing device or inode
1139 * if possible - used for chardevs, ramfs/tmpfs/shmfs and 1130 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1140 * romfs/cramfs */ 1131 * romfs/cramfs */
1141 vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); 1132 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
1142 if (flags & MAP_SHARED) 1133 if (flags & MAP_SHARED)
1143 vm_flags |= VM_SHARED; 1134 vm_flags |= VM_SHARED;
1144 } 1135 }
@@ -1191,7 +1182,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1191 * shared mappings on devices or memory 1182 * shared mappings on devices or memory
1192 * - VM_MAYSHARE will be set if it may attempt to share 1183 * - VM_MAYSHARE will be set if it may attempt to share
1193 */ 1184 */
1194 if (capabilities & BDI_CAP_MAP_DIRECT) { 1185 if (capabilities & NOMMU_MAP_DIRECT) {
1195 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); 1186 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1196 if (ret == 0) { 1187 if (ret == 0) {
1197 /* shouldn't return success if we're not sharing */ 1188 /* shouldn't return success if we're not sharing */
@@ -1380,7 +1371,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1380 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && 1371 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1381 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { 1372 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1382 /* new mapping is not a subset of the region */ 1373 /* new mapping is not a subset of the region */
1383 if (!(capabilities & BDI_CAP_MAP_DIRECT)) 1374 if (!(capabilities & NOMMU_MAP_DIRECT))
1384 goto sharing_violation; 1375 goto sharing_violation;
1385 continue; 1376 continue;
1386 } 1377 }
@@ -1419,7 +1410,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1419 * - this is the hook for quasi-memory character devices to 1410 * - this is the hook for quasi-memory character devices to
1420 * tell us the location of a shared mapping 1411 * tell us the location of a shared mapping
1421 */ 1412 */
1422 if (capabilities & BDI_CAP_MAP_DIRECT) { 1413 if (capabilities & NOMMU_MAP_DIRECT) {
1423 addr = file->f_op->get_unmapped_area(file, addr, len, 1414 addr = file->f_op->get_unmapped_area(file, addr, len,
1424 pgoff, flags); 1415 pgoff, flags);
1425 if (IS_ERR_VALUE(addr)) { 1416 if (IS_ERR_VALUE(addr)) {
@@ -1431,10 +1422,10 @@ unsigned long do_mmap_pgoff(struct file *file,
1431 * the mapping so we'll have to attempt to copy 1422 * the mapping so we'll have to attempt to copy
1432 * it */ 1423 * it */
1433 ret = -ENODEV; 1424 ret = -ENODEV;
1434 if (!(capabilities & BDI_CAP_MAP_COPY)) 1425 if (!(capabilities & NOMMU_MAP_COPY))
1435 goto error_just_free; 1426 goto error_just_free;
1436 1427
1437 capabilities &= ~BDI_CAP_MAP_DIRECT; 1428 capabilities &= ~NOMMU_MAP_DIRECT;
1438 } else { 1429 } else {
1439 vma->vm_start = region->vm_start = addr; 1430 vma->vm_start = region->vm_start = addr;
1440 vma->vm_end = region->vm_end = addr + len; 1431 vma->vm_end = region->vm_end = addr + len;
@@ -1445,7 +1436,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1445 vma->vm_region = region; 1436 vma->vm_region = region;
1446 1437
1447 /* set up the mapping 1438 /* set up the mapping
1448 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set 1439 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1449 */ 1440 */
1450 if (file && vma->vm_flags & VM_SHARED) 1441 if (file && vma->vm_flags & VM_SHARED)
1451 ret = do_mmap_shared_file(vma); 1442 ret = do_mmap_shared_file(vma);
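
Net effect of the mm/nommu.c changes above: mmap capabilities now come from the file itself, either from the new f_op->mmap_capabilities() hook or from mode-based defaults, and are then masked down by what the file_operations can actually do. A stand-alone sketch of that derivation follows; the NOMMU_MAP_* names come from the hunk, but their values and the fake_file type are invented here.

#include <stdio.h>
#include <sys/stat.h>    /* S_IFMT, S_IFREG, S_IFBLK, S_IFCHR */

#define NOMMU_MAP_COPY    0x01
#define NOMMU_MAP_DIRECT  0x02
#define NOMMU_MAP_READ    0x04
#define NOMMU_MAP_WRITE   0x08

struct fake_file {
    unsigned mode;                                      /* inode's i_mode */
    unsigned (*mmap_capabilities)(struct fake_file *);  /* optional hook  */
    int has_get_unmapped_area;
    int has_read;
};

static unsigned mmap_caps(struct fake_file *file)
{
    unsigned caps;

    if (file->mmap_capabilities) {
        caps = file->mmap_capabilities(file);
    } else {
        /* no explicit capabilities set, so assume some defaults */
        switch (file->mode & S_IFMT) {
        case S_IFREG:
        case S_IFBLK:
            caps = NOMMU_MAP_COPY;
            break;
        case S_IFCHR:
            caps = NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
            break;
        default:
            return 0;    /* unsupported file type */
        }
    }

    /* drop whatever this device cannot actually support */
    if (!file->has_get_unmapped_area)
        caps &= ~NOMMU_MAP_DIRECT;
    if (!file->has_read)
        caps &= ~NOMMU_MAP_COPY;
    return caps;
}

int main(void)
{
    struct fake_file reg = { .mode = S_IFREG, .has_read = 1 };
    struct fake_file chr = { .mode = S_IFCHR, .has_get_unmapped_area = 1 };

    printf("regular file caps: 0x%x\n", mmap_caps(&reg));  /* COPY         */
    printf("chardev caps:      0x%x\n", mmap_caps(&chr));  /* DIRECT|RD|WR */
    return 0;
}

The per-file hook replaces the old scheme of reading mapping->backing_dev_info->capabilities, visible in the removed lines of the same hunk.
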
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6a73e47e81c6..45e187b2d971 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping,
1351 unsigned long task_ratelimit; 1351 unsigned long task_ratelimit;
1352 unsigned long dirty_ratelimit; 1352 unsigned long dirty_ratelimit;
1353 unsigned long pos_ratio; 1353 unsigned long pos_ratio;
1354 struct backing_dev_info *bdi = mapping->backing_dev_info; 1354 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
1355 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; 1355 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1356 unsigned long start_time = jiffies; 1356 unsigned long start_time = jiffies;
1357 1357
@@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1574 */ 1574 */
1575void balance_dirty_pages_ratelimited(struct address_space *mapping) 1575void balance_dirty_pages_ratelimited(struct address_space *mapping)
1576{ 1576{
1577 struct backing_dev_info *bdi = mapping->backing_dev_info; 1577 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
1578 int ratelimit; 1578 int ratelimit;
1579 int *p; 1579 int *p;
1580 1580
@@ -1929,7 +1929,7 @@ continue_unlock:
1929 if (!clear_page_dirty_for_io(page)) 1929 if (!clear_page_dirty_for_io(page))
1930 goto continue_unlock; 1930 goto continue_unlock;
1931 1931
1932 trace_wbc_writepage(wbc, mapping->backing_dev_info); 1932 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
1933 ret = (*writepage)(page, wbc, data); 1933 ret = (*writepage)(page, wbc, data);
1934 if (unlikely(ret)) { 1934 if (unlikely(ret)) {
1935 if (ret == AOP_WRITEPAGE_ACTIVATE) { 1935 if (ret == AOP_WRITEPAGE_ACTIVATE) {
@@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
2094 trace_writeback_dirty_page(page, mapping); 2094 trace_writeback_dirty_page(page, mapping);
2095 2095
2096 if (mapping_cap_account_dirty(mapping)) { 2096 if (mapping_cap_account_dirty(mapping)) {
2097 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
2098
2097 __inc_zone_page_state(page, NR_FILE_DIRTY); 2099 __inc_zone_page_state(page, NR_FILE_DIRTY);
2098 __inc_zone_page_state(page, NR_DIRTIED); 2100 __inc_zone_page_state(page, NR_DIRTIED);
2099 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); 2101 __inc_bdi_stat(bdi, BDI_RECLAIMABLE);
2100 __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 2102 __inc_bdi_stat(bdi, BDI_DIRTIED);
2101 task_io_account_write(PAGE_CACHE_SIZE); 2103 task_io_account_write(PAGE_CACHE_SIZE);
2102 current->nr_dirtied++; 2104 current->nr_dirtied++;
2103 this_cpu_inc(bdp_ratelimits); 2105 this_cpu_inc(bdp_ratelimits);
@@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page)
2156 if (mapping && mapping_cap_account_dirty(mapping)) { 2158 if (mapping && mapping_cap_account_dirty(mapping)) {
2157 current->nr_dirtied--; 2159 current->nr_dirtied--;
2158 dec_zone_page_state(page, NR_DIRTIED); 2160 dec_zone_page_state(page, NR_DIRTIED);
2159 dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); 2161 dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
2160 } 2162 }
2161} 2163}
2162EXPORT_SYMBOL(account_page_redirty); 2164EXPORT_SYMBOL(account_page_redirty);
@@ -2298,7 +2300,7 @@ int clear_page_dirty_for_io(struct page *page)
2298 */ 2300 */
2299 if (TestClearPageDirty(page)) { 2301 if (TestClearPageDirty(page)) {
2300 dec_zone_page_state(page, NR_FILE_DIRTY); 2302 dec_zone_page_state(page, NR_FILE_DIRTY);
2301 dec_bdi_stat(mapping->backing_dev_info, 2303 dec_bdi_stat(inode_to_bdi(mapping->host),
2302 BDI_RECLAIMABLE); 2304 BDI_RECLAIMABLE);
2303 return 1; 2305 return 1;
2304 } 2306 }
@@ -2316,7 +2318,7 @@ int test_clear_page_writeback(struct page *page)
2316 2318
2317 memcg = mem_cgroup_begin_page_stat(page); 2319 memcg = mem_cgroup_begin_page_stat(page);
2318 if (mapping) { 2320 if (mapping) {
2319 struct backing_dev_info *bdi = mapping->backing_dev_info; 2321 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
2320 unsigned long flags; 2322 unsigned long flags;
2321 2323
2322 spin_lock_irqsave(&mapping->tree_lock, flags); 2324 spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2351,7 +2353,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
2351 2353
2352 memcg = mem_cgroup_begin_page_stat(page); 2354 memcg = mem_cgroup_begin_page_stat(page);
2353 if (mapping) { 2355 if (mapping) {
2354 struct backing_dev_info *bdi = mapping->backing_dev_info; 2356 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
2355 unsigned long flags; 2357 unsigned long flags;
2356 2358
2357 spin_lock_irqsave(&mapping->tree_lock, flags); 2359 spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2405,12 +2407,7 @@ EXPORT_SYMBOL(mapping_tagged);
2405 */ 2407 */
2406void wait_for_stable_page(struct page *page) 2408void wait_for_stable_page(struct page *page)
2407{ 2409{
2408 struct address_space *mapping = page_mapping(page); 2410 if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
2409 struct backing_dev_info *bdi = mapping->backing_dev_info; 2411 wait_on_page_writeback(page);
2410
2411 if (!bdi_cap_stable_pages_required(bdi))
2412 return;
2413
2414 wait_on_page_writeback(page);
2415} 2412}
2416EXPORT_SYMBOL_GPL(wait_for_stable_page); 2413EXPORT_SYMBOL_GPL(wait_for_stable_page);
diff --git a/mm/readahead.c b/mm/readahead.c
index 17b9172ec37f..935675844b2e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -27,7 +27,7 @@
27void 27void
28file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) 28file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
29{ 29{
30 ra->ra_pages = mapping->backing_dev_info->ra_pages; 30 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
31 ra->prev_pos = -1; 31 ra->prev_pos = -1;
32} 32}
33EXPORT_SYMBOL_GPL(file_ra_state_init); 33EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
541 /* 541 /*
542 * Defer asynchronous read-ahead on IO congestion. 542 * Defer asynchronous read-ahead on IO congestion.
543 */ 543 */
544 if (bdi_read_congested(mapping->backing_dev_info)) 544 if (bdi_read_congested(inode_to_bdi(mapping->host)))
545 return; 545 return;
546 546
547 /* do read-ahead */ 547 /* do read-ahead */
diff --git a/mm/shmem.c b/mm/shmem.c
index 864c878401e6..a63031fa3e0c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -191,11 +191,6 @@ static const struct inode_operations shmem_dir_inode_operations;
191static const struct inode_operations shmem_special_inode_operations; 191static const struct inode_operations shmem_special_inode_operations;
192static const struct vm_operations_struct shmem_vm_ops; 192static const struct vm_operations_struct shmem_vm_ops;
193 193
194static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
195 .ra_pages = 0, /* No readahead */
196 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
197};
198
199static LIST_HEAD(shmem_swaplist); 194static LIST_HEAD(shmem_swaplist);
200static DEFINE_MUTEX(shmem_swaplist_mutex); 195static DEFINE_MUTEX(shmem_swaplist_mutex);
201 196
@@ -765,11 +760,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
765 goto redirty; 760 goto redirty;
766 761
767 /* 762 /*
768 * shmem_backing_dev_info's capabilities prevent regular writeback or 763 * Our capabilities prevent regular writeback or sync from ever calling
769 * sync from ever calling shmem_writepage; but a stacking filesystem 764 * shmem_writepage; but a stacking filesystem might use ->writepage of
770 * might use ->writepage of its underlying filesystem, in which case 765 * its underlying filesystem, in which case tmpfs should write out to
771 * tmpfs should write out to swap only in response to memory pressure, 766 * swap only in response to memory pressure, and not for the writeback
772 * and not for the writeback threads or sync. 767 * threads or sync.
773 */ 768 */
774 if (!wbc->for_reclaim) { 769 if (!wbc->for_reclaim) {
775 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 770 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
@@ -1415,7 +1410,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1415 inode->i_ino = get_next_ino(); 1410 inode->i_ino = get_next_ino();
1416 inode_init_owner(inode, dir, mode); 1411 inode_init_owner(inode, dir, mode);
1417 inode->i_blocks = 0; 1412 inode->i_blocks = 0;
1418 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1419 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 1413 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1420 inode->i_generation = get_seconds(); 1414 inode->i_generation = get_seconds();
1421 info = SHMEM_I(inode); 1415 info = SHMEM_I(inode);
@@ -1461,7 +1455,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1461 1455
1462bool shmem_mapping(struct address_space *mapping) 1456bool shmem_mapping(struct address_space *mapping)
1463{ 1457{
1464 return mapping->backing_dev_info == &shmem_backing_dev_info; 1458 return mapping->host->i_sb->s_op == &shmem_ops;
1465} 1459}
1466 1460
1467#ifdef CONFIG_TMPFS 1461#ifdef CONFIG_TMPFS
@@ -3225,10 +3219,6 @@ int __init shmem_init(void)
3225 if (shmem_inode_cachep) 3219 if (shmem_inode_cachep)
3226 return 0; 3220 return 0;
3227 3221
3228 error = bdi_init(&shmem_backing_dev_info);
3229 if (error)
3230 goto out4;
3231
3232 error = shmem_init_inodecache(); 3222 error = shmem_init_inodecache();
3233 if (error) 3223 if (error)
3234 goto out3; 3224 goto out3;
@@ -3252,8 +3242,6 @@ out1:
3252out2: 3242out2:
3253 shmem_destroy_inodecache(); 3243 shmem_destroy_inodecache();
3254out3: 3244out3:
3255 bdi_destroy(&shmem_backing_dev_info);
3256out4:
3257 shm_mnt = ERR_PTR(error); 3245 shm_mnt = ERR_PTR(error);
3258 return error; 3246 return error;
3259} 3247}
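
With shmem_backing_dev_info deleted above, shmem_mapping() can no longer identify tmpfs mappings by bdi pointer; it now compares the mapping's superblock operations against shmem_ops. A minimal userspace model of that identity test follows; every type below is a stand-in.

#include <stdbool.h>
#include <stdio.h>

struct super_operations { int unused; };
struct super_block { const struct super_operations *s_op; };
struct inode { struct super_block *i_sb; };
struct address_space { struct inode *host; };

static const struct super_operations shmem_ops = { 0 };
static const struct super_operations other_ops = { 0 };

/* A mapping is shmem-backed iff its superblock uses shmem_ops. */
static bool shmem_mapping(const struct address_space *mapping)
{
    return mapping->host->i_sb->s_op == &shmem_ops;
}

int main(void)
{
    struct super_block shm_sb   = { .s_op = &shmem_ops };
    struct super_block other_sb = { .s_op = &other_ops };
    struct inode shm_ino   = { .i_sb = &shm_sb };
    struct inode other_ino = { .i_sb = &other_sb };
    struct address_space shm_map   = { .host = &shm_ino };
    struct address_space other_map = { .host = &other_ino };

    printf("%d %d\n", shmem_mapping(&shm_map), shmem_mapping(&other_map));
    return 0;
}

The pointer comparison works because all tmpfs superblocks point at the same static shmem_ops, so no flag in the mapping itself is needed.
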
diff --git a/mm/swap.c b/mm/swap.c
index 5b3087228b99..cd3a5e64cea9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1138,8 +1138,6 @@ void __init swap_setup(void)
1138#ifdef CONFIG_SWAP 1138#ifdef CONFIG_SWAP
1139 int i; 1139 int i;
1140 1140
1141 if (bdi_init(swapper_spaces[0].backing_dev_info))
1142 panic("Failed to init swap bdi");
1143 for (i = 0; i < MAX_SWAPFILES; i++) 1141 for (i = 0; i < MAX_SWAPFILES; i++)
1144 spin_lock_init(&swapper_spaces[i].tree_lock); 1142 spin_lock_init(&swapper_spaces[i].tree_lock);
1145#endif 1143#endif
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9711342987a0..405923f77334 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -32,17 +32,11 @@ static const struct address_space_operations swap_aops = {
32#endif 32#endif
33}; 33};
34 34
35static struct backing_dev_info swap_backing_dev_info = {
36 .name = "swap",
37 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
38};
39
40struct address_space swapper_spaces[MAX_SWAPFILES] = { 35struct address_space swapper_spaces[MAX_SWAPFILES] = {
41 [0 ... MAX_SWAPFILES - 1] = { 36 [0 ... MAX_SWAPFILES - 1] = {
42 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), 37 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
43 .i_mmap_writable = ATOMIC_INIT(0), 38 .i_mmap_writable = ATOMIC_INIT(0),
44 .a_ops = &swap_aops, 39 .a_ops = &swap_aops,
45 .backing_dev_info = &swap_backing_dev_info,
46 } 40 }
47}; 41};
48 42
diff --git a/mm/truncate.c b/mm/truncate.c
index f1e4d6052369..ddec5a5966d7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
112 struct address_space *mapping = page->mapping; 112 struct address_space *mapping = page->mapping;
113 if (mapping && mapping_cap_account_dirty(mapping)) { 113 if (mapping && mapping_cap_account_dirty(mapping)) {
114 dec_zone_page_state(page, NR_FILE_DIRTY); 114 dec_zone_page_state(page, NR_FILE_DIRTY);
115 dec_bdi_stat(mapping->backing_dev_info, 115 dec_bdi_stat(inode_to_bdi(mapping->host),
116 BDI_RECLAIMABLE); 116 BDI_RECLAIMABLE);
117 if (account_size) 117 if (account_size)
118 task_io_account_cancelled_write(account_size); 118 task_io_account_cancelled_write(account_size);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e645ee52045..224dd298fdcd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -500,7 +500,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
500 } 500 }
501 if (mapping->a_ops->writepage == NULL) 501 if (mapping->a_ops->writepage == NULL)
502 return PAGE_ACTIVATE; 502 return PAGE_ACTIVATE;
503 if (!may_write_to_queue(mapping->backing_dev_info, sc)) 503 if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
504 return PAGE_KEEP; 504 return PAGE_KEEP;
505 505
506 if (clear_page_dirty_for_io(page)) { 506 if (clear_page_dirty_for_io(page)) {
@@ -879,7 +879,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
879 */ 879 */
880 mapping = page_mapping(page); 880 mapping = page_mapping(page);
881 if (((dirty || writeback) && mapping && 881 if (((dirty || writeback) && mapping &&
882 bdi_write_congested(mapping->backing_dev_info)) || 882 bdi_write_congested(inode_to_bdi(mapping->host))) ||
883 (writeback && PageReclaim(page))) 883 (writeback && PageReclaim(page)))
884 nr_congested++; 884 nr_congested++;
885 885
diff --git a/security/security.c b/security/security.c
index 18b35c63fc0c..a0442b20f001 100644
--- a/security/security.c
+++ b/security/security.c
@@ -726,16 +726,15 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
726 return prot | PROT_EXEC; 726 return prot | PROT_EXEC;
727 /* 727 /*
728 * ditto if it's not on noexec mount, except that on !MMU we need 728 * ditto if it's not on noexec mount, except that on !MMU we need
729 * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case 729 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
730 */ 730 */
731 if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { 731 if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
732#ifndef CONFIG_MMU 732#ifndef CONFIG_MMU
733 unsigned long caps = 0; 733 if (file->f_op->mmap_capabilities) {
734 struct address_space *mapping = file->f_mapping; 734 unsigned caps = file->f_op->mmap_capabilities(file);
735 if (mapping && mapping->backing_dev_info) 735 if (!(caps & NOMMU_MAP_EXEC))
736 caps = mapping->backing_dev_info->capabilities; 736 return prot;
737 if (!(caps & BDI_CAP_EXEC_MAP)) 737 }
738 return prot;
739#endif 738#endif
740 return prot | PROT_EXEC; 739 return prot | PROT_EXEC;
741 } 740 }
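
On !MMU kernels the security hunk above stops peeking at backing_dev_info->capabilities for BDI_CAP_EXEC_MAP and instead asks the file's mmap_capabilities() hook whether NOMMU_MAP_EXEC is available before implying PROT_EXEC. A small userspace model of that decision follows; the flag values, fake_file type and maybe_imply_exec() name are illustrative.

#include <stdio.h>

#define FAKE_PROT_EXEC   0x4     /* stand-in for PROT_EXEC */
#define NOMMU_MAP_EXEC   0x10    /* name from the patch, value invented */

struct fake_file {
    unsigned (*mmap_capabilities)(struct fake_file *);
};

/* Refuse the implied PROT_EXEC only when the file's hook says the mapping
 * cannot be executable; files without a hook are not restricted here and
 * get PROT_EXEC implied. */
static unsigned long maybe_imply_exec(struct fake_file *file, unsigned long prot)
{
    if (file->mmap_capabilities) {
        unsigned caps = file->mmap_capabilities(file);

        if (!(caps & NOMMU_MAP_EXEC))
            return prot;
    }
    return prot | FAKE_PROT_EXEC;
}

static unsigned caps_none(struct fake_file *f) { (void)f; return 0; }
static unsigned caps_exec(struct fake_file *f) { (void)f; return NOMMU_MAP_EXEC; }

int main(void)
{
    struct fake_file no_exec = { .mmap_capabilities = caps_none };
    struct fake_file exec_ok = { .mmap_capabilities = caps_exec };

    printf("0x%lx 0x%lx\n",
           maybe_imply_exec(&no_exec, 0x1),    /* stays 0x1    */
           maybe_imply_exec(&exec_ok, 0x1));   /* becomes 0x5  */
    return 0;
}
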