Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c | 32 +++++++++-----------------------
1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index aa149e7262ac..86bfaca724db 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -170,20 +170,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	atomic_set(&mapping->i_mmap_writable, 0);
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;
-	mapping->backing_dev_info = &default_backing_dev_info;
 	mapping->writeback_index = 0;
-
-	/*
-	 * If the block_device provides a backing_dev_info for client
-	 * inodes then use that. Otherwise the inode share the bdev's
-	 * backing_dev_info.
-	 */
-	if (sb->s_bdev) {
-		struct backing_dev_info *bdi;
-
-		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-		mapping->backing_dev_info = bdi;
-	}
 	inode->i_private = NULL;
 	inode->i_mapping = mapping;
 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
@@ -194,7 +181,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 #ifdef CONFIG_FSNOTIFY
 	inode->i_fsnotify_mask = 0;
 #endif
-
+	inode->i_flctx = NULL;
 	this_cpu_inc(nr_inodes);
 
 	return 0;
@@ -237,6 +224,7 @@ void __destroy_inode(struct inode *inode)
 	BUG_ON(inode_has_buffers(inode));
 	security_inode_free(inode);
 	fsnotify_inode_delete(inode);
+	locks_free_lock_context(inode->i_flctx);
 	if (!inode->i_nlink) {
 		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
 		atomic_long_dec(&inode->i_sb->s_remove_count);
@@ -355,7 +343,6 @@ void address_space_init_once(struct address_space *mapping)
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
 	mapping->i_mmap = RB_ROOT;
-	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
 }
 EXPORT_SYMBOL(address_space_init_once);
 
@@ -685,8 +672,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
  * with this flag set because they are the inodes that are out of order.
  */
-static enum lru_status
-inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+static enum lru_status inode_lru_isolate(struct list_head *item,
+		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct inode *inode = container_of(item, struct inode, i_lru);
@@ -704,7 +691,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 	 */
 	if (atomic_read(&inode->i_count) ||
 	    (inode->i_state & ~I_REFERENCED)) {
-		list_del_init(&inode->i_lru);
+		list_lru_isolate(lru, &inode->i_lru);
 		spin_unlock(&inode->i_lock);
 		this_cpu_dec(nr_unused);
 		return LRU_REMOVED;
@@ -738,7 +725,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 
 	WARN_ON(inode->i_state & I_NEW);
 	inode->i_state |= I_FREEING;
-	list_move(&inode->i_lru, freeable);
+	list_lru_isolate_move(lru, &inode->i_lru, freeable);
 	spin_unlock(&inode->i_lock);
 
 	this_cpu_dec(nr_unused);
@@ -751,14 +738,13 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
  * to trim from the LRU. Inodes to be freed are moved to a temporary list and
  * then are freed outside inode_lock by dispose_list().
  */
-long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
-		     int nid)
+long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
 {
 	LIST_HEAD(freeable);
 	long freed;
 
-	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
-				   &freeable, &nr_to_scan);
+	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
+				     inode_lru_isolate, &freeable);
 	dispose_list(&freeable);
 	return freed;
 }
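
For context (not part of the patch itself): with prune_icache_sb() now taking a struct shrink_control instead of an explicit nr_to_scan/nid pair, a caller can pass the shrink_control it received straight through and let list_lru_shrink_walk() pick the right per-node list. The sketch below is illustrative only; example_sb_scan() is a hypothetical shrinker callback (roughly mirroring how the per-superblock shrinker is wired up), and list_lru_shrink_count() is assumed to be available from the same list_lru series.

/*
 * Illustrative caller only -- not part of this diff.
 * The superblock's embedded struct shrinker (s_shrink) lets us recover
 * the super_block, budget the scan against its unused-inode count, and
 * hand the shrink_control through unchanged.
 */
static unsigned long example_sb_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	struct super_block *sb = container_of(shrink, struct super_block,
					      s_shrink);
	unsigned long freed = 0;

	/* Budget the scan against this superblock's unused inodes
	 * (list_lru_shrink_count() assumed from the same series). */
	sc->nr_to_scan = list_lru_shrink_count(&sb->s_inode_lru, sc) + 1;

	/* Isolation and disposal happen entirely inside prune_icache_sb(). */
	freed += prune_icache_sb(sb, sc);

	return freed;
}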