about summary refs log tree commit diff stats
path: root/fs/inode.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  37
1 files changed, 21 insertions, 16 deletions
diff --git a/fs/inode.c b/fs/inode.c
index d0be6159eb7f..32b7c3375021 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
56#define I_HASHBITS i_hash_shift 56#define I_HASHBITS i_hash_shift
57#define I_HASHMASK i_hash_mask 57#define I_HASHMASK i_hash_mask
58 58
59static unsigned int i_hash_mask; 59static unsigned int i_hash_mask __read_mostly;
60static unsigned int i_hash_shift; 60static unsigned int i_hash_shift __read_mostly;
61 61
62/* 62/*
63 * Each inode can be on two separate lists. One is 63 * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
73 73
74LIST_HEAD(inode_in_use); 74LIST_HEAD(inode_in_use);
75LIST_HEAD(inode_unused); 75LIST_HEAD(inode_unused);
76static struct hlist_head *inode_hashtable; 76static struct hlist_head *inode_hashtable __read_mostly;
77 77
78/* 78/*
79 * A simple spinlock to protect the list manipulations. 79 * A simple spinlock to protect the list manipulations.
@@ -84,27 +84,27 @@ static struct hlist_head *inode_hashtable;
84DEFINE_SPINLOCK(inode_lock); 84DEFINE_SPINLOCK(inode_lock);
85 85
86/* 86/*
87 * iprune_sem provides exclusion between the kswapd or try_to_free_pages 87 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
88 * icache shrinking path, and the umount path. Without this exclusion, 88 * icache shrinking path, and the umount path. Without this exclusion,
89 * by the time prune_icache calls iput for the inode whose pages it has 89 * by the time prune_icache calls iput for the inode whose pages it has
90 * been invalidating, or by the time it calls clear_inode & destroy_inode 90 * been invalidating, or by the time it calls clear_inode & destroy_inode
91 * from its final dispose_list, the struct super_block they refer to 91 * from its final dispose_list, the struct super_block they refer to
92 * (for inode->i_sb->s_op) may already have been freed and reused. 92 * (for inode->i_sb->s_op) may already have been freed and reused.
93 */ 93 */
94DECLARE_MUTEX(iprune_sem); 94static DEFINE_MUTEX(iprune_mutex);
95 95
96/* 96/*
97 * Statistics gathering.. 97 * Statistics gathering..
98 */ 98 */
99struct inodes_stat_t inodes_stat; 99struct inodes_stat_t inodes_stat;
100 100
101static kmem_cache_t * inode_cachep; 101static kmem_cache_t * inode_cachep __read_mostly;
102 102
103static struct inode *alloc_inode(struct super_block *sb) 103static struct inode *alloc_inode(struct super_block *sb)
104{ 104{
105 static struct address_space_operations empty_aops; 105 static struct address_space_operations empty_aops;
106 static struct inode_operations empty_iops; 106 static struct inode_operations empty_iops;
107 static struct file_operations empty_fops; 107 static const struct file_operations empty_fops;
108 struct inode *inode; 108 struct inode *inode;
109 109
110 if (sb->s_op->alloc_inode) 110 if (sb->s_op->alloc_inode)
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
206 i_size_ordered_init(inode); 206 i_size_ordered_init(inode);
207#ifdef CONFIG_INOTIFY 207#ifdef CONFIG_INOTIFY
208 INIT_LIST_HEAD(&inode->inotify_watches); 208 INIT_LIST_HEAD(&inode->inotify_watches);
209 sema_init(&inode->inotify_sem, 1); 209 mutex_init(&inode->inotify_mutex);
210#endif 210#endif
211} 211}
212 212
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
319 /* 319 /*
320 * We can reschedule here without worrying about the list's 320 * We can reschedule here without worrying about the list's
321 * consistency because the per-sb list of inodes must not 321 * consistency because the per-sb list of inodes must not
322 * change during umount anymore, and because iprune_sem keeps 322 * change during umount anymore, and because iprune_mutex keeps
323 * shrink_icache_memory() away. 323 * shrink_icache_memory() away.
324 */ 324 */
325 cond_resched_lock(&inode_lock); 325 cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
355 int busy; 355 int busy;
356 LIST_HEAD(throw_away); 356 LIST_HEAD(throw_away);
357 357
358 down(&iprune_sem); 358 mutex_lock(&iprune_mutex);
359 spin_lock(&inode_lock); 359 spin_lock(&inode_lock);
360 inotify_unmount_inodes(&sb->s_inodes); 360 inotify_unmount_inodes(&sb->s_inodes);
361 busy = invalidate_list(&sb->s_inodes, &throw_away); 361 busy = invalidate_list(&sb->s_inodes, &throw_away);
362 spin_unlock(&inode_lock); 362 spin_unlock(&inode_lock);
363 363
364 dispose_list(&throw_away); 364 dispose_list(&throw_away);
365 up(&iprune_sem); 365 mutex_unlock(&iprune_mutex);
366 366
367 return busy; 367 return busy;
368} 368}
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
377 if (sb) { 377 if (sb) {
378 /* 378 /*
379 * no need to lock the super, get_super holds the 379 * no need to lock the super, get_super holds the
380 * read semaphore so the filesystem cannot go away 380 * read mutex so the filesystem cannot go away
381 * under us (->put_super runs with the write lock 381 * under us (->put_super runs with the write lock
382 * hold). 382 * hold).
383 */ 383 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
423 int nr_scanned; 423 int nr_scanned;
424 unsigned long reap = 0; 424 unsigned long reap = 0;
425 425
426 down(&iprune_sem); 426 mutex_lock(&iprune_mutex);
427 spin_lock(&inode_lock); 427 spin_lock(&inode_lock);
428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { 428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
429 struct inode *inode; 429 struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
459 spin_unlock(&inode_lock); 459 spin_unlock(&inode_lock);
460 460
461 dispose_list(&freeable); 461 dispose_list(&freeable);
462 up(&iprune_sem); 462 mutex_unlock(&iprune_mutex);
463 463
464 if (current_is_kswapd()) 464 if (current_is_kswapd())
465 mod_page_state(kswapd_inodesteal, reap); 465 mod_page_state(kswapd_inodesteal, reap);
@@ -1375,8 +1375,13 @@ void __init inode_init(unsigned long mempages)
1375 int loop; 1375 int loop;
1376 1376
1377 /* inode slab cache */ 1377 /* inode slab cache */
1378 inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode), 1378 inode_cachep = kmem_cache_create("inode_cache",
1379 0, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_once, NULL); 1379 sizeof(struct inode),
1380 0,
1381 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1382 SLAB_MEM_SPREAD),
1383 init_once,
1384 NULL);
1380 set_shrinker(DEFAULT_SEEKS, shrink_icache_memory); 1385 set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
1381 1386
1382 /* Hash may have been set up in inode_init_early */ 1387 /* Hash may have been set up in inode_init_early */