Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  692
1 file changed, 387 insertions(+), 305 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 722860b323a9..ae2727ab0c3a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -20,16 +20,15 @@
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
 #include <linux/bootmem.h>
-#include <linux/inotify.h>
 #include <linux/fsnotify.h>
 #include <linux/mount.h>
 #include <linux/async.h>
 #include <linux/posix_acl.h>
+#include <linux/ima.h>
 
 /*
  * This is needed for the following functions:
  * - inode_has_buffers
- * - invalidate_inode_buffers
  * - invalidate_bdev
  *
  * FIXME: remove all knowledge of the buffer layer from this file
@@ -73,8 +72,7 @@ static unsigned int i_hash_shift __read_mostly;
  * allowing for low-overhead inode sync() operations.
  */
 
-LIST_HEAD(inode_in_use);
-LIST_HEAD(inode_unused);
+static LIST_HEAD(inode_lru);
 static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
@@ -104,8 +102,41 @@ static DECLARE_RWSEM(iprune_sem);
  */
 struct inodes_stat_t inodes_stat;
 
+static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
+static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
+
 static struct kmem_cache *inode_cachep __read_mostly;
 
+static inline int get_nr_inodes(void)
+{
+        return percpu_counter_sum_positive(&nr_inodes);
+}
+
+static inline int get_nr_inodes_unused(void)
+{
+        return percpu_counter_sum_positive(&nr_inodes_unused);
+}
+
+int get_nr_dirty_inodes(void)
+{
+        int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
+        return nr_dirty > 0 ? nr_dirty : 0;
+
+}
+
+/*
+ * Handle nr_inode sysctl
+ */
+#ifdef CONFIG_SYSCTL
+int proc_nr_inodes(ctl_table *table, int write,
+                   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+        inodes_stat.nr_inodes = get_nr_inodes();
+        inodes_stat.nr_unused = get_nr_inodes_unused();
+        return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
 static void wake_up_inode(struct inode *inode)
 {
         /*
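
Note on the counters introduced above: nr_inodes and nr_inodes_unused are read independently, so their difference in get_nr_dirty_inodes() can transiently go negative; the explicit clamp keeps the reported value sane. The win is that increments and decrements stay CPU-local and only occasionally touch shared state. A minimal user-space sketch of that batched-counter idea, assuming C11 atomics and thread-local storage (all names here are illustrative; this is not the kernel's percpu_counter implementation):

    #include <stdatomic.h>
    #include <stdio.h>

    #define BATCH 32

    static atomic_long total;              /* shared total, touched rarely */
    static _Thread_local long local_delta; /* per-thread slack, cheap to update */

    static void counter_add(long amount)
    {
        local_delta += amount;
        if (local_delta >= BATCH || local_delta <= -BATCH) {
            atomic_fetch_add(&total, local_delta); /* fold the batch in */
            local_delta = 0;
        }
    }

    static long counter_read_positive(void)
    {
        long v = atomic_load(&total); /* may lag by up to BATCH per thread */
        return v > 0 ? v : 0;         /* clamp, like get_nr_dirty_inodes() */
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            counter_add(1);
        /* prints 96: the last four increments are still thread-local */
        printf("%ld\n", counter_read_positive());
        return 0;
    }
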
@@ -193,6 +224,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_fsnotify_mask = 0;
 #endif
 
+        percpu_counter_inc(&nr_inodes);
+
         return 0;
 out:
         return -ENOMEM;
@@ -233,11 +266,13 @@ void __destroy_inode(struct inode *inode)
         if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
                 posix_acl_release(inode->i_default_acl);
 #endif
+        percpu_counter_dec(&nr_inodes);
 }
 EXPORT_SYMBOL(__destroy_inode);
 
-void destroy_inode(struct inode *inode)
+static void destroy_inode(struct inode *inode)
 {
+        BUG_ON(!list_empty(&inode->i_lru));
         __destroy_inode(inode);
         if (inode->i_sb->s_op->destroy_inode)
                 inode->i_sb->s_op->destroy_inode(inode);
@@ -256,6 +291,8 @@ void inode_init_once(struct inode *inode)
         INIT_HLIST_NODE(&inode->i_hash);
         INIT_LIST_HEAD(&inode->i_dentry);
         INIT_LIST_HEAD(&inode->i_devices);
+        INIT_LIST_HEAD(&inode->i_wb_list);
+        INIT_LIST_HEAD(&inode->i_lru);
         INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
         spin_lock_init(&inode->i_data.tree_lock);
         spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -264,12 +301,8 @@ void inode_init_once(struct inode *inode)
         INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
         INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
         i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
-        INIT_LIST_HEAD(&inode->inotify_watches);
-        mutex_init(&inode->inotify_mutex);
-#endif
 #ifdef CONFIG_FSNOTIFY
-        INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
 #endif
 }
 EXPORT_SYMBOL(inode_init_once);
@@ -286,40 +319,137 @@ static void init_once(void *foo)
  */
 void __iget(struct inode *inode)
 {
-        if (atomic_inc_return(&inode->i_count) != 1)
-                return;
+        atomic_inc(&inode->i_count);
+}
+
+/*
+ * get additional reference to inode; caller must already hold one.
+ */
+void ihold(struct inode *inode)
+{
+        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
+}
+EXPORT_SYMBOL(ihold);
+
+static void inode_lru_list_add(struct inode *inode)
+{
+        if (list_empty(&inode->i_lru)) {
+                list_add(&inode->i_lru, &inode_lru);
+                percpu_counter_inc(&nr_inodes_unused);
+        }
+}
+
+static void inode_lru_list_del(struct inode *inode)
+{
+        if (!list_empty(&inode->i_lru)) {
+                list_del_init(&inode->i_lru);
+                percpu_counter_dec(&nr_inodes_unused);
+        }
+}
+
+static inline void __inode_sb_list_add(struct inode *inode)
+{
+        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+}
 
-        if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-                list_move(&inode->i_list, &inode_in_use);
-        inodes_stat.nr_unused--;
+/**
+ * inode_sb_list_add - add inode to the superblock list of inodes
+ * @inode: inode to add
+ */
+void inode_sb_list_add(struct inode *inode)
+{
+        spin_lock(&inode_lock);
+        __inode_sb_list_add(inode);
+        spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_sb_list_add);
+
+static inline void __inode_sb_list_del(struct inode *inode)
+{
+        list_del_init(&inode->i_sb_list);
+}
+
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+        unsigned long tmp;
+
+        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+                        L1_CACHE_BYTES;
+        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+        return tmp & I_HASHMASK;
 }
 
 /**
- * clear_inode - clear an inode
- * @inode: inode to clear
+ * __insert_inode_hash - hash an inode
+ * @inode: unhashed inode
+ * @hashval: unsigned long value used to locate this object in the
+ *      inode_hashtable.
  *
- * This is called by the filesystem to tell us
- * that the inode is no longer useful. We just
- * terminate it with extreme prejudice.
+ * Add an inode to the inode hash for this superblock.
  */
-void clear_inode(struct inode *inode)
+void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 {
-        might_sleep();
-        invalidate_inode_buffers(inode);
+        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
+
+        spin_lock(&inode_lock);
+        hlist_add_head(&inode->i_hash, b);
+        spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(__insert_inode_hash);
 
+/**
+ * __remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+ *
+ * Remove an inode from the superblock.
+ */
+static void __remove_inode_hash(struct inode *inode)
+{
+        hlist_del_init(&inode->i_hash);
+}
+
+/**
+ * remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+ *
+ * Remove an inode from the superblock.
+ */
+void remove_inode_hash(struct inode *inode)
+{
+        spin_lock(&inode_lock);
+        hlist_del_init(&inode->i_hash);
+        spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(remove_inode_hash);
+
+void end_writeback(struct inode *inode)
+{
+        might_sleep();
         BUG_ON(inode->i_data.nrpages);
+        BUG_ON(!list_empty(&inode->i_data.private_list));
         BUG_ON(!(inode->i_state & I_FREEING));
         BUG_ON(inode->i_state & I_CLEAR);
         inode_sync_wait(inode);
-        if (inode->i_sb->s_op->clear_inode)
-                inode->i_sb->s_op->clear_inode(inode);
+        inode->i_state = I_FREEING | I_CLEAR;
+}
+EXPORT_SYMBOL(end_writeback);
+
+static void evict(struct inode *inode)
+{
+        const struct super_operations *op = inode->i_sb->s_op;
+
+        if (op->evict_inode) {
+                op->evict_inode(inode);
+        } else {
+                if (inode->i_data.nrpages)
+                        truncate_inode_pages(&inode->i_data, 0);
+                end_writeback(inode);
+        }
         if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                 bd_forget(inode);
         if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                 cd_forget(inode);
-        inode->i_state = I_CLEAR;
 }
-EXPORT_SYMBOL(clear_inode);
 
 /*
  * dispose_list - dispose of the contents of a local list
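
The clear_inode()/->clear_inode() pair is folded into a single eviction path above: end_writeback() performs the final sanity checks and marks the inode I_FREEING | I_CLEAR, while evict() either defers to the filesystem's ->evict_inode or does the default page-cache truncation itself. A filesystem supplying its own ->evict_inode is then responsible for the truncation and the end_writeback() call. A hedged sketch of that expected shape, mirroring the default path in evict() (the myfs_* names are hypothetical and on-disk freeing is elided):

    static void myfs_evict_inode(struct inode *inode)
    {
        truncate_inode_pages(&inode->i_data, 0);  /* drop the page cache */
        if (!inode->i_nlink && !is_bad_inode(inode))
            myfs_free_disk_inode(inode);          /* hypothetical helper */
        end_writeback(inode);  /* marks the inode I_FREEING | I_CLEAR */
    }
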
@@ -330,104 +460,113 @@ EXPORT_SYMBOL(clear_inode);
  */
 static void dispose_list(struct list_head *head)
 {
-        int nr_disposed = 0;
-
         while (!list_empty(head)) {
                 struct inode *inode;
 
-                inode = list_first_entry(head, struct inode, i_list);
-                list_del(&inode->i_list);
+                inode = list_first_entry(head, struct inode, i_lru);
+                list_del_init(&inode->i_lru);
 
-                if (inode->i_data.nrpages)
-                        truncate_inode_pages(&inode->i_data, 0);
-                clear_inode(inode);
+                evict(inode);
 
                 spin_lock(&inode_lock);
-                hlist_del_init(&inode->i_hash);
-                list_del_init(&inode->i_sb_list);
+                __remove_inode_hash(inode);
+                __inode_sb_list_del(inode);
                 spin_unlock(&inode_lock);
 
                 wake_up_inode(inode);
                 destroy_inode(inode);
-                nr_disposed++;
         }
-        spin_lock(&inode_lock);
-        inodes_stat.nr_inodes -= nr_disposed;
-        spin_unlock(&inode_lock);
 }
 
-/*
- * Invalidate all inodes for a device.
+/**
+ * evict_inodes - evict all evictable inodes for a superblock
+ * @sb: superblock to operate on
+ *
+ * Make sure that no inodes with zero refcount are retained. This is
+ * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * so any inode reaching zero refcount during or after that call will
+ * be immediately evicted.
  */
-static int invalidate_list(struct list_head *head, struct list_head *dispose)
+void evict_inodes(struct super_block *sb)
 {
-        struct list_head *next;
-        int busy = 0, count = 0;
+        struct inode *inode, *next;
+        LIST_HEAD(dispose);
 
-        next = head->next;
-        for (;;) {
-                struct list_head *tmp = next;
-                struct inode *inode;
-
-                /*
-                 * We can reschedule here without worrying about the list's
-                 * consistency because the per-sb list of inodes must not
-                 * change during umount anymore, and because iprune_sem keeps
-                 * shrink_icache_memory() away.
-                 */
-                cond_resched_lock(&inode_lock);
+        down_write(&iprune_sem);
 
-                next = next->next;
-                if (tmp == head)
-                        break;
-                inode = list_entry(tmp, struct inode, i_sb_list);
-                if (inode->i_state & I_NEW)
+        spin_lock(&inode_lock);
+        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+                if (atomic_read(&inode->i_count))
                         continue;
-                invalidate_inode_buffers(inode);
-                if (!atomic_read(&inode->i_count)) {
-                        list_move(&inode->i_list, dispose);
-                        WARN_ON(inode->i_state & I_NEW);
-                        inode->i_state |= I_FREEING;
-                        count++;
+
+                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+                        WARN_ON(1);
                         continue;
                 }
-                busy = 1;
+
+                inode->i_state |= I_FREEING;
+
+                /*
+                 * Move the inode off the IO lists and LRU once I_FREEING is
+                 * set so that it won't get moved back on there if it is dirty.
+                 */
+                list_move(&inode->i_lru, &dispose);
+                list_del_init(&inode->i_wb_list);
+                if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+                        percpu_counter_dec(&nr_inodes_unused);
         }
-        /* only unused inodes may be cached with i_count zero */
-        inodes_stat.nr_unused -= count;
-        return busy;
+        spin_unlock(&inode_lock);
+
+        dispose_list(&dispose);
+        up_write(&iprune_sem);
 }
 
 /**
- * invalidate_inodes - discard the inodes on a device
- * @sb: superblock
+ * invalidate_inodes - attempt to free all inodes on a superblock
+ * @sb: superblock to operate on
  *
- * Discard all of the inodes for a given superblock. If the discard
- * fails because there are busy inodes then a non zero value is returned.
- * If the discard is successful all the inodes have been discarded.
+ * Attempts to free all inodes for a given superblock. If there were any
+ * busy inodes return a non-zero value, else zero.
  */
 int invalidate_inodes(struct super_block *sb)
 {
-        int busy;
-        LIST_HEAD(throw_away);
+        int busy = 0;
+        struct inode *inode, *next;
+        LIST_HEAD(dispose);
 
         down_write(&iprune_sem);
+
         spin_lock(&inode_lock);
-        inotify_unmount_inodes(&sb->s_inodes);
-        fsnotify_unmount_inodes(&sb->s_inodes);
-        busy = invalidate_list(&sb->s_inodes, &throw_away);
+        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+                        continue;
+                if (atomic_read(&inode->i_count)) {
+                        busy = 1;
+                        continue;
+                }
+
+                inode->i_state |= I_FREEING;
+
+                /*
+                 * Move the inode off the IO lists and LRU once I_FREEING is
+                 * set so that it won't get moved back on there if it is dirty.
+                 */
+                list_move(&inode->i_lru, &dispose);
+                list_del_init(&inode->i_wb_list);
+                if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+                        percpu_counter_dec(&nr_inodes_unused);
+        }
         spin_unlock(&inode_lock);
 
-        dispose_list(&throw_away);
+        dispose_list(&dispose);
         up_write(&iprune_sem);
 
         return busy;
 }
-EXPORT_SYMBOL(invalidate_inodes);
 
 static int can_unuse(struct inode *inode)
 {
-        if (inode->i_state)
+        if (inode->i_state & ~I_REFERENCED)
                 return 0;
         if (inode_has_buffers(inode))
                 return 0;
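
Both evict_inodes() and invalidate_inodes() above share one shape: victims are marked I_FREEING and unlinked onto a private dispose list while inode_lock is held, and the expensive teardown runs only after the lock is dropped. A standalone sketch of that dispose-list pattern, assuming pthreads and a singly linked list (illustrative names only, not kernel code):

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int busy;                /* stand-in for i_count */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *cache;   /* stand-in for sb->s_inodes */

    static void reap_unused(void)
    {
        struct node *dispose = NULL, **pp, *n;

        /* phase 1: unlink victims onto a private list under the lock */
        pthread_mutex_lock(&list_lock);
        pp = &cache;
        while ((n = *pp) != NULL) {
            if (n->busy) {       /* still referenced: skip it */
                pp = &n->next;
                continue;
            }
            *pp = n->next;       /* unlink from the shared list */
            n->next = dispose;   /* push onto the dispose list */
            dispose = n;
        }
        pthread_mutex_unlock(&list_lock);

        /* phase 2: expensive teardown with the lock dropped */
        while ((n = dispose) != NULL) {
            dispose = n->next;
            free(n);             /* stand-in for evict() */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct node *n = calloc(1, sizeof(*n));
            n->busy = i & 1;     /* every other entry stays busy */
            n->next = cache;
            cache = n;
        }
        reap_unused();           /* frees the two idle entries */
        return 0;
    }
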
@@ -439,22 +578,24 @@ static int can_unuse(struct inode *inode)
 }
 
 /*
- * Scan `goal' inodes on the unused list for freeable ones. They are moved to
- * a temporary list and then are freed outside inode_lock by dispose_list().
+ * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
+ * temporary list and then are freed outside inode_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
- * pagecache removed. We expect the final iput() on that inode to add it to
- * the front of the inode_unused list. So look for it there and if the
- * inode is still freeable, proceed. The right inode is found 99.9% of the
- * time in testing on a 4-way.
+ * pagecache removed. If the inode has metadata buffers attached to
+ * mapping->private_list then try to remove them.
  *
- * If the inode has metadata buffers attached to mapping->private_list then
- * try to remove them.
+ * If the inode has the I_REFERENCED flag set, then it means that it has been
+ * used recently - the flag is set in iput_final(). When we encounter such an
+ * inode, clear the flag and move it to the back of the LRU so it gets another
+ * pass through the LRU before it gets reclaimed. This is necessary because of
+ * the fact we are doing lazy LRU updates to minimise lock contention so the
+ * LRU does not have strict ordering. Hence we don't want to reclaim inodes
+ * with this flag set because they are the inodes that are out of order.
  */
 static void prune_icache(int nr_to_scan)
 {
         LIST_HEAD(freeable);
-        int nr_pruned = 0;
         int nr_scanned;
         unsigned long reap = 0;
 
@@ -463,13 +604,26 @@ static void prune_icache(int nr_to_scan)
         for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
                 struct inode *inode;
 
-                if (list_empty(&inode_unused))
+                if (list_empty(&inode_lru))
                         break;
 
-                inode = list_entry(inode_unused.prev, struct inode, i_list);
+                inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
-                if (inode->i_state || atomic_read(&inode->i_count)) {
-                        list_move(&inode->i_list, &inode_unused);
+                /*
+                 * Referenced or dirty inodes are still in use. Give them
+                 * another pass through the LRU as we cannot reclaim them now.
+                 */
+                if (atomic_read(&inode->i_count) ||
+                    (inode->i_state & ~I_REFERENCED)) {
+                        list_del_init(&inode->i_lru);
+                        percpu_counter_dec(&nr_inodes_unused);
+                        continue;
+                }
+
+                /* recently referenced inodes get one more pass */
+                if (inode->i_state & I_REFERENCED) {
+                        list_move(&inode->i_lru, &inode_lru);
+                        inode->i_state &= ~I_REFERENCED;
                         continue;
                 }
                 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
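
The I_REFERENCED handling above is a classic second-chance policy: reclaim scans from the cold end of the LRU, and an inode found with the flag set is rotated back to the young end with the flag cleared rather than being freed, compensating for the deliberately lazy LRU ordering. A standalone sketch of that policy on a circular doubly linked list (illustrative names, not kernel code):

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        struct entry *prev, *next;
        bool referenced;                 /* stand-in for I_REFERENCED */
    };

    /* circular list head; lru.prev is the cold (reclaim) end */
    static struct entry lru = { &lru, &lru, false };

    static void rotate_to_young_end(struct entry *e)
    {
        e->prev->next = e->next;         /* unlink */
        e->next->prev = e->prev;
        e->next = lru.next;              /* reinsert at the young end */
        e->prev = &lru;
        lru.next->prev = e;
        lru.next = e;
    }

    /* pick the next victim, giving referenced entries a second pass */
    static struct entry *pick_victim(void)
    {
        struct entry *e;

        while ((e = lru.prev) != &lru) {
            if (!e->referenced)
                return e;                /* cold and unreferenced: reclaim */
            e->referenced = false;       /* second chance */
            rotate_to_young_end(e);
        }
        return NULL;                     /* list is empty */
    }

    int main(void)
    {
        struct entry a = { &lru, &lru, true };
        lru.next = &a;
        lru.prev = &a;
        /* a gets its second chance, then is chosen on the next pass */
        return pick_victim() == &a ? 0 : 1;
    }
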
@@ -481,18 +635,23 @@ static void prune_icache(int nr_to_scan)
                         iput(inode);
                         spin_lock(&inode_lock);
 
-                        if (inode != list_entry(inode_unused.next,
-                                                struct inode, i_list))
+                        if (inode != list_entry(inode_lru.next,
+                                                struct inode, i_lru))
                                 continue;       /* wrong inode or list_empty */
                         if (!can_unuse(inode))
                                 continue;
                 }
-                list_move(&inode->i_list, &freeable);
                 WARN_ON(inode->i_state & I_NEW);
                 inode->i_state |= I_FREEING;
-                nr_pruned++;
+
+                /*
+                 * Move the inode off the IO lists and LRU once I_FREEING is
+                 * set so that it won't get moved back on there if it is dirty.
+                 */
+                list_move(&inode->i_lru, &freeable);
+                list_del_init(&inode->i_wb_list);
+                percpu_counter_dec(&nr_inodes_unused);
         }
-        inodes_stat.nr_unused -= nr_pruned;
         if (current_is_kswapd())
                 __count_vm_events(KSWAPD_INODESTEAL, reap);
         else
@@ -524,7 +683,7 @@ static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
                         return -1;
                 prune_icache(nr);
         }
-        return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+        return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker icache_shrinker = {
@@ -535,9 +694,6 @@ static struct shrinker icache_shrinker = {
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
- * NOTE: we are not increasing the inode-refcount, you must call __iget()
- * by hand after calling find_inode now! This simplifies iunique and won't
- * add any additional branch in the common code.
  */
 static struct inode *find_inode(struct super_block *sb,
                                 struct hlist_head *head,
@@ -553,13 +709,14 @@ repeat:
                         continue;
                 if (!test(inode, data))
                         continue;
-                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                         __wait_on_freeing_inode(inode);
                         goto repeat;
                 }
-                break;
+                __iget(inode);
+                return inode;
         }
-        return node ? inode : NULL;
+        return NULL;
 }
 
 /*
@@ -578,57 +735,53 @@ repeat:
                         continue;
                 if (inode->i_sb != sb)
                         continue;
-                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                         __wait_on_freeing_inode(inode);
                         goto repeat;
                 }
-                break;
+                __iget(inode);
+                return inode;
         }
-        return node ? inode : NULL;
-}
-
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-        unsigned long tmp;
-
-        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-                        L1_CACHE_BYTES;
-        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-        return tmp & I_HASHMASK;
-}
-
-static inline void
-__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
-                        struct inode *inode)
-{
-        inodes_stat.nr_inodes++;
-        list_add(&inode->i_list, &inode_in_use);
-        list_add(&inode->i_sb_list, &sb->s_inodes);
-        if (head)
-                hlist_add_head(&inode->i_hash, head);
+        return NULL;
 }
 
-/**
- * inode_add_to_lists - add a new inode to relevant lists
- * @sb: superblock inode belongs to
- * @inode: inode to mark in use
+/*
+ * Each cpu owns a range of LAST_INO_BATCH numbers.
+ * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
+ * to renew the exhausted range.
+ *
+ * This does not significantly increase overflow rate because every CPU can
+ * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
+ * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
+ * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
+ * overflow rate by 2x, which does not seem too significant.
  *
- * When an inode is allocated it needs to be accounted for, added to the in use
- * list, the owning superblock and the inode hash. This needs to be done under
- * the inode_lock, so export a function to do this rather than the inode lock
- * itself. We calculate the hash list to add to here so it is all internal
- * which requires the caller to have already set up the inode number in the
- * inode to add.
+ * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
+ * error if st_ino won't fit in target struct field. Use 32bit counter
+ * here to attempt to avoid that.
  */
-void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+#define LAST_INO_BATCH 1024
+static DEFINE_PER_CPU(unsigned int, last_ino);
+
+unsigned int get_next_ino(void)
 {
-        struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+        unsigned int *p = &get_cpu_var(last_ino);
+        unsigned int res = *p;
 
-        spin_lock(&inode_lock);
-        __inode_add_to_lists(sb, head, inode);
-        spin_unlock(&inode_lock);
+#ifdef CONFIG_SMP
+        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+                static atomic_t shared_last_ino;
+                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+
+                res = next - LAST_INO_BATCH;
+        }
+#endif
+
+        *p = ++res;
+        put_cpu_var(last_ino);
+        return res;
 }
-EXPORT_SYMBOL_GPL(inode_add_to_lists);
+EXPORT_SYMBOL(get_next_ino);
 
 /**
  * new_inode - obtain an inode
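
get_next_ino() above amortises contention by carving the 32-bit number space into LAST_INO_BATCH-sized blocks: the shared atomic is touched once per block, and the per-cpu cursor hands out the remaining ids locally. A user-space sketch of the same scheme, assuming C11 atomics and thread-local storage (illustrative names, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define BATCH 1024   /* analogous to LAST_INO_BATCH */

    static atomic_uint shared_last;          /* shared block allocator */
    static _Thread_local unsigned int last;  /* per-thread cursor */

    static unsigned int next_id(void)
    {
        unsigned int res = last;

        /* cursor at a block boundary: grab a fresh block of BATCH ids */
        if ((res & (BATCH - 1)) == 0)
            res = atomic_fetch_add(&shared_last, BATCH);

        last = ++res;
        return res;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("%u\n", next_id());   /* 1, 2, 3, ... */
        return 0;
    }

The worst case wastes BATCH-1 ids per thread, which, as the comment in the patch argues, is negligible against the 2^32 range.
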
@@ -644,12 +797,6 @@ EXPORT_SYMBOL_GPL(inode_add_to_lists);
  */
 struct inode *new_inode(struct super_block *sb)
 {
-        /*
-         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
-         * error if st_ino won't fit in target struct field. Use 32bit counter
-         * here to attempt to avoid that.
-         */
-        static unsigned int last_ino;
         struct inode *inode;
 
         spin_lock_prefetch(&inode_lock);
@@ -657,8 +804,7 @@ struct inode *new_inode(struct super_block *sb)
         inode = alloc_inode(sb);
         if (inode) {
                 spin_lock(&inode_lock);
-                __inode_add_to_lists(sb, NULL, inode);
-                inode->i_ino = ++last_ino;
+                __inode_sb_list_add(inode);
                 inode->i_state = 0;
                 spin_unlock(&inode_lock);
         }
@@ -669,7 +815,7 @@ EXPORT_SYMBOL(new_inode);
 void unlock_new_inode(struct inode *inode)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-        if (inode->i_mode & S_IFDIR) {
+        if (S_ISDIR(inode->i_mode)) {
                 struct file_system_type *type = inode->i_sb->s_type;
 
                 /* Set new key only if filesystem hasn't already changed it */
@@ -726,7 +872,8 @@ static struct inode *get_new_inode(struct super_block *sb,
                 if (set(inode, data))
                         goto set_failed;
 
-                __inode_add_to_lists(sb, head, inode);
+                hlist_add_head(&inode->i_hash, head);
+                __inode_sb_list_add(inode);
                 inode->i_state = I_NEW;
                 spin_unlock(&inode_lock);
 
@@ -741,7 +888,6 @@ static struct inode *get_new_inode(struct super_block *sb,
          * us. Use the old inode instead of the one we just
          * allocated.
          */
-        __iget(old);
         spin_unlock(&inode_lock);
         destroy_inode(inode);
         inode = old;
@@ -773,7 +919,8 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
         old = find_inode_fast(sb, head, ino);
         if (!old) {
                 inode->i_ino = ino;
-                __inode_add_to_lists(sb, head, inode);
+                hlist_add_head(&inode->i_hash, head);
+                __inode_sb_list_add(inode);
                 inode->i_state = I_NEW;
                 spin_unlock(&inode_lock);
 
@@ -788,7 +935,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
          * us. Use the old inode instead of the one we just
          * allocated.
          */
-        __iget(old);
         spin_unlock(&inode_lock);
         destroy_inode(inode);
         inode = old;
@@ -797,6 +943,27 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
         return inode;
 }
 
+/*
+ * search the inode cache for a matching inode number.
+ * If we find one, then the inode number we are trying to
+ * allocate is not unique and so we should not use it.
+ *
+ * Returns 1 if the inode number is unique, 0 if it is not.
+ */
+static int test_inode_iunique(struct super_block *sb, unsigned long ino)
+{
+        struct hlist_head *b = inode_hashtable + hash(sb, ino);
+        struct hlist_node *node;
+        struct inode *inode;
+
+        hlist_for_each_entry(inode, node, b, i_hash) {
+                if (inode->i_ino == ino && inode->i_sb == sb)
+                        return 0;
+        }
+
+        return 1;
+}
+
 /**
  * iunique - get a unique inode number
  * @sb: superblock
@@ -818,19 +985,18 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
          * error if st_ino won't fit in target struct field. Use 32bit counter
          * here to attempt to avoid that.
          */
+        static DEFINE_SPINLOCK(iunique_lock);
         static unsigned int counter;
-        struct inode *inode;
-        struct hlist_head *head;
         ino_t res;
 
         spin_lock(&inode_lock);
+        spin_lock(&iunique_lock);
         do {
                 if (counter <= max_reserved)
                         counter = max_reserved + 1;
                 res = counter++;
-                head = inode_hashtable + hash(sb, res);
-                inode = find_inode_fast(sb, head, res);
-        } while (inode != NULL);
+        } while (!test_inode_iunique(sb, res));
+        spin_unlock(&iunique_lock);
         spin_unlock(&inode_lock);
 
         return res;
@@ -840,7 +1006,7 @@ EXPORT_SYMBOL(iunique);
 struct inode *igrab(struct inode *inode)
 {
         spin_lock(&inode_lock);
-        if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
+        if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
                 __iget(inode);
         else
                 /*
@@ -882,7 +1048,6 @@ static struct inode *ifind(struct super_block *sb,
         spin_lock(&inode_lock);
         inode = find_inode(sb, head, test, data);
         if (inode) {
-                __iget(inode);
                 spin_unlock(&inode_lock);
                 if (likely(wait))
                         wait_on_inode(inode);
@@ -915,7 +1080,6 @@ static struct inode *ifind_fast(struct super_block *sb,
         spin_lock(&inode_lock);
         inode = find_inode_fast(sb, head, ino);
         if (inode) {
-                __iget(inode);
                 spin_unlock(&inode_lock);
                 wait_on_inode(inode);
                 return inode;
@@ -1089,7 +1253,7 @@ int insert_inode_locked(struct inode *inode)
                         continue;
                 if (old->i_sb != sb)
                         continue;
-                if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                if (old->i_state & (I_FREEING|I_WILL_FREE))
                         continue;
                 break;
         }
@@ -1101,7 +1265,7 @@ int insert_inode_locked(struct inode *inode)
                 __iget(old);
                 spin_unlock(&inode_lock);
                 wait_on_inode(old);
-                if (unlikely(!hlist_unhashed(&old->i_hash))) {
+                if (unlikely(!inode_unhashed(old))) {
                         iput(old);
                         return -EBUSY;
                 }
@@ -1128,7 +1292,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                         continue;
                 if (!test(old, data))
                         continue;
-                if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                if (old->i_state & (I_FREEING|I_WILL_FREE))
                         continue;
                 break;
         }
@@ -1140,7 +1304,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                 __iget(old);
                 spin_unlock(&inode_lock);
                 wait_on_inode(old);
-                if (unlikely(!hlist_unhashed(&old->i_hash))) {
+                if (unlikely(!inode_unhashed(old))) {
                         iput(old);
                         return -EBUSY;
                 }
@@ -1149,100 +1313,53 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 }
 EXPORT_SYMBOL(insert_inode_locked4);
 
-/**
- * __insert_inode_hash - hash an inode
- * @inode: unhashed inode
- * @hashval: unsigned long value used to locate this object in the
- *      inode_hashtable.
- *
- * Add an inode to the inode hash for this superblock.
- */
-void __insert_inode_hash(struct inode *inode, unsigned long hashval)
-{
-        struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
-        spin_lock(&inode_lock);
-        hlist_add_head(&inode->i_hash, head);
-        spin_unlock(&inode_lock);
-}
-EXPORT_SYMBOL(__insert_inode_hash);
 
-/**
- * remove_inode_hash - remove an inode from the hash
- * @inode: inode to unhash
- *
- * Remove an inode from the superblock.
- */
-void remove_inode_hash(struct inode *inode)
+int generic_delete_inode(struct inode *inode)
 {
-        spin_lock(&inode_lock);
-        hlist_del_init(&inode->i_hash);
-        spin_unlock(&inode_lock);
+        return 1;
 }
-EXPORT_SYMBOL(remove_inode_hash);
+EXPORT_SYMBOL(generic_delete_inode);
 
 /*
- * Tell the filesystem that this inode is no longer of any interest and should
- * be completely destroyed.
- *
- * We leave the inode in the inode hash table until *after* the filesystem's
- * ->delete_inode completes. This ensures that an iget (such as nfsd might
- * instigate) will always find up-to-date information either in the hash or on
- * disk.
- *
- * I_FREEING is set so that no-one will take a new reference to the inode while
- * it is being deleted.
+ * Normal UNIX filesystem behaviour: delete the
+ * inode when the usage count drops to zero, and
+ * i_nlink is zero.
  */
-void generic_delete_inode(struct inode *inode)
+int generic_drop_inode(struct inode *inode)
 {
-        const struct super_operations *op = inode->i_sb->s_op;
-
-        list_del_init(&inode->i_list);
-        list_del_init(&inode->i_sb_list);
-        WARN_ON(inode->i_state & I_NEW);
-        inode->i_state |= I_FREEING;
-        inodes_stat.nr_inodes--;
-        spin_unlock(&inode_lock);
-
-        if (op->delete_inode) {
-                void (*delete)(struct inode *) = op->delete_inode;
-                /* Filesystems implementing their own
-                 * s_op->delete_inode are required to call
-                 * truncate_inode_pages and clear_inode()
-                 * internally */
-                delete(inode);
-        } else {
-                truncate_inode_pages(&inode->i_data, 0);
-                clear_inode(inode);
-        }
-        spin_lock(&inode_lock);
-        hlist_del_init(&inode->i_hash);
-        spin_unlock(&inode_lock);
-        wake_up_inode(inode);
-        BUG_ON(inode->i_state != I_CLEAR);
-        destroy_inode(inode);
+        return !inode->i_nlink || inode_unhashed(inode);
 }
-EXPORT_SYMBOL(generic_delete_inode);
+EXPORT_SYMBOL_GPL(generic_drop_inode);
 
-/**
- * generic_detach_inode - remove inode from inode lists
- * @inode: inode to remove
- *
- * Remove inode from inode lists, write it if it's dirty. This is just an
- * internal VFS helper exported for hugetlbfs. Do not use!
+/*
+ * Called when we're dropping the last reference
+ * to an inode.
  *
- * Returns 1 if inode should be completely destroyed.
+ * Call the FS "drop_inode()" function, defaulting to
+ * the legacy UNIX filesystem behaviour. If it tells
+ * us to evict inode, do so. Otherwise, retain inode
+ * in cache if fs is alive, sync and evict if fs is
+ * shutting down.
  */
-int generic_detach_inode(struct inode *inode)
+static void iput_final(struct inode *inode)
 {
         struct super_block *sb = inode->i_sb;
+        const struct super_operations *op = inode->i_sb->s_op;
+        int drop;
 
-        if (!hlist_unhashed(&inode->i_hash)) {
-                if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-                        list_move(&inode->i_list, &inode_unused);
-                inodes_stat.nr_unused++;
+        if (op && op->drop_inode)
+                drop = op->drop_inode(inode);
+        else
+                drop = generic_drop_inode(inode);
+
+        if (!drop) {
                 if (sb->s_flags & MS_ACTIVE) {
+                        inode->i_state |= I_REFERENCED;
+                        if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
+                                inode_lru_list_add(inode);
+                        }
                         spin_unlock(&inode_lock);
-                        return 0;
+                        return;
                 }
                 WARN_ON(inode->i_state & I_NEW);
                 inode->i_state |= I_WILL_FREE;
@@ -1251,65 +1368,28 @@ int generic_detach_inode(struct inode *inode)
                 spin_lock(&inode_lock);
                 WARN_ON(inode->i_state & I_NEW);
                 inode->i_state &= ~I_WILL_FREE;
-                inodes_stat.nr_unused--;
-                hlist_del_init(&inode->i_hash);
+                __remove_inode_hash(inode);
         }
-        list_del_init(&inode->i_list);
-        list_del_init(&inode->i_sb_list);
+
         WARN_ON(inode->i_state & I_NEW);
         inode->i_state |= I_FREEING;
-        inodes_stat.nr_inodes--;
-        spin_unlock(&inode_lock);
-        return 1;
-}
-EXPORT_SYMBOL_GPL(generic_detach_inode);
 
-static void generic_forget_inode(struct inode *inode)
-{
-        if (!generic_detach_inode(inode))
-                return;
-        if (inode->i_data.nrpages)
-                truncate_inode_pages(&inode->i_data, 0);
-        clear_inode(inode);
+        /*
+         * Move the inode off the IO lists and LRU once I_FREEING is
+         * set so that it won't get moved back on there if it is dirty.
+         */
+        inode_lru_list_del(inode);
+        list_del_init(&inode->i_wb_list);
+
+        __inode_sb_list_del(inode);
+        spin_unlock(&inode_lock);
+        evict(inode);
+        remove_inode_hash(inode);
         wake_up_inode(inode);
+        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
         destroy_inode(inode);
 }
 
-/*
- * Normal UNIX filesystem behaviour: delete the
- * inode when the usage count drops to zero, and
- * i_nlink is zero.
- */
-void generic_drop_inode(struct inode *inode)
-{
-        if (!inode->i_nlink)
-                generic_delete_inode(inode);
-        else
-                generic_forget_inode(inode);
-}
-EXPORT_SYMBOL_GPL(generic_drop_inode);
-
-/*
- * Called when we're dropping the last reference
- * to an inode.
- *
- * Call the FS "drop()" function, defaulting to
- * the legacy UNIX filesystem behaviour..
- *
- * NOTE! NOTE! NOTE! We're called with the inode lock
- * held, and the drop function is supposed to release
- * the lock!
- */
-static inline void iput_final(struct inode *inode)
-{
-        const struct super_operations *op = inode->i_sb->s_op;
-        void (*drop)(struct inode *) = generic_drop_inode;
-
-        if (op && op->drop_inode)
-                drop = op->drop_inode;
-        drop(inode);
-}
-
 /**
  * iput - put an inode
  * @inode: inode to put
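
With ->drop_inode() now returning int, a filesystem can steer iput_final() directly: nonzero means evict immediately, zero falls through to LRU caching (if the superblock is still MS_ACTIVE) or write-back-and-free (if unmounting). A hedged sketch of an override that always evicts, functionally equivalent to the new generic_delete_inode() above ("myfs" is hypothetical):

    static int myfs_drop_inode(struct inode *inode)
    {
        return 1;   /* nonzero: iput_final() evicts instead of caching */
    }

It would be wired up through struct super_operations as .drop_inode = myfs_drop_inode; filesystems whose inodes are never worth caching are the sort of user this suits.
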
@@ -1322,7 +1402,7 @@ static inline void iput_final(struct inode *inode)
 void iput(struct inode *inode)
 {
         if (inode) {
-                BUG_ON(inode->i_state == I_CLEAR);
+                BUG_ON(inode->i_state & I_CLEAR);
 
                 if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
                         iput_final(inode);
@@ -1568,6 +1648,8 @@ void __init inode_init(void)
                                          SLAB_MEM_SPREAD),
                          init_once);
         register_shrinker(&icache_shrinker);
+        percpu_counter_init(&nr_inodes, 0);
+        percpu_counter_init(&nr_inodes_unused, 0);
 
         /* Hash may have been set up in inode_init_early */
         if (!hashdist)