Diffstat (limited to 'fs/inode.c')
-rw-r--r--	fs/inode.c	687
1 file changed, 341 insertions(+), 346 deletions(-)
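The series merged here replaces the single global inode_lock with a set of narrower locks: inode->i_lock now protects i_state and i_hash, while inode_hash_lock, inode_sb_list_lock, inode_wb_list_lock and inode_lru_lock protect the hash table and the various inode lists. As a rough orientation sketch (illustrative only, not code taken from this diff), an i_state check that used to serialise every inode on the one global spinlock now serialises only on the inode being touched:

	/* before this series: every i_state update contends on inode_lock */
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING | I_WILL_FREE)))
		inode->i_state |= I_DIRTY;
	spin_unlock(&inode_lock);

	/* after: the same update is private to this inode */
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING | I_WILL_FREE)))
		inode->i_state |= I_DIRTY;
	spin_unlock(&inode->i_lock);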
diff --git a/fs/inode.c b/fs/inode.c
index 9910c039f026..33c963d08ab4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -25,6 +25,39 @@
 #include <linux/async.h>
 #include <linux/posix_acl.h>
 #include <linux/ima.h>
+#include <linux/cred.h>
+#include "internal.h"
+
+/*
+ * inode locking rules.
+ *
+ * inode->i_lock protects:
+ *   inode->i_state, inode->i_hash, __iget()
+ * inode_lru_lock protects:
+ *   inode_lru, inode->i_lru
+ * inode_sb_list_lock protects:
+ *   sb->s_inodes, inode->i_sb_list
+ * inode_wb_list_lock protects:
+ *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
+ *
+ * Lock ordering:
+ *
+ * inode_sb_list_lock
+ *   inode->i_lock
+ *     inode_lru_lock
+ *
+ * inode_wb_list_lock
+ *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
+ */
 
 /*
  * This is needed for the following functions:
@@ -59,6 +92,8 @@
 
 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 /*
  * Each inode can be on two separate lists. One is
@@ -73,15 +108,10 @@ static unsigned int i_hash_shift __read_mostly;
  */
 
 static LIST_HEAD(inode_lru);
-static struct hlist_head *inode_hashtable __read_mostly;
+static DEFINE_SPINLOCK(inode_lru_lock);
 
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
 /*
  * iprune_sem provides exclusion between the icache shrinking and the
@@ -95,6 +125,14 @@ DEFINE_SPINLOCK(inode_lock);
 static DECLARE_RWSEM(iprune_sem);
 
 /*
+ * Empty aops. Can be used for the cases where the user does not
+ * define any of the address_space operations.
+ */
+const struct address_space_operations empty_aops = {
+};
+EXPORT_SYMBOL(empty_aops);
+
+/*
  * Statistics gathering..
  */
 struct inodes_stat_t inodes_stat;
@@ -136,15 +174,6 @@ int proc_nr_inodes(ctl_table *table, int write,
 }
 #endif
 
-static void wake_up_inode(struct inode *inode)
-{
-	/*
-	 * Prevent speculative execution through spin_unlock(&inode_lock);
-	 */
-	smp_mb();
-	wake_up_bit(&inode->i_state, __I_NEW);
-}
-
 /**
  * inode_init_always - perform inode structure intialisation
  * @sb: superblock inode belongs to
@@ -155,7 +184,6 @@ static void wake_up_inode(struct inode *inode)
  */
 int inode_init_always(struct super_block *sb, struct inode *inode)
 {
-	static const struct address_space_operations empty_aops;
 	static const struct inode_operations empty_iops;
 	static const struct file_operations empty_fops;
 	struct address_space *const mapping = &inode->i_data;
@@ -335,7 +363,7 @@ static void init_once(void *foo)
 }
 
 /*
- * inode_lock must be held
+ * inode->i_lock must be held
  */
 void __iget(struct inode *inode)
 {
@@ -353,23 +381,22 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
+	spin_lock(&inode_lru_lock);
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode_lru);
 		inodes_stat.nr_unused++;
 	}
+	spin_unlock(&inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
+	spin_lock(&inode_lru_lock);
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
 		inodes_stat.nr_unused--;
 	}
-}
-
-static inline void __inode_sb_list_add(struct inode *inode)
-{
-	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+	spin_unlock(&inode_lru_lock);
 }
 
 /**
@@ -378,15 +405,17 @@ static inline void __inode_sb_list_add(struct inode *inode)
  */
 void inode_sb_list_add(struct inode *inode)
 {
-	spin_lock(&inode_lock);
-	__inode_sb_list_add(inode);
-	spin_unlock(&inode_lock);
+	spin_lock(&inode_sb_list_lock);
+	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
+	spin_unlock(&inode_sb_list_lock);
 }
 EXPORT_SYMBOL_GPL(inode_sb_list_add);
 
-static inline void __inode_sb_list_del(struct inode *inode)
+static inline void inode_sb_list_del(struct inode *inode)
 {
+	spin_lock(&inode_sb_list_lock);
 	list_del_init(&inode->i_sb_list);
+	spin_unlock(&inode_sb_list_lock);
 }
 
 static unsigned long hash(struct super_block *sb, unsigned long hashval)
@@ -411,24 +440,15 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 {
 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
+	spin_lock(&inode->i_lock);
 	hlist_add_head(&inode->i_hash, b);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);
 
 /**
- * __remove_inode_hash - remove an inode from the hash
- * @inode: inode to unhash
- *
- * Remove an inode from the superblock.
- */
-static void __remove_inode_hash(struct inode *inode)
-{
-	hlist_del_init(&inode->i_hash);
-}
-
-/**
  * remove_inode_hash - remove an inode from the hash
  * @inode: inode to unhash
  *
@@ -436,9 +456,11 @@ static void __remove_inode_hash(struct inode *inode)
  */
 void remove_inode_hash(struct inode *inode)
 {
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
+	spin_lock(&inode->i_lock);
 	hlist_del_init(&inode->i_hash);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);
 
@@ -455,10 +477,29 @@ void end_writeback(struct inode *inode)
 }
 EXPORT_SYMBOL(end_writeback);
 
+/*
+ * Free the inode passed in, removing it from the lists it is still connected
+ * to. We remove any pages still attached to the inode and wait for any IO that
+ * is still in progress before finally destroying the inode.
+ *
+ * An inode must already be marked I_FREEING so that we avoid the inode being
+ * moved back onto lists if we race with other code that manipulates the lists
+ * (e.g. writeback_single_inode). The caller is responsible for setting this.
+ *
+ * An inode must already be removed from the LRU list before being evicted from
+ * the cache. This should occur atomically with setting the I_FREEING state
+ * flag, so no inodes here should ever be on the LRU when being evicted.
+ */
 static void evict(struct inode *inode)
 {
 	const struct super_operations *op = inode->i_sb->s_op;
 
+	BUG_ON(!(inode->i_state & I_FREEING));
+	BUG_ON(!list_empty(&inode->i_lru));
+
+	inode_wb_list_del(inode);
+	inode_sb_list_del(inode);
+
 	if (op->evict_inode) {
 		op->evict_inode(inode);
 	} else {
@@ -470,6 +511,15 @@ static void evict(struct inode *inode)
 	bd_forget(inode);
 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 		cd_forget(inode);
+
+	remove_inode_hash(inode);
+
+	spin_lock(&inode->i_lock);
+	wake_up_bit(&inode->i_state, __I_NEW);
+	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
+	spin_unlock(&inode->i_lock);
+
+	destroy_inode(inode);
 }
 
 /*
@@ -488,14 +538,6 @@ static void dispose_list(struct list_head *head)
 		list_del_init(&inode->i_lru);
 
 		evict(inode);
-
-		spin_lock(&inode_lock);
-		__remove_inode_hash(inode);
-		__inode_sb_list_del(inode);
-		spin_unlock(&inode_lock);
-
-		wake_up_inode(inode);
-		destroy_inode(inode);
 	}
 }
 
@@ -513,25 +555,23 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_sb_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
 			continue;
-		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+
+		spin_lock(&inode->i_lock);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 
 		inode->i_state |= I_FREEING;
-
-		/*
-		 * Move the inode off the IO lists and LRU once I_FREEING is
-		 * set so that it won't get moved back on there if it is dirty.
-		 */
-		list_move(&inode->i_lru, &dispose);
-		list_del_init(&inode->i_wb_list);
-		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			inodes_stat.nr_unused--;
+		inode_lru_list_del(inode);
+		spin_unlock(&inode->i_lock);
+		list_add(&inode->i_lru, &dispose);
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_sb_list_lock);
 
 	dispose_list(&dispose);
 
@@ -560,31 +600,30 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_sb_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
-		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+		spin_lock(&inode->i_lock);
+		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 		if (inode->i_state & I_DIRTY && !kill_dirty) {
+			spin_unlock(&inode->i_lock);
 			busy = 1;
 			continue;
 		}
 		if (atomic_read(&inode->i_count)) {
+			spin_unlock(&inode->i_lock);
 			busy = 1;
 			continue;
 		}
 
 		inode->i_state |= I_FREEING;
-
-		/*
-		 * Move the inode off the IO lists and LRU once I_FREEING is
-		 * set so that it won't get moved back on there if it is dirty.
-		 */
-		list_move(&inode->i_lru, &dispose);
-		list_del_init(&inode->i_wb_list);
-		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			inodes_stat.nr_unused--;
+		inode_lru_list_del(inode);
+		spin_unlock(&inode->i_lock);
+		list_add(&inode->i_lru, &dispose);
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_sb_list_lock);
 
 	dispose_list(&dispose);
 
@@ -606,7 +645,7 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * temporary list and then are freed outside inode_lru_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed. If the inode has metadata buffers attached to
@@ -627,7 +666,7 @@ static void prune_icache(int nr_to_scan)
 	unsigned long reap = 0;
 
 	down_read(&iprune_sem);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_lru_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
 
@@ -637,53 +676,67 @@ static void prune_icache(int nr_to_scan)
 		inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
 		/*
+		 * we are inverting the inode_lru_lock/inode->i_lock here,
+		 * so use a trylock. If we fail to get the lock, just move the
+		 * inode to the back of the list so we don't spin on it.
+		 */
+		if (!spin_trylock(&inode->i_lock)) {
+			list_move(&inode->i_lru, &inode_lru);
+			continue;
+		}
+
+		/*
 		 * Referenced or dirty inodes are still in use. Give them
 		 * another pass through the LRU as we canot reclaim them now.
 		 */
 		if (atomic_read(&inode->i_count) ||
 		    (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
+			spin_unlock(&inode->i_lock);
 			inodes_stat.nr_unused--;
 			continue;
 		}
 
 		/* recently referenced inodes get one more pass */
 		if (inode->i_state & I_REFERENCED) {
-			list_move(&inode->i_lru, &inode_lru);
 			inode->i_state &= ~I_REFERENCED;
+			list_move(&inode->i_lru, &inode_lru);
+			spin_unlock(&inode->i_lock);
 			continue;
 		}
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 			__iget(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode->i_lock);
+			spin_unlock(&inode_lru_lock);
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lock);
+			spin_lock(&inode_lru_lock);
 
 			if (inode != list_entry(inode_lru.next,
 						struct inode, i_lru))
 				continue;	/* wrong inode or list_empty */
-			if (!can_unuse(inode))
+			/* avoid lock inversions with trylock */
+			if (!spin_trylock(&inode->i_lock))
+				continue;
+			if (!can_unuse(inode)) {
+				spin_unlock(&inode->i_lock);
 				continue;
+			}
 		}
 		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state |= I_FREEING;
+		spin_unlock(&inode->i_lock);
 
-		/*
-		 * Move the inode off the IO lists and LRU once I_FREEING is
-		 * set so that it won't get moved back on there if it is dirty.
-		 */
 		list_move(&inode->i_lru, &freeable);
-		list_del_init(&inode->i_wb_list);
 		inodes_stat.nr_unused--;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_lru_lock);
 
 	dispose_list(&freeable);
 	up_read(&iprune_sem);
@@ -732,15 +785,21 @@ static struct inode *find_inode(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
+		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (!test(inode, data))
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
 		}
 		__iget(inode);
+		spin_unlock(&inode->i_lock);
 		return inode;
 	}
 	return NULL;
@@ -758,15 +817,21 @@ static struct inode *find_inode_fast(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
+		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (inode->i_sb != sb)
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
 		}
 		__iget(inode);
+		spin_unlock(&inode->i_lock);
 		return inode;
 	}
 	return NULL;
@@ -826,19 +891,26 @@ struct inode *new_inode(struct super_block *sb)
 {
 	struct inode *inode;
 
-	spin_lock_prefetch(&inode_lock);
+	spin_lock_prefetch(&inode_sb_list_lock);
 
 	inode = alloc_inode(sb);
 	if (inode) {
-		spin_lock(&inode_lock);
-		__inode_sb_list_add(inode);
+		spin_lock(&inode->i_lock);
 		inode->i_state = 0;
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode->i_lock);
+		inode_sb_list_add(inode);
 	}
 	return inode;
 }
 EXPORT_SYMBOL(new_inode);
 
+/**
+ * unlock_new_inode - clear the I_NEW state and wake up any waiters
+ * @inode: new inode to unlock
+ *
+ * Called when the inode is fully initialised to clear the new state of the
+ * inode and wake up anyone waiting for the inode to finish initialisation.
+ */
 void unlock_new_inode(struct inode *inode)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -858,51 +930,67 @@ void unlock_new_inode(struct inode *inode)
 		}
 	}
 #endif
-	/*
-	 * This is special! We do not need the spinlock when clearing I_NEW,
-	 * because we're guaranteed that nobody else tries to do anything about
-	 * the state of the inode when it is locked, as we just created it (so
-	 * there can be no old holders that haven't tested I_NEW).
-	 * However we must emit the memory barrier so that other CPUs reliably
-	 * see the clearing of I_NEW after the other inode initialisation has
-	 * completed.
-	 */
-	smp_mb();
+	spin_lock(&inode->i_lock);
 	WARN_ON(!(inode->i_state & I_NEW));
 	inode->i_state &= ~I_NEW;
-	wake_up_inode(inode);
+	wake_up_bit(&inode->i_state, __I_NEW);
+	spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(unlock_new_inode);
 
-/*
- * This is called without the inode lock held.. Be careful.
+/**
+ * iget5_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @hashval: hash value (usually inode number) to get
+ * @test: callback used for comparisons between inodes
+ * @set: callback used to initialize a new struct inode
+ * @data: opaque data pointer to pass to @test and @set
  *
- * We no longer cache the sb_flags in i_flags - see fs.h
- *	-- rmk@arm.uk.linux.org
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if present it is return it with an increased reference count. This is
+ * a generalized version of iget_locked() for file systems where the inode
+ * number is not sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
+ *
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
  */
-static struct inode *get_new_inode(struct super_block *sb,
-				struct hlist_head *head,
-				int (*test)(struct inode *, void *),
-				int (*set)(struct inode *, void *),
-				void *data)
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+		int (*test)(struct inode *, void *),
+		int (*set)(struct inode *, void *), void *data)
 {
+	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
+	inode = find_inode(sb, head, test, data);
+	spin_unlock(&inode_hash_lock);
+
+	if (inode) {
+		wait_on_inode(inode);
+		return inode;
+	}
+
 	inode = alloc_inode(sb);
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode(sb, head, test, data);
 		if (!old) {
 			if (set(inode, data))
 				goto set_failed;
 
-			hlist_add_head(&inode->i_hash, head);
-			__inode_sb_list_add(inode);
+			spin_lock(&inode->i_lock);
 			inode->i_state = I_NEW;
-			spin_unlock(&inode_lock);
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode->i_lock);
+			inode_sb_list_add(inode);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -915,7 +1003,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -923,33 +1011,53 @@ static struct inode *get_new_inode(struct super_block *sb,
 	return inode;
 
 set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	destroy_inode(inode);
 	return NULL;
 }
+EXPORT_SYMBOL(iget5_locked);
 
-/*
- * get_new_inode_fast is the fast path version of get_new_inode, see the
- * comment at iget_locked for details.
+/**
+ * iget_locked - obtain an inode from a mounted file system
+ * @sb: super block of file system
+ * @ino: inode number to get
+ *
+ * Search for the inode specified by @ino in the inode cache and if present
+ * return it with an increased reference count. This is for file systems
+ * where the inode number is sufficient for unique identification of an inode.
+ *
+ * If the inode is not in cache, allocate a new inode and return it locked,
+ * hashed, and with the I_NEW flag set. The file system gets to fill it in
+ * before unlocking it via unlock_new_inode().
  */
-static struct inode *get_new_inode_fast(struct super_block *sb,
-				struct hlist_head *head, unsigned long ino)
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 {
+	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
+	inode = find_inode_fast(sb, head, ino);
+	spin_unlock(&inode_hash_lock);
+	if (inode) {
+		wait_on_inode(inode);
+		return inode;
+	}
+
 	inode = alloc_inode(sb);
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
 			inode->i_ino = ino;
-			hlist_add_head(&inode->i_hash, head);
-			__inode_sb_list_add(inode);
+			spin_lock(&inode->i_lock);
 			inode->i_state = I_NEW;
-			spin_unlock(&inode_lock);
+			hlist_add_head(&inode->i_hash, head);
+			spin_unlock(&inode->i_lock);
+			inode_sb_list_add(inode);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -962,13 +1070,14 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
 	}
 	return inode;
 }
+EXPORT_SYMBOL(iget_locked);
 
 /*
  * search the inode cache for a matching inode number.
@@ -983,10 +1092,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 	struct hlist_node *node;
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
 	hlist_for_each_entry(inode, node, b, i_hash) {
-		if (inode->i_ino == ino && inode->i_sb == sb)
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
 			return 0;
+		}
 	}
+	spin_unlock(&inode_hash_lock);
 
 	return 1;
 }
@@ -1016,7 +1129,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 	static unsigned int counter;
 	ino_t res;
 
-	spin_lock(&inode_lock);
 	spin_lock(&iunique_lock);
 	do {
 		if (counter <= max_reserved)
@@ -1024,7 +1136,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 		res = counter++;
 	} while (!test_inode_iunique(sb, res));
 	spin_unlock(&iunique_lock);
-	spin_unlock(&inode_lock);
 
 	return res;
 }
@@ -1032,116 +1143,50 @@ EXPORT_SYMBOL(iunique);
 
 struct inode *igrab(struct inode *inode)
 {
-	spin_lock(&inode_lock);
-	if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+	spin_lock(&inode->i_lock);
+	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
 		__iget(inode);
-	else
+		spin_unlock(&inode->i_lock);
+	} else {
+		spin_unlock(&inode->i_lock);
 		/*
 		 * Handle the case where s_op->clear_inode is not been
 		 * called yet, and somebody is calling igrab
 		 * while the inode is getting freed.
 		 */
 		inode = NULL;
-	spin_unlock(&inode_lock);
+	}
 	return inode;
 }
 EXPORT_SYMBOL(igrab);
 
 /**
- * ifind - internal function, you want ilookup5() or iget5().
- * @sb: super block of file system to search
- * @head: the head of the list to search
- * @test: callback used for comparisons between inodes
- * @data: opaque data pointer to pass to @test
- * @wait: if true wait for the inode to be unlocked, if false do not
- *
- * ifind() searches for the inode specified by @data in the inode
- * cache. This is a generalized version of ifind_fast() for file systems where
- * the inode number is not sufficient for unique identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- *
- * Note, @test is called with the inode_lock held, so can't sleep.
- */
-static struct inode *ifind(struct super_block *sb,
-		struct hlist_head *head, int (*test)(struct inode *, void *),
-		void *data, const int wait)
-{
-	struct inode *inode;
-
-	spin_lock(&inode_lock);
-	inode = find_inode(sb, head, test, data);
-	if (inode) {
-		spin_unlock(&inode_lock);
-		if (likely(wait))
-			wait_on_inode(inode);
-		return inode;
-	}
-	spin_unlock(&inode_lock);
-	return NULL;
-}
-
-/**
- * ifind_fast - internal function, you want ilookup() or iget().
- * @sb: super block of file system to search
- * @head: head of the list to search
- * @ino: inode number to search for
- *
- * ifind_fast() searches for the inode @ino in the inode cache. This is for
- * file systems where the inode number is sufficient for unique identification
- * of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
- */
-static struct inode *ifind_fast(struct super_block *sb,
-		struct hlist_head *head, unsigned long ino)
-{
-	struct inode *inode;
-
-	spin_lock(&inode_lock);
-	inode = find_inode_fast(sb, head, ino);
-	if (inode) {
-		spin_unlock(&inode_lock);
-		wait_on_inode(inode);
-		return inode;
-	}
-	spin_unlock(&inode_lock);
-	return NULL;
-}
-
-/**
  * ilookup5_nowait - search for an inode in the inode cache
  * @sb: super block of file system to search
  * @hashval: hash value (usually inode number) to search for
  * @test: callback used for comparisons between inodes
  * @data: opaque data pointer to pass to @test
  *
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
+ * Search for the inode specified by @hashval and @data in the inode cache.
  *
  * If the inode is in the cache, the inode is returned with an incremented
- * reference count. Note, the inode lock is not waited upon so you have to be
- * very careful what you do with the returned inode. You probably should be
- * using ilookup5() instead.
+ * reference count.
  *
- * Otherwise NULL is returned.
+ * Note: I_NEW is not waited upon so you have to be very careful what you do
+ * with the returned inode. You probably should be using ilookup5() instead.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note2: @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *inode;
+
+	spin_lock(&inode_hash_lock);
+	inode = find_inode(sb, head, test, data);
+	spin_unlock(&inode_hash_lock);
 
-	return ifind(sb, head, test, data, 0);
+	return inode;
 }
 EXPORT_SYMBOL(ilookup5_nowait);
 
@@ -1152,24 +1197,24 @@ EXPORT_SYMBOL(ilookup5_nowait);
  * @test: callback used for comparisons between inodes
  * @data: opaque data pointer to pass to @test
  *
- * ilookup5() uses ifind() to search for the inode specified by @hashval and
- * @data in the inode cache. This is a generalized version of ilookup() for
- * file systems where the inode number is not sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode lock is waited upon and the inode is
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * and if the inode is in the cache, return the inode with an incremented
+ * reference count. Waits on I_NEW before returning the inode.
  * returned with an incremented reference count.
  *
- * Otherwise NULL is returned.
+ * This is a generalized version of ilookup() for file systems where the
+ * inode number is not sufficient for unique identification of an inode.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note: @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
-	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
 
-	return ifind(sb, head, test, data, 1);
+	if (inode)
+		wait_on_inode(inode);
+	return inode;
 }
 EXPORT_SYMBOL(ilookup5);
 
@@ -1178,91 +1223,23 @@ EXPORT_SYMBOL(ilookup5);
  * @sb: super block of file system to search
  * @ino: inode number to search for
  *
- * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
- * This is for file systems where the inode number is sufficient for unique
- * identification of an inode.
- *
- * If the inode is in the cache, the inode is returned with an incremented
- * reference count.
- *
- * Otherwise NULL is returned.
+ * Search for the inode @ino in the inode cache, and if the inode is in the
+ * cache, the inode is returned with an incremented reference count.
  */
 struct inode *ilookup(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
-
-	return ifind_fast(sb, head, ino);
-}
-EXPORT_SYMBOL(ilookup);
-
-/**
- * iget5_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @hashval: hash value (usually inode number) to get
- * @test: callback used for comparisons between inodes
- * @set: callback used to initialize a new struct inode
- * @data: opaque data pointer to pass to @test and @set
- *
- * iget5_locked() uses ifind() to search for the inode specified by @hashval
- * and @data in the inode cache and if present it is returned with an increased
- * reference count. This is a generalized version of iget_locked() for file
- * systems where the inode number is not sufficient for unique identification
- * of an inode.
- *
- * If the inode is not in cache, get_new_inode() is called to allocate a new
- * inode and this is returned locked, hashed, and with the I_NEW flag set. The
- * file system gets to fill it in before unlocking it via unlock_new_inode().
- *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
- */
-struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
-		int (*test)(struct inode *, void *),
-		int (*set)(struct inode *, void *), void *data)
-{
-	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
 
-	inode = ifind(sb, head, test, data, 1);
-	if (inode)
-		return inode;
-	/*
-	 * get_new_inode() will do the right thing, re-trying the search
-	 * in case it had to block at any point.
-	 */
-	return get_new_inode(sb, head, test, set, data);
-}
-EXPORT_SYMBOL(iget5_locked);
-
-/**
- * iget_locked - obtain an inode from a mounted file system
- * @sb: super block of file system
- * @ino: inode number to get
- *
- * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
- * the inode cache and if present it is returned with an increased reference
- * count. This is for file systems where the inode number is sufficient for
- * unique identification of an inode.
- *
- * If the inode is not in cache, get_new_inode_fast() is called to allocate a
- * new inode and this is returned locked, hashed, and with the I_NEW flag set.
- * The file system gets to fill it in before unlocking it via
- * unlock_new_inode().
- */
-struct inode *iget_locked(struct super_block *sb, unsigned long ino)
-{
-	struct hlist_head *head = inode_hashtable + hash(sb, ino);
-	struct inode *inode;
+	spin_lock(&inode_hash_lock);
+	inode = find_inode_fast(sb, head, ino);
+	spin_unlock(&inode_hash_lock);
 
-	inode = ifind_fast(sb, head, ino);
 	if (inode)
-		return inode;
-	/*
-	 * get_new_inode_fast() will do the right thing, re-trying the search
-	 * in case it had to block at any point.
-	 */
-	return get_new_inode_fast(sb, head, ino);
+		wait_on_inode(inode);
+	return inode;
 }
-EXPORT_SYMBOL(iget_locked);
+EXPORT_SYMBOL(ilookup);
 
 int insert_inode_locked(struct inode *inode)
 {
@@ -1270,27 +1247,33 @@ int insert_inode_locked(struct inode *inode)
 	ino_t ino = inode->i_ino;
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
-	inode->i_state |= I_NEW;
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
 			if (old->i_sb != sb)
 				continue;
-			if (old->i_state & (I_FREEING|I_WILL_FREE))
+			spin_lock(&old->i_lock);
+			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+				spin_unlock(&old->i_lock);
 				continue;
+			}
 			break;
 		}
 		if (likely(!node)) {
+			spin_lock(&inode->i_lock);
+			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode->i_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
-		spin_unlock(&inode_lock);
+		spin_unlock(&old->i_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1307,29 +1290,34 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 	struct super_block *sb = inode->i_sb;
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
-	inode->i_state |= I_NEW;
-
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_sb != sb)
 				continue;
 			if (!test(old, data))
 				continue;
-			if (old->i_state & (I_FREEING|I_WILL_FREE))
+			spin_lock(&old->i_lock);
+			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
+				spin_unlock(&old->i_lock);
 				continue;
+			}
 			break;
 		}
 		if (likely(!node)) {
+			spin_lock(&inode->i_lock);
+			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode->i_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
-		spin_unlock(&inode_lock);
+		spin_unlock(&old->i_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1374,47 +1362,35 @@ static void iput_final(struct inode *inode)
 	const struct super_operations *op = inode->i_sb->s_op;
 	int drop;
 
+	WARN_ON(inode->i_state & I_NEW);
+
 	if (op && op->drop_inode)
 		drop = op->drop_inode(inode);
 	else
 		drop = generic_drop_inode(inode);
 
+	if (!drop && (sb->s_flags & MS_ACTIVE)) {
+		inode->i_state |= I_REFERENCED;
+		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
+			inode_lru_list_add(inode);
+		spin_unlock(&inode->i_lock);
+		return;
+	}
+
 	if (!drop) {
-		if (sb->s_flags & MS_ACTIVE) {
-			inode->i_state |= I_REFERENCED;
-			if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
-				inode_lru_list_add(inode);
-			}
-			spin_unlock(&inode_lock);
-			return;
-		}
-		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state |= I_WILL_FREE;
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode->i_lock);
 		write_inode_now(inode, 1);
-		spin_lock(&inode_lock);
+		spin_lock(&inode->i_lock);
 		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state &= ~I_WILL_FREE;
-		__remove_inode_hash(inode);
 	}
 
-	WARN_ON(inode->i_state & I_NEW);
 	inode->i_state |= I_FREEING;
-
-	/*
-	 * Move the inode off the IO lists and LRU once I_FREEING is
-	 * set so that it won't get moved back on there if it is dirty.
-	 */
 	inode_lru_list_del(inode);
-	list_del_init(&inode->i_wb_list);
+	spin_unlock(&inode->i_lock);
 
-	__inode_sb_list_del(inode);
-	spin_unlock(&inode_lock);
 	evict(inode);
-	remove_inode_hash(inode);
-	wake_up_inode(inode);
-	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
-	destroy_inode(inode);
 }
 
 /**
@@ -1431,7 +1407,7 @@ void iput(struct inode *inode)
 	if (inode) {
 		BUG_ON(inode->i_state & I_CLEAR);
 
-		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
 			iput_final(inode);
 	}
 }
@@ -1610,9 +1586,8 @@ EXPORT_SYMBOL(inode_wait);
  * to recheck inode state.
  *
  * It doesn't matter if I_NEW is not set initially, a call to
- * wake_up_inode() after removing from the hash list will DTRT.
- *
- * This is called with inode_lock held.
+ * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
+ * will DTRT.
  */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
@@ -1620,10 +1595,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 }
 
 static __initdata unsigned long ihash_entries;
@@ -1715,7 +1691,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 EXPORT_SYMBOL(init_special_inode);
 
 /**
- * Init uid,gid,mode for new inode according to posix standards
+ * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
  * @inode: New inode
  * @dir: Directory inode
  * @mode: mode of the new inode
@@ -1733,3 +1709,22 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
 	inode->i_mode = mode;
 }
 EXPORT_SYMBOL(inode_init_owner);
+
+/**
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+ * Return true if current either has CAP_FOWNER to the inode, or
+ * owns the file.
+ */
+bool inode_owner_or_capable(const struct inode *inode)
+{
+	struct user_namespace *ns = inode_userns(inode);
+
+	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
+		return true;
+	if (ns_capable(ns, CAP_FOWNER))
+		return true;
+	return false;
+}
+EXPORT_SYMBOL(inode_owner_or_capable);
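
The lock ordering documented in the new header comment (inode_sb_list_lock, then inode->i_lock, then inode_lru_lock) is the pattern the per-superblock walkers in this diff follow, e.g. evict_inodes() and invalidate_inodes(). A condensed sketch of that nesting, with a hypothetical helper name and no real work done on the inode:

	static void example_walk_sb_inodes(struct super_block *sb)
	{
		struct inode *inode;

		/* outer lock: protects sb->s_inodes and inode->i_sb_list */
		spin_lock(&inode_sb_list_lock);
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			/* inner lock: protects inode->i_state */
			spin_lock(&inode->i_lock);
			if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
			/* ... inspect or tag the inode here ... */
			spin_unlock(&inode->i_lock);
		}
		spin_unlock(&inode_sb_list_lock);
	}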