author		Christoph Hellwig <hch@lst.de>	2009-12-17 08:25:01 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2009-12-17 11:03:25 -0500
commit		eaff8079d4f1016a12e34ab323737314f24127dd (patch)
tree		a3d9e00320c6195e55811d5247a521f99341a411 /fs/inode.c
parent		7a0ad10c367ab57c899d340372f37880cbe6ab52 (diff)
kill I_LOCK
After I_SYNC was split from I_LOCK, the leftover is always used together with
I_NEW and is thus superfluous.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
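
For context on why the flag is redundant: after the I_SYNC split, every site
that set or cleared I_LOCK also set or cleared I_NEW, so a single wait bit
suffices. The sketch below shows roughly how the companion change to
include/linux/fs.h (outside this diffstat) reads; the bit values are
reconstructed from the 2.6.32-era header and should be treated as illustrative.

/* Before: I_NEW was a plain flag; I_LOCK carried the wait bit. */
#define I_NEW		8
#define __I_LOCK	7
#define I_LOCK		(1 << __I_LOCK)

/* After: I_NEW takes over the wait-bit role and I_LOCK is gone. */
#define __I_NEW		3
#define I_NEW		(1 << __I_NEW)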
Diffstat (limited to 'fs/inode.c')
-rw-r--r--	fs/inode.c	26
1 file changed, 13 insertions, 13 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 06c1f02de611..03dfeb2e3928 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode)
 	 * Prevent speculative execution through spin_unlock(&inode_lock);
 	 */
 	smp_mb();
-	wake_up_bit(&inode->i_state, __I_LOCK);
+	wake_up_bit(&inode->i_state, __I_NEW);
 }
 
 /**
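
wake_up_inode() is the waker half of a wait-bit pair: it wakes every task
sleeping on that bit of i_state, which after this patch is __I_NEW. The
sleeper side is the wait_on_inode() helper; a minimal sketch of its
2.6.32-era shape, assuming the wait_on_bit() action-callback API of the
time, looks like:

/* Sketch of the waiter paired with wake_up_inode(); inode_wait is
 * the schedule() callback exported from fs/inode.c (see below). */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, inode_wait,
		    TASK_UNINTERRUPTIBLE);
}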
@@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode)
 	}
 #endif
 	/*
-	 * This is special! We do not need the spinlock when clearing I_LOCK,
+	 * This is special! We do not need the spinlock when clearing I_NEW,
 	 * because we're guaranteed that nobody else tries to do anything about
 	 * the state of the inode when it is locked, as we just created it (so
-	 * there can be no old holders that haven't tested I_LOCK).
+	 * there can be no old holders that haven't tested I_NEW).
 	 * However we must emit the memory barrier so that other CPUs reliably
-	 * see the clearing of I_LOCK after the other inode initialisation has
+	 * see the clearing of I_NEW after the other inode initialisation has
 	 * completed.
 	 */
 	smp_mb();
-	WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
-	inode->i_state &= ~(I_LOCK|I_NEW);
+	WARN_ON(!(inode->i_state & I_NEW));
+	inode->i_state &= ~I_NEW;
 	wake_up_inode(inode);
 }
 EXPORT_SYMBOL(unlock_new_inode);
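
unlock_new_inode() is how a filesystem publishes a freshly initialised
inode. A hypothetical caller that follows the contract described in the
comment above might look like this (names and error handling are
illustrative, not taken from this patch):

struct inode *inode = iget_locked(sb, ino);

if (!inode)
	return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
	return inode;		/* found in cache, already initialised */

/* ... read the on-disk inode and fill in the in-core fields ... */

unlock_new_inode(inode);	/* clears I_NEW and wakes any waiters */
return inode;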
@@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 			goto set_failed;
 
 		__inode_add_to_lists(sb, head, inode);
-		inode->i_state = I_LOCK|I_NEW;
+		inode->i_state = I_NEW;
 		spin_unlock(&inode_lock);
 
 		/* Return the locked inode with I_NEW set, the
@@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		if (!old) {
 			inode->i_ino = ino;
 			__inode_add_to_lists(sb, head, inode);
-			inode->i_state = I_LOCK|I_NEW;
+			inode->i_state = I_NEW;
 			spin_unlock(&inode_lock);
 
 			/* Return the locked inode with I_NEW set, the
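
The "Return the locked inode with I_NEW set" comments describe one half of a
handshake: a concurrent lookup that finds the inode in the hash must sleep
until the creator clears I_NEW. Reconstructed roughly from the era's
fs/inode.c (not part of this diff), the fast-lookup side did:

static struct inode *ifind_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);	/* sleep until I_NEW clears */
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}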
@@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
 	ino_t ino = inode->i_ino;
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
@@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 	struct super_block *sb = inode->i_sb;
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 
 	while (1) {
 		struct hlist_node *node;
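
insert_inode_locked() and insert_inode_locked4() let a filesystem reserve an
inode number in the hash with I_NEW set before any on-disk state exists, so
racing lookups block instead of seeing a half-built inode. A hypothetical
create path (illustrative, not from this patch) might use it as:

inode->i_ino = ino;
if (insert_inode_locked(inode) < 0) {
	/* Lost the race: another task hashed this number first. */
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(-EBUSY);
}
/* ... allocate and write the on-disk inode ... */
unlock_new_inode(inode);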
@@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait);
  * until the deletion _might_ have completed. Callers are responsible
  * to recheck inode state.
  *
- * It doesn't matter if I_LOCK is not set initially, a call to
+ * It doesn't matter if I_NEW is not set initially, a call to
  * wake_up_inode() after removing from the hash list will DTRT.
  *
  * This is called with inode_lock held.
@@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait);
 static void __wait_on_freeing_inode(struct inode *inode)
 {
 	wait_queue_head_t *wq;
-	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode_lock);
 	schedule();
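
The hunk cuts off mid-function; past the schedule() the era's code completes
the standard wait-bit sequence by tearing down the wait descriptor and
re-taking inode_lock, roughly (reconstructed, not part of the diff shown):

	finish_wait(wq, &wait.wait);
	spin_lock(&inode_lock);
}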