author		Christoph Hellwig <hch@lst.de>		2009-12-17 08:25:01 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2009-12-17 11:03:25 -0500
commit		eaff8079d4f1016a12e34ab323737314f24127dd
tree		a3d9e00320c6195e55811d5247a521f99341a411
parent		7a0ad10c367ab57c899d340372f37880cbe6ab52
kill I_LOCK
After I_SYNC was split from I_LOCK the leftover is always used together with I_NEW and thus superfluous.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	fs/gfs2/inode.c			 2
-rw-r--r--	fs/inode.c			26
-rw-r--r--	fs/jfs/jfs_txnmgr.c		 2
-rw-r--r--	fs/ntfs/inode.c			 6
-rw-r--r--	fs/ubifs/file.c			 2
-rw-r--r--	fs/xfs/linux-2.6/xfs_iops.c	 2
-rw-r--r--	fs/xfs/xfs_iget.c		 4
-rw-r--r--	include/linux/fs.h		36
-rw-r--r--	include/linux/writeback.h	 3
9 files changed, 39 insertions, 44 deletions
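
The pattern the patch collapses can be sketched as follows. This is a simplified illustration distilled from the hunks below, not verbatim kernel code: the function names ending in _sketch are hypothetical, and the inode_lock handling around the real call sites is omitted.

	/* Creation side: mark the inode as under construction with I_NEW alone
	 * (previously I_LOCK|I_NEW), then clear it and wake any waiters.
	 */
	static void unlock_new_inode_sketch(struct inode *inode)
	{
		smp_mb();			/* publish initialisation before clearing the bit */
		inode->i_state &= ~I_NEW;	/* was: inode->i_state &= ~(I_LOCK|I_NEW) */
		wake_up_bit(&inode->i_state, __I_NEW);
	}

	/* Waiter side: anyone who found a half-constructed inode now sleeps on
	 * the __I_NEW bit instead of the removed __I_LOCK bit.
	 */
	static inline void wait_on_inode_sketch(struct inode *inode)
	{
		might_sleep();
		wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
	}

Since every place that set I_LOCK also set I_NEW, and every waiter can key off __I_NEW just as well, the separate bit carried no information of its own.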
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 3ff32fa793da..6e220f4eee7d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -125,7 +125,7 @@ static struct inode *gfs2_iget_skip(struct super_block *sb,
  * directory entry when gfs2_inode_lookup() is invoked. Part of the code
  * segment inside gfs2_inode_lookup code needs to get moved around.
  *
- * Clean up I_LOCK and I_NEW as well.
+ * Clears I_NEW as well.
  **/
 
 void gfs2_set_iop(struct inode *inode)
diff --git a/fs/inode.c b/fs/inode.c
index 06c1f02de611..03dfeb2e3928 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode)
 	 * Prevent speculative execution through spin_unlock(&inode_lock);
 	 */
 	smp_mb();
-	wake_up_bit(&inode->i_state, __I_LOCK);
+	wake_up_bit(&inode->i_state, __I_NEW);
 }
 
 /**
@@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode)
 	}
 #endif
 	/*
-	 * This is special! We do not need the spinlock when clearing I_LOCK,
+	 * This is special! We do not need the spinlock when clearing I_NEW,
 	 * because we're guaranteed that nobody else tries to do anything about
 	 * the state of the inode when it is locked, as we just created it (so
-	 * there can be no old holders that haven't tested I_LOCK).
+	 * there can be no old holders that haven't tested I_NEW).
 	 * However we must emit the memory barrier so that other CPUs reliably
-	 * see the clearing of I_LOCK after the other inode initialisation has
+	 * see the clearing of I_NEW after the other inode initialisation has
 	 * completed.
 	 */
 	smp_mb();
-	WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
-	inode->i_state &= ~(I_LOCK|I_NEW);
+	WARN_ON(!(inode->i_state & I_NEW));
+	inode->i_state &= ~I_NEW;
 	wake_up_inode(inode);
 }
 EXPORT_SYMBOL(unlock_new_inode);
@@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 			goto set_failed;
 
 		__inode_add_to_lists(sb, head, inode);
-		inode->i_state = I_LOCK|I_NEW;
+		inode->i_state = I_NEW;
 		spin_unlock(&inode_lock);
 
 		/* Return the locked inode with I_NEW set, the
@@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		if (!old) {
 			inode->i_ino = ino;
 			__inode_add_to_lists(sb, head, inode);
-			inode->i_state = I_LOCK|I_NEW;
+			inode->i_state = I_NEW;
 			spin_unlock(&inode_lock);
 
 			/* Return the locked inode with I_NEW set, the
@@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
 	ino_t ino = inode->i_ino;
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
@@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 	struct super_block *sb = inode->i_sb;
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
-	inode->i_state |= I_LOCK|I_NEW;
+	inode->i_state |= I_NEW;
 
 	while (1) {
 		struct hlist_node *node;
@@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait);
  * until the deletion _might_ have completed. Callers are responsible
  * to recheck inode state.
  *
- * It doesn't matter if I_LOCK is not set initially, a call to
+ * It doesn't matter if I_NEW is not set initially, a call to
  * wake_up_inode() after removing from the hash list will DTRT.
  *
  * This is called with inode_lock held.
@@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait);
 static void __wait_on_freeing_inode(struct inode *inode)
 {
 	wait_queue_head_t *wq;
-	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode_lock);
 	schedule();
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index f26e4d03ada5..d945ea76b445 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1292,7 +1292,7 @@ int txCommit(tid_t tid, /* transaction identifier */
 	 */
 	/*
 	 * I believe this code is no longer needed. Splitting I_LOCK
-	 * into two bits, I_LOCK and I_SYNC should prevent this
+	 * into two bits, I_NEW and I_SYNC should prevent this
 	 * deadlock as well. But since I don't have a JFS testload
 	 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
 	 * Joern
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 9938034762cc..dc2505abb6d7 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -530,7 +530,7 @@ err_corrupt_attr:
  *	the ntfs inode.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *    i_count is set to 1, so it is not going to go away
  *    i_flags is set to 0 and we have no business touching it. Only an ioctl()
  *    is allowed to write to them. We should of course be honouring them but
@@ -1207,7 +1207,7 @@ err_out:
  * necessary fields in @vi as well as initializing the ntfs inode.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *    i_count is set to 1, so it is not going to go away
  *
  * Return 0 on success and -errno on error. In the error case, the inode will
@@ -1474,7 +1474,7 @@ err_out:
  * normal directory inodes.
  *
  * Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
  *    i_count is set to 1, so it is not going to go away
  *
  * Return 0 on success and -errno on error. In the error case, the inode will
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 39849f887e72..16a6444330ec 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -45,7 +45,7 @@
  *
  * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
  * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
- * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
+ * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
  * set as well. However, UBIFS disables readahead.
  */
 
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 1d5b298ba8b2..225946012d0b 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -794,7 +794,7 @@ xfs_setup_inode(
 	struct inode		*inode = &ip->i_vnode;
 
 	inode->i_ino = ip->i_ino;
-	inode->i_state = I_NEW|I_LOCK;
+	inode->i_state = I_NEW;
 	inode_add_to_lists(ip->i_mount->m_super, inode);
 
 	inode->i_mode	= ip->i_d.di_mode;
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0de36c2a46f1..fa402a6bbbcf 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -91,7 +91,7 @@ xfs_inode_alloc(
 	ip->i_new_size = 0;
 
 	/* prevent anyone from using this yet */
-	VFS_I(ip)->i_state = I_NEW|I_LOCK;
+	VFS_I(ip)->i_state = I_NEW;
 
 	return ip;
 }
@@ -217,7 +217,7 @@ xfs_iget_cache_hit(
 			trace_xfs_iget_reclaim(ip);
 			goto out_error;
 		}
-		inode->i_state = I_LOCK|I_NEW;
+		inode->i_state = I_NEW;
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 77a975089d9a..cca191933ff6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1587,7 +1587,7 @@ struct super_operations {
  * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
  * various stages of removing an inode.
  *
- * Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
  *
  * I_DIRTY_SYNC		Inode is dirty, but doesn't have to be written on
  *			fdatasync(). i_atime is the usual cause.
@@ -1596,8 +1596,14 @@ struct super_operations {
  *			don't have to write inode on fdatasync() when only
  *			mtime has changed in it.
  * I_DIRTY_PAGES	Inode has dirty pages. Inode itself may be clean.
- * I_NEW		get_new_inode() sets i_state to I_LOCK|I_NEW. Both
- *			are cleared by unlock_new_inode(), called from iget().
+ * I_NEW		Serves as both a mutex and completion notification.
+ *			New inodes set I_NEW. If two processes both create
+ *			the same inode, one of them will release its inode and
+ *			wait for I_NEW to be released before returning.
+ *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ *			also cause waiting on I_NEW, without I_NEW actually
+ *			being set. find_inode() uses this to prevent returning
+ *			nearly-dead inodes.
  * I_WILL_FREE		Must be set when calling write_inode_now() if i_count
  *			is zero. I_FREEING must be set when I_WILL_FREE is
  *			cleared.
@@ -1611,20 +1617,11 @@ struct super_operations {
  *			prohibited for many purposes. iget() must wait for
  *			the inode to be completely released, then create it
  *			anew. Other functions will just ignore such inodes,
- *			if appropriate. I_LOCK is used for waiting.
+ *			if appropriate. I_NEW is used for waiting.
  *
- * I_LOCK		Serves as both a mutex and completion notification.
- *			New inodes set I_LOCK. If two processes both create
- *			the same inode, one of them will release its inode and
- *			wait for I_LOCK to be released before returning.
- *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- *			also cause waiting on I_LOCK, without I_LOCK actually
- *			being set. find_inode() uses this to prevent returning
- *			nearly-dead inodes.
- * I_SYNC		Similar to I_LOCK, but limited in scope to writeback
- *			of inode dirty data. Having a separate lock for this
- *			purpose reduces latency and prevents some filesystem-
- *			specific deadlocks.
+ * I_SYNC		Synchonized write of dirty inode data. The bits is
+ *			set during data writeback, and cleared with a wakeup
+ *			on the bit address once it is done.
  *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
@@ -1633,13 +1630,12 @@ struct super_operations {
 #define I_DIRTY_SYNC		1
 #define I_DIRTY_DATASYNC	2
 #define I_DIRTY_PAGES		4
-#define I_NEW			8
+#define __I_NEW			3
+#define I_NEW			(1 << __I_NEW)
 #define I_WILL_FREE		16
 #define I_FREEING		32
 #define I_CLEAR			64
-#define __I_LOCK		7
-#define I_LOCK			(1 << __I_LOCK)
-#define __I_SYNC		8
+#define __I_SYNC		7
 #define I_SYNC			(1 << __I_SYNC)
 
 #define I_DIRTY	(I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
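
As a side note on the include/linux/fs.h hunk above, the bit values work out so that nothing but the removed flag changes meaning. The lines below are illustrative only and simply restate what the new #defines evaluate to.

	/* Resulting i_state flag values after this patch (derived from the hunk above). */
	#define __I_NEW		3
	#define I_NEW		(1 << __I_NEW)	/* == 8, the same value the old literal 8 had */
	#define __I_SYNC	7
	#define I_SYNC		(1 << __I_SYNC)	/* == 128, the bit position the removed I_LOCK held */

I_NEW keeps its old value of 8, while I_SYNC moves from bit 8 down to bit 7, reusing the slot freed by dropping I_LOCK.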
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 705f01fe413a..c18c008f4bbf 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,8 +79,7 @@ void wakeup_flusher_threads(long nr_pages);
 static inline void wait_on_inode(struct inode *inode)
 {
 	might_sleep();
-	wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
-							TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
 }
 static inline void inode_sync_wait(struct inode *inode)
 {