author     Dave Chinner <dchinner@redhat.com>   2011-03-22 07:23:42 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2011-03-24 21:17:51 -0400
commit     67a23c494621ff1d5431c3bc320947865b224625 (patch)
tree       cce0868fc73031d8b0addc4dc7412d49ee9ec370 /fs
parent     a66979abad090b2765a6c6790c9fdeab996833f2 (diff)
fs: rename inode_lock to inode_hash_lock
All that remains of the inode_lock is protecting the inode hash list
manipulation and traversals. Rename the inode_lock to inode_hash_lock to
reflect its actual function.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
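For orientation before the diff: with this patch, code that inserts an inode into the hash takes inode_hash_lock outermost and nests inode->i_lock (and, via inode_sb_list_add(), inode_sb_list_lock) inside it, as documented by the lock-ordering comment added to fs/inode.c. A minimal sketch distilled from the get_new_inode() and insert_inode_locked() hunks below, not a complete function (allocation and error paths omitted):

	spin_lock(&inode_hash_lock);		/* outermost: protects inode_hashtable, inode->i_hash */
	spin_lock(&inode->i_lock);		/* innermost: per-inode state */
	inode->i_state |= I_NEW;
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);
	inode_sb_list_add(inode);		/* takes inode_sb_list_lock inside inode_hash_lock */
	spin_unlock(&inode_hash_lock);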
Diffstat (limited to 'fs')
-rw-r--r--  fs/inode.c                 111
-rw-r--r--  fs/notify/inode_mark.c       1
-rw-r--r--  fs/notify/mark.c             1
-rw-r--r--  fs/notify/vfsmount_mark.c    1
-rw-r--r--  fs/ntfs/inode.c              4
5 files changed, 63 insertions, 55 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 239fdc08719e..f9ee4928358f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -39,10 +39,10 @@
  *   sb->s_inodes, inode->i_sb_list
  * inode_wb_list_lock protects:
  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
  *
  * Lock ordering:
- * inode_lock
- *   inode->i_lock
  *
  * inode_sb_list_lock
  *   inode->i_lock
@@ -50,6 +50,13 @@
  *
  * inode_wb_list_lock
  *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
  */
 
 /*
@@ -85,6 +92,8 @@
 
 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 /*
  * Each inode can be on two separate lists. One is
@@ -100,15 +109,6 @@ static unsigned int i_hash_shift __read_mostly;
 
 static LIST_HEAD(inode_lru);
 static DEFINE_SPINLOCK(inode_lru_lock);
-static struct hlist_head *inode_hashtable __read_mostly;
-
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
@@ -433,11 +433,11 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 {
 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_add_head(&inode->i_hash, b);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);
 
@@ -449,11 +449,11 @@ EXPORT_SYMBOL(__insert_inode_hash);
  */
 void remove_inode_hash(struct inode *inode)
 {
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_del_init(&inode->i_hash);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);
 
@@ -778,11 +778,15 @@ static struct inode *find_inode(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
+		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (!test(inode, data))
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		spin_lock(&inode->i_lock);
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -806,11 +810,15 @@ static struct inode *find_inode_fast(struct super_block *sb,
 
 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
+		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		if (inode->i_sb != sb)
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
 			continue;
-		spin_lock(&inode->i_lock);
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -924,7 +932,7 @@ void unlock_new_inode(struct inode *inode)
 EXPORT_SYMBOL(unlock_new_inode);
 
 /*
- * This is called without the inode lock held.. Be careful.
+ * This is called without the inode hash lock held.. Be careful.
  *
  * We no longer cache the sb_flags in i_flags - see fs.h
  * -- rmk@arm.uk.linux.org
@@ -941,7 +949,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode(sb, head, test, data);
 		if (!old) {
@@ -953,7 +961,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -966,7 +974,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -974,7 +982,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 		return inode;
 
 set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	destroy_inode(inode);
 	return NULL;
 }
@@ -992,7 +1000,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 	if (inode) {
 		struct inode *old;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
@@ -1002,7 +1010,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 
 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -1015,7 +1023,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -1036,10 +1044,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 	struct hlist_node *node;
 	struct inode *inode;
 
+	spin_lock(&inode_hash_lock);
 	hlist_for_each_entry(inode, node, b, i_hash) {
-		if (inode->i_ino == ino && inode->i_sb == sb)
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
 			return 0;
+		}
 	}
+	spin_unlock(&inode_hash_lock);
 
 	return 1;
 }
@@ -1069,7 +1081,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 	static unsigned int counter;
 	ino_t res;
 
-	spin_lock(&inode_lock);
 	spin_lock(&iunique_lock);
 	do {
 		if (counter <= max_reserved)
@@ -1077,7 +1088,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 		res = counter++;
 	} while (!test_inode_iunique(sb, res));
 	spin_unlock(&iunique_lock);
-	spin_unlock(&inode_lock);
 
 	return res;
 }
@@ -1119,7 +1129,7 @@ EXPORT_SYMBOL(igrab);
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 static struct inode *ifind(struct super_block *sb,
 		struct hlist_head *head, int (*test)(struct inode *, void *),
@@ -1127,15 +1137,15 @@ static struct inode *ifind(struct super_block *sb,
 {
 	struct inode *inode;
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode(sb, head, test, data);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		if (likely(wait))
 			wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }
 
@@ -1159,14 +1169,14 @@ static struct inode *ifind_fast(struct super_block *sb,
 {
 	struct inode *inode;
 
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }
 
@@ -1189,7 +1199,7 @@ static struct inode *ifind_fast(struct super_block *sb,
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
@@ -1217,7 +1227,7 @@ EXPORT_SYMBOL(ilookup5_nowait);
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
@@ -1268,7 +1278,8 @@ EXPORT_SYMBOL(ilookup);
  * inode and this is returned locked, hashed, and with the I_NEW flag set. The
  * file system gets to fill it in before unlocking it via unlock_new_inode().
  *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
  */
 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *),
@@ -1328,7 +1339,7 @@ int insert_inode_locked(struct inode *inode)
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
@@ -1346,12 +1357,12 @@ int insert_inode_locked(struct inode *inode)
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1372,7 +1383,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 		struct hlist_node *node;
 		struct inode *old = NULL;
 
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_sb != sb)
 				continue;
@@ -1390,12 +1401,12 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1674,10 +1685,10 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 }
 
 static __initdata unsigned long ihash_entries;
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index fb3b3c5ef0ee..07ea8d3e6ea2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
 
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 325185e514bb..50c00856f730 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/srcu.h>
-#include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
 
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 85eebff6d0d7..e86577d6c5c3 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -23,7 +23,6 @@
 #include <linux/mount.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */
 
 #include <asm/atomic.h>
 
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index a627ed82c0a3..0b56c6b7ec01 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -54,7 +54,7 @@
  *
  * Return 1 if the attributes match and 0 if not.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep.
  */
 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
@@ -98,7 +98,7 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
  *
  * Return 0 on success and -errno on error.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
  */
 static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)