Diffstat (limited to 'fs')
-rw-r--r--  fs/affs/amigaffs.c           |  3
-rw-r--r--  fs/aio.c                     |  3
-rw-r--r--  fs/cifs/inode.c              |  3
-rw-r--r--  fs/dcache.c                  |  9
-rw-r--r--  fs/dlm/lowcomms.c            | 11
-rw-r--r--  fs/ecryptfs/messaging.c      |  6
-rw-r--r--  fs/exportfs/expfs.c          |  3
-rw-r--r--  fs/fat/inode.c               |  3
-rw-r--r--  fs/fat/nfs.c                 |  3
-rw-r--r--  fs/fscache/cookie.c          | 11
-rw-r--r--  fs/inode.c                   | 19
-rw-r--r--  fs/lockd/host.c              | 29
-rw-r--r--  fs/lockd/svcsubs.c           |  7
-rw-r--r--  fs/nfs/pnfs_dev.c            |  9
-rw-r--r--  fs/nfsd/nfscache.c           |  3
-rw-r--r--  fs/notify/fsnotify.c         |  3
-rw-r--r--  fs/notify/inode_mark.c       | 19
-rw-r--r--  fs/notify/vfsmount_mark.c    | 19
-rw-r--r--  fs/ocfs2/dcache.c            |  3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c   |  6
-rw-r--r--  fs/super.c                   |  6
-rw-r--r--  fs/sysfs/bin.c               |  3
-rw-r--r--  fs/xfs/xfs_log_recover.c     |  3
23 files changed, 69 insertions(+), 115 deletions(-)
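
Note (not part of the patch): every hunk below applies the same conversion -- the hlist iterator macros (hlist_for_each_entry() and its _rcu/_safe variants) lose their separate struct hlist_node * cursor argument, so each caller drops one now-unneeded local. A minimal before/after sketch, assuming only the kernel's <linux/list.h> hlist API; struct item and find_item() are made-up names used purely for illustration:

/* Illustrative sketch only -- 'struct item' and find_item() are invented. */
struct item {
	int key;
	struct hlist_node node;		/* links the item into one hash bucket */
};

static struct item *find_item(struct hlist_head *bucket, int key)
{
	struct item *it;

	/*
	 * Old form needed a spare cursor:
	 *	struct hlist_node *pos;
	 *	hlist_for_each_entry(it, pos, bucket, node) { ... }
	 * New form derives the cursor from the entry itself:
	 */
	hlist_for_each_entry(it, bucket, node) {
		if (it->key == key)
			return it;	/* found a matching entry */
	}
	return NULL;	/* 'it' is also NULL once the bucket is exhausted */
}

The _safe variants keep their extra struct hlist_node *n scratch pointer (visible in the foreach_conn() and fsnotify hunks below), since that one still guards against deleting the entry currently being visited.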
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index eb82ee53ee0b..d9a43674cb94 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -125,9 +125,8 @@ static void
 affs_fix_dcache(struct inode *inode, u32 entry_ino)
 {
 	struct dentry *dentry;
-	struct hlist_node *p;
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		if (entry_ino == (u32)(long)dentry->d_fsdata) {
 			dentry->d_fsdata = (void *)inode->i_ino;
 			break;
diff --git a/fs/aio.c b/fs/aio.c
index 064bfbe37566..3f941f2a3059 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct mm_struct *mm = current->mm;
 	struct kioctx *ctx, *ret = NULL;
-	struct hlist_node *n;
 
 	rcu_read_lock();
 
-	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
 		/*
 		 * RCU protects us against accessing freed memory but
 		 * we have to be careful not to get a reference when the
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d2a833999bcc..83f2606c76d0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -816,10 +816,9 @@ static bool
 inode_has_hashed_dentries(struct inode *inode)
 {
 	struct dentry *dentry;
-	struct hlist_node *p;
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
 			spin_unlock(&inode->i_lock);
 			return true;
diff --git a/fs/dcache.c b/fs/dcache.c
index 68220dd0c135..fbfae008ba44 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
 	struct dentry *alias, *discon_alias;
-	struct hlist_node *p;
 
 again:
 	discon_alias = NULL;
-	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		spin_lock(&alias->d_lock);
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
 void d_prune_aliases(struct inode *inode)
 {
 	struct dentry *dentry;
-	struct hlist_node *p;
 restart:
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!dentry->d_count) {
 			__dget_dlock(dentry);
@@ -1443,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
 	int len = entry->d_name.len;
 	const char *name = entry->d_name.name;
 	unsigned int hash = entry->d_name.hash;
-	struct hlist_node *p;
 
 	if (!inode) {
 		__d_instantiate(entry, NULL);
 		return NULL;
 	}
 
-	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		/*
 		 * Don't need alias->d_lock here, because aliases with
 		 * d_parent == entry->d_parent are not subject to name or
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index dd87a31bcc21..4f5ad246582f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
 static struct connection *__find_con(int nodeid)
 {
 	int r;
-	struct hlist_node *h;
 	struct connection *con;
 
 	r = nodeid_hash(nodeid);
 
-	hlist_for_each_entry(con, h, &connection_hash[r], list) {
+	hlist_for_each_entry(con, &connection_hash[r], list) {
 		if (con->nodeid == nodeid)
 			return con;
 	}
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
 static void foreach_conn(void (*conn_func)(struct connection *c))
 {
 	int i;
-	struct hlist_node *h, *n;
+	struct hlist_node *n;
 	struct connection *con;
 
 	for (i = 0; i < CONN_HASH_SIZE; i++) {
-		hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
 			conn_func(con);
-		}
 	}
 }
 
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
 static struct connection *assoc2con(int assoc_id)
 {
 	int i;
-	struct hlist_node *h;
 	struct connection *con;
 
 	mutex_lock(&connections_lock);
 
 	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-		hlist_for_each_entry(con, h, &connection_hash[i], list) {
+		hlist_for_each_entry(con, &connection_hash[i], list) {
 			if (con->sctp_assoc == assoc_id) {
 				mutex_unlock(&connections_lock);
 				return con;
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 5fa2471796c2..8d7a577ae497 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
  */
 int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
 {
-	struct hlist_node *elem;
 	int rc;
 
-	hlist_for_each_entry(*daemon, elem,
+	hlist_for_each_entry(*daemon,
 			    &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
 			    euid_chain) {
 		if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
 		mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
 	}
 	if (ecryptfs_daemon_hash) {
-		struct hlist_node *elem;
 		struct ecryptfs_daemon *daemon;
 		int i;
 
@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
 		for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
 			int rc;
 
-			hlist_for_each_entry(daemon, elem,
+			hlist_for_each_entry(daemon,
 					     &ecryptfs_daemon_hash[i],
 					     euid_chain) {
 				rc = ecryptfs_exorcise_daemon(daemon);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 5df4bb4aab14..262fc9940982 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
 {
 	struct dentry *dentry, *toput = NULL;
 	struct inode *inode;
-	struct hlist_node *p;
 
 	if (acceptable(context, result))
 		return result;
 
 	inode = result->d_inode;
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		dget(dentry);
 		spin_unlock(&inode->i_lock);
 		if (toput)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 780e20806346..acf6e479b443 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
-	struct hlist_node *_p;
 	struct msdos_inode_info *i;
 	struct inode *inode = NULL;
 
 	spin_lock(&sbi->inode_hash_lock);
-	hlist_for_each_entry(i, _p, head, i_fat_hash) {
+	hlist_for_each_entry(i, head, i_fat_hash) {
 		BUG_ON(i->vfs_inode.i_sb != sb);
 		if (i->i_pos != i_pos)
 			continue;
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index ef4b5faba87b..499c10438ca2 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	struct hlist_head *head;
-	struct hlist_node *_p;
 	struct msdos_inode_info *i;
 	struct inode *inode = NULL;
 
 	head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
 	spin_lock(&sbi->dir_hash_lock);
-	hlist_for_each_entry(i, _p, head, i_dir_hash) {
+	hlist_for_each_entry(i, head, i_dir_hash) {
 		BUG_ON(i->vfs_inode.i_sb != sb);
 		if (i->i_logstart != i_logstart)
 			continue;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 8dcb114758e3..e2cba1f60c21 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 				struct fscache_cookie *cookie)
 {
 	struct fscache_object *object;
-	struct hlist_node *_n;
 	int ret;
 
 	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);
 
 	spin_lock(&cookie->lock);
-	hlist_for_each_entry(object, _n, &cookie->backing_objects,
+	hlist_for_each_entry(object, &cookie->backing_objects,
 			     cookie_link) {
 		if (object->cache == cache)
 			goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 {
 	struct fscache_object *p;
 	struct fscache_cache *cache = object->cache;
-	struct hlist_node *_n;
 	int ret;
 
 	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 	/* there may be multiple initial creations of this object, but we only
 	 * want one */
 	ret = -EEXIST;
-	hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
+	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
 		if (p->cache == object->cache) {
 			if (p->state >= FSCACHE_OBJECT_DYING)
 				ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
 	/* pin the parent object */
 	spin_lock_nested(&cookie->parent->lock, 1);
-	hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
+	hlist_for_each_entry(p, &cookie->parent->backing_objects,
 			     cookie_link) {
 		if (p->cache == object->cache) {
 			if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
 void __fscache_update_cookie(struct fscache_cookie *cookie)
 {
 	struct fscache_object *object;
-	struct hlist_node *_p;
 
 	fscache_stat(&fscache_n_updates);
 
@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 	spin_lock(&cookie->lock);
 
 	/* update the index entry on disk in each cache backing this cookie */
-	hlist_for_each_entry(object, _p,
+	hlist_for_each_entry(object,
 			     &cookie->backing_objects, cookie_link) {
 		fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
 	}
diff --git a/fs/inode.c b/fs/inode.c
index 67880e604399..f5f7c06c36fb 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
 				int (*test)(struct inode *, void *),
 				void *data)
 {
-	struct hlist_node *node;
 	struct inode *inode = NULL;
 
 repeat:
-	hlist_for_each_entry(inode, node, head, i_hash) {
+	hlist_for_each_entry(inode, head, i_hash) {
 		spin_lock(&inode->i_lock);
 		if (inode->i_sb != sb) {
 			spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
 static struct inode *find_inode_fast(struct super_block *sb,
 				struct hlist_head *head, unsigned long ino)
 {
-	struct hlist_node *node;
 	struct inode *inode = NULL;
 
 repeat:
-	hlist_for_each_entry(inode, node, head, i_hash) {
+	hlist_for_each_entry(inode, head, i_hash) {
 		spin_lock(&inode->i_lock);
 		if (inode->i_ino != ino) {
 			spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *b = inode_hashtable + hash(sb, ino);
-	struct hlist_node *node;
 	struct inode *inode;
 
 	spin_lock(&inode_hash_lock);
-	hlist_for_each_entry(inode, node, b, i_hash) {
+	hlist_for_each_entry(inode, b, i_hash) {
 		if (inode->i_ino == ino && inode->i_sb == sb) {
 			spin_unlock(&inode_hash_lock);
 			return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
 	while (1) {
-		struct hlist_node *node;
 		struct inode *old = NULL;
 		spin_lock(&inode_hash_lock);
-		hlist_for_each_entry(old, node, head, i_hash) {
+		hlist_for_each_entry(old, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
 			if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
 			}
 			break;
 		}
-		if (likely(!node)) {
+		if (likely(!old)) {
 			spin_lock(&inode->i_lock);
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
 	while (1) {
-		struct hlist_node *node;
 		struct inode *old = NULL;
 
 		spin_lock(&inode_hash_lock);
-		hlist_for_each_entry(old, node, head, i_hash) {
+		hlist_for_each_entry(old, head, i_hash) {
 			if (old->i_sb != sb)
 				continue;
 			if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 			}
 			break;
 		}
-		if (likely(!node)) {
+		if (likely(!old)) {
 			spin_lock(&inode->i_lock);
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
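
Note (not part of the patch): the two `if (likely(!node))` -> `if (likely(!old))` conversions in fs/inode.c above work because the new macro leaves the iteration variable NULL once the bucket has been walked to the end, so the entry pointer doubles as the found/not-found flag. A sketch of that idiom, reusing the made-up struct item from the earlier sketch:

/* Sketch only: the entry pointer itself records whether the loop hit a match. */
static bool bucket_contains(struct hlist_head *bucket, int key)
{
	struct item *it;

	hlist_for_each_entry(it, bucket, node) {
		if (it->key == key)
			break;		/* leave the loop with it != NULL */
	}
	return it != NULL;		/* NULL means the walk completed with no hit */
}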
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 0e17090c310f..abdd75d44dd4 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -32,15 +32,15 @@
 static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
 static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
 
-#define for_each_host(host, pos, chain, table) \
+#define for_each_host(host, chain, table) \
 	for ((chain) = (table); \
 	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-		hlist_for_each_entry((host), (pos), (chain), h_hash)
+		hlist_for_each_entry((host), (chain), h_hash)
 
-#define for_each_host_safe(host, pos, next, chain, table) \
+#define for_each_host_safe(host, next, chain, table) \
 	for ((chain) = (table); \
 	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-		hlist_for_each_entry_safe((host), (pos), (next), \
+		hlist_for_each_entry_safe((host), (next), \
 					  (chain), h_hash)
 
 static unsigned long nrhosts;
@@ -225,7 +225,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
 		.net		= net,
 	};
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;
 	struct nsm_handle *nsm = NULL;
 	struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +236,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
 	mutex_lock(&nlm_host_mutex);
 
 	chain = &nlm_client_hosts[nlm_hash_address(sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
+	hlist_for_each_entry(host, chain, h_hash) {
 		if (host->net != net)
 			continue;
 		if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +321,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 				    const size_t hostname_len)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host = NULL;
 	struct nsm_handle *nsm = NULL;
 	struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +348,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 		nlm_gc_hosts(net);
 
 	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
+	hlist_for_each_entry(host, chain, h_hash) {
 		if (host->net != net)
 			continue;
 		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +513,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
 {
 	struct nlm_host *host;
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 
 	mutex_lock(&nlm_host_mutex);
-	for_each_host(host, pos, chain, cache) {
+	for_each_host(host, chain, cache) {
 		if (host->h_nsmhandle == nsm
 		    && host->h_nsmstate != info->state) {
 			host->h_nsmstate = info->state;
@@ -570,7 +567,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 static void nlm_complain_hosts(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;
 
 	if (net) {
@@ -587,7 +583,7 @@ static void nlm_complain_hosts(struct net *net)
 		dprintk("lockd: %lu hosts left:\n", nrhosts);
 	}
 
-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +596,13 @@ void
 nlm_shutdown_hosts_net(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nlm_host	*host;
 
 	mutex_lock(&nlm_host_mutex);
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts in net %p...\n", net);
-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		host->h_expires = jiffies - 1;
@@ -644,11 +639,11 @@ static void
 nlm_gc_hosts(struct net *net)
 {
 	struct hlist_head *chain;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection for net %p\n", net);
-	for_each_host(host, pos, chain, nlm_server_hosts) {
+	for_each_host(host, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		host->h_inuse = 0;
@@ -657,7 +652,7 @@ nlm_gc_hosts(struct net *net)
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources(net);
 
-	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+	for_each_host_safe(host, next, chain, nlm_server_hosts) {
 		if (net && host->net != net)
 			continue;
 		if (atomic_read(&host->h_count) || host->h_inuse
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index b3a24b07d981..d17bb62b06d6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -84,7 +84,6 @@ __be32
 nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
 					struct nfs_fh *f)
 {
-	struct hlist_node *pos;
 	struct nlm_file	*file;
 	unsigned int	hash;
 	__be32		nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
 	/* Lock file table */
 	mutex_lock(&nlm_file_mutex);
 
-	hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
+	hlist_for_each_entry(file, &nlm_files[hash], f_list)
 		if (!nfs_compare_fh(&file->f_handle, f))
 			goto found;
 
@@ -248,13 +247,13 @@ static int
 nlm_traverse_files(void *data, nlm_host_match_fn_t match,
 		    int (*is_failover_file)(void *data, struct nlm_file *file))
 {
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct nlm_file	*file;
 	int i, ret = 0;
 
 	mutex_lock(&nlm_file_mutex);
 	for (i = 0; i < FILE_NRHASH; i++) {
-		hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+		hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
 			if (is_failover_file && !is_failover_file(data, file))
 				continue;
 			file->f_count++;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index d35b62e83ea6..6da209bd9408 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
 		  long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 
-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->ld == ld && d->nfs_client == clp &&
 		    !memcmp(&d->deviceid, id, sizeof(*id))) {
 			if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	HLIST_HEAD(tmp);
 
 	spin_lock(&nfs4_deviceid_lock);
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->nfs_client == clp && atomic_read(&d->ref)) {
 			hlist_del_init_rcu(&d->node);
 			hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
 nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
 {
 	struct nfs4_deviceid_node *d;
-	struct hlist_node *n;
 	int i;
 
 	rcu_read_lock();
 	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
-		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
 			if (d->nfs_client == clp)
 				set_bit(NFS_DEVICEID_INVALID, &d->flags);
 	}
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 2cbac34a55da..da3dbd0f8979 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -120,7 +120,6 @@ hash_refile(struct svc_cacherep *rp)
 int
 nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
-	struct hlist_node *hn;
 	struct hlist_head *rh;
 	struct svc_cacherep *rp;
 	__be32 xid = rqstp->rq_xid;
@@ -141,7 +140,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	rtn = RC_DOIT;
 
 	rh = &cache_hash[request_hash(xid)];
-	hlist_for_each_entry(rp, hn, rh, c_hash) {
+	hlist_for_each_entry(rp, rh, c_hash) {
 		if (rp->c_state != RC_UNUSED &&
 		    xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 6baadb5a8430..4bb21d67d9b1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
 void __fsnotify_update_child_dentry_flags(struct inode *inode)
 {
 	struct dentry *alias;
-	struct hlist_node *p;
 	int watched;
 
 	if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
 	spin_lock(&inode->i_lock);
 	/* run all of the dentries associated with this inode. Since this is a
 	 * directory, there damn well better only be one item on this list */
-	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		struct dentry *child;
 
 		/* run all of the children of the original inode and fix their
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index f31e90fc050d..74825be65b7b 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -36,12 +36,11 @@
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
 		new_mask |= mark->mask;
 	inode->i_fsnotify_mask = new_mask;
 }
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	LIST_HEAD(free_list);
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
 		list_add(&mark->i.free_i_list, &free_list);
 		hlist_del_init_rcu(&mark->i.i_list);
 		fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
 		struct inode *inode)
 {
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 			    struct fsnotify_group *group, struct inode *inode,
 			    int allow_dups)
 {
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 	}
 
 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
+		last = lmark;
 
 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 
 	BUG_ON(last == NULL);
 	/* mark should be the last entry. last is the current last entry */
-	hlist_add_after_rcu(last, &mark->i.i_list);
+	hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
 out:
 	fsnotify_recalc_inode_mask_locked(inode);
 	spin_unlock(&inode->i_lock);
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 4df58b8ea64a..68ca5a8704b5 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -33,12 +33,12 @@
 void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 {
 	struct fsnotify_mark *mark, *lmark;
-	struct hlist_node *pos, *n;
+	struct hlist_node *n;
 	struct mount *m = real_mount(mnt);
 	LIST_HEAD(free_list);
 
 	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
 		list_add(&mark->m.free_m_list, &free_list);
 		hlist_del_init_rcu(&mark->m.m_list);
 		fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
 		new_mask |= mark->mask;
 	m->mnt_fsnotify_mask = new_mask;
 }
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
 {
 	struct mount *m = real_mount(mnt);
 	struct fsnotify_mark *mark;
-	struct hlist_node *pos;
 
 	assert_spin_locked(&mnt->mnt_root->d_lock);
 
-	hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
+	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
 		if (mark->group == group) {
 			fsnotify_get_mark(mark);
 			return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 			       int allow_dups)
 {
 	struct mount *m = real_mount(mnt);
-	struct fsnotify_mark *lmark;
-	struct hlist_node *node, *last = NULL;
+	struct fsnotify_mark *lmark, *last = NULL;
 	int ret = 0;
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 	}
 
 	/* should mark be in the middle of the current list? */
-	hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
-		last = node;
+	hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
+		last = lmark;
 
 		if ((lmark->group == group) && !allow_dups) {
 			ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 
 	BUG_ON(last == NULL);
 	/* mark should be the last entry. last is the current last entry */
-	hlist_add_after_rcu(last, &mark->m.m_list);
+	hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
 out:
 	fsnotify_recalc_vfsmount_mask_locked(mnt);
 	spin_unlock(&mnt->mnt_root->d_lock);
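
Note (not part of the patch): in the two fsnotify files above the conversion is a little more than mechanical. Because there is no node cursor left to remember, `last` changes type from struct hlist_node * to struct fsnotify_mark *, and hlist_add_after_rcu() must then be handed the embedded list node (&last->i.i_list, respectively &last->m.m_list) rather than `last` itself. A sketch of that append-after-the-last-entry pattern, again using the made-up struct item and assuming the hlist RCU helpers of this kernel era:

/* Sketch only: remember the last entry seen, then append behind it. */
static void append_item_rcu(struct hlist_head *bucket, struct item *new)
{
	struct item *it, *last = NULL;

	hlist_for_each_entry(it, bucket, node)
		last = it;				/* last entry visited so far */

	if (last)
		hlist_add_after_rcu(&last->node, &new->node);	/* pass the embedded node */
	else
		hlist_add_head_rcu(&new->node, bucket);		/* bucket was empty */
}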
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 8db4b58b2e4b..ef999729e274 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
 				      u64 parent_blkno,
 				      int skip_unhashed)
 {
-	struct hlist_node *p;
 	struct dentry *dentry;
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
 			trace_ocfs2_find_local_alias(dentry->d_name.len,
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 01ebfd0bdad7..eeac97bb3bfa 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 					      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct hlist_node *hash_iter;
 	struct hlist_head *bucket;
 	struct dlm_lock_resource *res, *next;
 
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * if necessary */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			if (!(res->state & DLM_LOCK_RES_RECOVERING))
 				continue;
 
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
 	struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 */
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_lockres_hash(dlm, i);
-		hlist_for_each_entry(res, iter, bucket, hash_node) {
+		hlist_for_each_entry(res, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,
diff --git a/fs/super.c b/fs/super.c
index df6c2f4c6b59..7465d4364208 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
 			void *data)
 {
 	struct super_block *s = NULL;
-	struct hlist_node *node;
 	struct super_block *old;
 	int err;
 
 retry:
 	spin_lock(&sb_lock);
 	if (test) {
-		hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
+		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
 			if (!test(old, data))
 				continue;
 			if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
 	void (*f)(struct super_block *, void *), void *arg)
 {
 	struct super_block *sb, *p = NULL;
-	struct hlist_node *node;
 
 	spin_lock(&sb_lock);
-	hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 2ce9a5db6ab5..15c68f9489ae 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -461,14 +461,13 @@ const struct file_operations bin_fops = {
 void unmap_bin_file(struct sysfs_dirent *attr_sd)
 {
 	struct bin_buffer *bb;
-	struct hlist_node *tmp;
 
 	if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
 		return;
 
 	mutex_lock(&sysfs_bin_lock);
 
-	hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
+	hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
 		struct inode *inode = file_inode(bb->file);
 
 		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 96fcbb85ff83..d1dba7ce75ae 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
 	xlog_tid_t		tid)
 {
 	xlog_recover_t		*trans;
-	struct hlist_node	*n;
 
-	hlist_for_each_entry(trans, n, head, r_list) {
+	hlist_for_each_entry(trans, head, r_list) {
 		if (trans->r_log_tid == tid)
 			return trans;
 	}