author	Ingo Molnar <mingo@elte.hu>	2006-03-23 06:00:34 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 10:38:12 -0500
commit	f24075bd0c1cd1cc2cf86d394f960aa0401de573 (patch)
tree	609ad9884e43a8bb1ba5834996955a06bef66fa2 /fs
parent	a11f3a0574a5734db3e5de38922430d005d35118 (diff)
[PATCH] sem2mutex: iprune
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
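For reference, the pattern such a conversion applies is the usual sem2mutex rewrite: a semaphore that is only ever used as a binary lock becomes a struct mutex, and the paired down()/up() calls become mutex_lock()/mutex_unlock(). A minimal sketch of that pattern follows; the lock name example_mutex and the helper function are hypothetical and are not taken from this patch.

#include <linux/mutex.h>

/* Old style: a semaphore initialized to 1, used purely for mutual
 * exclusion (the pre-mutex DECLARE_MUTEX() macro declared exactly that):
 *
 *	static DECLARE_MUTEX(example_sem);
 *	down(&example_sem);
 *	... critical section ...
 *	up(&example_sem);
 */

/* New style: the dedicated mutex type, which documents the binary-lock
 * intent and gains the mutex debugging checks. */
static DEFINE_MUTEX(example_mutex);

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);
	/* ... touch the data protected by example_mutex ... */
	mutex_unlock(&example_mutex);
}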
Diffstat (limited to 'fs')
-rw-r--r--	fs/inode.c	16
-rw-r--r--	fs/inotify.c	6
2 files changed, 11 insertions, 11 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 603e93ef0c6f..25967b67903d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
 DEFINE_SPINLOCK(inode_lock);
 
 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path. Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+DEFINE_MUTEX(iprune_mutex);
 
 /*
  * Statistics gathering..
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
 	int busy;
 	LIST_HEAD(throw_away);
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);
 
 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	return busy;
 }
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
 	if (sb) {
 		/*
 		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
+		 * read mutex so the filesystem cannot go away
 		 * under us (->put_super runs with the write lock
 		 * hold).
 		 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
 	spin_unlock(&inode_lock);
 
 	dispose_list(&freeable);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 
 	if (current_is_kswapd())
 		mod_page_state(kswapd_inodesteal, reap);
diff --git a/fs/inotify.c b/fs/inotify.c
index 60d9653d55b7..0ee39ef591c6 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,7 +54,7 @@ int inotify_max_queued_events;
  * Lock ordering:
  *
  * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_sem (synchronize shrink_icache_memory())
+ * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
  * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
  * inotify_dev->mutex (protects inotify_device and watches->d_list)
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
  * @list: list of inodes being unmounted (sb->s_inodes)
  *
  * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
  * We temporarily drop inode_lock, however, and CAN block.
  */
 void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
 		 * We can safely drop inode_lock here because we hold
 		 * references on both inode and next_i. Also no new inodes
 		 * will be added since the umount has begun. Finally,
-		 * iprune_sem keeps shrink_icache_memory() away.
+		 * iprune_mutex keeps shrink_icache_memory() away.
 		 */
 		spin_unlock(&inode_lock);
 
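After this patch, both the umount path (invalidate_inodes) and the memory-reclaim path (prune_icache) take iprune_mutex before inode_lock, exactly as the lock-ordering comment in fs/inotify.c describes. A condensed sketch of that nesting follows; the helper function below is a hypothetical stand-in for either real function, not a copy of it.

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(iprune_mutex);	/* excludes shrinker vs. umount */
static DEFINE_SPINLOCK(inode_lock);	/* protects the inode lists */

/* Hypothetical stand-in for invalidate_inodes()/prune_icache(). */
static void iprune_path_sketch(void)
{
	mutex_lock(&iprune_mutex);	/* keep the other path away entirely */
	spin_lock(&inode_lock);		/* short, non-sleeping list walking */
	/* ... move victim inodes onto a private dispose list ... */
	spin_unlock(&inode_lock);

	/* dispose_list() may sleep, so it runs after dropping inode_lock
	 * but still under iprune_mutex. */
	mutex_unlock(&iprune_mutex);
}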