about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2010-03-22 19:36:35 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2010-05-21 18:31:14 -0400
commit551de6f34dfeefbeeadb32909c387d393114ecc8 (patch)
tree822af803e7e75ed476b0a176639c162e0395910d
parent1712ac8fda7d8bc4dc921f5777b7423aacad7263 (diff)
Leave superblocks on s_list until the end
We used to remove from s_list and s_instances at the same time. So let's *not* do the former and skip superblocks that have empty s_instances in the loops over s_list. The next step, of course, will be to get rid of rescan logic in those loops. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/drop_caches.c2
-rw-r--r--fs/quota/quota.c2
-rw-r--r--fs/super.c16
-rw-r--r--fs/sync.c5
6 files changed, 25 insertions, 4 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index c9c266db0624..021ec4da9932 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -568,6 +568,8 @@ static void do_thaw_all(struct work_struct *work)
568 spin_lock(&sb_lock); 568 spin_lock(&sb_lock);
569restart: 569restart:
570 list_for_each_entry(sb, &super_blocks, s_list) { 570 list_for_each_entry(sb, &super_blocks, s_list) {
571 if (list_empty(&sb->s_instances))
572 continue;
571 sb->s_count++; 573 sb->s_count++;
572 spin_unlock(&sb_lock); 574 spin_unlock(&sb_lock);
573 down_read(&sb->s_umount); 575 down_read(&sb->s_umount);
diff --git a/fs/dcache.c b/fs/dcache.c
index 2b6f09af13ab..5afc4994bb27 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -552,6 +552,8 @@ restart:
552 prune_ratio = unused / count; 552 prune_ratio = unused / count;
553 spin_lock(&sb_lock); 553 spin_lock(&sb_lock);
554 list_for_each_entry(sb, &super_blocks, s_list) { 554 list_for_each_entry(sb, &super_blocks, s_list) {
555 if (list_empty(&sb->s_instances))
556 continue;
555 if (sb->s_nr_dentry_unused == 0) 557 if (sb->s_nr_dentry_unused == 0)
556 continue; 558 continue;
557 sb->s_count++; 559 sb->s_count++;
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 31f4b0e6d72c..9cd4e4a70f56 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,6 +40,8 @@ static void drop_pagecache(void)
40 spin_lock(&sb_lock); 40 spin_lock(&sb_lock);
41restart: 41restart:
42 list_for_each_entry(sb, &super_blocks, s_list) { 42 list_for_each_entry(sb, &super_blocks, s_list) {
43 if (list_empty(&sb->s_instances))
44 continue;
43 sb->s_count++; 45 sb->s_count++;
44 spin_unlock(&sb_lock); 46 spin_unlock(&sb_lock);
45 down_read(&sb->s_umount); 47 down_read(&sb->s_umount);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index cfc78826da90..4669e7e639bd 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -59,6 +59,8 @@ static int quota_sync_all(int type)
59 spin_lock(&sb_lock); 59 spin_lock(&sb_lock);
60restart: 60restart:
61 list_for_each_entry(sb, &super_blocks, s_list) { 61 list_for_each_entry(sb, &super_blocks, s_list) {
62 if (list_empty(&sb->s_instances))
63 continue;
62 if (!sb->s_qcop || !sb->s_qcop->quota_sync) 64 if (!sb->s_qcop || !sb->s_qcop->quota_sync)
63 continue; 65 continue;
64 66
diff --git a/fs/super.c b/fs/super.c
index 157657b32798..0390461dfca0 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -135,6 +135,7 @@ static int __put_super(struct super_block *sb)
135 int ret = 0; 135 int ret = 0;
136 136
137 if (!--sb->s_count) { 137 if (!--sb->s_count) {
138 list_del_init(&sb->s_list);
138 destroy_super(sb); 139 destroy_super(sb);
139 ret = 1; 140 ret = 1;
140 } 141 }
@@ -151,7 +152,7 @@ static int __put_super(struct super_block *sb)
151int __put_super_and_need_restart(struct super_block *sb) 152int __put_super_and_need_restart(struct super_block *sb)
152{ 153{
153 /* check for race with generic_shutdown_super() */ 154 /* check for race with generic_shutdown_super() */
154 if (list_empty(&sb->s_list)) { 155 if (list_empty(&sb->s_instances)) {
155 /* super block is removed, need to restart... */ 156 /* super block is removed, need to restart... */
156 __put_super(sb); 157 __put_super(sb);
157 return 1; 158 return 1;
@@ -308,8 +309,7 @@ void generic_shutdown_super(struct super_block *sb)
308 } 309 }
309 spin_lock(&sb_lock); 310 spin_lock(&sb_lock);
310 /* should be initialized for __put_super_and_need_restart() */ 311 /* should be initialized for __put_super_and_need_restart() */
311 list_del_init(&sb->s_list); 312 list_del_init(&sb->s_instances);
312 list_del(&sb->s_instances);
313 spin_unlock(&sb_lock); 313 spin_unlock(&sb_lock);
314 up_write(&sb->s_umount); 314 up_write(&sb->s_umount);
315} 315}
@@ -400,6 +400,8 @@ void sync_supers(void)
400 spin_lock(&sb_lock); 400 spin_lock(&sb_lock);
401restart: 401restart:
402 list_for_each_entry(sb, &super_blocks, s_list) { 402 list_for_each_entry(sb, &super_blocks, s_list) {
403 if (list_empty(&sb->s_instances))
404 continue;
403 if (sb->s_op->write_super && sb->s_dirt) { 405 if (sb->s_op->write_super && sb->s_dirt) {
404 sb->s_count++; 406 sb->s_count++;
405 spin_unlock(&sb_lock); 407 spin_unlock(&sb_lock);
@@ -435,6 +437,8 @@ struct super_block * get_super(struct block_device *bdev)
435 spin_lock(&sb_lock); 437 spin_lock(&sb_lock);
436rescan: 438rescan:
437 list_for_each_entry(sb, &super_blocks, s_list) { 439 list_for_each_entry(sb, &super_blocks, s_list) {
440 if (list_empty(&sb->s_instances))
441 continue;
438 if (sb->s_bdev == bdev) { 442 if (sb->s_bdev == bdev) {
439 sb->s_count++; 443 sb->s_count++;
440 spin_unlock(&sb_lock); 444 spin_unlock(&sb_lock);
@@ -471,6 +475,8 @@ struct super_block *get_active_super(struct block_device *bdev)
471 475
472 spin_lock(&sb_lock); 476 spin_lock(&sb_lock);
473 list_for_each_entry(sb, &super_blocks, s_list) { 477 list_for_each_entry(sb, &super_blocks, s_list) {
478 if (list_empty(&sb->s_instances))
479 continue;
474 if (sb->s_bdev != bdev) 480 if (sb->s_bdev != bdev)
475 continue; 481 continue;
476 482
@@ -490,6 +496,8 @@ struct super_block * user_get_super(dev_t dev)
490 spin_lock(&sb_lock); 496 spin_lock(&sb_lock);
491rescan: 497rescan:
492 list_for_each_entry(sb, &super_blocks, s_list) { 498 list_for_each_entry(sb, &super_blocks, s_list) {
499 if (list_empty(&sb->s_instances))
500 continue;
493 if (sb->s_dev == dev) { 501 if (sb->s_dev == dev) {
494 sb->s_count++; 502 sb->s_count++;
495 spin_unlock(&sb_lock); 503 spin_unlock(&sb_lock);
@@ -600,6 +608,8 @@ static void do_emergency_remount(struct work_struct *work)
600 608
601 spin_lock(&sb_lock); 609 spin_lock(&sb_lock);
602 list_for_each_entry(sb, &super_blocks, s_list) { 610 list_for_each_entry(sb, &super_blocks, s_list) {
611 if (list_empty(&sb->s_instances))
612 continue;
603 sb->s_count++; 613 sb->s_count++;
604 spin_unlock(&sb_lock); 614 spin_unlock(&sb_lock);
605 down_write(&sb->s_umount); 615 down_write(&sb->s_umount);
diff --git a/fs/sync.c b/fs/sync.c
index 92b228176f7c..ad6691bae370 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -99,10 +99,13 @@ static void sync_filesystems(int wait)
99 mutex_lock(&mutex); /* Could be down_interruptible */ 99 mutex_lock(&mutex); /* Could be down_interruptible */
100 spin_lock(&sb_lock); 100 spin_lock(&sb_lock);
101 list_for_each_entry(sb, &super_blocks, s_list) 101 list_for_each_entry(sb, &super_blocks, s_list)
102 sb->s_need_sync = 1; 102 if (!list_empty(&sb->s_instances))
103 sb->s_need_sync = 1;
103 104
104restart: 105restart:
105 list_for_each_entry(sb, &super_blocks, s_list) { 106 list_for_each_entry(sb, &super_blocks, s_list) {
107 if (list_empty(&sb->s_instances))
108 continue;
106 if (!sb->s_need_sync) 109 if (!sb->s_need_sync)
107 continue; 110 continue;
108 sb->s_need_sync = 0; 111 sb->s_need_sync = 0;