author     Kirill Korotaev <dev@sw.ru>                 2005-06-23 03:09:54 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-23 12:45:27 -0400
commit     618f06362ae3f60f95d7b0e666de25ee6ae35679 (patch)
tree       4415b4e590913e16535704168ea74c6af5a93c48 /fs/super.c
parent     4fea2838aa00b9e59efde974dcdb455608192811 (diff)
[PATCH] O(1) sb list traversing on syncs
This patch removes O(n^2) super block loops in sync_inodes(),
sync_filesystems() etc. in favour of using __put_super_and_need_restart()
which I introduced earlier. We faced noticeably long freezes on sb
syncing when there are thousands of super blocks in the system.
Signed-Off-By: Kirill Korotaev <dev@sw.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
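For readers following the change, below is a minimal, self-contained userspace sketch of the traversal pattern the patch converts these loops to. It is not kernel code: the list type, node_put_and_need_restart() and the single-threaded setup are illustrative stand-ins for struct super_block, sb_lock and __put_super_and_need_restart(). The point it demonstrates is that the walk normally continues from the current element after the per-element work, and only jumps back to the restart label in the rare case where the element was unlinked in the meantime; the old loops instead did an unconditional goto restart from the list head after every dirty super block, which is where the quadratic behaviour came from.

/*
 * Illustrative userspace sketch only -- not the kernel implementation.
 * node_put_and_need_restart() mimics the role of
 * __put_super_and_need_restart(): drop the reference taken before the
 * "unlocked" work and report whether the walk must start over because
 * the element was removed from the list while we were not looking.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;	/* circular list with a sentinel head */
	int id;
	int dirty;
	int refcount;
	int on_list;
};

static struct node head = { .prev = &head, .next = &head, .id = -1, .on_list = 1 };

static void add_tail(struct node *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
	n->on_list = 1;
}

static int node_put_and_need_restart(struct node *n)
{
	n->refcount--;
	return !n->on_list;	/* nonzero: our list position is gone, restart */
}

static void sync_all(void)
{
restart:
	for (struct node *n = head.next; n != &head; n = n->next) {
		if (!n->dirty)
			continue;
		n->refcount++;	/* pin n; in the kernel this is sb->s_count++ */
		/* the real code drops sb_lock here and calls write_super() */
		printf("syncing node %d\n", n->id);
		n->dirty = 0;
		/* sb_lock would be re-taken here */
		if (node_put_and_need_restart(n))
			goto restart;	/* rare case only: n left the list */
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->id = i;
		n->dirty = i & 1;
		add_tail(n);
	}
	sync_all();
	return 0;
}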
Diffstat (limited to 'fs/super.c')
-rw-r--r--  fs/super.c  83
1 file changed, 45 insertions, 38 deletions
diff --git a/fs/super.c b/fs/super.c
index 573bcc81bb82..25bc1ec6bc5d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -341,20 +341,22 @@ static inline void write_super(struct super_block *sb)
  */
 void sync_supers(void)
 {
-	struct super_block * sb;
-restart:
+	struct super_block *sb;
+
 	spin_lock(&sb_lock);
-	sb = sb_entry(super_blocks.next);
-	while (sb != sb_entry(&super_blocks))
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (sb->s_dirt) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
 			down_read(&sb->s_umount);
 			write_super(sb);
-			drop_super(sb);
-			goto restart;
-		} else
-			sb = sb_entry(sb->s_list.next);
+			up_read(&sb->s_umount);
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto restart;
+		}
+	}
 	spin_unlock(&sb_lock);
 }
 
@@ -381,20 +383,16 @@ void sync_filesystems(int wait)
 
 	down(&mutex);		/* Could be down_interruptible */
 	spin_lock(&sb_lock);
-	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
-			sb = sb_entry(sb->s_list.next)) {
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_op->sync_fs)
 			continue;
 		if (sb->s_flags & MS_RDONLY)
 			continue;
 		sb->s_need_sync_fs = 1;
 	}
-	spin_unlock(&sb_lock);
 
 restart:
-	spin_lock(&sb_lock);
-	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
-			sb = sb_entry(sb->s_list.next)) {
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_need_sync_fs)
 			continue;
 		sb->s_need_sync_fs = 0;
@@ -405,8 +403,11 @@ restart:
 		down_read(&sb->s_umount);
 		if (sb->s_root && (wait || sb->s_dirt))
 			sb->s_op->sync_fs(sb, wait);
-		drop_super(sb);
-		goto restart;
+		up_read(&sb->s_umount);
+		/* restart only when sb is no longer on the list */
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
 	}
 	spin_unlock(&sb_lock);
 	up(&mutex);
@@ -422,21 +423,25 @@ restart:
 
 struct super_block * get_super(struct block_device *bdev)
 {
-	struct list_head *p;
+	struct super_block *sb;
+
 	if (!bdev)
 		return NULL;
-rescan:
+
 	spin_lock(&sb_lock);
-	list_for_each(p, &super_blocks) {
-		struct super_block *s = sb_entry(p);
-		if (s->s_bdev == bdev) {
-			s->s_count++;
+rescan:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_bdev == bdev) {
+			sb->s_count++;
 			spin_unlock(&sb_lock);
-			down_read(&s->s_umount);
-			if (s->s_root)
-				return s;
-			drop_super(s);
-			goto rescan;
+			down_read(&sb->s_umount);
+			if (sb->s_root)
+				return sb;
+			up_read(&sb->s_umount);
+			/* restart only when sb is no longer on the list */
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto rescan;
 		}
 	}
 	spin_unlock(&sb_lock);
@@ -447,20 +452,22 @@ EXPORT_SYMBOL(get_super);
 
 struct super_block * user_get_super(dev_t dev)
 {
-	struct list_head *p;
+	struct super_block *sb;
 
-rescan:
 	spin_lock(&sb_lock);
-	list_for_each(p, &super_blocks) {
-		struct super_block *s = sb_entry(p);
-		if (s->s_dev == dev) {
-			s->s_count++;
+rescan:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_dev == dev) {
+			sb->s_count++;
 			spin_unlock(&sb_lock);
-			down_read(&s->s_umount);
-			if (s->s_root)
-				return s;
-			drop_super(s);
-			goto rescan;
+			down_read(&sb->s_umount);
+			if (sb->s_root)
+				return sb;
+			up_read(&sb->s_umount);
+			/* restart only when sb is no longer on the list */
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto rescan;
 		}
 	}
 	spin_unlock(&sb_lock);