-rw-r--r--	fs/fs-writeback.c |  64
-rw-r--r--	fs/quota.c        |  60
-rw-r--r--	fs/super.c        |  83
3 files changed, 96 insertions(+), 111 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e050fa58218..e94ab398b717 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -485,32 +485,6 @@ static void set_sb_syncing(int val)
 	spin_unlock(&sb_lock);
 }
 
-/*
- * Find a superblock with inodes that need to be synced
- */
-static struct super_block *get_super_to_sync(void)
-{
-	struct super_block *sb;
-restart:
-	spin_lock(&sb_lock);
-	sb = sb_entry(super_blocks.prev);
-	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
-		if (sb->s_syncing)
-			continue;
-		sb->s_syncing = 1;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (!sb->s_root) {
-			drop_super(sb);
-			goto restart;
-		}
-		return sb;
-	}
-	spin_unlock(&sb_lock);
-	return NULL;
-}
-
 /**
  * sync_inodes - writes all inodes to disk
  * @wait: wait for completion
@@ -530,23 +504,39 @@ restart:
  * outstanding dirty inodes, the writeback goes block-at-a-time within the
  * filesystem's write_inode(). This is extremely slow.
  */
-void sync_inodes(int wait)
+static void __sync_inodes(int wait)
 {
 	struct super_block *sb;
 
-	set_sb_syncing(0);
-	while ((sb = get_super_to_sync()) != NULL) {
-		sync_inodes_sb(sb, 0);
-		sync_blockdev(sb->s_bdev);
-		drop_super(sb);
+	spin_lock(&sb_lock);
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_syncing)
+			continue;
+		sb->s_syncing = 1;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root) {
+			sync_inodes_sb(sb, wait);
+			sync_blockdev(sb->s_bdev);
+		}
+		up_read(&sb->s_umount);
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
 	}
+	spin_unlock(&sb_lock);
+}
+
+void sync_inodes(int wait)
+{
+	set_sb_syncing(0);
+	__sync_inodes(0);
+
 	if (wait) {
 		set_sb_syncing(0);
-		while ((sb = get_super_to_sync()) != NULL) {
-			sync_inodes_sb(sb, 1);
-			sync_blockdev(sb->s_bdev);
-			drop_super(sb);
-		}
+		__sync_inodes(1);
 	}
 }
 
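Editor's note: all three files converge on the same iteration discipline, so it is worth spelling out once. The sketch below distills the loop shape this patch introduces; do_work() is a hypothetical stand-in for the per-superblock operation (sync_inodes_sb(), quota_sync_sb(), write_super(), ...), everything else is taken directly from the hunks.

/*
 * Canonical walk over super_blocks as converted by this patch
 * (a sketch, not a literal call site).
 */
static void for_each_super_sketch(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;			/* pin: sb may be unlinked but not freed */
		spin_unlock(&sb_lock);		/* blocking work is illegal under sb_lock */
		down_read(&sb->s_umount);	/* serialize against umount */
		if (sb->s_root)			/* skip if already unmounted */
			do_work(sb);		/* hypothetical per-sb operation */
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		/* drops the pin; nonzero means sb left the list meanwhile,
		 * so the iteration cursor is stale and the walk restarts */
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

Compared with the old get_super_to_sync()/drop_super() pairs, the walk now restarts only when a superblock actually disappeared mid-iteration, instead of restarting after every processed element.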
diff --git a/fs/quota.c b/fs/quota.c
index 3f0333a51a23..f5d1cff55196 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -149,36 +149,6 @@ static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t
 	return error;
 }
 
-static struct super_block *get_super_to_sync(int type)
-{
-	struct list_head *head;
-	int cnt, dirty;
-
-restart:
-	spin_lock(&sb_lock);
-	list_for_each(head, &super_blocks) {
-		struct super_block *sb = list_entry(head, struct super_block, s_list);
-
-		/* This test just improves performance so it needn't be reliable... */
-		for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++)
-			if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt)
-			    && info_any_dirty(&sb_dqopt(sb)->info[cnt]))
-				dirty = 1;
-		if (!dirty)
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (!sb->s_root) {
-			drop_super(sb);
-			goto restart;
-		}
-		return sb;
-	}
-	spin_unlock(&sb_lock);
-	return NULL;
-}
-
 static void quota_sync_sb(struct super_block *sb, int type)
 {
 	int cnt;
@@ -219,17 +189,35 @@ static void quota_sync_sb(struct super_block *sb, int type)
 
 void sync_dquots(struct super_block *sb, int type)
 {
+	int cnt, dirty;
+
 	if (sb) {
 		if (sb->s_qcop->quota_sync)
 			quota_sync_sb(sb, type);
+		return;
 	}
-	else {
-		while ((sb = get_super_to_sync(type)) != NULL) {
-			if (sb->s_qcop->quota_sync)
-				quota_sync_sb(sb, type);
-			drop_super(sb);
-		}
+
+	spin_lock(&sb_lock);
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		/* This test just improves performance so it needn't be reliable... */
+		for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++)
+			if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt)
+			    && info_any_dirty(&sb_dqopt(sb)->info[cnt]))
+				dirty = 1;
+		if (!dirty)
+			continue;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		if (sb->s_root && sb->s_qcop->quota_sync)
+			quota_sync_sb(sb, type);
+		up_read(&sb->s_umount);
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
 	}
+	spin_unlock(&sb_lock);
 }
 
 /* Copy parameters and call proper function */
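Editor's note: for orientation (not part of the patch), sync_dquots() is reached from the quotactl(2) Q_SYNC path, and a NULL superblock means "all filesystems"; type -1 matches every quota type, as the dirty test above shows. A hedged usage sketch:

	/* sync quota files on one filesystem, all quota types */
	sync_dquots(sb, -1);

	/* sync quota files on every mounted filesystem; this is the case
	 * that now walks super_blocks inline instead of calling the
	 * removed get_super_to_sync() */
	sync_dquots(NULL, -1);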
diff --git a/fs/super.c b/fs/super.c
index 573bcc81bb82..25bc1ec6bc5d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -341,20 +341,22 @@ static inline void write_super(struct super_block *sb)
  */
 void sync_supers(void)
 {
-	struct super_block * sb;
-restart:
+	struct super_block *sb;
+
 	spin_lock(&sb_lock);
-	sb = sb_entry(super_blocks.next);
-	while (sb != sb_entry(&super_blocks))
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (sb->s_dirt) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
 			down_read(&sb->s_umount);
 			write_super(sb);
-			drop_super(sb);
-			goto restart;
-		} else
-			sb = sb_entry(sb->s_list.next);
+			up_read(&sb->s_umount);
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto restart;
+		}
+	}
 	spin_unlock(&sb_lock);
 }
 
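Editor's note: the reason every converted loop open-codes up_read() + spin_lock() instead of calling drop_super() is visible from what drop_super() does. This reconstruction of the helper (from fs/super.c of the same era, quoted from memory, so treat it as a sketch) drops the reference without telling the caller whether sb survived, which is exactly what forced the old unconditional goto restart:

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	spin_lock(&sb_lock);
	__put_super(sb);		/* may free sb entirely */
	spin_unlock(&sb_lock);
}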
@@ -381,20 +383,16 @@ void sync_filesystems(int wait)
 
 	down(&mutex);		/* Could be down_interruptible */
 	spin_lock(&sb_lock);
-	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
-			sb = sb_entry(sb->s_list.next)) {
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_op->sync_fs)
 			continue;
 		if (sb->s_flags & MS_RDONLY)
 			continue;
 		sb->s_need_sync_fs = 1;
 	}
-	spin_unlock(&sb_lock);
 
 restart:
-	spin_lock(&sb_lock);
-	for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks);
-			sb = sb_entry(sb->s_list.next)) {
+	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_need_sync_fs)
 			continue;
 		sb->s_need_sync_fs = 0;
@@ -405,8 +403,11 @@ restart:
 		down_read(&sb->s_umount);
 		if (sb->s_root && (wait || sb->s_dirt))
 			sb->s_op->sync_fs(sb, wait);
-		drop_super(sb);
-		goto restart;
+		up_read(&sb->s_umount);
+		/* restart only when sb is no longer on the list */
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
 	}
 	spin_unlock(&sb_lock);
 	up(&mutex);
@@ -422,21 +423,25 @@ restart:
 
 struct super_block * get_super(struct block_device *bdev)
 {
-	struct list_head *p;
+	struct super_block *sb;
+
 	if (!bdev)
 		return NULL;
-rescan:
+
 	spin_lock(&sb_lock);
-	list_for_each(p, &super_blocks) {
-		struct super_block *s = sb_entry(p);
-		if (s->s_bdev == bdev) {
-			s->s_count++;
+rescan:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_bdev == bdev) {
+			sb->s_count++;
 			spin_unlock(&sb_lock);
-			down_read(&s->s_umount);
-			if (s->s_root)
-				return s;
-			drop_super(s);
-			goto rescan;
+			down_read(&sb->s_umount);
+			if (sb->s_root)
+				return sb;
+			up_read(&sb->s_umount);
+			/* restart only when sb is no longer on the list */
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto rescan;
 		}
 	}
 	spin_unlock(&sb_lock);
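Editor's note: the caller-visible contract of get_super() is unchanged by this patch: on success it returns with s_count elevated and s_umount held for read, and the caller releases both through drop_super(). A hypothetical caller, for illustration only:

	struct super_block *sb = get_super(bdev);

	if (sb) {
		fsync_super(sb);	/* example operation; real callers vary */
		drop_super(sb);
	}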
@@ -447,20 +452,22 @@ EXPORT_SYMBOL(get_super);
 
 struct super_block * user_get_super(dev_t dev)
 {
-	struct list_head *p;
+	struct super_block *sb;
 
-rescan:
 	spin_lock(&sb_lock);
-	list_for_each(p, &super_blocks) {
-		struct super_block *s = sb_entry(p);
-		if (s->s_dev == dev) {
-			s->s_count++;
+rescan:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (sb->s_dev == dev) {
+			sb->s_count++;
 			spin_unlock(&sb_lock);
-			down_read(&s->s_umount);
-			if (s->s_root)
-				return s;
-			drop_super(s);
-			goto rescan;
+			down_read(&sb->s_umount);
+			if (sb->s_root)
+				return sb;
+			up_read(&sb->s_umount);
+			/* restart only when sb is no longer on the list */
+			spin_lock(&sb_lock);
+			if (__put_super_and_need_restart(sb))
+				goto rescan;
 		}
 	}
 	spin_unlock(&sb_lock);
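Editor's note: everything above hinges on the contract of __put_super_and_need_restart(), which is introduced in fs/super.c by the same patch series (its body is not part of this diff). Reconstructed from its call sites here, and hedged accordingly, it behaves roughly as follows: called with sb_lock held, it drops the s_count reference taken before the lock was released, and returns nonzero only when the superblock has meanwhile been unlinked from super_blocks, i.e. when the list cursor is stale:

/* sketch, not the verbatim kernel source */
int __put_super_and_need_restart(struct super_block *sb)
{
	/* raced with umount: sb is off the list, so drop what may be
	 * the last reference and ask the caller to restart its walk */
	if (list_empty(&sb->s_list)) {
		__put_super(sb);
		return 1;
	}
	/* still linked: this cannot be the last reference */
	sb->s_count--;
	BUG_ON(sb->s_count == 0);
	return 0;
}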