aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--fs/dquot.c52
1 files changed, 39 insertions, 13 deletions
diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c890b48d..9376a4378988 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -118,8 +118,7 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
- * > dquot->dq_lock > dqio_sem
+ * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > dqio_sem
  * i_mutex on quota files is special (it's below dqio_sem)
  */
 
@@ -407,23 +406,49 @@ out_dqlock:
 
 /* Invalidate all dquots on the list. Note that this function is called after
  * quota is disabled and pointers from inodes removed so there cannot be new
- * quota users. Also because we hold dqonoff_sem there can be no quota users
- * for this sb+type at all. */
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list). We have to wait for such users.
+ */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
 	struct dquot *dquot, *tmp;
 
+restart:
 	spin_lock(&dq_list_lock);
 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 		if (dquot->dq_sb != sb)
 			continue;
 		if (dquot->dq_type != type)
 			continue;
-#ifdef __DQUOT_PARANOIA
-		if (atomic_read(&dquot->dq_count))
-			BUG();
-#endif
-		/* Quota now has no users and it has been written on last dqput() */
+		/* Wait for dquot users */
+		if (atomic_read(&dquot->dq_count)) {
+			DEFINE_WAIT(wait);
+
+			atomic_inc(&dquot->dq_count);
+			prepare_to_wait(&dquot->dq_wait_unused, &wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dq_list_lock);
+			/* Once dqput() wakes us up, we know it's time to free
+			 * the dquot.
+			 * IMPORTANT: we rely on the fact that there is always
+			 * at most one process waiting for dquot to free.
+			 * Otherwise dq_count would be > 1 and we would never
+			 * wake up.
+			 */
+			if (atomic_read(&dquot->dq_count) > 1)
+				schedule();
+			finish_wait(&dquot->dq_wait_unused, &wait);
+			dqput(dquot);
+			/* At this moment dquot() need not exist (it could be
+			 * reclaimed by prune_dqcache(). Hence we must
+			 * restart. */
+			goto restart;
+		}
+		/*
+		 * Quota now has no users and it has been written on last
+		 * dqput()
+		 */
 		remove_dquot_hash(dquot);
 		remove_free_dquot(dquot);
 		remove_inuse(dquot);
@@ -540,6 +565,10 @@ we_slept:
 	if (atomic_read(&dquot->dq_count) > 1) {
 		/* We have more than one user... nothing to do */
 		atomic_dec(&dquot->dq_count);
+		/* Releasing dquot during quotaoff phase? */
+		if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
+		    atomic_read(&dquot->dq_count) == 1)
+			wake_up(&dquot->dq_wait_unused);
 		spin_unlock(&dq_list_lock);
 		return;
 	}
@@ -581,6 +610,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 	INIT_LIST_HEAD(&dquot->dq_inuse);
 	INIT_HLIST_NODE(&dquot->dq_hash);
 	INIT_LIST_HEAD(&dquot->dq_dirty);
+	init_waitqueue_head(&dquot->dq_wait_unused);
 	dquot->dq_sb = sb;
 	dquot->dq_type = type;
 	atomic_set(&dquot->dq_count, 1);
@@ -732,13 +762,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 {
 	LIST_HEAD(tofree_head);
 
-	/* We need to be guarded against prune_icache to reach all the
-	 * inodes - otherwise some can be on the local list of prune_icache */
-	down(&iprune_sem);
 	down_write(&sb_dqopt(sb)->dqptr_sem);
 	remove_dquot_ref(sb, type, &tofree_head);
 	up_write(&sb_dqopt(sb)->dqptr_sem);
-	up(&iprune_sem);
 	put_dquot_list(&tofree_head);
 }
 