diff options
Diffstat (limited to 'fs/dquot.c')
-rw-r--r-- | fs/dquot.c | 167 |
1 files changed, 97 insertions, 70 deletions
diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c890b48d..acf07e581f8c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -103,12 +103,12 @@ | |||
103 | * (these locking rules also apply for S_NOQUOTA flag in the inode - note that | 103 | * (these locking rules also apply for S_NOQUOTA flag in the inode - note that |
104 | * for altering the flag i_mutex is also needed). If operation is holding | 104 | * for altering the flag i_mutex is also needed). If operation is holding |
105 | * reference to dquot in other way (e.g. quotactl ops) it must be guarded by | 105 | * reference to dquot in other way (e.g. quotactl ops) it must be guarded by |
106 | * dqonoff_sem. | 106 | * dqonoff_mutex. |
107 | * This locking assures that: | 107 | * This locking assures that: |
108 | * a) update/access to dquot pointers in inode is serialized | 108 | * a) update/access to dquot pointers in inode is serialized |
109 | * b) everyone is guarded against invalidate_dquots() | 109 | * b) everyone is guarded against invalidate_dquots() |
110 | * | 110 | * |
111 | * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced | 111 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced |
112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). | 112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). |
113 | * Currently dquot is locked only when it is being read to memory (or space for | 113 | * Currently dquot is locked only when it is being read to memory (or space for |
114 | * it is being allocated) on the first dqget() and when it is being released on | 114 | * it is being allocated) on the first dqget() and when it is being released on |
@@ -118,9 +118,9 @@ | |||
118 | * spinlock to internal buffers before writing. | 118 | * spinlock to internal buffers before writing. |
119 | * | 119 | * |
120 | * Lock ordering (including related VFS locks) is the following: | 120 | * Lock ordering (including related VFS locks) is the following: |
121 | * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > | 121 | * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > |
122 | * > dquot->dq_lock > dqio_sem | 122 | * dqio_mutex |
123 | * i_mutex on quota files is special (it's below dqio_sem) | 123 | * i_mutex on quota files is special (it's below dqio_mutex) |
124 | */ | 124 | */ |
125 | 125 | ||
126 | static DEFINE_SPINLOCK(dq_list_lock); | 126 | static DEFINE_SPINLOCK(dq_list_lock); |
@@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot) | |||
281 | 281 | ||
282 | static void wait_on_dquot(struct dquot *dquot) | 282 | static void wait_on_dquot(struct dquot *dquot) |
283 | { | 283 | { |
284 | down(&dquot->dq_lock); | 284 | mutex_lock(&dquot->dq_lock); |
285 | up(&dquot->dq_lock); | 285 | mutex_unlock(&dquot->dq_lock); |
286 | } | 286 | } |
287 | 287 | ||
288 | #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) | 288 | #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) |
@@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot) | |||
321 | int ret = 0, ret2 = 0; | 321 | int ret = 0, ret2 = 0; |
322 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | 322 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); |
323 | 323 | ||
324 | down(&dquot->dq_lock); | 324 | mutex_lock(&dquot->dq_lock); |
325 | down(&dqopt->dqio_sem); | 325 | mutex_lock(&dqopt->dqio_mutex); |
326 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) | 326 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) |
327 | ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); | 327 | ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); |
328 | if (ret < 0) | 328 | if (ret < 0) |
@@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot) | |||
343 | } | 343 | } |
344 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); | 344 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); |
345 | out_iolock: | 345 | out_iolock: |
346 | up(&dqopt->dqio_sem); | 346 | mutex_unlock(&dqopt->dqio_mutex); |
347 | up(&dquot->dq_lock); | 347 | mutex_unlock(&dquot->dq_lock); |
348 | return ret; | 348 | return ret; |
349 | } | 349 | } |
350 | 350 | ||
@@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot) | |||
356 | int ret = 0, ret2 = 0; | 356 | int ret = 0, ret2 = 0; |
357 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | 357 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); |
358 | 358 | ||
359 | down(&dqopt->dqio_sem); | 359 | mutex_lock(&dqopt->dqio_mutex); |
360 | spin_lock(&dq_list_lock); | 360 | spin_lock(&dq_list_lock); |
361 | if (!clear_dquot_dirty(dquot)) { | 361 | if (!clear_dquot_dirty(dquot)) { |
362 | spin_unlock(&dq_list_lock); | 362 | spin_unlock(&dq_list_lock); |
@@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot) | |||
373 | ret = ret2; | 373 | ret = ret2; |
374 | } | 374 | } |
375 | out_sem: | 375 | out_sem: |
376 | up(&dqopt->dqio_sem); | 376 | mutex_unlock(&dqopt->dqio_mutex); |
377 | return ret; | 377 | return ret; |
378 | } | 378 | } |
379 | 379 | ||
@@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot) | |||
385 | int ret = 0, ret2 = 0; | 385 | int ret = 0, ret2 = 0; |
386 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | 386 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); |
387 | 387 | ||
388 | down(&dquot->dq_lock); | 388 | mutex_lock(&dquot->dq_lock); |
389 | /* Check whether we are not racing with some other dqget() */ | 389 | /* Check whether we are not racing with some other dqget() */ |
390 | if (atomic_read(&dquot->dq_count) > 1) | 390 | if (atomic_read(&dquot->dq_count) > 1) |
391 | goto out_dqlock; | 391 | goto out_dqlock; |
392 | down(&dqopt->dqio_sem); | 392 | mutex_lock(&dqopt->dqio_mutex); |
393 | if (dqopt->ops[dquot->dq_type]->release_dqblk) { | 393 | if (dqopt->ops[dquot->dq_type]->release_dqblk) { |
394 | ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); | 394 | ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); |
395 | /* Write the info */ | 395 | /* Write the info */ |
@@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot) | |||
399 | ret = ret2; | 399 | ret = ret2; |
400 | } | 400 | } |
401 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); | 401 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); |
402 | up(&dqopt->dqio_sem); | 402 | mutex_unlock(&dqopt->dqio_mutex); |
403 | out_dqlock: | 403 | out_dqlock: |
404 | up(&dquot->dq_lock); | 404 | mutex_unlock(&dquot->dq_lock); |
405 | return ret; | 405 | return ret; |
406 | } | 406 | } |
407 | 407 | ||
408 | /* Invalidate all dquots on the list. Note that this function is called after | 408 | /* Invalidate all dquots on the list. Note that this function is called after |
409 | * quota is disabled and pointers from inodes removed so there cannot be new | 409 | * quota is disabled and pointers from inodes removed so there cannot be new |
410 | * quota users. Also because we hold dqonoff_sem there can be no quota users | 410 | * quota users. There can still be some users of quotas due to inodes being |
411 | * for this sb+type at all. */ | 411 | * just deleted or pruned by prune_icache() (those are not attached to any |
412 | * list). We have to wait for such users. | ||
413 | */ | ||
412 | static void invalidate_dquots(struct super_block *sb, int type) | 414 | static void invalidate_dquots(struct super_block *sb, int type) |
413 | { | 415 | { |
414 | struct dquot *dquot, *tmp; | 416 | struct dquot *dquot, *tmp; |
415 | 417 | ||
418 | restart: | ||
416 | spin_lock(&dq_list_lock); | 419 | spin_lock(&dq_list_lock); |
417 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { | 420 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { |
418 | if (dquot->dq_sb != sb) | 421 | if (dquot->dq_sb != sb) |
419 | continue; | 422 | continue; |
420 | if (dquot->dq_type != type) | 423 | if (dquot->dq_type != type) |
421 | continue; | 424 | continue; |
422 | #ifdef __DQUOT_PARANOIA | 425 | /* Wait for dquot users */ |
423 | if (atomic_read(&dquot->dq_count)) | 426 | if (atomic_read(&dquot->dq_count)) { |
424 | BUG(); | 427 | DEFINE_WAIT(wait); |
425 | #endif | 428 | |
426 | /* Quota now has no users and it has been written on last dqput() */ | 429 | atomic_inc(&dquot->dq_count); |
430 | prepare_to_wait(&dquot->dq_wait_unused, &wait, | ||
431 | TASK_UNINTERRUPTIBLE); | ||
432 | spin_unlock(&dq_list_lock); | ||
433 | /* Once dqput() wakes us up, we know it's time to free | ||
434 | * the dquot. | ||
435 | * IMPORTANT: we rely on the fact that there is always | ||
436 | * at most one process waiting for dquot to free. | ||
437 | * Otherwise dq_count would be > 1 and we would never | ||
438 | * wake up. | ||
439 | */ | ||
440 | if (atomic_read(&dquot->dq_count) > 1) | ||
441 | schedule(); | ||
442 | finish_wait(&dquot->dq_wait_unused, &wait); | ||
443 | dqput(dquot); | ||
444 | /* At this moment the dquot need not exist (it could | ||
445 | * have been reclaimed by prune_dqcache()). Hence we | ||
446 | * must restart. */ | ||
447 | goto restart; | ||
448 | } | ||
449 | /* | ||
450 | * Quota now has no users and it has been written on last | ||
451 | * dqput() | ||
452 | */ | ||
427 | remove_dquot_hash(dquot); | 453 | remove_dquot_hash(dquot); |
428 | remove_free_dquot(dquot); | 454 | remove_free_dquot(dquot); |
429 | remove_inuse(dquot); | 455 | remove_inuse(dquot); |
@@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type) | |||
439 | struct quota_info *dqopt = sb_dqopt(sb); | 465 | struct quota_info *dqopt = sb_dqopt(sb); |
440 | int cnt; | 466 | int cnt; |
441 | 467 | ||
442 | down(&dqopt->dqonoff_sem); | 468 | mutex_lock(&dqopt->dqonoff_mutex); |
443 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 469 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
444 | if (type != -1 && cnt != type) | 470 | if (type != -1 && cnt != type) |
445 | continue; | 471 | continue; |
@@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type) | |||
474 | spin_lock(&dq_list_lock); | 500 | spin_lock(&dq_list_lock); |
475 | dqstats.syncs++; | 501 | dqstats.syncs++; |
476 | spin_unlock(&dq_list_lock); | 502 | spin_unlock(&dq_list_lock); |
477 | up(&dqopt->dqonoff_sem); | 503 | mutex_unlock(&dqopt->dqonoff_mutex); |
478 | 504 | ||
479 | return 0; | 505 | return 0; |
480 | } | 506 | } |
@@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | |||
515 | /* | 541 | /* |
516 | * Put reference to dquot | 542 | * Put reference to dquot |
517 | * NOTE: If you change this function please check whether dqput_blocks() works right... | 543 | * NOTE: If you change this function please check whether dqput_blocks() works right... |
518 | * MUST be called with either dqptr_sem or dqonoff_sem held | 544 | * MUST be called with either dqptr_sem or dqonoff_mutex held |
519 | */ | 545 | */ |
520 | static void dqput(struct dquot *dquot) | 546 | static void dqput(struct dquot *dquot) |
521 | { | 547 | { |
@@ -540,6 +566,10 @@ we_slept: | |||
540 | if (atomic_read(&dquot->dq_count) > 1) { | 566 | if (atomic_read(&dquot->dq_count) > 1) { |
541 | /* We have more than one user... nothing to do */ | 567 | /* We have more than one user... nothing to do */ |
542 | atomic_dec(&dquot->dq_count); | 568 | atomic_dec(&dquot->dq_count); |
569 | /* Releasing dquot during quotaoff phase? */ | ||
570 | if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && | ||
571 | atomic_read(&dquot->dq_count) == 1) | ||
572 | wake_up(&dquot->dq_wait_unused); | ||
543 | spin_unlock(&dq_list_lock); | 573 | spin_unlock(&dq_list_lock); |
544 | return; | 574 | return; |
545 | } | 575 | } |
@@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) | |||
576 | return NODQUOT; | 606 | return NODQUOT; |
577 | 607 | ||
578 | memset((caddr_t)dquot, 0, sizeof(struct dquot)); | 608 | memset((caddr_t)dquot, 0, sizeof(struct dquot)); |
579 | sema_init(&dquot->dq_lock, 1); | 609 | mutex_init(&dquot->dq_lock); |
580 | INIT_LIST_HEAD(&dquot->dq_free); | 610 | INIT_LIST_HEAD(&dquot->dq_free); |
581 | INIT_LIST_HEAD(&dquot->dq_inuse); | 611 | INIT_LIST_HEAD(&dquot->dq_inuse); |
582 | INIT_HLIST_NODE(&dquot->dq_hash); | 612 | INIT_HLIST_NODE(&dquot->dq_hash); |
583 | INIT_LIST_HEAD(&dquot->dq_dirty); | 613 | INIT_LIST_HEAD(&dquot->dq_dirty); |
614 | init_waitqueue_head(&dquot->dq_wait_unused); | ||
584 | dquot->dq_sb = sb; | 615 | dquot->dq_sb = sb; |
585 | dquot->dq_type = type; | 616 | dquot->dq_type = type; |
586 | atomic_set(&dquot->dq_count, 1); | 617 | atomic_set(&dquot->dq_count, 1); |
@@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) | |||
590 | 621 | ||
591 | /* | 622 | /* |
592 | * Get reference to dquot | 623 | * Get reference to dquot |
593 | * MUST be called with either dqptr_sem or dqonoff_sem held | 624 | * MUST be called with either dqptr_sem or dqonoff_mutex held |
594 | */ | 625 | */ |
595 | static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) | 626 | static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) |
596 | { | 627 | { |
@@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type) | |||
656 | return 0; | 687 | return 0; |
657 | } | 688 | } |
658 | 689 | ||
659 | /* This routine is guarded by dqonoff_sem semaphore */ | 690 | /* This routine is guarded by dqonoff_mutex mutex */ |
660 | static void add_dquot_ref(struct super_block *sb, int type) | 691 | static void add_dquot_ref(struct super_block *sb, int type) |
661 | { | 692 | { |
662 | struct list_head *p; | 693 | struct list_head *p; |
@@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type) | |||
732 | { | 763 | { |
733 | LIST_HEAD(tofree_head); | 764 | LIST_HEAD(tofree_head); |
734 | 765 | ||
735 | /* We need to be guarded against prune_icache to reach all the | ||
736 | * inodes - otherwise some can be on the local list of prune_icache */ | ||
737 | down(&iprune_sem); | ||
738 | down_write(&sb_dqopt(sb)->dqptr_sem); | 766 | down_write(&sb_dqopt(sb)->dqptr_sem); |
739 | remove_dquot_ref(sb, type, &tofree_head); | 767 | remove_dquot_ref(sb, type, &tofree_head); |
740 | up_write(&sb_dqopt(sb)->dqptr_sem); | 768 | up_write(&sb_dqopt(sb)->dqptr_sem); |
741 | up(&iprune_sem); | ||
742 | put_dquot_list(&tofree_head); | 769 | put_dquot_list(&tofree_head); |
743 | } | 770 | } |
744 | 771 | ||
@@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type) | |||
938 | unsigned int id = 0; | 965 | unsigned int id = 0; |
939 | int cnt, ret = 0; | 966 | int cnt, ret = 0; |
940 | 967 | ||
941 | /* First test before acquiring semaphore - solves deadlocks when we | 968 | /* First test before acquiring mutex - solves deadlocks when we |
942 | * re-enter the quota code and are already holding the semaphore */ | 969 | * re-enter the quota code and are already holding the mutex */ |
943 | if (IS_NOQUOTA(inode)) | 970 | if (IS_NOQUOTA(inode)) |
944 | return 0; | 971 | return 0; |
945 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 972 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
@@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | |||
1002 | int cnt, ret = NO_QUOTA; | 1029 | int cnt, ret = NO_QUOTA; |
1003 | char warntype[MAXQUOTAS]; | 1030 | char warntype[MAXQUOTAS]; |
1004 | 1031 | ||
1005 | /* First test before acquiring semaphore - solves deadlocks when we | 1032 | /* First test before acquiring mutex - solves deadlocks when we |
1006 | * re-enter the quota code and are already holding the semaphore */ | 1033 | * re-enter the quota code and are already holding the mutex */ |
1007 | if (IS_NOQUOTA(inode)) { | 1034 | if (IS_NOQUOTA(inode)) { |
1008 | out_add: | 1035 | out_add: |
1009 | inode_add_bytes(inode, number); | 1036 | inode_add_bytes(inode, number); |
@@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number) | |||
1051 | int cnt, ret = NO_QUOTA; | 1078 | int cnt, ret = NO_QUOTA; |
1052 | char warntype[MAXQUOTAS]; | 1079 | char warntype[MAXQUOTAS]; |
1053 | 1080 | ||
1054 | /* First test before acquiring semaphore - solves deadlocks when we | 1081 | /* First test before acquiring mutex - solves deadlocks when we |
1055 | * re-enter the quota code and are already holding the semaphore */ | 1082 | * re-enter the quota code and are already holding the mutex */ |
1056 | if (IS_NOQUOTA(inode)) | 1083 | if (IS_NOQUOTA(inode)) |
1057 | return QUOTA_OK; | 1084 | return QUOTA_OK; |
1058 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1085 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
@@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number) | |||
1095 | { | 1122 | { |
1096 | unsigned int cnt; | 1123 | unsigned int cnt; |
1097 | 1124 | ||
1098 | /* First test before acquiring semaphore - solves deadlocks when we | 1125 | /* First test before acquiring mutex - solves deadlocks when we |
1099 | * re-enter the quota code and are already holding the semaphore */ | 1126 | * re-enter the quota code and are already holding the mutex */ |
1100 | if (IS_NOQUOTA(inode)) { | 1127 | if (IS_NOQUOTA(inode)) { |
1101 | out_sub: | 1128 | out_sub: |
1102 | inode_sub_bytes(inode, number); | 1129 | inode_sub_bytes(inode, number); |
@@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number) | |||
1131 | { | 1158 | { |
1132 | unsigned int cnt; | 1159 | unsigned int cnt; |
1133 | 1160 | ||
1134 | /* First test before acquiring semaphore - solves deadlocks when we | 1161 | /* First test before acquiring mutex - solves deadlocks when we |
1135 | * re-enter the quota code and are already holding the semaphore */ | 1162 | * re-enter the quota code and are already holding the mutex */ |
1136 | if (IS_NOQUOTA(inode)) | 1163 | if (IS_NOQUOTA(inode)) |
1137 | return QUOTA_OK; | 1164 | return QUOTA_OK; |
1138 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1165 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
@@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
1171 | chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; | 1198 | chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; |
1172 | char warntype[MAXQUOTAS]; | 1199 | char warntype[MAXQUOTAS]; |
1173 | 1200 | ||
1174 | /* First test before acquiring semaphore - solves deadlocks when we | 1201 | /* First test before acquiring mutex - solves deadlocks when we |
1175 | * re-enter the quota code and are already holding the semaphore */ | 1202 | * re-enter the quota code and are already holding the mutex */ |
1176 | if (IS_NOQUOTA(inode)) | 1203 | if (IS_NOQUOTA(inode)) |
1177 | return QUOTA_OK; | 1204 | return QUOTA_OK; |
1178 | /* Clear the arrays */ | 1205 | /* Clear the arrays */ |
@@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type) | |||
1266 | int ret; | 1293 | int ret; |
1267 | struct quota_info *dqopt = sb_dqopt(sb); | 1294 | struct quota_info *dqopt = sb_dqopt(sb); |
1268 | 1295 | ||
1269 | down(&dqopt->dqio_sem); | 1296 | mutex_lock(&dqopt->dqio_mutex); |
1270 | ret = dqopt->ops[type]->write_file_info(sb, type); | 1297 | ret = dqopt->ops[type]->write_file_info(sb, type); |
1271 | up(&dqopt->dqio_sem); | 1298 | mutex_unlock(&dqopt->dqio_mutex); |
1272 | return ret; | 1299 | return ret; |
1273 | } | 1300 | } |
1274 | 1301 | ||
@@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1324 | struct inode *toputinode[MAXQUOTAS]; | 1351 | struct inode *toputinode[MAXQUOTAS]; |
1325 | 1352 | ||
1326 | /* We need to serialize quota_off() for device */ | 1353 | /* We need to serialize quota_off() for device */ |
1327 | down(&dqopt->dqonoff_sem); | 1354 | mutex_lock(&dqopt->dqonoff_mutex); |
1328 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1355 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
1329 | toputinode[cnt] = NULL; | 1356 | toputinode[cnt] = NULL; |
1330 | if (type != -1 && cnt != type) | 1357 | if (type != -1 && cnt != type) |
@@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1353 | dqopt->info[cnt].dqi_bgrace = 0; | 1380 | dqopt->info[cnt].dqi_bgrace = 0; |
1354 | dqopt->ops[cnt] = NULL; | 1381 | dqopt->ops[cnt] = NULL; |
1355 | } | 1382 | } |
1356 | up(&dqopt->dqonoff_sem); | 1383 | mutex_unlock(&dqopt->dqonoff_mutex); |
1357 | /* Sync the superblock so that buffers with quota data are written to | 1384 | /* Sync the superblock so that buffers with quota data are written to |
1358 | * disk (and so userspace sees correct data afterwards). */ | 1385 | * disk (and so userspace sees correct data afterwards). */ |
1359 | if (sb->s_op->sync_fs) | 1386 | if (sb->s_op->sync_fs) |
@@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1366 | * changes done by userspace on the next quotaon() */ | 1393 | * changes done by userspace on the next quotaon() */ |
1367 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1394 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
1368 | if (toputinode[cnt]) { | 1395 | if (toputinode[cnt]) { |
1369 | down(&dqopt->dqonoff_sem); | 1396 | mutex_lock(&dqopt->dqonoff_mutex); |
1370 | /* If quota was reenabled in the meantime, we have | 1397 | /* If quota was reenabled in the meantime, we have |
1371 | * nothing to do */ | 1398 | * nothing to do */ |
1372 | if (!sb_has_quota_enabled(sb, cnt)) { | 1399 | if (!sb_has_quota_enabled(sb, cnt)) { |
@@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1378 | mark_inode_dirty(toputinode[cnt]); | 1405 | mark_inode_dirty(toputinode[cnt]); |
1379 | iput(toputinode[cnt]); | 1406 | iput(toputinode[cnt]); |
1380 | } | 1407 | } |
1381 | up(&dqopt->dqonoff_sem); | 1408 | mutex_unlock(&dqopt->dqonoff_mutex); |
1382 | } | 1409 | } |
1383 | if (sb->s_bdev) | 1410 | if (sb->s_bdev) |
1384 | invalidate_bdev(sb->s_bdev, 0); | 1411 | invalidate_bdev(sb->s_bdev, 0); |
@@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) | |||
1419 | /* And now flush the block cache so that kernel sees the changes */ | 1446 | /* And now flush the block cache so that kernel sees the changes */ |
1420 | invalidate_bdev(sb->s_bdev, 0); | 1447 | invalidate_bdev(sb->s_bdev, 0); |
1421 | mutex_lock(&inode->i_mutex); | 1448 | mutex_lock(&inode->i_mutex); |
1422 | down(&dqopt->dqonoff_sem); | 1449 | mutex_lock(&dqopt->dqonoff_mutex); |
1423 | if (sb_has_quota_enabled(sb, type)) { | 1450 | if (sb_has_quota_enabled(sb, type)) { |
1424 | error = -EBUSY; | 1451 | error = -EBUSY; |
1425 | goto out_lock; | 1452 | goto out_lock; |
@@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) | |||
1444 | dqopt->ops[type] = fmt->qf_ops; | 1471 | dqopt->ops[type] = fmt->qf_ops; |
1445 | dqopt->info[type].dqi_format = fmt; | 1472 | dqopt->info[type].dqi_format = fmt; |
1446 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); | 1473 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); |
1447 | down(&dqopt->dqio_sem); | 1474 | mutex_lock(&dqopt->dqio_mutex); |
1448 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { | 1475 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { |
1449 | up(&dqopt->dqio_sem); | 1476 | mutex_unlock(&dqopt->dqio_mutex); |
1450 | goto out_file_init; | 1477 | goto out_file_init; |
1451 | } | 1478 | } |
1452 | up(&dqopt->dqio_sem); | 1479 | mutex_unlock(&dqopt->dqio_mutex); |
1453 | mutex_unlock(&inode->i_mutex); | 1480 | mutex_unlock(&inode->i_mutex); |
1454 | set_enable_flags(dqopt, type); | 1481 | set_enable_flags(dqopt, type); |
1455 | 1482 | ||
1456 | add_dquot_ref(sb, type); | 1483 | add_dquot_ref(sb, type); |
1457 | up(&dqopt->dqonoff_sem); | 1484 | mutex_unlock(&dqopt->dqonoff_mutex); |
1458 | 1485 | ||
1459 | return 0; | 1486 | return 0; |
1460 | 1487 | ||
@@ -1462,7 +1489,7 @@ out_file_init: | |||
1462 | dqopt->files[type] = NULL; | 1489 | dqopt->files[type] = NULL; |
1463 | iput(inode); | 1490 | iput(inode); |
1464 | out_lock: | 1491 | out_lock: |
1465 | up(&dqopt->dqonoff_sem); | 1492 | mutex_unlock(&dqopt->dqonoff_mutex); |
1466 | if (oldflags != -1) { | 1493 | if (oldflags != -1) { |
1467 | down_write(&dqopt->dqptr_sem); | 1494 | down_write(&dqopt->dqptr_sem); |
1468 | /* Set the flags back (in the case of accidental quotaon() | 1495 | /* Set the flags back (in the case of accidental quotaon() |
@@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d | |||
1550 | { | 1577 | { |
1551 | struct dquot *dquot; | 1578 | struct dquot *dquot; |
1552 | 1579 | ||
1553 | down(&sb_dqopt(sb)->dqonoff_sem); | 1580 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1554 | if (!(dquot = dqget(sb, id, type))) { | 1581 | if (!(dquot = dqget(sb, id, type))) { |
1555 | up(&sb_dqopt(sb)->dqonoff_sem); | 1582 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1556 | return -ESRCH; | 1583 | return -ESRCH; |
1557 | } | 1584 | } |
1558 | do_get_dqblk(dquot, di); | 1585 | do_get_dqblk(dquot, di); |
1559 | dqput(dquot); | 1586 | dqput(dquot); |
1560 | up(&sb_dqopt(sb)->dqonoff_sem); | 1587 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1561 | return 0; | 1588 | return 0; |
1562 | } | 1589 | } |
1563 | 1590 | ||
@@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d | |||
1619 | { | 1646 | { |
1620 | struct dquot *dquot; | 1647 | struct dquot *dquot; |
1621 | 1648 | ||
1622 | down(&sb_dqopt(sb)->dqonoff_sem); | 1649 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1623 | if (!(dquot = dqget(sb, id, type))) { | 1650 | if (!(dquot = dqget(sb, id, type))) { |
1624 | up(&sb_dqopt(sb)->dqonoff_sem); | 1651 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1625 | return -ESRCH; | 1652 | return -ESRCH; |
1626 | } | 1653 | } |
1627 | do_set_dqblk(dquot, di); | 1654 | do_set_dqblk(dquot, di); |
1628 | dqput(dquot); | 1655 | dqput(dquot); |
1629 | up(&sb_dqopt(sb)->dqonoff_sem); | 1656 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1630 | return 0; | 1657 | return 0; |
1631 | } | 1658 | } |
1632 | 1659 | ||
@@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1635 | { | 1662 | { |
1636 | struct mem_dqinfo *mi; | 1663 | struct mem_dqinfo *mi; |
1637 | 1664 | ||
1638 | down(&sb_dqopt(sb)->dqonoff_sem); | 1665 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1639 | if (!sb_has_quota_enabled(sb, type)) { | 1666 | if (!sb_has_quota_enabled(sb, type)) { |
1640 | up(&sb_dqopt(sb)->dqonoff_sem); | 1667 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1641 | return -ESRCH; | 1668 | return -ESRCH; |
1642 | } | 1669 | } |
1643 | mi = sb_dqopt(sb)->info + type; | 1670 | mi = sb_dqopt(sb)->info + type; |
@@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1647 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | 1674 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; |
1648 | ii->dqi_valid = IIF_ALL; | 1675 | ii->dqi_valid = IIF_ALL; |
1649 | spin_unlock(&dq_data_lock); | 1676 | spin_unlock(&dq_data_lock); |
1650 | up(&sb_dqopt(sb)->dqonoff_sem); | 1677 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1651 | return 0; | 1678 | return 0; |
1652 | } | 1679 | } |
1653 | 1680 | ||
@@ -1656,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1656 | { | 1683 | { |
1657 | struct mem_dqinfo *mi; | 1684 | struct mem_dqinfo *mi; |
1658 | 1685 | ||
1659 | down(&sb_dqopt(sb)->dqonoff_sem); | 1686 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
1660 | if (!sb_has_quota_enabled(sb, type)) { | 1687 | if (!sb_has_quota_enabled(sb, type)) { |
1661 | up(&sb_dqopt(sb)->dqonoff_sem); | 1688 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1662 | return -ESRCH; | 1689 | return -ESRCH; |
1663 | } | 1690 | } |
1664 | mi = sb_dqopt(sb)->info + type; | 1691 | mi = sb_dqopt(sb)->info + type; |
@@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | |||
1673 | mark_info_dirty(sb, type); | 1700 | mark_info_dirty(sb, type); |
1674 | /* Force write to disk */ | 1701 | /* Force write to disk */ |
1675 | sb->dq_op->write_info(sb, type); | 1702 | sb->dq_op->write_info(sb, type); |
1676 | up(&sb_dqopt(sb)->dqonoff_sem); | 1703 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
1677 | return 0; | 1704 | return 0; |
1678 | } | 1705 | } |
1679 | 1706 | ||