author    | Christoph Hellwig <hch@infradead.org> | 2011-12-06 16:58:18 -0500
committer | Ben Myers <bpm@sgi.com> | 2011-12-14 17:32:21 -0500
commit    | 92678554abfc2a2f2727ad168da87d8d434ac904 (patch)
tree      | 082b395f587dca7903c083e8e10529be1890dc4e /fs/xfs
parent    | be7ffc38a80a78e6b68d0f51fae8e8d57b55324c (diff)
xfs: flatten the dquot lock ordering
Introduce a new XFS_DQ_FREEING flag that tells lookups and mplist walks
to skip a dquot that is being freed, and use it to avoid the trylock
on the hash and mplist locks in xfs_qm_dqreclaim_one. Also simplify
xfs_qm_dqpurge by moving the dquots to a dispose list after marking them
XFS_DQ_FREEING, which avoids the lock ordering constraints (sketched below).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
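
The pattern the commit message describes can be illustrated outside the kernel. Below is a minimal userspace sketch, not the XFS code: `struct dquot`, `dq_lookup`, `dq_purge_all`, `DQ_FREEING`, and the pthread locking are all invented for the example, and the freelist/hashlist bookkeeping handled by the real patch is omitted. It only shows the two ideas the patch combines: lookups test a "freeing" flag under the per-object lock and back off, while the purge path marks victims and moves them to a private dispose list so teardown happens outside the shared lock.

```c
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

#define DQ_FREEING	0x1	/* object is being torn down; do not touch */

struct dquot {
	struct dquot	*next;		/* cache list linkage */
	pthread_mutex_t	lock;		/* protects flags and refcount */
	unsigned int	flags;
	unsigned int	refcount;
	unsigned int	id;
};

static struct dquot *cache;		/* all cached dquots */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look up a dquot by id; back off and retry if the match is being freed. */
struct dquot *dq_lookup(unsigned int id)
{
	struct dquot *dqp;

restart:
	pthread_mutex_lock(&cache_lock);
	for (dqp = cache; dqp; dqp = dqp->next) {
		if (dqp->id != id)
			continue;
		pthread_mutex_lock(&dqp->lock);
		if (dqp->flags & DQ_FREEING) {
			/* reclaim owns it: drop everything and retry */
			pthread_mutex_unlock(&dqp->lock);
			pthread_mutex_unlock(&cache_lock);
			usleep(1000);
			goto restart;
		}
		dqp->refcount++;	/* take a reference while locked */
		pthread_mutex_unlock(&dqp->lock);
		pthread_mutex_unlock(&cache_lock);
		return dqp;
	}
	pthread_mutex_unlock(&cache_lock);
	return NULL;
}

/* Purge all unreferenced dquots without nesting locks across teardown. */
void dq_purge_all(void)
{
	struct dquot *dispose = NULL;	/* private dispose list */
	struct dquot **pp = &cache;

	/* Pass 1: under the cache lock, mark victims and unlink them. */
	pthread_mutex_lock(&cache_lock);
	while (*pp) {
		struct dquot *dqp = *pp;

		pthread_mutex_lock(&dqp->lock);
		if (dqp->refcount == 0) {
			dqp->flags |= DQ_FREEING;	/* lookups now back off */
			*pp = dqp->next;		/* unlink from the cache */
			dqp->next = dispose;
			dispose = dqp;
		} else {
			pp = &dqp->next;
		}
		pthread_mutex_unlock(&dqp->lock);
	}
	pthread_mutex_unlock(&cache_lock);

	/* Pass 2: no shared lock held, so teardown has no ordering rules. */
	while (dispose) {
		struct dquot *dqp = dispose;

		dispose = dqp->next;
		pthread_mutex_destroy(&dqp->lock);
		free(dqp);
	}
}
```

The two-pass shape is the point: pass 1 only ever nests the per-object lock inside the single cache lock, and pass 2 runs with no shared lock at all, which is what lets the real patch drop the trylock and lock ordering games in xfs_qm_dqpurge and xfs_qm_dqreclaim_one.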
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_dquot.c | 113
-rw-r--r-- | fs/xfs/xfs_dquot.h |   2
-rw-r--r-- | fs/xfs/xfs_qm.c    | 134
-rw-r--r-- | fs/xfs/xfs_quota.h |   4
4 files changed, 103 insertions, 150 deletions
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 35d2b8aad0f9..d06d2a61e31b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -728,6 +728,12 @@ xfs_qm_dqlookup(
 		trace_xfs_dqlookup_found(dqp);
 
 		xfs_dqlock(dqp);
+		if (dqp->dq_flags & XFS_DQ_FREEING) {
+			*O_dqpp = NULL;
+			xfs_dqunlock(dqp);
+			return -1;
+		}
+
 		XFS_DQHOLD(dqp);
 
 		/*
@@ -781,11 +787,7 @@ xfs_qm_dqget(
 			return (EIO);
 		}
 	}
-#endif
 
- again:
-
-#ifdef DEBUG
 	ASSERT(type == XFS_DQ_USER ||
 	       type == XFS_DQ_PROJ ||
 	       type == XFS_DQ_GROUP);
@@ -797,13 +799,21 @@ xfs_qm_dqget(
 			ASSERT(ip->i_gdquot == NULL);
 	}
 #endif
+
+restart:
 	mutex_lock(&h->qh_lock);
 
 	/*
 	 * Look in the cache (hashtable).
 	 * The chain is kept locked during lookup.
 	 */
-	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
+	switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
+	case -1:
+		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+		mutex_unlock(&h->qh_lock);
+		delay(1);
+		goto restart;
+	case 0:
 		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
 		/*
 		 * The dquot was found, moved to the front of the chain,
@@ -814,9 +824,11 @@ xfs_qm_dqget(
 		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
 		mutex_unlock(&h->qh_lock);
 		trace_xfs_dqget_hit(*O_dqpp);
-		return (0);	/* success */
+		return 0;	/* success */
+	default:
+		XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+		break;
 	}
-	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
 
 	/*
 	 * Dquot cache miss. We don't want to keep the inode lock across
@@ -913,16 +925,21 @@ xfs_qm_dqget(
 		 * lock order between the two dquots here since dqp isn't
 		 * on any findable lists yet.
 		 */
-		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
+		switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
+		case 0:
+		case -1:
 			/*
-			 * Duplicate found. Just throw away the new dquot
-			 * and start over.
+			 * Duplicate found, either in cache or on its way out.
+			 * Just throw away the new dquot and start over.
 			 */
-			xfs_qm_dqput(tmpdqp);
+			if (tmpdqp)
+				xfs_qm_dqput(tmpdqp);
 			mutex_unlock(&h->qh_lock);
 			xfs_qm_dqdestroy(dqp);
 			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
-			goto again;
+			goto restart;
+		default:
+			break;
 		}
 	}
 
@@ -1250,51 +1267,18 @@ xfs_dqlock2(
 	}
 }
 
-
 /*
- * Take a dquot out of the mount's dqlist as well as the hashlist.
- * This is called via unmount as well as quotaoff, and the purge
- * will always succeed unless there are soft (temp) references
- * outstanding.
- *
- * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
- * that we're returning! XXXsup - not cool.
+ * Take a dquot out of the mount's dqlist as well as the hashlist. This is
+ * called via unmount as well as quotaoff, and the purge will always succeed.
  */
-/* ARGSUSED */
-int
+void
 xfs_qm_dqpurge(
-	xfs_dquot_t	*dqp)
+	struct xfs_dquot	*dqp)
 {
-	xfs_dqhash_t	*qh = dqp->q_hash;
-	xfs_mount_t	*mp = dqp->q_mount;
-
-	ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
-	ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
-
-	/*
-	 * XXX(hch): horrible locking order, will get cleaned up ASAP.
-	 */
-	if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
-		mutex_unlock(&dqp->q_hash->qh_lock);
-		return 1;
-	}
+	struct xfs_mount	*mp = dqp->q_mount;
+	struct xfs_dqhash	*qh = dqp->q_hash;
 
 	xfs_dqlock(dqp);
-	/*
-	 * We really can't afford to purge a dquot that is
-	 * referenced, because these are hard refs.
-	 * It shouldn't happen in general because we went thru _all_ inodes in
-	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
-	 * However it is possible that we have dquots with temporary
-	 * references that are not attached to an inode. e.g. see xfs_setattr().
-	 */
-	if (dqp->q_nrefs != 0) {
-		xfs_dqunlock(dqp);
-		mutex_unlock(&dqp->q_hash->qh_lock);
-		return (1);
-	}
-
-	ASSERT(!list_empty(&dqp->q_freelist));
 
 	/*
 	 * If we're turning off quotas, we have to make sure that, for
@@ -1313,19 +1297,14 @@ xfs_qm_dqpurge(
 	}
 
 	/*
-	 * XXXIf we're turning this type of quotas off, we don't care
+	 * If we are turning this type of quotas off, we don't care
 	 * about the dirty metadata sitting in this dquot. OTOH, if
 	 * we're unmounting, we do care, so we flush it and wait.
 	 */
 	if (XFS_DQ_IS_DIRTY(dqp)) {
 		int	error;
 
-		/* dqflush unlocks dqflock */
 		/*
-		 * Given that dqpurge is a very rare occurrence, it is OK
-		 * that we're holding the hashlist and mplist locks
-		 * across the disk write. But, ... XXXsup
-		 *
 		 * We don't care about getting disk errors here. We need
 		 * to purge this dquot anyway, so we go ahead regardless.
 		 */
@@ -1335,28 +1314,36 @@ xfs_qm_dqpurge(
 				__func__, dqp);
 		xfs_dqflock(dqp);
 	}
+
 	ASSERT(atomic_read(&dqp->q_pincount) == 0);
 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
 	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
 
+	xfs_dqfunlock(dqp);
+	xfs_dqunlock(dqp);
+
+	mutex_lock(&qh->qh_lock);
 	list_del_init(&dqp->q_hashlist);
 	qh->qh_version++;
+	mutex_unlock(&qh->qh_lock);
 
+	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
 	list_del_init(&dqp->q_mplist);
 	mp->m_quotainfo->qi_dqreclaims++;
 	mp->m_quotainfo->qi_dquots--;
+	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
 
+	/*
+	 * We move dquots to the freelist as soon as their reference count
+	 * hits zero, so it really should be on the freelist here.
+	 */
+	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+	ASSERT(!list_empty(&dqp->q_freelist));
 	list_del_init(&dqp->q_freelist);
 	xfs_Gqm->qm_dqfrlist_cnt--;
-
-	xfs_dqfunlock(dqp);
-	xfs_dqunlock(dqp);
-
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-	mutex_unlock(&qh->qh_lock);
 
 	xfs_qm_dqdestroy(dqp);
-	return 0;
 }
 
 /*
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 0b5d2ae92028..98488dfe442f 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -133,7 +133,7 @@ static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp)
 
 extern void		xfs_qm_dqdestroy(xfs_dquot_t *);
 extern int		xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern int		xfs_qm_dqpurge(xfs_dquot_t *);
+extern void		xfs_qm_dqpurge(xfs_dquot_t *);
 extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
 extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
 					xfs_disk_dquot_t *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6a0c4f0d9306..f418731e90f4 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -398,7 +398,8 @@ again:
 	mutex_lock(&q->qi_dqlist_lock);
 	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
 		xfs_dqlock(dqp);
-		if (! XFS_DQ_IS_DIRTY(dqp)) {
+		if ((dqp->dq_flags & XFS_DQ_FREEING) ||
+		    !XFS_DQ_IS_DIRTY(dqp)) {
 			xfs_dqunlock(dqp);
 			continue;
 		}
@@ -437,6 +438,7 @@ again:
 	/* return ! busy */
 	return 0;
 }
+
 /*
  * Release the group dquot pointers the user dquots may be
  * carrying around as a hint. mplist is locked on entry and exit.
@@ -453,6 +455,13 @@ xfs_qm_detach_gdquots(
 	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
 	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
 		xfs_dqlock(dqp);
+		if (dqp->dq_flags & XFS_DQ_FREEING) {
+			xfs_dqunlock(dqp);
+			mutex_unlock(&q->qi_dqlist_lock);
+			delay(1);
+			mutex_lock(&q->qi_dqlist_lock);
+			goto again;
+		}
 		if ((gdqp = dqp->q_gdquot)) {
 			xfs_dqlock(gdqp);
 			dqp->q_gdquot = NULL;
@@ -489,8 +498,8 @@ xfs_qm_dqpurge_int(
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_dquot	*dqp, *n;
 	uint			dqtype;
-	int			nrecl;
-	int			nmisses;
+	int			nmisses = 0;
+	LIST_HEAD		(dispose_list);
 
 	if (!q)
 		return 0;
@@ -509,46 +518,26 @@ xfs_qm_dqpurge_int(
 	 */
 	xfs_qm_detach_gdquots(mp);
 
- again:
-	nmisses = 0;
-	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
 	/*
-	 * Try to get rid of all of the unwanted dquots. The idea is to
-	 * get them off mplist and hashlist, but leave them on freelist.
+	 * Try to get rid of all of the unwanted dquots.
 	 */
 	list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
 		xfs_dqlock(dqp);
-		if ((dqp->dq_flags & dqtype) == 0) {
-			xfs_dqunlock(dqp);
-			continue;
+		if ((dqp->dq_flags & dqtype) != 0 &&
+		    !(dqp->dq_flags & XFS_DQ_FREEING)) {
+			if (dqp->q_nrefs == 0) {
+				dqp->dq_flags |= XFS_DQ_FREEING;
+				list_move_tail(&dqp->q_mplist, &dispose_list);
+			} else
+				nmisses++;
 		}
 		xfs_dqunlock(dqp);
-
-		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
-			nrecl = q->qi_dqreclaims;
-			mutex_unlock(&q->qi_dqlist_lock);
-			mutex_lock(&dqp->q_hash->qh_lock);
-			mutex_lock(&q->qi_dqlist_lock);
-
-			/*
-			 * XXXTheoretically, we can get into a very long
-			 * ping pong game here.
-			 * No one can be adding dquots to the mplist at
-			 * this point, but somebody might be taking things off.
-			 */
-			if (nrecl != q->qi_dqreclaims) {
-				mutex_unlock(&dqp->q_hash->qh_lock);
-				goto again;
-			}
-		}
-
-		/*
-		 * Take the dquot off the mplist and hashlist. It may remain on
-		 * freelist in INACTIVE state.
-		 */
-		nmisses += xfs_qm_dqpurge(dqp);
 	}
 	mutex_unlock(&q->qi_dqlist_lock);
+
+	list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist)
+		xfs_qm_dqpurge(dqp);
+
 	return nmisses;
 }
 
@@ -1667,25 +1656,16 @@ xfs_qm_init_quotainos(
 
 
 /*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
+ * Pop the least recently used dquot off the freelist and recycle it.
  */
-STATIC xfs_dquot_t *
+STATIC struct xfs_dquot *
 xfs_qm_dqreclaim_one(void)
 {
-	xfs_dquot_t	*dqpout;
-	xfs_dquot_t	*dqp;
-	int		restarts;
-	int		startagain;
-
-	restarts = 0;
-	dqpout = NULL;
+	struct xfs_dquot *dqp;
+	int		restarts = 0;
 
-	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- again:
-	startagain = 0;
 	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
+restart:
 	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
 		struct xfs_mount *mp = dqp->q_mount;
 		xfs_dqlock(dqp);
@@ -1701,7 +1681,6 @@ again:
 			list_del_init(&dqp->q_freelist);
 			xfs_Gqm->qm_dqfrlist_cnt--;
 			restarts++;
-			startagain = 1;
 			goto dqunlock;
 		}
 
@@ -1737,57 +1716,42 @@ again:
 			}
 			goto dqunlock;
 		}
+		xfs_dqfunlock(dqp);
 
 		/*
-		 * We're trying to get the hashlock out of order. This races
-		 * with dqlookup; so, we giveup and goto the next dquot if
-		 * we couldn't get the hashlock. This way, we won't starve
-		 * a dqlookup process that holds the hashlock that is
-		 * waiting for the freelist lock.
+		 * Prevent lookup now that we are going to reclaim the dquot.
+		 * Once XFS_DQ_FREEING is set lookup won't touch the dquot,
+		 * thus we can drop the lock now.
 		 */
-		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
-			restarts++;
-			goto dqfunlock;
-		}
+		dqp->dq_flags |= XFS_DQ_FREEING;
+		xfs_dqunlock(dqp);
 
-		/*
-		 * This races with dquot allocation code as well as dqflush_all
-		 * and reclaim code. So, if we failed to grab the mplist lock,
-		 * giveup everything and start over.
-		 */
-		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
-			restarts++;
-			startagain = 1;
-			goto qhunlock;
-		}
+		mutex_lock(&dqp->q_hash->qh_lock);
+		list_del_init(&dqp->q_hashlist);
+		dqp->q_hash->qh_version++;
+		mutex_unlock(&dqp->q_hash->qh_lock);
 
-		ASSERT(dqp->q_nrefs == 0);
+		mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
 		list_del_init(&dqp->q_mplist);
 		mp->m_quotainfo->qi_dquots--;
 		mp->m_quotainfo->qi_dqreclaims++;
-		list_del_init(&dqp->q_hashlist);
-		dqp->q_hash->qh_version++;
+		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+
+		ASSERT(dqp->q_nrefs == 0);
 		list_del_init(&dqp->q_freelist);
 		xfs_Gqm->qm_dqfrlist_cnt--;
-		dqpout = dqp;
-		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-qhunlock:
-		mutex_unlock(&dqp->q_hash->qh_lock);
-dqfunlock:
-		xfs_dqfunlock(dqp);
+
+		mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+		return dqp;
 dqunlock:
 		xfs_dqunlock(dqp);
-		if (dqpout)
-			break;
 		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
 			break;
-		if (startagain) {
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			goto again;
-		}
+		goto restart;
 	}
+
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-	return dqpout;
+	return NULL;
 }
 
 /*
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 487653ddbef0..b86c62f5eeba 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -87,6 +87,7 @@ typedef struct xfs_dqblk {
 #define XFS_DQ_PROJ		0x0002		/* project quota */
 #define XFS_DQ_GROUP		0x0004		/* a group quota */
 #define XFS_DQ_DIRTY		0x0008		/* dquot is dirty */
+#define XFS_DQ_FREEING		0x0010		/* dquot is beeing torn down */
 
 #define XFS_DQ_ALLTYPES		(XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
 
@@ -94,7 +95,8 @@ typedef struct xfs_dqblk {
 	{ XFS_DQ_USER,		"USER" }, \
 	{ XFS_DQ_PROJ,		"PROJ" }, \
 	{ XFS_DQ_GROUP,		"GROUP" }, \
-	{ XFS_DQ_DIRTY,		"DIRTY" }
+	{ XFS_DQ_DIRTY,		"DIRTY" }, \
+	{ XFS_DQ_FREEING,	"FREEING" }
 
 /*
  * In the worst case, when both user and group quotas are on,