aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_alloc.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2011-04-24 15:06:17 -0400
committerAlex Elder <aelder@sgi.com>2011-04-28 14:18:09 -0400
commit8a072a4d4c6a5b6ec32836c467d2996393c76c6f (patch)
treeb21dad1310e4351854b9e2e24feb86beed20d1f7 /fs/xfs/xfs_alloc.c
parent97d3ac75e5e0ebf7ca38ae74cebd201c09b97ab2 (diff)
xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy
Instead of finding the per-ag and then taking and releasing the pagb_lock for every single busy extent completed, sort the list of busy extents and only switch between AGs where necessary. This becomes especially important with the online discard support which will hit this lock more often. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_alloc.c')
-rw-r--r--fs/xfs/xfs_alloc.c56
1 files changed, 46 insertions, 10 deletions
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 53157d4d5e8b..44a51a7b4c3a 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2964,24 +2964,60 @@ fail:
2964 *rlen = 0; 2964 *rlen = 0;
2965} 2965}
2966 2966
2967void 2967static void
2968xfs_alloc_busy_clear( 2968xfs_alloc_busy_clear_one(
2969 struct xfs_mount *mp, 2969 struct xfs_mount *mp,
2970 struct xfs_perag *pag,
2970 struct xfs_busy_extent *busyp) 2971 struct xfs_busy_extent *busyp)
2971{ 2972{
2972 struct xfs_perag *pag;
2973
2974 list_del_init(&busyp->list);
2975
2976 pag = xfs_perag_get(mp, busyp->agno);
2977 spin_lock(&pag->pagb_lock);
2978 if (busyp->length) { 2973 if (busyp->length) {
2979 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno, 2974 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
2980 busyp->length); 2975 busyp->length);
2981 rb_erase(&busyp->rb_node, &pag->pagb_tree); 2976 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2982 } 2977 }
2983 spin_unlock(&pag->pagb_lock);
2984 xfs_perag_put(pag);
2985 2978
2979 list_del_init(&busyp->list);
2986 kmem_free(busyp); 2980 kmem_free(busyp);
2987} 2981}
2982
2983void
2984xfs_alloc_busy_clear(
2985 struct xfs_mount *mp,
2986 struct list_head *list)
2987{
2988 struct xfs_busy_extent *busyp, *n;
2989 struct xfs_perag *pag = NULL;
2990 xfs_agnumber_t agno = NULLAGNUMBER;
2991
2992 list_for_each_entry_safe(busyp, n, list, list) {
2993 if (busyp->agno != agno) {
2994 if (pag) {
2995 spin_unlock(&pag->pagb_lock);
2996 xfs_perag_put(pag);
2997 }
2998 pag = xfs_perag_get(mp, busyp->agno);
2999 spin_lock(&pag->pagb_lock);
3000 agno = busyp->agno;
3001 }
3002
3003 xfs_alloc_busy_clear_one(mp, pag, busyp);
3004 }
3005
3006 if (pag) {
3007 spin_unlock(&pag->pagb_lock);
3008 xfs_perag_put(pag);
3009 }
3010}
3011
3012/*
3013 * Callback for list_sort to sort busy extents by the AG they reside in.
3014 */
3015int
3016xfs_busy_extent_ag_cmp(
3017 void *priv,
3018 struct list_head *a,
3019 struct list_head *b)
3020{
3021 return container_of(a, struct xfs_busy_extent, list)->agno -
3022 container_of(b, struct xfs_busy_extent, list)->agno;
3023}