 fs/xfs/xfs_alloc.c       | 12
 fs/xfs/xfs_alloc_btree.c |  6
 fs/xfs/xfs_discard.c     |  4
 fs/xfs/xfs_extent_busy.c | 78
 fs/xfs/xfs_extent_busy.h | 20
 fs/xfs/xfs_log_cil.c     |  6
 fs/xfs/xfs_trace.h       | 16
 fs/xfs/xfs_trans.c       |  4
 8 files changed, 73 insertions(+), 73 deletions(-)
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index ae6df2585895..588496de0f93 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -47,7 +47,7 @@ STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
-STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
+STATIC void xfs_extent_busy_trim(struct xfs_alloc_arg *,
 		xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
 
 /*
@@ -152,7 +152,7 @@ xfs_alloc_compute_aligned(
 	xfs_extlen_t	len;
 
 	/* Trim busy sections out of found extent */
-	xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
+	xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
 
 	if (args->alignment > 1 && len >= args->minlen) {
 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
@@ -536,7 +536,7 @@ xfs_alloc_ag_vextent(
 		if (error)
 			return error;
 
-		ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
+		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
 					args->agbno, args->len));
 	}
 
@@ -603,7 +603,7 @@ xfs_alloc_ag_vextent_exact(
 	/*
 	 * Check for overlapping busy extents.
 	 */
-	xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
+	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
 
 	/*
 	 * Give up if the start of the extent is busy, or the freespace isn't
@@ -1391,7 +1391,7 @@ xfs_alloc_ag_vextent_small(
 		if (error)
 			goto error0;
 		if (fbno != NULLAGBLOCK) {
-			xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
+			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
 					args->userdata);
 
 			if (args->userdata) {
@@ -2496,7 +2496,7 @@ xfs_free_extent(
 
 	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
 	if (!error)
-		xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
+		xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
 error0:
 	xfs_perag_put(args.pag);
 	return error;
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 3f665487521a..68ebabc388c6 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -94,7 +94,7 @@ xfs_allocbt_alloc_block(
 		return 0;
 	}
 
-	xfs_alloc_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
 
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
 	new->s = cpu_to_be32(bno);
@@ -119,8 +119,8 @@ xfs_allocbt_free_block(
 	if (error)
 		return error;
 
-	xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
-			      XFS_ALLOC_BUSY_SKIP_DISCARD);
+	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+			      XFS_EXTENT_BUSY_SKIP_DISCARD);
 	xfs_trans_agbtree_delta(cur->bc_tp, -1);
 	return 0;
 }
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index e3f1abe774f6..f9c3fe304a17 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -118,7 +118,7 @@ xfs_trim_extents(
 		 * If any blocks in the range are still busy, skip the
 		 * discard and try again the next time.
 		 */
-		if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
+		if (xfs_extent_busy_search(mp, agno, fbno, flen)) {
 			trace_xfs_discard_busy(mp, agno, fbno, flen);
 			goto next_extent;
 		}
@@ -212,7 +212,7 @@ xfs_discard_extents(
 	struct xfs_mount	*mp,
 	struct list_head	*list)
 {
-	struct xfs_busy_extent	*busyp;
+	struct xfs_extent_busy	*busyp;
 	int			error = 0;
 
 	list_for_each_entry(busyp, list, list) {
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 4b5a4fa869af..9475bd989379 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -32,27 +32,27 @@
 #include "xfs_trace.h"
 
 void
-xfs_alloc_busy_insert(
+xfs_extent_busy_insert(
 	struct xfs_trans	*tp,
 	xfs_agnumber_t		agno,
 	xfs_agblock_t		bno,
 	xfs_extlen_t		len,
 	unsigned int		flags)
 {
-	struct xfs_busy_extent	*new;
-	struct xfs_busy_extent	*busyp;
+	struct xfs_extent_busy	*new;
+	struct xfs_extent_busy	*busyp;
 	struct xfs_perag	*pag;
 	struct rb_node		**rbp;
 	struct rb_node		*parent = NULL;
 
-	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
+	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
 	if (!new) {
 		/*
 		 * No Memory! Since it is now not possible to track the free
 		 * block, make this a synchronous transaction to insure that
 		 * the block is not reused before this transaction commits.
 		 */
-		trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
+		trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
 		xfs_trans_set_sync(tp);
 		return;
 	}
@@ -64,14 +64,14 @@ xfs_alloc_busy_insert(
 	new->flags = flags;
 
 	/* trace before insert to be able to see failed inserts */
-	trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
+	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);
 
 	pag = xfs_perag_get(tp->t_mountp, new->agno);
 	spin_lock(&pag->pagb_lock);
 	rbp = &pag->pagb_tree.rb_node;
 	while (*rbp) {
 		parent = *rbp;
-		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
+		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
 
 		if (new->bno < busyp->bno) {
 			rbp = &(*rbp)->rb_left;
@@ -95,14 +95,14 @@ xfs_alloc_busy_insert(
 /*
  * Search for a busy extent within the range of the extent we are about to
  * allocate. You need to be holding the busy extent tree lock when calling
- * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
+ * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
  * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
  * match. This is done so that a non-zero return indicates an overlap that
  * will require a synchronous transaction, but it can still be
  * used to distinguish between a partial or exact match.
  */
 int
-xfs_alloc_busy_search(
+xfs_extent_busy_search(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
 	xfs_agblock_t		bno,
@@ -110,7 +110,7 @@ xfs_alloc_busy_search(
 {
 	struct xfs_perag	*pag;
 	struct rb_node		*rbp;
-	struct xfs_busy_extent	*busyp;
+	struct xfs_extent_busy	*busyp;
 	int			match = 0;
 
 	pag = xfs_perag_get(mp, agno);
@@ -120,7 +120,7 @@ xfs_alloc_busy_search(
 
 	/* find closest start bno overlap */
 	while (rbp) {
-		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
+		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
 		if (bno < busyp->bno) {
 			/* may overlap, but exact start block is lower */
 			if (bno + len > busyp->bno)
@@ -154,10 +154,10 @@ xfs_alloc_busy_search(
  * needs to be restarted.
  */
 STATIC bool
-xfs_alloc_busy_update_extent(
+xfs_extent_busy_update_extent(
 	struct xfs_mount	*mp,
 	struct xfs_perag	*pag,
-	struct xfs_busy_extent	*busyp,
+	struct xfs_extent_busy	*busyp,
 	xfs_agblock_t		fbno,
 	xfs_extlen_t		flen,
 	bool			userdata)
@@ -171,7 +171,7 @@ xfs_alloc_busy_update_extent(
 	 * performing the discard a chance to mark the extent unbusy
 	 * and retry.
 	 */
-	if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
+	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
 		spin_unlock(&pag->pagb_lock);
 		delay(1);
 		spin_lock(&pag->pagb_lock);
@@ -285,13 +285,13 @@ xfs_alloc_busy_update_extent(
 		ASSERT(0);
 	}
 
-	trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
+	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
 	return true;
 
 out_force_log:
 	spin_unlock(&pag->pagb_lock);
 	xfs_log_force(mp, XFS_LOG_SYNC);
-	trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
+	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
 	spin_lock(&pag->pagb_lock);
 	return false;
 }
@@ -301,7 +301,7 @@ out_force_log:
  * For a given extent [fbno, flen], make sure we can reuse it safely.
  */
 void
-xfs_alloc_busy_reuse(
+xfs_extent_busy_reuse(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
 	xfs_agblock_t		fbno,
@@ -318,8 +318,8 @@ xfs_alloc_busy_reuse(
 restart:
 	rbp = pag->pagb_tree.rb_node;
 	while (rbp) {
-		struct xfs_busy_extent *busyp =
-			rb_entry(rbp, struct xfs_busy_extent, rb_node);
+		struct xfs_extent_busy *busyp =
+			rb_entry(rbp, struct xfs_extent_busy, rb_node);
 		xfs_agblock_t	bbno = busyp->bno;
 		xfs_agblock_t	bend = bbno + busyp->length;
 
@@ -331,7 +331,7 @@ restart:
 			continue;
 		}
 
-		if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
+		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
 						  userdata))
 			goto restart;
 	}
@@ -346,7 +346,7 @@ restart:
  * code needs to force out the log and retry the allocation.
  */
 STATIC void
-xfs_alloc_busy_trim(
+xfs_extent_busy_trim(
 	struct xfs_alloc_arg	*args,
 	xfs_agblock_t		bno,
 	xfs_extlen_t		len,
@@ -365,8 +365,8 @@ restart:
 	flen = len;
 	rbp = args->pag->pagb_tree.rb_node;
 	while (rbp && flen >= args->minlen) {
-		struct xfs_busy_extent *busyp =
-			rb_entry(rbp, struct xfs_busy_extent, rb_node);
+		struct xfs_extent_busy *busyp =
+			rb_entry(rbp, struct xfs_extent_busy, rb_node);
 		xfs_agblock_t	fend = fbno + flen;
 		xfs_agblock_t	bbno = busyp->bno;
 		xfs_agblock_t	bend = bbno + busyp->length;
@@ -384,8 +384,8 @@ restart:
 		 * extent instead of trimming the allocation.
 		 */
 		if (!args->userdata &&
-		    !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
-			if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
+		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
+			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
 							  busyp, fbno, flen,
 							  false))
 				goto restart;
@@ -517,7 +517,7 @@ restart:
 	spin_unlock(&args->pag->pagb_lock);
 
 	if (fbno != bno || flen != len) {
-		trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
+		trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
 					  fbno, flen);
 	}
 	*rbno = fbno;
@@ -529,19 +529,19 @@ fail:
  * re-check if the trimmed extent satisfies the minlen requirement.
  */
 	spin_unlock(&args->pag->pagb_lock);
-	trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
+	trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
 	*rbno = fbno;
 	*rlen = 0;
 }
 
-static void
-xfs_alloc_busy_clear_one(
+STATIC void
+xfs_extent_busy_clear_one(
 	struct xfs_mount	*mp,
 	struct xfs_perag	*pag,
-	struct xfs_busy_extent	*busyp)
+	struct xfs_extent_busy	*busyp)
 {
 	if (busyp->length) {
-		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
+		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
 					   busyp->length);
 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
 	}
@@ -556,12 +556,12 @@ xfs_alloc_busy_clear_one(
  * these as undergoing a discard operation instead.
  */
 void
-xfs_alloc_busy_clear(
+xfs_extent_busy_clear(
 	struct xfs_mount	*mp,
 	struct list_head	*list,
 	bool			do_discard)
 {
-	struct xfs_busy_extent	*busyp, *n;
+	struct xfs_extent_busy	*busyp, *n;
 	struct xfs_perag	*pag = NULL;
 	xfs_agnumber_t		agno = NULLAGNUMBER;
 
@@ -577,10 +577,10 @@ xfs_alloc_busy_clear(
 		}
 
 		if (do_discard && busyp->length &&
-		    !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
-			busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
+		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
+			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
 		else
-			xfs_alloc_busy_clear_one(mp, pag, busyp);
+			xfs_extent_busy_clear_one(mp, pag, busyp);
 	}
 
 	if (pag) {
@@ -593,11 +593,11 @@ xfs_alloc_busy_clear(
  * Callback for list_sort to sort busy extents by the AG they reside in.
  */
 int
-xfs_alloc_busy_ag_cmp(
+xfs_extent_busy_ag_cmp(
 	void			*priv,
 	struct list_head	*a,
 	struct list_head	*b)
 {
-	return container_of(a, struct xfs_busy_extent, list)->agno -
-	       container_of(b, struct xfs_busy_extent, list)->agno;
+	return container_of(a, struct xfs_extent_busy, list)->agno -
+	       container_of(b, struct xfs_extent_busy, list)->agno;
 }
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index 671b501f13e5..91f2fcbb2001 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -27,39 +27,39 @@
  * Note that we use the transaction ID to record the transaction, not the
  * transaction structure itself. See xfs_extent_busy_insert() for details.
  */
-struct xfs_busy_extent {
+struct xfs_extent_busy {
 	struct rb_node	rb_node;	/* ag by-bno indexed search tree */
 	struct list_head list;		/* transaction busy extent list */
 	xfs_agnumber_t	agno;
 	xfs_agblock_t	bno;
 	xfs_extlen_t	length;
 	unsigned int	flags;
-#define XFS_ALLOC_BUSY_DISCARDED	0x01	/* undergoing a discard op. */
-#define XFS_ALLOC_BUSY_SKIP_DISCARD	0x02	/* do not discard */
+#define XFS_EXTENT_BUSY_DISCARDED	0x01	/* undergoing a discard op. */
+#define XFS_EXTENT_BUSY_SKIP_DISCARD	0x02	/* do not discard */
 };
 
 void
-xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+xfs_extent_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
 	xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
 
 void
-xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
+xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
 	bool do_discard);
 
 int
-xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
 	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
-xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
+xfs_extent_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
 	xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
 
 int
-xfs_alloc_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
+xfs_extent_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
 
-static inline void xfs_alloc_busy_sort(struct list_head *list)
+static inline void xfs_extent_busy_sort(struct list_head *list)
 {
-	list_sort(NULL, list, xfs_alloc_busy_ag_cmp);
+	list_sort(NULL, list, xfs_extent_busy_ag_cmp);
 }
 
 #endif	/* __XFS_EXTENT_BUSY_H__ */
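The header above carries the complete renamed API. As a rough lifecycle sketch, assuming only the post-rename names and simplifying the transaction wiring: an extent is inserted into the busy tree when it is freed, checked or waited on before its blocks are handed out again, and finally sorted by AG and cleared when the freeing transaction completes, mirroring the xfs_alloc.c, xfs_alloc_btree.c and xfs_trans.c hunks in this diff. The wrapper function below is illustrative, not part of the patch.

/*
 * Simplified lifecycle sketch using the renamed API (illustration only).
 */
void
example_busy_extent_lifecycle(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* 1. An extent freed in this transaction is tracked as busy. */
	xfs_extent_busy_insert(tp, agno, bno, len, 0);

	/* 2. Reusing the blocks for metadata must wait for (or force out)
	 *    the freeing transaction; xfs_extent_busy_reuse() handles that. */
	xfs_extent_busy_reuse(mp, agno, bno, len, false);

	/* 3. When the transaction is freed, its busy list is sorted by AG
	 *    and the entries are dropped from the per-AG trees. */
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(mp, &tp->t_busy, false);
}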
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index a6e3e71e3f88..601ccf02618a 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -338,8 +338,8 @@ xlog_cil_committed(
 	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
 					ctx->start_lsn, abort);
 
-	xfs_alloc_busy_sort(&ctx->busy_extents);
-	xfs_alloc_busy_clear(mp, &ctx->busy_extents,
+	xfs_extent_busy_sort(&ctx->busy_extents);
+	xfs_extent_busy_clear(mp, &ctx->busy_extents,
 			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
 
 	spin_lock(&ctx->cil->xc_cil_lock);
@@ -352,7 +352,7 @@ xlog_cil_committed(
 		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);
 
 		xfs_discard_extents(mp, &ctx->busy_extents);
-		xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
+		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
 	}
 
 	kmem_free(ctx);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 900764c450a8..febff43176f1 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1142,7 +1142,7 @@ TRACE_EVENT(xfs_bunmap,
 
 );
 
-DECLARE_EVENT_CLASS(xfs_busy_class,
+DECLARE_EVENT_CLASS(xfs_extent_busy_class,
 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
 		 xfs_agblock_t agbno, xfs_extlen_t len),
 	TP_ARGS(mp, agno, agbno, len),
@@ -1165,17 +1165,17 @@ DECLARE_EVENT_CLASS(xfs_busy_class,
 		  __entry->len)
 );
 #define DEFINE_BUSY_EVENT(name) \
-DEFINE_EVENT(xfs_busy_class, name, \
+DEFINE_EVENT(xfs_extent_busy_class, name, \
 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
 		 xfs_agblock_t agbno, xfs_extlen_t len), \
 	TP_ARGS(mp, agno, agbno, len))
-DEFINE_BUSY_EVENT(xfs_alloc_busy);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_force);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_clear);
+DEFINE_BUSY_EVENT(xfs_extent_busy);
+DEFINE_BUSY_EVENT(xfs_extent_busy_enomem);
+DEFINE_BUSY_EVENT(xfs_extent_busy_force);
+DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
+DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
 
-TRACE_EVENT(xfs_alloc_busy_trim,
+TRACE_EVENT(xfs_extent_busy_trim,
 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
 		 xfs_agblock_t agbno, xfs_extlen_t len,
 		 xfs_agblock_t tbno, xfs_extlen_t tlen),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d8bdb618ec19..f674855c8dc9 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -608,8 +608,8 @@ STATIC void
 xfs_trans_free(
 	struct xfs_trans	*tp)
 {
-	xfs_alloc_busy_sort(&tp->t_busy);
-	xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false);
+	xfs_extent_busy_sort(&tp->t_busy);
+	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 
 	atomic_dec(&tp->t_mountp->m_active_trans);
 	xfs_trans_free_dqinfo(tp);
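For completeness, a condensed sketch of the discard path touched in the xfs_log_cil.c hunks above: when online discard is enabled, xfs_extent_busy_clear() marks the surviving extents XFS_EXTENT_BUSY_DISCARDED instead of dropping them, xfs_discard_extents() then issues the discards, and a second clear pass removes the entries. The control flow below is paraphrased from those hunks, not copied from the patch; the surrounding condition and function name are assumptions for illustration.

/*
 * Paraphrased sketch of the commit-completion discard path (illustration
 * only; CIL context handling and error checks are omitted).
 */
static void
example_commit_completion(
	struct xfs_mount	*mp,
	struct list_head	*busy_extents,
	bool			abort)
{
	bool	discard = (mp->m_flags & XFS_MOUNT_DISCARD) && !abort;

	xfs_extent_busy_sort(busy_extents);
	xfs_extent_busy_clear(mp, busy_extents, discard);

	if (!list_empty(busy_extents)) {
		/* Entries still on the list were marked for discard above. */
		xfs_discard_extents(mp, busy_extents);
		xfs_extent_busy_clear(mp, busy_extents, false);
	}
}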