Diffstat (limited to 'fs/xfs/xfs_alloc.c')
 -rw-r--r--  fs/xfs/xfs_alloc.c | 372
 1 file changed, 255 insertions(+), 117 deletions(-)
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 94cddbfb2560..af168faccc7a 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -24,18 +24,13 @@
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
@@ -46,11 +41,9 @@
 #define XFSA_FIXUP_BNO_OK	1
 #define XFSA_FIXUP_CNT_OK	2
 
-STATIC void
-xfs_alloc_search_busy(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len);
+static int
+xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+		xfs_agblock_t bno, xfs_extlen_t len);
 
 /*
  * Prototypes for per-ag allocation routines
@@ -540,9 +533,16 @@ xfs_alloc_ag_vextent(
 				be32_to_cpu(agf->agf_length));
 			xfs_alloc_log_agf(args->tp, args->agbp,
 				XFS_AGF_FREEBLKS);
-			/* search the busylist for these blocks */
-			xfs_alloc_search_busy(args->tp, args->agno,
-				args->agbno, args->len);
+			/*
+			 * Search the busylist for these blocks and mark the
+			 * transaction as synchronous if blocks are found. This
+			 * avoids the need to block due to a synchronous log
+			 * force to ensure correct ordering as the synchronous
+			 * transaction will guarantee that for us.
+			 */
+			if (xfs_alloc_busy_search(args->mp, args->agno,
+					args->agbno, args->len))
+				xfs_trans_set_sync(args->tp);
 		}
 		if (!args->isfl)
 			xfs_trans_mod_sb(args->tp,
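
This hunk is the heart of the change: instead of issuing a blocking log force while AG structures are locked, the allocator now just flags the transaction as synchronous and lets commit do the waiting. A minimal before/after sketch of the control flow (the "before" internals are paraphrased from the removed helper, not quoted):

	/* Before (paraphrased): sleep right here, with the AGF held */
	xfs_alloc_search_busy(args->tp, args->agno, args->agbno, args->len);
	/* ...which internally did xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC) */

	/* After: record the ordering requirement; the wait moves to commit */
	if (xfs_alloc_busy_search(args->mp, args->agno, args->agbno, args->len))
		xfs_trans_set_sync(args->tp);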
@@ -683,8 +683,6 @@ xfs_alloc_ag_vextent_near(
 	xfs_agblock_t	ltbno;		/* start bno of left side entry */
 	xfs_agblock_t	ltbnoa;		/* aligned ... */
 	xfs_extlen_t	ltdiff;		/* difference to left side entry */
-	/*REFERENCED*/
-	xfs_agblock_t	ltend;		/* end bno of left side entry */
 	xfs_extlen_t	ltlen;		/* length of left side entry */
 	xfs_extlen_t	ltlena;		/* aligned ... */
 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
@@ -809,8 +807,7 @@ xfs_alloc_ag_vextent_near(
 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-		ltend = ltbno + ltlen;
-		ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 		args->len = blen;
 		if (!xfs_alloc_fix_minleft(args)) {
 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -823,7 +820,7 @@ xfs_alloc_ag_vextent_near(
 			 */
 			args->agbno = bnew;
 			ASSERT(bnew >= ltbno);
-			ASSERT(bnew + blen <= ltend);
+			ASSERT(bnew + blen <= ltbno + ltlen);
 			/*
 			 * Set up a cursor for the by-bno tree.
 			 */
@@ -1152,7 +1149,6 @@ xfs_alloc_ag_vextent_near(
 	/*
 	 * Fix up the length and compute the useful address.
 	 */
-	ltend = ltbno + ltlen;
 	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 	xfs_alloc_fix_len(args);
 	if (!xfs_alloc_fix_minleft(args)) {
@@ -1165,7 +1161,7 @@ xfs_alloc_ag_vextent_near(
 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
 		ltlen, &ltnew);
 	ASSERT(ltnew >= ltbno);
-	ASSERT(ltnew + rlen <= ltend);
+	ASSERT(ltnew + rlen <= ltbno + ltlen);
 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 	args->agbno = ltnew;
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
@@ -1693,7 +1689,7 @@ xfs_free_ag_extent(
 	 * when the iclog commits to disk. If a busy block is allocated,
 	 * the iclog is pushed up to the LSN that freed the block.
 	 */
-	xfs_alloc_mark_busy(tp, agno, bno, len);
+	xfs_alloc_busy_insert(tp, agno, bno, len);
 	return 0;
 
 error0:
@@ -1989,14 +1985,20 @@ xfs_alloc_get_freelist(
 	*bnop = bno;
 
 	/*
-	 * As blocks are freed, they are added to the per-ag busy list
-	 * and remain there until the freeing transaction is committed to
-	 * disk. Now that we have allocated blocks, this list must be
-	 * searched to see if a block is being reused. If one is, then
-	 * the freeing transaction must be pushed to disk NOW by forcing
-	 * to disk all iclogs up that transaction's LSN.
+	 * As blocks are freed, they are added to the per-ag busy list and
+	 * remain there until the freeing transaction is committed to disk.
+	 * Now that we have allocated blocks, this list must be searched to see
+	 * if a block is being reused. If one is, then the freeing transaction
+	 * must be pushed to disk before this transaction.
+	 *
+	 * We do this by setting the current transaction to a sync transaction
+	 * which guarantees that the freeing transaction is on disk before this
+	 * transaction. This is done instead of a synchronous log force here so
+	 * that we don't sit and wait with the AGF locked in the transaction
+	 * during the log force.
 	 */
-	xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1);
+	if (xfs_alloc_busy_search(mp, be32_to_cpu(agf->agf_seqno), bno, 1))
+		xfs_trans_set_sync(tp);
 	return 0;
 }
 
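Both call sites lean on xfs_trans_set_sync() to turn this ordering requirement into a commit-time wait. As a point of reference, a sketch of what that call amounts to in XFS of this vintage (the real definition lives in xfs_trans.h, outside this diff):

	static inline void
	xfs_trans_set_sync(struct xfs_trans *tp)
	{
		tp->t_flags |= XFS_TRANS_SYNC;	/* commit waits for log I/O */
	}

Because the flag is only acted on at commit, the transaction never sits in a log force while holding the AGF buffer lock.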
@@ -2201,7 +2203,7 @@ xfs_alloc_read_agf(
 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
 		spin_lock_init(&pag->pagb_lock);
 		pag->pagb_count = 0;
-		memset(pag->pagb_list, 0, sizeof(pag->pagb_list));
+		pag->pagb_tree = RB_ROOT;
 		pag->pagf_init = 1;
 	}
 #ifdef DEBUG
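This is where the fixed-size pagb_list array gives way to an rbtree root. A sketch of the per-AG fields involved, under the assumption that the structure itself is changed in a header outside this file's diff (the struct name below is a stand-in, not the real xfs_perag):

	#include <linux/rbtree.h>
	#include <linux/spinlock.h>

	struct xfs_perag_busy_fields {		/* hypothetical excerpt */
		spinlock_t	pagb_lock;	/* protects pagb_tree */
		struct rb_root	pagb_tree;	/* busy extents, by start block */
		int		pagb_count;	/* still initialised above */
	};

With a tree instead of XFS_PAGB_NUM_SLOTS array entries, tracking can no longer overflow; the synchronous-transaction fallback now only covers allocation failure of the tracking structure itself.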
@@ -2479,127 +2481,263 @@ error0:
  * list is reused, the transaction that freed it must be forced to disk
  * before continuing to use the block.
  *
- * xfs_alloc_mark_busy - add to the per-ag busy list
- * xfs_alloc_clear_busy - remove an item from the per-ag busy list
+ * xfs_alloc_busy_insert - add to the per-ag busy list
+ * xfs_alloc_busy_clear - remove an item from the per-ag busy list
+ * xfs_alloc_busy_search - search for a busy extent
+ */
+
+/*
+ * Insert a new extent into the busy tree.
+ *
+ * The busy extent tree is indexed by the start block of the busy extent.
+ * There can be multiple overlapping ranges in the busy extent tree but only
+ * ever one entry at a given start block. The reason for this is that
+ * multi-block extents can be freed, then smaller chunks of that extent
+ * allocated and freed again before the first transaction commit is on disk.
+ * If the exact same start block is freed a second time, we have to wait for
+ * that busy extent to pass out of the tree before the new extent is inserted.
+ * There are two main cases we have to handle here.
+ *
+ * The first case is a transaction that triggers a "free - allocate - free"
+ * cycle. This can occur during btree manipulations as a btree block is freed
+ * to the freelist, then allocated from the free list, then freed again. In
+ * this case, the second extent free is what triggers the duplicate and as
+ * such the transaction IDs should match. Because the extent was allocated in
+ * this transaction, the transaction must be marked as synchronous. This is
+ * true for all cases where the free/alloc/free occurs in the one transaction,
+ * hence the addition of the ASSERT(tp->t_flags & XFS_TRANS_SYNC) to this case.
+ * This serves to catch violations of the second case quite effectively.
+ *
+ * The second case is where the free/alloc/free occur in different
+ * transactions. In this case, the thread freeing the extent the second time
+ * can't mark the extent busy immediately because it is already tracked in a
+ * transaction that may be committing. When the log commit for the existing
+ * busy extent completes, the busy extent will be removed from the tree. If we
+ * allow the second busy insert to continue using that busy extent structure,
+ * it can be freed before this transaction is safely in the log. Hence our
+ * only option in this case is to force the log to remove the existing busy
+ * extent from the list before we insert the new one with the current
+ * transaction ID.
+ *
+ * The problem we are trying to avoid with the free-alloc-free in separate
+ * transactions is most easily described with a timeline:
+ *
+ *  Thread 1        Thread 2        Thread 3        xfslogd
+ *  xact alloc
+ *  free X
+ *  mark busy
+ *  commit xact
+ *  free xact
+ *                  xact alloc
+ *                  alloc X
+ *                  busy search
+ *                  mark xact sync
+ *                  commit xact
+ *                  free xact
+ *                  force log
+ *                  checkpoint starts
+ *                  ....
+ *                                  xact alloc
+ *                                  free X
+ *                                  mark busy
+ *                                  finds match
+ *                                  *** KABOOM! ***
+ *                                  ....
+ *                                                  log IO completes
+ *                                                  unbusy X
+ *                  checkpoint completes
+ *
+ * By issuing a log force in thread 3 @ "KABOOM", the thread will block until
+ * the checkpoint completes, and the busy extent it matched will have been
+ * removed from the tree when it is woken. Hence it can then continue safely.
+ *
+ * However, to ensure this matching process is robust, we need to use the
+ * transaction ID for identifying the transaction, as delayed logging results
+ * in the busy extent and transaction lifecycles being different. i.e. the
+ * busy extent is active for a lot longer than the transaction. Hence the
+ * transaction structure can be freed and reallocated, and then used to mark
+ * the same extent busy again in the new transaction. In this case the new
+ * transaction will have a different tid but can have the same address, and
+ * hence we need to check against the tid.
+ *
+ * Future: for delayed logging, we could avoid the log force if the extent was
+ * first freed in the current checkpoint sequence. This, however, requires the
+ * ability to pin the current checkpoint in memory until this transaction
+ * commits to ensure that both the original free and the current one combine
+ * logically into the one checkpoint. If the checkpoint sequences are
+ * different, however, we still need to wait on a log force.
  */
 void
-xfs_alloc_mark_busy(xfs_trans_t *tp,
-		    xfs_agnumber_t agno,
-		    xfs_agblock_t bno,
-		    xfs_extlen_t len)
+xfs_alloc_busy_insert(
+	struct xfs_trans	*tp,
+	xfs_agnumber_t		agno,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len)
 {
-	xfs_perag_busy_t	*bsy;
+	struct xfs_busy_extent	*new;
+	struct xfs_busy_extent	*busyp;
 	struct xfs_perag	*pag;
-	int			n;
+	struct rb_node		**rbp;
+	struct rb_node		*parent;
+	int			match;
 
-	pag = xfs_perag_get(tp->t_mountp, agno);
-	spin_lock(&pag->pagb_lock);
 
-	/* search pagb_list for an open slot */
-	for (bsy = pag->pagb_list, n = 0;
-	     n < XFS_PAGB_NUM_SLOTS;
-	     bsy++, n++) {
-		if (bsy->busy_tp == NULL) {
-			break;
-		}
+	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
+	if (!new) {
+		/*
+		 * No memory! Since it is now not possible to track the free
+		 * block, make this a synchronous transaction to ensure that
+		 * the block is not reused before this transaction commits.
+		 */
+		trace_xfs_alloc_busy(tp, agno, bno, len, 1);
+		xfs_trans_set_sync(tp);
+		return;
 	}
 
-	trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len, n);
+	new->agno = agno;
+	new->bno = bno;
+	new->length = len;
+	new->tid = xfs_log_get_trans_ident(tp);
 
-	if (n < XFS_PAGB_NUM_SLOTS) {
-		bsy = &pag->pagb_list[n];
-		pag->pagb_count++;
-		bsy->busy_start = bno;
-		bsy->busy_length = len;
-		bsy->busy_tp = tp;
-		xfs_trans_add_busy(tp, agno, n);
-	} else {
+	INIT_LIST_HEAD(&new->list);
+
+	/* trace before insert to be able to see failed inserts */
+	trace_xfs_alloc_busy(tp, agno, bno, len, 0);
+
+	pag = xfs_perag_get(tp->t_mountp, new->agno);
+restart:
+	spin_lock(&pag->pagb_lock);
+	rbp = &pag->pagb_tree.rb_node;
+	parent = NULL;
+	busyp = NULL;
+	match = 0;
+	while (*rbp && match >= 0) {
+		parent = *rbp;
+		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
+
+		if (new->bno < busyp->bno) {
+			/* may overlap, but exact start block is lower */
+			rbp = &(*rbp)->rb_left;
+			if (new->bno + new->length > busyp->bno)
+				match = busyp->tid == new->tid ? 1 : -1;
+		} else if (new->bno > busyp->bno) {
+			/* may overlap, but exact start block is higher */
+			rbp = &(*rbp)->rb_right;
+			if (bno < busyp->bno + busyp->length)
+				match = busyp->tid == new->tid ? 1 : -1;
+		} else {
+			match = busyp->tid == new->tid ? 1 : -1;
+			break;
+		}
+	}
+	if (match < 0) {
+		/* overlap marked busy in different transaction */
+		spin_unlock(&pag->pagb_lock);
+		xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
+		goto restart;
+	}
+	if (match > 0) {
 		/*
-		 * The busy list is full!  Since it is now not possible to
-		 * track the free block, make this a synchronous transaction
-		 * to insure that the block is not reused before this
-		 * transaction commits.
+		 * overlap marked busy in same transaction. Update if exact
+		 * start block match, otherwise combine the busy extents into
+		 * a single range.
 		 */
-		xfs_trans_set_sync(tp);
-	}
+		if (busyp->bno == new->bno) {
+			busyp->length = max(busyp->length, new->length);
+			spin_unlock(&pag->pagb_lock);
+			ASSERT(tp->t_flags & XFS_TRANS_SYNC);
+			xfs_perag_put(pag);
+			kmem_free(new);
+			return;
+		}
+		rb_erase(&busyp->rb_node, &pag->pagb_tree);
+		new->length = max(busyp->bno + busyp->length,
+				  new->bno + new->length) -
+			      min(busyp->bno, new->bno);
+		new->bno = min(busyp->bno, new->bno);
+	} else
+		busyp = NULL;
 
+	rb_link_node(&new->rb_node, parent, rbp);
+	rb_insert_color(&new->rb_node, &pag->pagb_tree);
+
+	list_add(&new->list, &tp->t_busy);
 	spin_unlock(&pag->pagb_lock);
 	xfs_perag_put(pag);
+	kmem_free(busyp);
 }
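
The merge arithmetic at the end of the same-transaction case is easy to sanity-check by hand. A standalone worked example (plain userspace C, not kernel code): combining an existing busy extent covering blocks [100, 110) with a new one covering [105, 125) must yield [100, 125):

	#include <stdio.h>

	int main(void)
	{
		unsigned int busy_bno = 100, busy_len = 10;	/* [100, 110) */
		unsigned int new_bno = 105, new_len = 20;	/* [105, 125) */

		/* same formula as the kernel code: max(end) - min(start) */
		unsigned int end_a = busy_bno + busy_len;
		unsigned int end_b = new_bno + new_len;
		unsigned int bno = busy_bno < new_bno ? busy_bno : new_bno;
		unsigned int len = (end_a > end_b ? end_a : end_b) - bno;

		printf("merged: bno=%u length=%u\n", bno, len);	/* 100, 25 */
		return 0;
	}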
 
-void
-xfs_alloc_clear_busy(xfs_trans_t *tp,
-		    xfs_agnumber_t agno,
-		    int idx)
+/*
+ * Search for a busy extent within the range of the extent we are about to
+ * allocate. The busy extent tree lock is taken and dropped internally. This
+ * function returns 0 for no overlapping busy extent, -1 for an overlapping
+ * but not exact busy extent, and 1 for an exact match. This is done so that
+ * a non-zero return indicates an overlap that will require a synchronous
+ * transaction, but it can still be used to distinguish between a partial and
+ * an exact match.
+ */
+static int
+xfs_alloc_busy_search(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_agblock_t		bno,
+	xfs_extlen_t		len)
 {
 	struct xfs_perag	*pag;
-	xfs_perag_busy_t	*list;
+	struct rb_node		*rbp;
+	struct xfs_busy_extent	*busyp;
+	int			match = 0;
 
-	ASSERT(idx < XFS_PAGB_NUM_SLOTS);
-	pag = xfs_perag_get(tp->t_mountp, agno);
+	pag = xfs_perag_get(mp, agno);
 	spin_lock(&pag->pagb_lock);
-	list = pag->pagb_list;
 
-	trace_xfs_alloc_unbusy(tp->t_mountp, agno, idx, list[idx].busy_tp == tp);
+	rbp = pag->pagb_tree.rb_node;
 
-	if (list[idx].busy_tp == tp) {
-		list[idx].busy_tp = NULL;
-		pag->pagb_count--;
+	/* find closest start bno overlap */
+	while (rbp) {
+		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
+		if (bno < busyp->bno) {
+			/* may overlap, but exact start block is lower */
+			if (bno + len > busyp->bno)
+				match = -1;
+			rbp = rbp->rb_left;
+		} else if (bno > busyp->bno) {
+			/* may overlap, but exact start block is higher */
+			if (bno < busyp->bno + busyp->length)
+				match = -1;
+			rbp = rbp->rb_right;
+		} else {
+			/* bno matches busyp, length determines exact match */
+			match = (busyp->length == len) ? 1 : -1;
+			break;
+		}
 	}
-
 	spin_unlock(&pag->pagb_lock);
+	trace_xfs_alloc_busysearch(mp, agno, bno, len, !!match);
 	xfs_perag_put(pag);
+	return match;
 }
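
A caller-side sketch of how the tri-state return is meant to be consumed; decide_sync() is a hypothetical helper for illustration, not part of the patch:

	static void
	decide_sync(
		struct xfs_mount	*mp,
		struct xfs_trans	*tp,
		xfs_agnumber_t		agno,
		xfs_agblock_t		bno,
		xfs_extlen_t		len)
	{
		switch (xfs_alloc_busy_search(mp, agno, bno, len)) {
		case 0:		/* nothing busy overlaps [bno, bno + len) */
			break;
		case 1:		/* exact match: same extent freed then reused */
		case -1:	/* partial overlap: ranges disagree */
			/* either way, commit must wait for the free to hit disk */
			xfs_trans_set_sync(tp);
			break;
		}
	}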
 
-
-/*
- * If we find the extent in the busy list, force the log out to get the
- * extent out of the busy list so the caller can use it straight away.
- */
-STATIC void
-xfs_alloc_search_busy(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len)
+void
+xfs_alloc_busy_clear(
+	struct xfs_mount	*mp,
+	struct xfs_busy_extent	*busyp)
 {
 	struct xfs_perag	*pag;
-	xfs_perag_busy_t	*bsy;
-	xfs_agblock_t		uend, bend;
-	xfs_lsn_t		lsn = 0;
-	int			cnt;
 
-	pag = xfs_perag_get(tp->t_mountp, agno);
-	spin_lock(&pag->pagb_lock);
-	cnt = pag->pagb_count;
+	trace_xfs_alloc_unbusy(mp, busyp->agno, busyp->bno,
+						busyp->length);
 
-	/*
-	 * search pagb_list for this slot, skipping open slots. We have to
-	 * search the entire array as there may be multiple overlaps and
-	 * we have to get the most recent LSN for the log force to push out
-	 * all the transactions that span the range.
-	 */
-	uend = bno + len - 1;
-	for (cnt = 0; cnt < pag->pagb_count; cnt++) {
-		bsy = &pag->pagb_list[cnt];
-		if (!bsy->busy_tp)
-			continue;
+	ASSERT(xfs_alloc_busy_search(mp, busyp->agno, busyp->bno,
+						busyp->length) == 1);
 
-		bend = bsy->busy_start + bsy->busy_length - 1;
-		if (bno > bend || uend < bsy->busy_start)
-			continue;
+	list_del_init(&busyp->list);
 
-		/* (start1,length1) within (start2, length2) */
-		if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
-			lsn = bsy->busy_tp->t_commit_lsn;
-	}
+	pag = xfs_perag_get(mp, busyp->agno);
+	spin_lock(&pag->pagb_lock);
+	rb_erase(&busyp->rb_node, &pag->pagb_tree);
 	spin_unlock(&pag->pagb_lock);
 	xfs_perag_put(pag);
-	trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
 
-	/*
-	 * If a block was found, force the log through the LSN of the
-	 * transaction that freed the block
-	 */
-	if (lsn)
-		xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC);
+	kmem_free(busyp);
 }
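
Finally, the record type these three functions manage is not defined in this file. A sketch of its layout inferred from the fields used above (the tid type is an assumption; the real definition would sit in the XFS headers changed alongside this file):

	struct xfs_busy_extent {
		struct rb_node		rb_node;	/* per-AG tree, by bno */
		struct list_head	list;		/* transaction's t_busy list */
		xfs_agnumber_t		agno;
		xfs_agblock_t		bno;		/* start block */
		xfs_extlen_t		length;		/* extent length */
		unsigned int		tid;		/* assumed type: id of the
							   freeing transaction */
	};

Lifecycle: xfs_alloc_busy_insert() allocates the record when an extent is freed and links it onto tp->t_busy; xfs_alloc_busy_clear() unlinks it, erases it from the per-AG tree, and frees it once the freeing transaction's log I/O completes.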