-rw-r--r--	fs/xfs/Makefile	1
-rw-r--r--	fs/xfs/xfs_ag.h	18
-rw-r--r--	fs/xfs/xfs_alloc.c	572
-rw-r--r--	fs/xfs/xfs_alloc.h	28
-rw-r--r--	fs/xfs/xfs_alloc_btree.c	1
-rw-r--r--	fs/xfs/xfs_discard.c	1
-rw-r--r--	fs/xfs/xfs_extent_busy.c	603
-rw-r--r--	fs/xfs/xfs_extent_busy.h	65
-rw-r--r--	fs/xfs/xfs_log_cil.c	1
-rw-r--r--	fs/xfs/xfs_trans.c	1
10 files changed, 674 insertions, 617 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0a9977983f92..ca9229ff8ac0 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -33,6 +33,7 @@ xfs-y += xfs_aops.o \
33	xfs_discard.o \
34	xfs_error.o \
35	xfs_export.o \
36	xfs_extent_busy.o \
37	xfs_file.o \
38	xfs_filestream.o \
39	xfs_fsops.o \
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 4805f009f923..44d65c1533c0 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -175,24 +175,6 @@ typedef struct xfs_agfl {
175} xfs_agfl_t;
176
177/*
178 * Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
179 * have been freed but whose transactions aren't committed to disk yet.
180 *
181 * Note that we use the transaction ID to record the transaction, not the
182 * transaction structure itself. See xfs_alloc_busy_insert() for details.
183 */
184struct xfs_busy_extent {
185 struct rb_node rb_node; /* ag by-bno indexed search tree */
186 struct list_head list; /* transaction busy extent list */
187 xfs_agnumber_t agno;
188 xfs_agblock_t bno;
189 xfs_extlen_t length;
190 unsigned int flags;
191#define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
192#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */
193};
194
195/*
196 * Per-ag incore structure, copies of information in agf and agi,
197 * to improve the performance of allocation group selection.
198 */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 95ee705f146b..ae6df2585895 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -31,6 +31,7 @@
31#include "xfs_inode.h"
32#include "xfs_btree.h"
33#include "xfs_alloc.h"
34#include "xfs_extent_busy.h"
35#include "xfs_error.h"
36#include "xfs_trace.h"
37
@@ -2500,574 +2501,3 @@ error0:
2500	xfs_perag_put(args.pag);
2501	return error;
2502}
2503
2504void
2505xfs_alloc_busy_insert(
2506 struct xfs_trans *tp,
2507 xfs_agnumber_t agno,
2508 xfs_agblock_t bno,
2509 xfs_extlen_t len,
2510 unsigned int flags)
2511{
2512 struct xfs_busy_extent *new;
2513 struct xfs_busy_extent *busyp;
2514 struct xfs_perag *pag;
2515 struct rb_node **rbp;
2516 struct rb_node *parent = NULL;
2517
2518 new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
2519 if (!new) {
2520 /*
2521 * No Memory! Since it is now not possible to track the free
2522 * block, make this a synchronous transaction to insure that
2523 * the block is not reused before this transaction commits.
2524 */
2525 trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
2526 xfs_trans_set_sync(tp);
2527 return;
2528 }
2529
2530 new->agno = agno;
2531 new->bno = bno;
2532 new->length = len;
2533 INIT_LIST_HEAD(&new->list);
2534 new->flags = flags;
2535
2536 /* trace before insert to be able to see failed inserts */
2537 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
2538
2539 pag = xfs_perag_get(tp->t_mountp, new->agno);
2540 spin_lock(&pag->pagb_lock);
2541 rbp = &pag->pagb_tree.rb_node;
2542 while (*rbp) {
2543 parent = *rbp;
2544 busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
2545
2546 if (new->bno < busyp->bno) {
2547 rbp = &(*rbp)->rb_left;
2548 ASSERT(new->bno + new->length <= busyp->bno);
2549 } else if (new->bno > busyp->bno) {
2550 rbp = &(*rbp)->rb_right;
2551 ASSERT(bno >= busyp->bno + busyp->length);
2552 } else {
2553 ASSERT(0);
2554 }
2555 }
2556
2557 rb_link_node(&new->rb_node, parent, rbp);
2558 rb_insert_color(&new->rb_node, &pag->pagb_tree);
2559
2560 list_add(&new->list, &tp->t_busy);
2561 spin_unlock(&pag->pagb_lock);
2562 xfs_perag_put(pag);
2563}
2564
2565/*
2566 * Search for a busy extent within the range of the extent we are about to
2567 * allocate. You need to be holding the busy extent tree lock when calling
2568 * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
2569 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
2570 * match. This is done so that a non-zero return indicates an overlap that
2571 * will require a synchronous transaction, but it can still be
2572 * used to distinguish between a partial or exact match.
2573 */
2574int
2575xfs_alloc_busy_search(
2576 struct xfs_mount *mp,
2577 xfs_agnumber_t agno,
2578 xfs_agblock_t bno,
2579 xfs_extlen_t len)
2580{
2581 struct xfs_perag *pag;
2582 struct rb_node *rbp;
2583 struct xfs_busy_extent *busyp;
2584 int match = 0;
2585
2586 pag = xfs_perag_get(mp, agno);
2587 spin_lock(&pag->pagb_lock);
2588
2589 rbp = pag->pagb_tree.rb_node;
2590
2591 /* find closest start bno overlap */
2592 while (rbp) {
2593 busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
2594 if (bno < busyp->bno) {
2595 /* may overlap, but exact start block is lower */
2596 if (bno + len > busyp->bno)
2597 match = -1;
2598 rbp = rbp->rb_left;
2599 } else if (bno > busyp->bno) {
2600 /* may overlap, but exact start block is higher */
2601 if (bno < busyp->bno + busyp->length)
2602 match = -1;
2603 rbp = rbp->rb_right;
2604 } else {
2605 /* bno matches busyp, length determines exact match */
2606 match = (busyp->length == len) ? 1 : -1;
2607 break;
2608 }
2609 }
2610 spin_unlock(&pag->pagb_lock);
2611 xfs_perag_put(pag);
2612 return match;
2613}
2614
2615/*
2616 * The found free extent [fbno, fend] overlaps part or all of the given busy
2617 * extent. If the overlap covers the beginning, the end, or all of the busy
2618 * extent, the overlapping portion can be made unbusy and used for the
2619 * allocation. We can't split a busy extent because we can't modify a
2620 * transaction/CIL context busy list, but we can update an entries block
2621 * number or length.
2622 *
2623 * Returns true if the extent can safely be reused, or false if the search
2624 * needs to be restarted.
2625 */
2626STATIC bool
2627xfs_alloc_busy_update_extent(
2628 struct xfs_mount *mp,
2629 struct xfs_perag *pag,
2630 struct xfs_busy_extent *busyp,
2631 xfs_agblock_t fbno,
2632 xfs_extlen_t flen,
2633 bool userdata)
2634{
2635 xfs_agblock_t fend = fbno + flen;
2636 xfs_agblock_t bbno = busyp->bno;
2637 xfs_agblock_t bend = bbno + busyp->length;
2638
2639 /*
2640 * This extent is currently being discarded. Give the thread
2641 * performing the discard a chance to mark the extent unbusy
2642 * and retry.
2643 */
2644 if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
2645 spin_unlock(&pag->pagb_lock);
2646 delay(1);
2647 spin_lock(&pag->pagb_lock);
2648 return false;
2649 }
2650
2651 /*
2652 * If there is a busy extent overlapping a user allocation, we have
2653 * no choice but to force the log and retry the search.
2654 *
2655 * Fortunately this does not happen during normal operation, but
2656 * only if the filesystem is very low on space and has to dip into
2657 * the AGFL for normal allocations.
2658 */
2659 if (userdata)
2660 goto out_force_log;
2661
2662 if (bbno < fbno && bend > fend) {
2663 /*
2664 * Case 1:
2665 * bbno bend
2666 * +BBBBBBBBBBBBBBBBB+
2667 * +---------+
2668 * fbno fend
2669 */
2670
2671 /*
2672 * We would have to split the busy extent to be able to track
2673 * it correct, which we cannot do because we would have to
2674 * modify the list of busy extents attached to the transaction
2675 * or CIL context, which is immutable.
2676 *
2677 * Force out the log to clear the busy extent and retry the
2678 * search.
2679 */
2680 goto out_force_log;
2681 } else if (bbno >= fbno && bend <= fend) {
2682 /*
2683 * Case 2:
2684 * bbno bend
2685 * +BBBBBBBBBBBBBBBBB+
2686 * +-----------------+
2687 * fbno fend
2688 *
2689 * Case 3:
2690 * bbno bend
2691 * +BBBBBBBBBBBBBBBBB+
2692 * +--------------------------+
2693 * fbno fend
2694 *
2695 * Case 4:
2696 * bbno bend
2697 * +BBBBBBBBBBBBBBBBB+
2698 * +--------------------------+
2699 * fbno fend
2700 *
2701 * Case 5:
2702 * bbno bend
2703 * +BBBBBBBBBBBBBBBBB+
2704 * +-----------------------------------+
2705 * fbno fend
2706 *
2707 */
2708
2709 /*
2710 * The busy extent is fully covered by the extent we are
2711 * allocating, and can simply be removed from the rbtree.
2712 * However we cannot remove it from the immutable list
2713 * tracking busy extents in the transaction or CIL context,
2714 * so set the length to zero to mark it invalid.
2715 *
2716 * We also need to restart the busy extent search from the
2717 * tree root, because erasing the node can rearrange the
2718 * tree topology.
2719 */
2720 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2721 busyp->length = 0;
2722 return false;
2723 } else if (fend < bend) {
2724 /*
2725 * Case 6:
2726 * bbno bend
2727 * +BBBBBBBBBBBBBBBBB+
2728 * +---------+
2729 * fbno fend
2730 *
2731 * Case 7:
2732 * bbno bend
2733 * +BBBBBBBBBBBBBBBBB+
2734 * +------------------+
2735 * fbno fend
2736 *
2737 */
2738 busyp->bno = fend;
2739 } else if (bbno < fbno) {
2740 /*
2741 * Case 8:
2742 * bbno bend
2743 * +BBBBBBBBBBBBBBBBB+
2744 * +-------------+
2745 * fbno fend
2746 *
2747 * Case 9:
2748 * bbno bend
2749 * +BBBBBBBBBBBBBBBBB+
2750 * +----------------------+
2751 * fbno fend
2752 */
2753 busyp->length = fbno - busyp->bno;
2754 } else {
2755 ASSERT(0);
2756 }
2757
2758 trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
2759 return true;
2760
2761out_force_log:
2762 spin_unlock(&pag->pagb_lock);
2763 xfs_log_force(mp, XFS_LOG_SYNC);
2764 trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
2765 spin_lock(&pag->pagb_lock);
2766 return false;
2767}
2768
2769
2770/*
2771 * For a given extent [fbno, flen], make sure we can reuse it safely.
2772 */
2773void
2774xfs_alloc_busy_reuse(
2775 struct xfs_mount *mp,
2776 xfs_agnumber_t agno,
2777 xfs_agblock_t fbno,
2778 xfs_extlen_t flen,
2779 bool userdata)
2780{
2781 struct xfs_perag *pag;
2782 struct rb_node *rbp;
2783
2784 ASSERT(flen > 0);
2785
2786 pag = xfs_perag_get(mp, agno);
2787 spin_lock(&pag->pagb_lock);
2788restart:
2789 rbp = pag->pagb_tree.rb_node;
2790 while (rbp) {
2791 struct xfs_busy_extent *busyp =
2792 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2793 xfs_agblock_t bbno = busyp->bno;
2794 xfs_agblock_t bend = bbno + busyp->length;
2795
2796 if (fbno + flen <= bbno) {
2797 rbp = rbp->rb_left;
2798 continue;
2799 } else if (fbno >= bend) {
2800 rbp = rbp->rb_right;
2801 continue;
2802 }
2803
2804 if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
2805 userdata))
2806 goto restart;
2807 }
2808 spin_unlock(&pag->pagb_lock);
2809 xfs_perag_put(pag);
2810}
2811
2812/*
2813 * For a given extent [fbno, flen], search the busy extent list to find a
2814 * subset of the extent that is not busy. If *rlen is smaller than
2815 * args->minlen no suitable extent could be found, and the higher level
2816 * code needs to force out the log and retry the allocation.
2817 */
2818STATIC void
2819xfs_alloc_busy_trim(
2820 struct xfs_alloc_arg *args,
2821 xfs_agblock_t bno,
2822 xfs_extlen_t len,
2823 xfs_agblock_t *rbno,
2824 xfs_extlen_t *rlen)
2825{
2826 xfs_agblock_t fbno;
2827 xfs_extlen_t flen;
2828 struct rb_node *rbp;
2829
2830 ASSERT(len > 0);
2831
2832 spin_lock(&args->pag->pagb_lock);
2833restart:
2834 fbno = bno;
2835 flen = len;
2836 rbp = args->pag->pagb_tree.rb_node;
2837 while (rbp && flen >= args->minlen) {
2838 struct xfs_busy_extent *busyp =
2839 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2840 xfs_agblock_t fend = fbno + flen;
2841 xfs_agblock_t bbno = busyp->bno;
2842 xfs_agblock_t bend = bbno + busyp->length;
2843
2844 if (fend <= bbno) {
2845 rbp = rbp->rb_left;
2846 continue;
2847 } else if (fbno >= bend) {
2848 rbp = rbp->rb_right;
2849 continue;
2850 }
2851
2852 /*
2853 * If this is a metadata allocation, try to reuse the busy
2854 * extent instead of trimming the allocation.
2855 */
2856 if (!args->userdata &&
2857 !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
2858 if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
2859 busyp, fbno, flen,
2860 false))
2861 goto restart;
2862 continue;
2863 }
2864
2865 if (bbno <= fbno) {
2866 /* start overlap */
2867
2868 /*
2869 * Case 1:
2870 * bbno bend
2871 * +BBBBBBBBBBBBBBBBB+
2872 * +---------+
2873 * fbno fend
2874 *
2875 * Case 2:
2876 * bbno bend
2877 * +BBBBBBBBBBBBBBBBB+
2878 * +-------------+
2879 * fbno fend
2880 *
2881 * Case 3:
2882 * bbno bend
2883 * +BBBBBBBBBBBBBBBBB+
2884 * +-------------+
2885 * fbno fend
2886 *
2887 * Case 4:
2888 * bbno bend
2889 * +BBBBBBBBBBBBBBBBB+
2890 * +-----------------+
2891 * fbno fend
2892 *
2893 * No unbusy region in extent, return failure.
2894 */
2895 if (fend <= bend)
2896 goto fail;
2897
2898 /*
2899 * Case 5:
2900 * bbno bend
2901 * +BBBBBBBBBBBBBBBBB+
2902 * +----------------------+
2903 * fbno fend
2904 *
2905 * Case 6:
2906 * bbno bend
2907 * +BBBBBBBBBBBBBBBBB+
2908 * +--------------------------+
2909 * fbno fend
2910 *
2911 * Needs to be trimmed to:
2912 * +-------+
2913 * fbno fend
2914 */
2915 fbno = bend;
2916 } else if (bend >= fend) {
2917 /* end overlap */
2918
2919 /*
2920 * Case 7:
2921 * bbno bend
2922 * +BBBBBBBBBBBBBBBBB+
2923 * +------------------+
2924 * fbno fend
2925 *
2926 * Case 8:
2927 * bbno bend
2928 * +BBBBBBBBBBBBBBBBB+
2929 * +--------------------------+
2930 * fbno fend
2931 *
2932 * Needs to be trimmed to:
2933 * +-------+
2934 * fbno fend
2935 */
2936 fend = bbno;
2937 } else {
2938 /* middle overlap */
2939
2940 /*
2941 * Case 9:
2942 * bbno bend
2943 * +BBBBBBBBBBBBBBBBB+
2944 * +-----------------------------------+
2945 * fbno fend
2946 *
2947 * Can be trimmed to:
2948 * +-------+ OR +-------+
2949 * fbno fend fbno fend
2950 *
2951 * Backward allocation leads to significant
2952 * fragmentation of directories, which degrades
2953 * directory performance, therefore we always want to
2954 * choose the option that produces forward allocation
2955 * patterns.
2956 * Preferring the lower bno extent will make the next
2957 * request use "fend" as the start of the next
2958 * allocation; if the segment is no longer busy at
2959 * that point, we'll get a contiguous allocation, but
2960 * even if it is still busy, we will get a forward
2961 * allocation.
2962 * We try to avoid choosing the segment at "bend",
2963 * because that can lead to the next allocation
2964 * taking the segment at "fbno", which would be a
2965 * backward allocation. We only use the segment at
2966 * "fbno" if it is much larger than the current
2967 * requested size, because in that case there's a
2968 * good chance subsequent allocations will be
2969 * contiguous.
2970 */
2971 if (bbno - fbno >= args->maxlen) {
2972 /* left candidate fits perfect */
2973 fend = bbno;
2974 } else if (fend - bend >= args->maxlen * 4) {
2975 /* right candidate has enough free space */
2976 fbno = bend;
2977 } else if (bbno - fbno >= args->minlen) {
2978 /* left candidate fits minimum requirement */
2979 fend = bbno;
2980 } else {
2981 goto fail;
2982 }
2983 }
2984
2985 flen = fend - fbno;
2986 }
2987 spin_unlock(&args->pag->pagb_lock);
2988
2989 if (fbno != bno || flen != len) {
2990 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
2991 fbno, flen);
2992 }
2993 *rbno = fbno;
2994 *rlen = flen;
2995 return;
2996fail:
2997 /*
2998 * Return a zero extent length as failure indications. All callers
2999 * re-check if the trimmed extent satisfies the minlen requirement.
3000 */
3001 spin_unlock(&args->pag->pagb_lock);
3002 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
3003 *rbno = fbno;
3004 *rlen = 0;
3005}
3006
3007static void
3008xfs_alloc_busy_clear_one(
3009 struct xfs_mount *mp,
3010 struct xfs_perag *pag,
3011 struct xfs_busy_extent *busyp)
3012{
3013 if (busyp->length) {
3014 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
3015 busyp->length);
3016 rb_erase(&busyp->rb_node, &pag->pagb_tree);
3017 }
3018
3019 list_del_init(&busyp->list);
3020 kmem_free(busyp);
3021}
3022
3023/*
3024 * Remove all extents on the passed in list from the busy extents tree.
3025 * If do_discard is set skip extents that need to be discarded, and mark
3026 * these as undergoing a discard operation instead.
3027 */
3028void
3029xfs_alloc_busy_clear(
3030 struct xfs_mount *mp,
3031 struct list_head *list,
3032 bool do_discard)
3033{
3034 struct xfs_busy_extent *busyp, *n;
3035 struct xfs_perag *pag = NULL;
3036 xfs_agnumber_t agno = NULLAGNUMBER;
3037
3038 list_for_each_entry_safe(busyp, n, list, list) {
3039 if (busyp->agno != agno) {
3040 if (pag) {
3041 spin_unlock(&pag->pagb_lock);
3042 xfs_perag_put(pag);
3043 }
3044 pag = xfs_perag_get(mp, busyp->agno);
3045 spin_lock(&pag->pagb_lock);
3046 agno = busyp->agno;
3047 }
3048
3049 if (do_discard && busyp->length &&
3050 !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
3051 busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
3052 else
3053 xfs_alloc_busy_clear_one(mp, pag, busyp);
3054 }
3055
3056 if (pag) {
3057 spin_unlock(&pag->pagb_lock);
3058 xfs_perag_put(pag);
3059 }
3060}
3061
3062/*
3063 * Callback for list_sort to sort busy extents by the AG they reside in.
3064 */
3065int
3066xfs_busy_extent_ag_cmp(
3067 void *priv,
3068 struct list_head *a,
3069 struct list_head *b)
3070{
3071 return container_of(a, struct xfs_busy_extent, list)->agno -
3072 container_of(b, struct xfs_busy_extent, list)->agno;
3073}
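
The comment above xfs_alloc_busy_search() defines a tri-state return: 0 for no overlapping busy extent, -1 for a partial overlap, and 1 for an exact match, with any non-zero value requiring the caller to synchronise before reuse. A minimal caller sketch, purely illustrative and not part of this patch (the helper name is invented), could look like:

static bool
example_extent_is_reusable(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	int	busy = xfs_alloc_busy_search(mp, agno, bno, len);

	/*
	 * 0  -> no busy extent overlaps [bno, bno + len)
	 * 1  -> the range exactly matches a busy extent
	 * -1 -> the range partially overlaps a busy extent
	 *
	 * Any non-zero value means the blocks were freed by a transaction
	 * that may not have reached the log yet, so reusing them would
	 * first require a synchronous transaction or a log force.
	 */
	return busy == 0;
}
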
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 3a7e7d8f8ded..93be4a667ca1 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -23,7 +23,6 @@ struct xfs_btree_cur;
23struct xfs_mount;
24struct xfs_perag;
25struct xfs_trans;
26struct xfs_busy_extent;
27
28extern struct workqueue_struct *xfs_alloc_wq;
29
@@ -139,33 +138,6 @@ xfs_extlen_t
139xfs_alloc_longest_free_extent(struct xfs_mount *mp,
140		struct xfs_perag *pag);
141
142#ifdef __KERNEL__
143void
144xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
145 xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
146
147void
148xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
149 bool do_discard);
150
151int
152xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
153 xfs_agblock_t bno, xfs_extlen_t len);
154
155void
156xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
157 xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
158
159int
160xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
161
162static inline void xfs_alloc_busy_sort(struct list_head *list)
163{
164 list_sort(NULL, list, xfs_busy_extent_ag_cmp);
165}
166
167#endif /* __KERNEL__ */
168
169/*
170 * Compute and fill in value of m_ag_maxlevels.
171 */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index e23cc978f95b..3f665487521a 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -31,6 +31,7 @@
31#include "xfs_inode.h"
32#include "xfs_btree.h"
33#include "xfs_alloc.h"
34#include "xfs_extent_busy.h"
35#include "xfs_error.h"
36#include "xfs_trace.h"
37
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index bbbabc8d0514..e3f1abe774f6 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -29,6 +29,7 @@
29#include "xfs_inode.h"
30#include "xfs_alloc.h"
31#include "xfs_error.h"
32#include "xfs_extent_busy.h"
33#include "xfs_discard.h"
34#include "xfs_trace.h"
35
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
new file mode 100644
index 000000000000..4b5a4fa869af
--- /dev/null
+++ b/fs/xfs/xfs_extent_busy.c
@@ -0,0 +1,603 @@
1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * Copyright (c) 2010 David Chinner.
4 * Copyright (c) 2011 Christoph Hellwig.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it would be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include "xfs.h"
21#include "xfs_fs.h"
22#include "xfs_types.h"
23#include "xfs_log.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h"
28#include "xfs_bmap_btree.h"
29#include "xfs_alloc.h"
30#include "xfs_inode.h"
31#include "xfs_extent_busy.h"
32#include "xfs_trace.h"
33
34void
35xfs_alloc_busy_insert(
36 struct xfs_trans *tp,
37 xfs_agnumber_t agno,
38 xfs_agblock_t bno,
39 xfs_extlen_t len,
40 unsigned int flags)
41{
42 struct xfs_busy_extent *new;
43 struct xfs_busy_extent *busyp;
44 struct xfs_perag *pag;
45 struct rb_node **rbp;
46 struct rb_node *parent = NULL;
47
48 new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
49 if (!new) {
50 /*
51 * No Memory! Since it is now not possible to track the free
52 * block, make this a synchronous transaction to ensure that
53 * the block is not reused before this transaction commits.
54 */
55 trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
56 xfs_trans_set_sync(tp);
57 return;
58 }
59
60 new->agno = agno;
61 new->bno = bno;
62 new->length = len;
63 INIT_LIST_HEAD(&new->list);
64 new->flags = flags;
65
66 /* trace before insert to be able to see failed inserts */
67 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
68
69 pag = xfs_perag_get(tp->t_mountp, new->agno);
70 spin_lock(&pag->pagb_lock);
71 rbp = &pag->pagb_tree.rb_node;
72 while (*rbp) {
73 parent = *rbp;
74 busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
75
76 if (new->bno < busyp->bno) {
77 rbp = &(*rbp)->rb_left;
78 ASSERT(new->bno + new->length <= busyp->bno);
79 } else if (new->bno > busyp->bno) {
80 rbp = &(*rbp)->rb_right;
81 ASSERT(bno >= busyp->bno + busyp->length);
82 } else {
83 ASSERT(0);
84 }
85 }
86
87 rb_link_node(&new->rb_node, parent, rbp);
88 rb_insert_color(&new->rb_node, &pag->pagb_tree);
89
90 list_add(&new->list, &tp->t_busy);
91 spin_unlock(&pag->pagb_lock);
92 xfs_perag_put(pag);
93}
94
95/*
96 * Search for a busy extent within the range of the extent we are about to
97 * allocate. You need to be holding the busy extent tree lock when calling
98 * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
99 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
100 * match. This is done so that a non-zero return indicates an overlap that
101 * will require a synchronous transaction, but it can still be
102 * used to distinguish between a partial and an exact match.
103 */
104int
105xfs_alloc_busy_search(
106 struct xfs_mount *mp,
107 xfs_agnumber_t agno,
108 xfs_agblock_t bno,
109 xfs_extlen_t len)
110{
111 struct xfs_perag *pag;
112 struct rb_node *rbp;
113 struct xfs_busy_extent *busyp;
114 int match = 0;
115
116 pag = xfs_perag_get(mp, agno);
117 spin_lock(&pag->pagb_lock);
118
119 rbp = pag->pagb_tree.rb_node;
120
121 /* find closest start bno overlap */
122 while (rbp) {
123 busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
124 if (bno < busyp->bno) {
125 /* may overlap, but exact start block is lower */
126 if (bno + len > busyp->bno)
127 match = -1;
128 rbp = rbp->rb_left;
129 } else if (bno > busyp->bno) {
130 /* may overlap, but exact start block is higher */
131 if (bno < busyp->bno + busyp->length)
132 match = -1;
133 rbp = rbp->rb_right;
134 } else {
135 /* bno matches busyp, length determines exact match */
136 match = (busyp->length == len) ? 1 : -1;
137 break;
138 }
139 }
140 spin_unlock(&pag->pagb_lock);
141 xfs_perag_put(pag);
142 return match;
143}
144
145/*
146 * The found free extent [fbno, fend] overlaps part or all of the given busy
147 * extent. If the overlap covers the beginning, the end, or all of the busy
148 * extent, the overlapping portion can be made unbusy and used for the
149 * allocation. We can't split a busy extent because we can't modify a
150 * transaction/CIL context busy list, but we can update an entries block
151 * transaction/CIL context busy list, but we can update an entry's block
152 *
153 * Returns true if the extent can safely be reused, or false if the search
154 * needs to be restarted.
155 */
156STATIC bool
157xfs_alloc_busy_update_extent(
158 struct xfs_mount *mp,
159 struct xfs_perag *pag,
160 struct xfs_busy_extent *busyp,
161 xfs_agblock_t fbno,
162 xfs_extlen_t flen,
163 bool userdata)
164{
165 xfs_agblock_t fend = fbno + flen;
166 xfs_agblock_t bbno = busyp->bno;
167 xfs_agblock_t bend = bbno + busyp->length;
168
169 /*
170 * This extent is currently being discarded. Give the thread
171 * performing the discard a chance to mark the extent unbusy
172 * and retry.
173 */
174 if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
175 spin_unlock(&pag->pagb_lock);
176 delay(1);
177 spin_lock(&pag->pagb_lock);
178 return false;
179 }
180
181 /*
182 * If there is a busy extent overlapping a user allocation, we have
183 * no choice but to force the log and retry the search.
184 *
185 * Fortunately this does not happen during normal operation, but
186 * only if the filesystem is very low on space and has to dip into
187 * the AGFL for normal allocations.
188 */
189 if (userdata)
190 goto out_force_log;
191
192 if (bbno < fbno && bend > fend) {
193 /*
194 * Case 1:
195 * bbno bend
196 * +BBBBBBBBBBBBBBBBB+
197 * +---------+
198 * fbno fend
199 */
200
201 /*
202 * We would have to split the busy extent to be able to track
203 * it correctly, which we cannot do because we would have to
204 * modify the list of busy extents attached to the transaction
205 * or CIL context, which is immutable.
206 *
207 * Force out the log to clear the busy extent and retry the
208 * search.
209 */
210 goto out_force_log;
211 } else if (bbno >= fbno && bend <= fend) {
212 /*
213 * Case 2:
214 * bbno bend
215 * +BBBBBBBBBBBBBBBBB+
216 * +-----------------+
217 * fbno fend
218 *
219 * Case 3:
220 * bbno bend
221 * +BBBBBBBBBBBBBBBBB+
222 * +--------------------------+
223 * fbno fend
224 *
225 * Case 4:
226 * bbno bend
227 * +BBBBBBBBBBBBBBBBB+
228 * +--------------------------+
229 * fbno fend
230 *
231 * Case 5:
232 * bbno bend
233 * +BBBBBBBBBBBBBBBBB+
234 * +-----------------------------------+
235 * fbno fend
236 *
237 */
238
239 /*
240 * The busy extent is fully covered by the extent we are
241 * allocating, and can simply be removed from the rbtree.
242 * However we cannot remove it from the immutable list
243 * tracking busy extents in the transaction or CIL context,
244 * so set the length to zero to mark it invalid.
245 *
246 * We also need to restart the busy extent search from the
247 * tree root, because erasing the node can rearrange the
248 * tree topology.
249 */
250 rb_erase(&busyp->rb_node, &pag->pagb_tree);
251 busyp->length = 0;
252 return false;
253 } else if (fend < bend) {
254 /*
255 * Case 6:
256 * bbno bend
257 * +BBBBBBBBBBBBBBBBB+
258 * +---------+
259 * fbno fend
260 *
261 * Case 7:
262 * bbno bend
263 * +BBBBBBBBBBBBBBBBB+
264 * +------------------+
265 * fbno fend
266 *
267 */
268 busyp->bno = fend;
269 } else if (bbno < fbno) {
270 /*
271 * Case 8:
272 * bbno bend
273 * +BBBBBBBBBBBBBBBBB+
274 * +-------------+
275 * fbno fend
276 *
277 * Case 9:
278 * bbno bend
279 * +BBBBBBBBBBBBBBBBB+
280 * +----------------------+
281 * fbno fend
282 */
283 busyp->length = fbno - busyp->bno;
284 } else {
285 ASSERT(0);
286 }
287
288 trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
289 return true;
290
291out_force_log:
292 spin_unlock(&pag->pagb_lock);
293 xfs_log_force(mp, XFS_LOG_SYNC);
294 trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
295 spin_lock(&pag->pagb_lock);
296 return false;
297}
298
299
300/*
301 * For a given extent [fbno, flen], make sure we can reuse it safely.
302 */
303void
304xfs_alloc_busy_reuse(
305 struct xfs_mount *mp,
306 xfs_agnumber_t agno,
307 xfs_agblock_t fbno,
308 xfs_extlen_t flen,
309 bool userdata)
310{
311 struct xfs_perag *pag;
312 struct rb_node *rbp;
313
314 ASSERT(flen > 0);
315
316 pag = xfs_perag_get(mp, agno);
317 spin_lock(&pag->pagb_lock);
318restart:
319 rbp = pag->pagb_tree.rb_node;
320 while (rbp) {
321 struct xfs_busy_extent *busyp =
322 rb_entry(rbp, struct xfs_busy_extent, rb_node);
323 xfs_agblock_t bbno = busyp->bno;
324 xfs_agblock_t bend = bbno + busyp->length;
325
326 if (fbno + flen <= bbno) {
327 rbp = rbp->rb_left;
328 continue;
329 } else if (fbno >= bend) {
330 rbp = rbp->rb_right;
331 continue;
332 }
333
334 if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
335 userdata))
336 goto restart;
337 }
338 spin_unlock(&pag->pagb_lock);
339 xfs_perag_put(pag);
340}
341
342/*
343 * For a given extent [fbno, flen], search the busy extent list to find a
344 * subset of the extent that is not busy. If *rlen is smaller than
345 * args->minlen no suitable extent could be found, and the higher level
346 * code needs to force out the log and retry the allocation.
347 */
348STATIC void
349xfs_alloc_busy_trim(
350 struct xfs_alloc_arg *args,
351 xfs_agblock_t bno,
352 xfs_extlen_t len,
353 xfs_agblock_t *rbno,
354 xfs_extlen_t *rlen)
355{
356 xfs_agblock_t fbno;
357 xfs_extlen_t flen;
358 struct rb_node *rbp;
359
360 ASSERT(len > 0);
361
362 spin_lock(&args->pag->pagb_lock);
363restart:
364 fbno = bno;
365 flen = len;
366 rbp = args->pag->pagb_tree.rb_node;
367 while (rbp && flen >= args->minlen) {
368 struct xfs_busy_extent *busyp =
369 rb_entry(rbp, struct xfs_busy_extent, rb_node);
370 xfs_agblock_t fend = fbno + flen;
371 xfs_agblock_t bbno = busyp->bno;
372 xfs_agblock_t bend = bbno + busyp->length;
373
374 if (fend <= bbno) {
375 rbp = rbp->rb_left;
376 continue;
377 } else if (fbno >= bend) {
378 rbp = rbp->rb_right;
379 continue;
380 }
381
382 /*
383 * If this is a metadata allocation, try to reuse the busy
384 * extent instead of trimming the allocation.
385 */
386 if (!args->userdata &&
387 !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
388 if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
389 busyp, fbno, flen,
390 false))
391 goto restart;
392 continue;
393 }
394
395 if (bbno <= fbno) {
396 /* start overlap */
397
398 /*
399 * Case 1:
400 * bbno bend
401 * +BBBBBBBBBBBBBBBBB+
402 * +---------+
403 * fbno fend
404 *
405 * Case 2:
406 * bbno bend
407 * +BBBBBBBBBBBBBBBBB+
408 * +-------------+
409 * fbno fend
410 *
411 * Case 3:
412 * bbno bend
413 * +BBBBBBBBBBBBBBBBB+
414 * +-------------+
415 * fbno fend
416 *
417 * Case 4:
418 * bbno bend
419 * +BBBBBBBBBBBBBBBBB+
420 * +-----------------+
421 * fbno fend
422 *
423 * No unbusy region in extent, return failure.
424 */
425 if (fend <= bend)
426 goto fail;
427
428 /*
429 * Case 5:
430 * bbno bend
431 * +BBBBBBBBBBBBBBBBB+
432 * +----------------------+
433 * fbno fend
434 *
435 * Case 6:
436 * bbno bend
437 * +BBBBBBBBBBBBBBBBB+
438 * +--------------------------+
439 * fbno fend
440 *
441 * Needs to be trimmed to:
442 * +-------+
443 * fbno fend
444 */
445 fbno = bend;
446 } else if (bend >= fend) {
447 /* end overlap */
448
449 /*
450 * Case 7:
451 * bbno bend
452 * +BBBBBBBBBBBBBBBBB+
453 * +------------------+
454 * fbno fend
455 *
456 * Case 8:
457 * bbno bend
458 * +BBBBBBBBBBBBBBBBB+
459 * +--------------------------+
460 * fbno fend
461 *
462 * Needs to be trimmed to:
463 * +-------+
464 * fbno fend
465 */
466 fend = bbno;
467 } else {
468 /* middle overlap */
469
470 /*
471 * Case 9:
472 * bbno bend
473 * +BBBBBBBBBBBBBBBBB+
474 * +-----------------------------------+
475 * fbno fend
476 *
477 * Can be trimmed to:
478 * +-------+ OR +-------+
479 * fbno fend fbno fend
480 *
481 * Backward allocation leads to significant
482 * fragmentation of directories, which degrades
483 * directory performance, therefore we always want to
484 * choose the option that produces forward allocation
485 * patterns.
486 * Preferring the lower bno extent will make the next
487 * request use "fend" as the start of the next
488 * allocation; if the segment is no longer busy at
489 * that point, we'll get a contiguous allocation, but
490 * even if it is still busy, we will get a forward
491 * allocation.
492 * We try to avoid choosing the segment at "bend",
493 * because that can lead to the next allocation
494 * taking the segment at "fbno", which would be a
495 * backward allocation. We only use the segment at
496 * "fbno" if it is much larger than the current
497 * requested size, because in that case there's a
498 * good chance subsequent allocations will be
499 * contiguous.
500 */
501 if (bbno - fbno >= args->maxlen) {
502 /* left candidate fits perfect */
503 fend = bbno;
504 } else if (fend - bend >= args->maxlen * 4) {
505 /* right candidate has enough free space */
506 fbno = bend;
507 } else if (bbno - fbno >= args->minlen) {
508 /* left candidate fits minimum requirement */
509 fend = bbno;
510 } else {
511 goto fail;
512 }
513 }
514
515 flen = fend - fbno;
516 }
517 spin_unlock(&args->pag->pagb_lock);
518
519 if (fbno != bno || flen != len) {
520 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
521 fbno, flen);
522 }
523 *rbno = fbno;
524 *rlen = flen;
525 return;
526fail:
527 /*
528 * Return a zero extent length as the failure indication. All callers
529 * re-check if the trimmed extent satisfies the minlen requirement.
530 */
531 spin_unlock(&args->pag->pagb_lock);
532 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
533 *rbno = fbno;
534 *rlen = 0;
535}
536
537static void
538xfs_alloc_busy_clear_one(
539 struct xfs_mount *mp,
540 struct xfs_perag *pag,
541 struct xfs_busy_extent *busyp)
542{
543 if (busyp->length) {
544 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
545 busyp->length);
546 rb_erase(&busyp->rb_node, &pag->pagb_tree);
547 }
548
549 list_del_init(&busyp->list);
550 kmem_free(busyp);
551}
552
553/*
554 * Remove all extents on the passed in list from the busy extents tree.
555 * If do_discard is set skip extents that need to be discarded, and mark
556 * these as undergoing a discard operation instead.
557 */
558void
559xfs_alloc_busy_clear(
560 struct xfs_mount *mp,
561 struct list_head *list,
562 bool do_discard)
563{
564 struct xfs_busy_extent *busyp, *n;
565 struct xfs_perag *pag = NULL;
566 xfs_agnumber_t agno = NULLAGNUMBER;
567
568 list_for_each_entry_safe(busyp, n, list, list) {
569 if (busyp->agno != agno) {
570 if (pag) {
571 spin_unlock(&pag->pagb_lock);
572 xfs_perag_put(pag);
573 }
574 pag = xfs_perag_get(mp, busyp->agno);
575 spin_lock(&pag->pagb_lock);
576 agno = busyp->agno;
577 }
578
579 if (do_discard && busyp->length &&
580 !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
581 busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
582 else
583 xfs_alloc_busy_clear_one(mp, pag, busyp);
584 }
585
586 if (pag) {
587 spin_unlock(&pag->pagb_lock);
588 xfs_perag_put(pag);
589 }
590}
591
592/*
593 * Callback for list_sort to sort busy extents by the AG they reside in.
594 */
595int
596xfs_alloc_busy_ag_cmp(
597 void *priv,
598 struct list_head *a,
599 struct list_head *b)
600{
601 return container_of(a, struct xfs_busy_extent, list)->agno -
602 container_of(b, struct xfs_busy_extent, list)->agno;
603}
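
The subtlest part of xfs_alloc_busy_trim() above is the three-way choice in the middle-overlap case. Below is a small userspace-style sketch of just that arithmetic, using plain unsigned integers instead of xfs_agblock_t/xfs_extlen_t and a hypothetical function name; it is not part of this patch and assumes fbno < bbno < bend < fend:

static int
example_trim_middle_overlap(
	unsigned int fbno, unsigned int fend,	/* free extent */
	unsigned int bbno, unsigned int bend,	/* busy extent inside it */
	unsigned int minlen, unsigned int maxlen,
	unsigned int *rbno, unsigned int *rlen)
{
	if (bbno - fbno >= maxlen) {
		/* left candidate alone satisfies the full request */
		fend = bbno;
	} else if (fend - bend >= maxlen * 4) {
		/* right candidate leaves plenty of room to grow forward */
		fbno = bend;
	} else if (bbno - fbno >= minlen) {
		/* left candidate at least meets the minimum length */
		fend = bbno;
	} else {
		return -1;	/* no usable unbusy segment */
	}

	*rbno = fbno;
	*rlen = fend - fbno;
	return 0;
}

For example, with a free extent [0, 100), a busy extent [40, 60), minlen = 8 and maxlen = 32, the first test succeeds (40 >= 32) and the left segment [0, 40) is chosen, preserving the forward-allocation pattern the comment above argues for.
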
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
new file mode 100644
index 000000000000..671b501f13e5
--- /dev/null
+++ b/fs/xfs/xfs_extent_busy.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * Copyright (c) 2010 David Chinner.
4 * Copyright (c) 2011 Christoph Hellwig.
5 * All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it would be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#ifndef __XFS_EXTENT_BUSY_H__
21#define __XFS_EXTENT_BUSY_H__
22
23/*
24 * Busy block/extent entry. Indexed by an rbtree in perag to mark blocks that
25 * have been freed but whose transactions aren't committed to disk yet.
26 *
27 * Note that we use the transaction ID to record the transaction, not the
28 * transaction structure itself. See xfs_extent_busy_insert() for details.
29 */
30struct xfs_busy_extent {
31 struct rb_node rb_node; /* ag by-bno indexed search tree */
32 struct list_head list; /* transaction busy extent list */
33 xfs_agnumber_t agno;
34 xfs_agblock_t bno;
35 xfs_extlen_t length;
36 unsigned int flags;
37#define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
38#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */
39};
40
41void
42xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
43 xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
44
45void
46xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
47 bool do_discard);
48
49int
50xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
51 xfs_agblock_t bno, xfs_extlen_t len);
52
53void
54xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
55 xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
56
57int
58xfs_alloc_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
59
60static inline void xfs_alloc_busy_sort(struct list_head *list)
61{
62 list_sort(NULL, list, xfs_alloc_busy_ag_cmp);
63}
64
65#endif /* __XFS_EXTENT_BUSY_H__ */
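
The entries declared above live in the per-AG pagb_tree rbtree, which is keyed solely by the starting block number. As a minimal sketch of that invariant, modelled on the search loop in xfs_alloc_busy_search() but reduced to an exact starting-block lookup, and assuming the caller already holds pag->pagb_lock (the function name is hypothetical and not part of this patch):

static struct xfs_busy_extent *
example_busy_lookup(
	struct rb_root		*root,
	xfs_agblock_t		bno)
{
	struct rb_node		*rbp = root->rb_node;

	while (rbp) {
		struct xfs_busy_extent	*busyp =
			rb_entry(rbp, struct xfs_busy_extent, rb_node);

		if (bno < busyp->bno)
			rbp = rbp->rb_left;
		else if (bno > busyp->bno)
			rbp = rbp->rb_right;
		else
			return busyp;	/* exact starting-block match */
	}
	return NULL;
}
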
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 47b139b230d6..a6e3e71e3f88 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -28,6 +28,7 @@
28#include "xfs_mount.h"
29#include "xfs_error.h"
30#include "xfs_alloc.h"
31#include "xfs_extent_busy.h"
32#include "xfs_discard.h"
33
34/*
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d7acb7bab437..d8bdb618ec19 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -35,6 +35,7 @@
35#include "xfs_btree.h"
36#include "xfs_ialloc.h"
37#include "xfs_alloc.h"
38#include "xfs_extent_busy.h"
39#include "xfs_bmap.h"
40#include "xfs_quota.h"
41#include "xfs_trans_priv.h"