Diffstat (limited to 'fs/btrfs/delayed-ref.c')
 fs/btrfs/delayed-ref.c | 181 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 178 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index a73fc23e2961..9a91d1eb0af4 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -10,6 +10,7 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 #include "qgroup.h"
+#include "space-info.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -24,6 +25,179 @@ struct kmem_cache *btrfs_delayed_extent_op_cachep;
  * of hammering updates on the extent allocation tree.
  */
 
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+	bool ret = false;
+	u64 reserved;
+
+	spin_lock(&global_rsv->lock);
+	reserved = global_rsv->reserved;
+	spin_unlock(&global_rsv->lock);
+
+	/*
+	 * Since the global reserve is just kind of magic we don't really want
+	 * to rely on it to save our bacon, so if our size exceeds what the
+	 * delayed_refs_rsv and the global rsv have reserved combined, it's
+	 * time to think about bailing.
+	 */
+	spin_lock(&delayed_refs_rsv->lock);
+	reserved += delayed_refs_rsv->reserved;
+	if (delayed_refs_rsv->size >= reserved)
+		ret = true;
+	spin_unlock(&delayed_refs_rsv->lock);
+	return ret;
+}
+
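Read the predicate as: the rsv's stated need has outgrown everything actually backing it, including the global reserve fallback. A minimal standalone model (userspace C with illustrative byte counts; the real fields live in struct btrfs_block_rsv under spinlocks):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two btrfs_block_rsv fields the check reads. */
struct rsv { uint64_t size, reserved; };

static bool low_on_delayed_ref_space(const struct rsv *delayed,
				     uint64_t global_reserved)
{
	/* true == "think about bailing": need >= everything backing it */
	return delayed->size >= delayed->reserved + global_reserved;
}

int main(void)
{
	struct rsv delayed = { .size = 1 << 20, .reserved = 512 << 10 };

	/* 1M needed vs 512K + 256K backing it -> short on space */
	printf("%d\n", low_on_delayed_ref_space(&delayed, 256 << 10)); /* 1 */
	/* 1M needed vs 512K + 768K backing it -> fine */
	printf("%d\n", low_on_delayed_ref_space(&delayed, 768 << 10)); /* 0 */
	return 0;
}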
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
+{
+	u64 num_entries =
+		atomic_read(&trans->transaction->delayed_refs.num_entries);
+	u64 avg_runtime;
+	u64 val;
+
+	smp_mb();
+	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
+	val = num_entries * avg_runtime;
+	if (val >= NSEC_PER_SEC)
+		return 1;
+	if (val >= NSEC_PER_SEC / 2)
+		return 2;
+
+	return btrfs_check_space_for_delayed_refs(trans->fs_info);
+}
+
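The throttle decision is a cost estimate: pending entries times the average nanoseconds each ref has been taking. A self-contained sketch of that arithmetic (userspace C; the entry counts and runtimes are made-up examples, and where the kernel function falls through to the space check, this model just returns 0):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static int should_throttle(uint64_t num_entries, uint64_t avg_runtime_ns)
{
	uint64_t val = num_entries * avg_runtime_ns;

	if (val >= NSEC_PER_SEC)	/* >= 1s of estimated work */
		return 1;
	if (val >= NSEC_PER_SEC / 2)	/* >= 0.5s of estimated work */
		return 2;
	return 0;			/* kernel: fall back to space check */
}

int main(void)
{
	/* e.g. 20,000 pending refs at ~50us each is ~1s of work */
	printf("%d\n", should_throttle(20000, 50000));	/* prints 1 */
	printf("%d\n", should_throttle(12000, 50000));	/* 0.6s -> prints 2 */
	printf("%d\n", should_throttle(1000, 50000));	/* 0.05s -> prints 0 */
	return 0;
}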
+/**
+ * btrfs_delayed_refs_rsv_release - release a ref head's reservation
+ * @fs_info: the fs_info for our fs
+ * @nr: the number of items to drop
+ *
+ * This drops the delayed ref head's count from the delayed refs rsv and frees
+ * any excess reservation we had.
+ */
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
+	u64 released = 0;
+
+	released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes,
+					     NULL);
+	if (released)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, released, 0);
+}
+
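Both the release and refill paths size themselves with btrfs_calc_trans_metadata_size(). As a rough standalone model (an assumption: around this kernel version the helper computed nodesize * BTRFS_MAX_LEVEL * 2 per item; treat the formula as illustrative):

#include <stdint.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8	/* max height of a btrfs b-tree */

/* Approximation of btrfs_calc_trans_metadata_size(): worst case, each
 * item may need to CoW one full tree path, twice. */
static uint64_t calc_trans_metadata_size(uint64_t nodesize,
					 unsigned int num_items)
{
	return nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

int main(void)
{
	/* one delayed ref head on a 16K-nodesize fs -> 262144 (256KiB) */
	printf("%llu\n",
	       (unsigned long long)calc_trans_metadata_size(16384, 1));
	return 0;
}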
+/**
+ * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
+ * @trans: the trans that may have generated delayed refs
+ *
+ * This is to be called any time we may have adjusted
+ * trans->delayed_ref_updates; it will calculate the additional size and add
+ * it to the delayed_refs_rsv.
+ */
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes;
+
+	if (!trans->delayed_ref_updates)
+		return;
+
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info,
+						   trans->delayed_ref_updates);
+	spin_lock(&delayed_rsv->lock);
+	delayed_rsv->size += num_bytes;
+	delayed_rsv->full = 0;
+	spin_unlock(&delayed_rsv->lock);
+	trans->delayed_ref_updates = 0;
+}
+
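The intended call pattern, as a hedged sketch of a hypothetical call site (not code from this patch): the path that queues delayed refs bumps trans->delayed_ref_updates, then this helper folds the pending cost into the rsv and resets the counter.

	/* Hypothetical call site, for illustration only. */
	trans->delayed_ref_updates++;		/* queued one more ref update */
	btrfs_update_delayed_refs_rsv(trans);	/* grow rsv size, clear full */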
+/**
+ * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv
+ * @fs_info: the fs info for our fs
+ * @src: the source block rsv to transfer from
+ * @num_bytes: the number of bytes to transfer
+ *
+ * This transfers up to the num_bytes amount from the src rsv to the
+ * delayed_refs_rsv. Any extra bytes are returned to the space info.
+ */
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *src,
+				       u64 num_bytes)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	u64 to_free = 0;
+
+	spin_lock(&src->lock);
+	src->reserved -= num_bytes;
+	src->size -= num_bytes;
+	spin_unlock(&src->lock);
+
+	spin_lock(&delayed_refs_rsv->lock);
+	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+		u64 delta = delayed_refs_rsv->size -
+			delayed_refs_rsv->reserved;
+
+		if (num_bytes > delta) {
+			to_free = num_bytes - delta;
+			num_bytes = delta;
+		}
+	} else {
+		to_free = num_bytes;
+		num_bytes = 0;
+	}
+
+	if (num_bytes)
+		delayed_refs_rsv->reserved += num_bytes;
+	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
+		delayed_refs_rsv->full = 1;
+	spin_unlock(&delayed_refs_rsv->lock);
+
+	if (num_bytes)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, num_bytes, 1);
+	if (to_free)
+		btrfs_space_info_add_old_bytes(fs_info,
+				delayed_refs_rsv->space_info, to_free);
+}
+
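The branchy middle of the transfer is easiest to see with numbers. A self-contained model of the bookkeeping (userspace C; struct rsv stands in for the two btrfs_block_rsv counters, and the locking and tracepoints are omitted):

#include <stdint.h>
#include <stdio.h>

struct rsv { uint64_t size, reserved; };

/* Move num_bytes toward dst; anything beyond dst's deficit is "freed". */
static uint64_t migrate(struct rsv *dst, uint64_t num_bytes)
{
	uint64_t to_free = 0;

	if (dst->size > dst->reserved) {
		uint64_t delta = dst->size - dst->reserved;

		if (num_bytes > delta) {
			to_free = num_bytes - delta;	/* surplus */
			num_bytes = delta;		/* keep only the deficit */
		}
	} else {
		to_free = num_bytes;	/* already full: keep nothing */
		num_bytes = 0;
	}
	dst->reserved += num_bytes;
	return to_free;		/* bytes the caller returns to the space info */
}

int main(void)
{
	struct rsv d = { .size = 96 * 1024, .reserved = 64 * 1024 };

	/* deficit is 32K, so a 48K transfer keeps 32K and frees 16K */
	printf("freed %llu\n", (unsigned long long)migrate(&d, 48 * 1024));
	return 0;
}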
+/**
+ * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage
+ * @fs_info: the fs_info for our fs
+ * @flush: control how we can flush for this reservation
+ *
+ * This will refill the delayed block_rsv up to one item's worth of space and
+ * will return -ENOSPC if we can't make the reservation.
+ */
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+				  enum btrfs_reserve_flush_enum flush)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
+	u64 num_bytes = 0;
+	int ret = -ENOSPC;
+
+	spin_lock(&block_rsv->lock);
+	if (block_rsv->reserved < block_rsv->size) {
+		num_bytes = block_rsv->size - block_rsv->reserved;
+		num_bytes = min(num_bytes, limit);
+	}
+	spin_unlock(&block_rsv->lock);
+
+	if (!num_bytes)
+		return 0;
+
+	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
+					   num_bytes, flush);
+	if (ret)
+		return ret;
+	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
+	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+				      0, num_bytes, 1);
+	return 0;
+}
+
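Refill tops the rsv up by at most one item's worth per call: the deficit is clamped to the single-item limit before any reservation is attempted. A standalone model of that clamp (userspace C, illustrative sizes):

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* How many bytes a single refill call would try to reserve. */
static uint64_t refill_amount(uint64_t size, uint64_t reserved, uint64_t limit)
{
	if (reserved >= size)
		return 0;	/* already full: nothing to reserve */
	return min_u64(size - reserved, limit);
}

int main(void)
{
	const uint64_t one_item = 262144;	/* 16K-nodesize model above */

	/* 1M behind, but one call only reserves one item's worth */
	printf("%llu\n", (unsigned long long)
	       refill_amount(2 << 20, 1 << 20, one_item));	/* 262144 */
	return 0;
}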
 /*
  * compare two delayed tree backrefs with same bytenr and type
  */
@@ -957,13 +1131,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 }
 
 /*
- * this does a simple search for the head node for a given extent.
- * It must be called with the delayed ref spinlock held, and it returns
- * the head node if any where found, or NULL if not.
+ * This does a simple search for the head node for a given extent. Returns the
+ * head node if found, or NULL if not.
  */
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
 {
+	lockdep_assert_held(&delayed_refs->lock);
+
 	return find_ref_head(delayed_refs, bytenr, false);
 }
 
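The new lockdep_assert_held() turns the old comment's locking requirement into a runtime check on lockdep-enabled kernels. A hedged kernel-context sketch of the expected caller shape (illustrative, not code from this patch):

	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		/* operate on the head while still holding the lock */
	}
	spin_unlock(&delayed_refs->lock);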