about summary refs log tree commit diff stats
path: root/fs/btrfs/file.c
diff options
context:
space:
mode:
authorMiao Xie <miaox@cn.fujitsu.com>2012-11-26 04:26:20 -0500
committerChris Mason <chris.mason@fusionio.com>2012-12-16 20:46:12 -0500
commit26176e7c2aa923327becdc25b5aca2cb907ac932 (patch)
treed65f57b99fd2045b5d4299784b23ea249a68caad /fs/btrfs/file.c
parent8ddc473433b5e8ce8693db9f6e251f5a28267528 (diff)
Btrfs: restructure btrfs_run_defrag_inodes()
This patch restructures btrfs_run_defrag_inodes() and makes the code of the auto defragment more readable. Signed-off-by: Miao Xie <miaox@cn.fujitsu.com> Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--fs/btrfs/file.c197
1 file changed, 107 insertions, 90 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 00918321e390..3c6f7479cd5b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -216,11 +216,11 @@ out:
216} 216}
217 217
218/* 218/*
219 * must be called with the defrag_inodes lock held 219 * pick the defragable inode that we want, if it doesn't exist, we will get
220 * the next one.
220 */ 221 */
221struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, 222static struct inode_defrag *
222 u64 root, u64 ino, 223btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
223 struct rb_node **next)
224{ 224{
225 struct inode_defrag *entry = NULL; 225 struct inode_defrag *entry = NULL;
226 struct inode_defrag tmp; 226 struct inode_defrag tmp;
@@ -231,7 +231,8 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
231 tmp.ino = ino; 231 tmp.ino = ino;
232 tmp.root = root; 232 tmp.root = root;
233 233
234 p = info->defrag_inodes.rb_node; 234 spin_lock(&fs_info->defrag_inodes_lock);
235 p = fs_info->defrag_inodes.rb_node;
235 while (p) { 236 while (p) {
236 parent = p; 237 parent = p;
237 entry = rb_entry(parent, struct inode_defrag, rb_node); 238 entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -242,52 +243,128 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
242 else if (ret > 0) 243 else if (ret > 0)
243 p = parent->rb_right; 244 p = parent->rb_right;
244 else 245 else
245 return entry; 246 goto out;
246 } 247 }
247 248
248 if (next) { 249 if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
249 while (parent && __compare_inode_defrag(&tmp, entry) > 0) { 250 parent = rb_next(parent);
250 parent = rb_next(parent); 251 if (parent)
251 entry = rb_entry(parent, struct inode_defrag, rb_node); 252 entry = rb_entry(parent, struct inode_defrag, rb_node);
252 } 253 else
253 *next = parent; 254 entry = NULL;
254 } 255 }
255 return NULL; 256out:
257 if (entry)
258 rb_erase(parent, &fs_info->defrag_inodes);
259 spin_unlock(&fs_info->defrag_inodes_lock);
260 return entry;
256} 261}
257 262
258/* 263void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
259 * run through the list of inodes in the FS that need
260 * defragging
261 */
262int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
263{ 264{
264 struct inode_defrag *defrag; 265 struct inode_defrag *defrag;
266 struct rb_node *node;
267
268 spin_lock(&fs_info->defrag_inodes_lock);
269 node = rb_first(&fs_info->defrag_inodes);
270 while (node) {
271 rb_erase(node, &fs_info->defrag_inodes);
272 defrag = rb_entry(node, struct inode_defrag, rb_node);
273 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
274
275 if (need_resched()) {
276 spin_unlock(&fs_info->defrag_inodes_lock);
277 cond_resched();
278 spin_lock(&fs_info->defrag_inodes_lock);
279 }
280
281 node = rb_first(&fs_info->defrag_inodes);
282 }
283 spin_unlock(&fs_info->defrag_inodes_lock);
284}
285
286#define BTRFS_DEFRAG_BATCH 1024
287
288static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
289 struct inode_defrag *defrag)
290{
265 struct btrfs_root *inode_root; 291 struct btrfs_root *inode_root;
266 struct inode *inode; 292 struct inode *inode;
267 struct rb_node *n;
268 struct btrfs_key key; 293 struct btrfs_key key;
269 struct btrfs_ioctl_defrag_range_args range; 294 struct btrfs_ioctl_defrag_range_args range;
270 u64 first_ino = 0;
271 u64 root_objectid = 0;
272 int num_defrag; 295 int num_defrag;
273 int defrag_batch = 1024;
274 296
297 /* get the inode */
298 key.objectid = defrag->root;
299 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
300 key.offset = (u64)-1;
301 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
302 if (IS_ERR(inode_root)) {
303 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
304 return PTR_ERR(inode_root);
305 }
306
307 key.objectid = defrag->ino;
308 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
309 key.offset = 0;
310 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
311 if (IS_ERR(inode)) {
312 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
313 return PTR_ERR(inode);
314 }
315
316 /* do a chunk of defrag */
317 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
275 memset(&range, 0, sizeof(range)); 318 memset(&range, 0, sizeof(range));
276 range.len = (u64)-1; 319 range.len = (u64)-1;
320 range.start = defrag->last_offset;
321 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
322 BTRFS_DEFRAG_BATCH);
323 /*
324 * if we filled the whole defrag batch, there
325 * must be more work to do. Queue this defrag
326 * again
327 */
328 if (num_defrag == BTRFS_DEFRAG_BATCH) {
329 defrag->last_offset = range.start;
330 btrfs_requeue_inode_defrag(inode, defrag);
331 } else if (defrag->last_offset && !defrag->cycled) {
332 /*
333 * we didn't fill our defrag batch, but
334 * we didn't start at zero. Make sure we loop
335 * around to the start of the file.
336 */
337 defrag->last_offset = 0;
338 defrag->cycled = 1;
339 btrfs_requeue_inode_defrag(inode, defrag);
340 } else {
341 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
342 }
343
344 iput(inode);
345 return 0;
346}
347
348/*
349 * run through the list of inodes in the FS that need
350 * defragging
351 */
352int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
353{
354 struct inode_defrag *defrag;
355 u64 first_ino = 0;
356 u64 root_objectid = 0;
277 357
278 atomic_inc(&fs_info->defrag_running); 358 atomic_inc(&fs_info->defrag_running);
279 spin_lock(&fs_info->defrag_inodes_lock);
280 while(1) { 359 while(1) {
281 n = NULL; 360 if (!__need_auto_defrag(fs_info->tree_root))
361 break;
282 362
283 /* find an inode to defrag */ 363 /* find an inode to defrag */
284 defrag = btrfs_find_defrag_inode(fs_info, root_objectid, 364 defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
285 first_ino, &n); 365 first_ino);
286 if (!defrag) { 366 if (!defrag) {
287 if (n) { 367 if (root_objectid || first_ino) {
288 defrag = rb_entry(n, struct inode_defrag,
289 rb_node);
290 } else if (root_objectid || first_ino) {
291 root_objectid = 0; 368 root_objectid = 0;
292 first_ino = 0; 369 first_ino = 0;
293 continue; 370 continue;
@@ -296,71 +373,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
296 } 373 }
297 } 374 }
298 375
299 /* remove it from the rbtree */
300 first_ino = defrag->ino + 1; 376 first_ino = defrag->ino + 1;
301 root_objectid = defrag->root; 377 root_objectid = defrag->root;
302 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
303
304 if (btrfs_fs_closing(fs_info))
305 goto next_free;
306 378
307 spin_unlock(&fs_info->defrag_inodes_lock); 379 __btrfs_run_defrag_inode(fs_info, defrag);
308
309 /* get the inode */
310 key.objectid = defrag->root;
311 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
312 key.offset = (u64)-1;
313 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
314 if (IS_ERR(inode_root))
315 goto next;
316
317 key.objectid = defrag->ino;
318 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
319 key.offset = 0;
320
321 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
322 if (IS_ERR(inode))
323 goto next;
324
325 /* do a chunk of defrag */
326 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
327 range.start = defrag->last_offset;
328 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
329 defrag_batch);
330 /*
331 * if we filled the whole defrag batch, there
332 * must be more work to do. Queue this defrag
333 * again
334 */
335 if (num_defrag == defrag_batch) {
336 defrag->last_offset = range.start;
337 btrfs_requeue_inode_defrag(inode, defrag);
338 /*
339 * we don't want to kfree defrag, we added it back to
340 * the rbtree
341 */
342 defrag = NULL;
343 } else if (defrag->last_offset && !defrag->cycled) {
344 /*
345 * we didn't fill our defrag batch, but
346 * we didn't start at zero. Make sure we loop
347 * around to the start of the file.
348 */
349 defrag->last_offset = 0;
350 defrag->cycled = 1;
351 btrfs_requeue_inode_defrag(inode, defrag);
352 defrag = NULL;
353 }
354
355 iput(inode);
356next:
357 spin_lock(&fs_info->defrag_inodes_lock);
358next_free:
359 if (defrag)
360 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
361 } 380 }
362 spin_unlock(&fs_info->defrag_inodes_lock);
363
364 atomic_dec(&fs_info->defrag_running); 381 atomic_dec(&fs_info->defrag_running);
365 382
366 /* 383 /*