path: root/fs/btrfs/free-space-cache.c
author		Josef Bacik <josef@redhat.com>	2011-10-05 15:18:58 -0400
committer	Josef Bacik <josef@redhat.com>	2011-10-19 15:12:52 -0400
commit		a67509c30079f4c5025fb19ea443fb2906c3a85e (patch)
tree		ff8bd615b4fd447c044f06f29e2ad3b65db35ea2 /fs/btrfs/free-space-cache.c
parent		f75b130e9bb361850787e156c79311adb84f551e (diff)
Btrfs: add an io_ctl struct and helpers for dealing with the space cache
When writing and reading the space cache we have one big loop that keeps track of which page we are on, and then a bunch of sizeable loops underneath it to do the actual reading and writing. Especially in the write case this makes things hugely complicated and hard to follow, and makes our error checking and recovery equally complex.

So add an io_ctl struct with a set of helpers that keep track of the pages we have, where we are, and whether we have enough space left. This unifies how we deal with the pages we're writing and keeps all the messy tracking internal, which lets us kill the big loops in both the read and write case and makes reviewing and changing the write and read paths much simpler. I've run xfstests and stress.sh on this code and it survives. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
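To make the new flow concrete, here is a condensed sketch of the write path as it looks after this patch. It is illustrative only, distilled from __btrfs_write_out_cache() in the diff below (error handling, the cluster walk, and the pinned-extent pass are omitted); the rb-tree walked here is assumed to be the ctl's free_space_offset tree:

	struct io_ctl io_ctl;
	struct rb_node *node;

	io_ctl_init(&io_ctl, inode, root);		/* i_size -> num_pages, alloc pages array */
	io_ctl_prepare_pages(&io_ctl, inode, 0);	/* find_or_create + lock every cache page */
	io_ctl_set_generation(&io_ctl, trans->transid);	/* page 0: bogus crc slot + generation */

	/* One call per free-space entry; the io_ctl maps/unmaps pages
	 * internally and returns -ENOSPC when the cache file is full. */
	for (node = rb_first(&ctl->free_space_offset); node; node = rb_next(node)) {
		struct btrfs_free_space *e =
			rb_entry(node, struct btrfs_free_space, offset_index);
		if (io_ctl_add_entry(&io_ctl, e->offset, e->bytes, e->bitmap))
			goto out_nospc;
	}

	io_ctl_zero_remaining_pages(&io_ctl);		/* zero-fill the unused tail pages */
	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);			/* unmap, unlock and release the pages */
	io_ctl_free(&io_ctl);				/* free the pages array */

The read path drives the same object through io_ctl_prepare_pages(&io_ctl, inode, 1) (pages read up to date), io_ctl_check_generation(), and io_ctl_read_entry()/io_ctl_read_bitmap().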
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	693
1 file changed, 375 insertions(+), 318 deletions(-)
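The helpers encode a small amount of on-disk layout worth keeping in mind while reading the diff: the first u64 of page 0 is reserved for a deliberately invalid crc (the checksum of the rest of the page, incremented by one) so that older kernels fail validation and discard the cache, the second u64 holds the free-space cache generation, and everything after that is a packed run of btrfs_free_space_entry records, with each bitmap occupying a full page of its own. A descriptive sketch (this struct is ours for illustration; it does not appear in the patch):

	/* Hypothetical view of the head of cache page 0. */
	struct cache_page0_hdr {
		__le64 crc_slot;	/* u32 crc of bytes 8..PAGE_CACHE_SIZE-1, plus one */
		__le64 generation;	/* verified by io_ctl_check_generation() */
		/* struct btrfs_free_space_entry entries[] follow */
	};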
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b81556ca75ea..35bfc13c9d42 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -241,27 +241,275 @@ static int readahead_cache(struct inode *inode)
 	return 0;
 }
 
+struct io_ctl {
+	void *cur, *orig;
+	struct page *page;
+	struct page **pages;
+	struct btrfs_root *root;
+	unsigned long size;
+	int index;
+	int num_pages;
+};
+
+static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
+		       struct btrfs_root *root)
+{
+	memset(io_ctl, 0, sizeof(struct io_ctl));
+	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+		PAGE_CACHE_SHIFT;
+	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
+				GFP_NOFS);
+	if (!io_ctl->pages)
+		return -ENOMEM;
+	io_ctl->root = root;
+	return 0;
+}
+
+static void io_ctl_free(struct io_ctl *io_ctl)
+{
+	kfree(io_ctl->pages);
+}
+
+static void io_ctl_unmap_page(struct io_ctl *io_ctl)
+{
+	if (io_ctl->cur) {
+		kunmap(io_ctl->page);
+		io_ctl->cur = NULL;
+		io_ctl->orig = NULL;
+	}
+}
+
+static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
+{
+	WARN_ON(io_ctl->cur);
+	BUG_ON(io_ctl->index >= io_ctl->num_pages);
+	io_ctl->page = io_ctl->pages[io_ctl->index++];
+	io_ctl->cur = kmap(io_ctl->page);
+	io_ctl->orig = io_ctl->cur;
+	io_ctl->size = PAGE_CACHE_SIZE;
+	if (clear)
+		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
+}
+
+static void io_ctl_drop_pages(struct io_ctl *io_ctl)
+{
+	int i;
+
+	io_ctl_unmap_page(io_ctl);
+
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		ClearPageChecked(io_ctl->pages[i]);
+		unlock_page(io_ctl->pages[i]);
+		page_cache_release(io_ctl->pages[i]);
+	}
+}
+
+static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
+				int uptodate)
+{
+	struct page *page;
+	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
+	int i;
+
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		page = find_or_create_page(inode->i_mapping, i, mask);
+		if (!page) {
+			io_ctl_drop_pages(io_ctl);
+			return -ENOMEM;
+		}
+		io_ctl->pages[i] = page;
+		if (uptodate && !PageUptodate(page)) {
+			btrfs_readpage(NULL, page);
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				printk(KERN_ERR "btrfs: error reading free "
+				       "space cache\n");
+				io_ctl_drop_pages(io_ctl);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
+{
+	u64 *val;
+
+	io_ctl_map_page(io_ctl, 1);
+
+	/*
+	 * Skip the first 64bits to make sure theres a bogus crc for old
+	 * kernels
+	 */
+	io_ctl->cur += sizeof(u64);
+
+	val = io_ctl->cur;
+	*val = cpu_to_le64(generation);
+	io_ctl->cur += sizeof(u64);
+	io_ctl->size -= sizeof(u64) * 2;
+}
+
+static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
+{
+	u64 *gen;
+
+	io_ctl_map_page(io_ctl, 0);
+
+	/* Skip the bogus crc area */
+	io_ctl->cur += sizeof(u64);
+	gen = io_ctl->cur;
+	if (le64_to_cpu(*gen) != generation) {
+		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
+				   "(%Lu) does not match inode (%Lu)\n", *gen,
+				   generation);
+		io_ctl_unmap_page(io_ctl);
+		return -EIO;
+	}
+	io_ctl->cur += sizeof(u64);
+	io_ctl->size -= sizeof(u64) * 2;
+	return 0;
+}
+
+static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
+			    void *bitmap)
+{
+	struct btrfs_free_space_entry *entry;
+
+	if (!io_ctl->cur)
+		return -ENOSPC;
+
+	entry = io_ctl->cur;
+	entry->offset = cpu_to_le64(offset);
+	entry->bytes = cpu_to_le64(bytes);
+	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
+		BTRFS_FREE_SPACE_EXTENT;
+	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
+	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
+
+	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
+		return 0;
+
+	/*
+	 * index == 1 means the current page is 0, we need to generate a bogus
+	 * crc for older kernels.
+	 */
+	if (io_ctl->index == 1) {
+		u32 *tmp;
+		u32 crc = ~(u32)0;
+
+		crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + sizeof(u64),
+				      crc, PAGE_CACHE_SIZE - sizeof(u64));
+		btrfs_csum_final(crc, (char *)&crc);
+		crc++;
+		tmp = io_ctl->orig;
+		*tmp = crc;
+	}
+	io_ctl_unmap_page(io_ctl);
+
+	/* No more pages to map */
+	if (io_ctl->index >= io_ctl->num_pages)
+		return 0;
+
+	/* map the next page */
+	io_ctl_map_page(io_ctl, 1);
+	return 0;
+}
+
+static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
+{
+	if (!io_ctl->cur)
+		return -ENOSPC;
+
+	/*
+	 * If we aren't at the start of the current page, unmap this one and
+	 * map the next one if there is any left.
+	 */
+	if (io_ctl->cur != io_ctl->orig) {
+		io_ctl_unmap_page(io_ctl);
+		if (io_ctl->index >= io_ctl->num_pages)
+			return -ENOSPC;
+		io_ctl_map_page(io_ctl, 0);
+	}
+
+	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
+	io_ctl_unmap_page(io_ctl);
+	if (io_ctl->index < io_ctl->num_pages)
+		io_ctl_map_page(io_ctl, 0);
+	return 0;
+}
+
+static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
+{
+	io_ctl_unmap_page(io_ctl);
+
+	while (io_ctl->index < io_ctl->num_pages) {
+		io_ctl_map_page(io_ctl, 1);
+		io_ctl_unmap_page(io_ctl);
+	}
+}
+
+static u8 io_ctl_read_entry(struct io_ctl *io_ctl,
+			    struct btrfs_free_space *entry)
+{
+	struct btrfs_free_space_entry *e;
+	u8 type;
+
+	e = io_ctl->cur;
+	entry->offset = le64_to_cpu(e->offset);
+	entry->bytes = le64_to_cpu(e->bytes);
+	type = e->type;
+	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
+	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
+
+	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
+		return type;
+
+	io_ctl_unmap_page(io_ctl);
+
+	if (io_ctl->index >= io_ctl->num_pages)
+		return type;
+
+	io_ctl_map_page(io_ctl, 0);
+	return type;
+}
+
+static void io_ctl_read_bitmap(struct io_ctl *io_ctl,
+			       struct btrfs_free_space *entry)
+{
+	BUG_ON(!io_ctl->cur);
+	if (io_ctl->cur != io_ctl->orig) {
+		io_ctl_unmap_page(io_ctl);
+		io_ctl_map_page(io_ctl, 0);
+	}
+	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
+	io_ctl_unmap_page(io_ctl);
+	if (io_ctl->index < io_ctl->num_pages)
+		io_ctl_map_page(io_ctl, 0);
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			    struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_path *path, u64 offset)
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
-	struct page *page;
+	struct io_ctl io_ctl;
 	struct btrfs_key key;
+	struct btrfs_free_space *e, *n;
 	struct list_head bitmaps;
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
-	pgoff_t index = 0;
-	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
+	u8 type;
 	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);
 
 	/* Nothing in the space cache, goodbye */
 	if (!i_size_read(inode))
-		goto out;
+		return 0;
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
@@ -269,11 +517,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto out;
+		return 0;
 	else if (ret > 0) {
 		btrfs_release_path(path);
-		ret = 0;
-		goto out;
+		return 0;
 	}
 
 	ret = -1;
@@ -291,170 +538,89 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
291 " not match free space cache generation (%llu)\n", 538 " not match free space cache generation (%llu)\n",
292 (unsigned long long)BTRFS_I(inode)->generation, 539 (unsigned long long)BTRFS_I(inode)->generation,
293 (unsigned long long)generation); 540 (unsigned long long)generation);
294 goto out; 541 return 0;
295 } 542 }
296 543
297 if (!num_entries) 544 if (!num_entries)
298 goto out; 545 return 0;
299 546
547 io_ctl_init(&io_ctl, inode, root);
300 ret = readahead_cache(inode); 548 ret = readahead_cache(inode);
301 if (ret) 549 if (ret)
302 goto out; 550 goto out;
303 551
304 while (1) { 552 ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
305 struct btrfs_free_space_entry *entry; 553 if (ret)
306 struct btrfs_free_space *e; 554 goto out;
307 void *addr;
308 unsigned long offset = 0;
309 int need_loop = 0;
310 555
311 if (!num_entries && !num_bitmaps) 556 ret = io_ctl_check_generation(&io_ctl, generation);
312 break; 557 if (ret)
558 goto free_cache;
313 559
314 page = find_or_create_page(inode->i_mapping, index, mask); 560 while (num_entries) {
315 if (!page) 561 e = kmem_cache_zalloc(btrfs_free_space_cachep,
562 GFP_NOFS);
563 if (!e)
316 goto free_cache; 564 goto free_cache;
317 565
318 if (!PageUptodate(page)) { 566 type = io_ctl_read_entry(&io_ctl, e);
319 btrfs_readpage(NULL, page); 567 if (!e->bytes) {
320 lock_page(page); 568 kmem_cache_free(btrfs_free_space_cachep, e);
321 if (!PageUptodate(page)) { 569 goto free_cache;
322 unlock_page(page);
323 page_cache_release(page);
324 printk(KERN_ERR "btrfs: error reading free "
325 "space cache\n");
326 goto free_cache;
327 }
328 } 570 }
329 addr = kmap(page);
330
331 if (index == 0) {
332 u64 *gen;
333 571
334 /* 572 if (type == BTRFS_FREE_SPACE_EXTENT) {
335 * We put a bogus crc in the front of the first page in 573 spin_lock(&ctl->tree_lock);
336 * case old kernels try to mount a fs with the new 574 ret = link_free_space(ctl, e);
337 * format to make sure they discard the cache. 575 spin_unlock(&ctl->tree_lock);
338 */ 576 if (ret) {
339 addr += sizeof(u64); 577 printk(KERN_ERR "Duplicate entries in "
340 offset += sizeof(u64); 578 "free space cache, dumping\n");
341 579 kmem_cache_free(btrfs_free_space_cachep, e);
342 gen = addr;
343 if (*gen != BTRFS_I(inode)->generation) {
344 printk_ratelimited(KERN_ERR "btrfs: space cache"
345 " generation (%llu) does not match "
346 "inode (%llu)\n",
347 (unsigned long long)*gen,
348 (unsigned long long)
349 BTRFS_I(inode)->generation);
350 kunmap(page);
351 unlock_page(page);
352 page_cache_release(page);
353 goto free_cache; 580 goto free_cache;
354 } 581 }
355 addr += sizeof(u64); 582 } else {
356 offset += sizeof(u64); 583 BUG_ON(!num_bitmaps);
357 } 584 num_bitmaps--;
358 entry = addr; 585 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
359 586 if (!e->bitmap) {
360 while (1) { 587 kmem_cache_free(
361 if (!num_entries) 588 btrfs_free_space_cachep, e);
362 break;
363
364 need_loop = 1;
365 e = kmem_cache_zalloc(btrfs_free_space_cachep,
366 GFP_NOFS);
367 if (!e) {
368 kunmap(page);
369 unlock_page(page);
370 page_cache_release(page);
371 goto free_cache; 589 goto free_cache;
372 } 590 }
373 591 spin_lock(&ctl->tree_lock);
374 e->offset = le64_to_cpu(entry->offset); 592 ret = link_free_space(ctl, e);
375 e->bytes = le64_to_cpu(entry->bytes); 593 ctl->total_bitmaps++;
376 if (!e->bytes) { 594 ctl->op->recalc_thresholds(ctl);
377 kunmap(page); 595 spin_unlock(&ctl->tree_lock);
596 if (ret) {
597 printk(KERN_ERR "Duplicate entries in "
598 "free space cache, dumping\n");
378 kmem_cache_free(btrfs_free_space_cachep, e); 599 kmem_cache_free(btrfs_free_space_cachep, e);
379 unlock_page(page);
380 page_cache_release(page);
381 goto free_cache; 600 goto free_cache;
382 } 601 }
383 602 list_add_tail(&e->list, &bitmaps);
384 if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
385 spin_lock(&ctl->tree_lock);
386 ret = link_free_space(ctl, e);
387 spin_unlock(&ctl->tree_lock);
388 if (ret) {
389 printk(KERN_ERR "Duplicate entries in "
390 "free space cache, dumping\n");
391 kunmap(page);
392 unlock_page(page);
393 page_cache_release(page);
394 goto free_cache;
395 }
396 } else {
397 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
398 if (!e->bitmap) {
399 kunmap(page);
400 kmem_cache_free(
401 btrfs_free_space_cachep, e);
402 unlock_page(page);
403 page_cache_release(page);
404 goto free_cache;
405 }
406 spin_lock(&ctl->tree_lock);
407 ret = link_free_space(ctl, e);
408 ctl->total_bitmaps++;
409 ctl->op->recalc_thresholds(ctl);
410 spin_unlock(&ctl->tree_lock);
411 if (ret) {
412 printk(KERN_ERR "Duplicate entries in "
413 "free space cache, dumping\n");
414 kunmap(page);
415 unlock_page(page);
416 page_cache_release(page);
417 goto free_cache;
418 }
419 list_add_tail(&e->list, &bitmaps);
420 }
421
422 num_entries--;
423 offset += sizeof(struct btrfs_free_space_entry);
424 if (offset + sizeof(struct btrfs_free_space_entry) >=
425 PAGE_CACHE_SIZE)
426 break;
427 entry++;
428 } 603 }
429 604
430 /* 605 num_entries--;
431 * We read an entry out of this page, we need to move on to the 606 }
432 * next page.
433 */
434 if (need_loop) {
435 kunmap(page);
436 goto next;
437 }
438 607
439 /* 608 /*
440 * We add the bitmaps at the end of the entries in order that 609 * We add the bitmaps at the end of the entries in order that
441 * the bitmap entries are added to the cache. 610 * the bitmap entries are added to the cache.
442 */ 611 */
443 e = list_entry(bitmaps.next, struct btrfs_free_space, list); 612 list_for_each_entry_safe(e, n, &bitmaps, list) {
444 list_del_init(&e->list); 613 list_del_init(&e->list);
445 memcpy(e->bitmap, addr, PAGE_CACHE_SIZE); 614 io_ctl_read_bitmap(&io_ctl, e);
446 kunmap(page);
447 num_bitmaps--;
448next:
449 unlock_page(page);
450 page_cache_release(page);
451 index++;
452 } 615 }
453 616
617 io_ctl_drop_pages(&io_ctl);
454 ret = 1; 618 ret = 1;
455out: 619out:
620 io_ctl_free(&io_ctl);
456 return ret; 621 return ret;
457free_cache: 622free_cache:
623 io_ctl_drop_pages(&io_ctl);
458 __btrfs_remove_free_space_cache(ctl); 624 __btrfs_remove_free_space_cache(ctl);
459 goto out; 625 goto out;
460} 626}
@@ -554,40 +720,28 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	struct extent_buffer *leaf;
 	struct rb_node *node;
 	struct list_head *pos, *n;
-	struct page **pages;
-	struct page *page;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_free_cluster *cluster = NULL;
 	struct extent_io_tree *unpin = NULL;
+	struct io_ctl io_ctl;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
 	u64 start, end, len;
-	u64 bytes = 0;
-	u32 crc = ~(u32)0;
-	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
-	int index = 0, num_pages = 0;
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
 	int err = -1;
-	bool next_page = false;
-	bool out_of_space = false;
 
 	INIT_LIST_HEAD(&bitmap_list);
 
 	if (!i_size_read(inode))
 		return -1;
 
-	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-		PAGE_CACHE_SHIFT;
-
 	filemap_write_and_wait(inode->i_mapping);
 	btrfs_wait_ordered_range(inode, inode->i_size &
 				 ~(root->sectorsize - 1), (u64)-1);
 
-	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
-	if (!pages)
-		return -1;
+	io_ctl_init(&io_ctl, inode, root);
 
 	/* Get the cluster for this block_group if it exists */
 	if (block_group && !list_empty(&block_group->cluster_list))
@@ -601,30 +755,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 */
 	unpin = root->fs_info->pinned_extents;
 
-	/*
-	 * Lock all pages first so we can lock the extent safely.
-	 *
-	 * NOTE: Because we hold the ref the entire time we're going to write to
-	 * the page find_get_page should never fail, so we don't do a check
-	 * after find_get_page at this point. Just putting this here so people
-	 * know and don't freak out.
-	 */
-	while (index < num_pages) {
-		page = find_or_create_page(inode->i_mapping, index, mask);
-		if (!page) {
-			int i;
-
-			for (i = 0; i < num_pages; i++) {
-				unlock_page(pages[i]);
-				page_cache_release(pages[i]);
-			}
-			goto out;
-		}
-		pages[index] = page;
-		index++;
-	}
+	/* Lock all pages first so we can lock the extent safely. */
+	io_ctl_prepare_pages(&io_ctl, inode, 0);
 
-	index = 0;
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
@@ -641,166 +774,78 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		cluster = NULL;
 	}
 
-	/* Write out the extent entries */
-	do {
-		struct btrfs_free_space_entry *entry;
-		void *addr, *orig;
-		unsigned long offset = 0;
-
-		next_page = false;
-
-		if (index >= num_pages) {
-			out_of_space = true;
-			break;
-		}
-
-		page = pages[index];
-
-		orig = addr = kmap(page);
-		if (index == 0) {
-			u64 *gen;
-
-			/*
-			 * We're going to put in a bogus crc for this page to
-			 * make sure that old kernels who aren't aware of this
-			 * format will be sure to discard the cache.
-			 */
-			addr += sizeof(u64);
-			offset += sizeof(u64);
-
-			gen = addr;
-			*gen = trans->transid;
-			addr += sizeof(u64);
-			offset += sizeof(u64);
-		}
-		entry = addr;
-
-		memset(addr, 0, PAGE_CACHE_SIZE - offset);
-		while (node && !next_page) {
-			struct btrfs_free_space *e;
-
-			e = rb_entry(node, struct btrfs_free_space, offset_index);
-			entries++;
-
-			entry->offset = cpu_to_le64(e->offset);
-			entry->bytes = cpu_to_le64(e->bytes);
-			if (e->bitmap) {
-				entry->type = BTRFS_FREE_SPACE_BITMAP;
-				list_add_tail(&e->list, &bitmap_list);
-				bitmaps++;
-			} else {
-				entry->type = BTRFS_FREE_SPACE_EXTENT;
-			}
-			node = rb_next(node);
-			if (!node && cluster) {
-				node = rb_first(&cluster->root);
-				cluster = NULL;
-			}
-			offset += sizeof(struct btrfs_free_space_entry);
-			if (offset + sizeof(struct btrfs_free_space_entry) >=
-			    PAGE_CACHE_SIZE)
-				next_page = true;
-			entry++;
-		}
-
-		/*
-		 * We want to add any pinned extents to our free space cache
-		 * so we don't leak the space
-		 */
-		while (block_group && !next_page &&
-		       (start < block_group->key.objectid +
-			block_group->key.offset)) {
-			ret = find_first_extent_bit(unpin, start, &start, &end,
-						    EXTENT_DIRTY);
-			if (ret) {
-				ret = 0;
-				break;
-			}
-
-			/* This pinned extent is out of our range */
-			if (start >= block_group->key.objectid +
-			    block_group->key.offset)
-				break;
-
-			len = block_group->key.objectid +
-			      block_group->key.offset - start;
-			len = min(len, end + 1 - start);
-
-			entries++;
-			entry->offset = cpu_to_le64(start);
-			entry->bytes = cpu_to_le64(len);
-			entry->type = BTRFS_FREE_SPACE_EXTENT;
-
-			start = end + 1;
-			offset += sizeof(struct btrfs_free_space_entry);
-			if (offset + sizeof(struct btrfs_free_space_entry) >=
-			    PAGE_CACHE_SIZE)
-				next_page = true;
-			entry++;
-		}
-
-		/* Generate bogus crc value */
-		if (index == 0) {
-			u32 *tmp;
-			crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
-					      PAGE_CACHE_SIZE - sizeof(u64));
-			btrfs_csum_final(crc, (char *)&crc);
-			crc++;
-			tmp = orig;
-			*tmp = crc;
-		}
-
-		kunmap(page);
-
-		bytes += PAGE_CACHE_SIZE;
-
-		index++;
-	} while (node || next_page);
-
-	/* Write out the bitmaps */
-	list_for_each_safe(pos, n, &bitmap_list) {
-		void *addr;
-		struct btrfs_free_space *entry =
-			list_entry(pos, struct btrfs_free_space, list);
-
-		if (index >= num_pages) {
-			out_of_space = true;
-			break;
-		}
-		page = pages[index];
-
-		addr = kmap(page);
-		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
-		kunmap(page);
-		bytes += PAGE_CACHE_SIZE;
-
-		list_del_init(&entry->list);
-		index++;
-	}
-
-	if (out_of_space) {
-		btrfs_drop_pages(pages, num_pages);
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-				     i_size_read(inode) - 1, &cached_state,
-				     GFP_NOFS);
-		goto out;
-	}
-
-	/* Zero out the rest of the pages just to make sure */
-	while (index < num_pages) {
-		void *addr;
-
-		page = pages[index];
-		addr = kmap(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
-		kunmap(page);
-		bytes += PAGE_CACHE_SIZE;
-		index++;
-	}
-
-	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
-				bytes, &cached_state);
-	btrfs_drop_pages(pages, num_pages);
+	io_ctl_set_generation(&io_ctl, trans->transid);
+
+	/* Write out the extent entries */
+	while (node) {
+		struct btrfs_free_space *e;
+
+		e = rb_entry(node, struct btrfs_free_space, offset_index);
+		entries++;
+
+		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
+				       e->bitmap);
+		if (ret)
+			goto out_nospc;
+
+		if (e->bitmap) {
+			list_add_tail(&e->list, &bitmap_list);
+			bitmaps++;
+		}
+		node = rb_next(node);
+		if (!node && cluster) {
+			node = rb_first(&cluster->root);
+			cluster = NULL;
+		}
+	}
+
+	/*
+	 * We want to add any pinned extents to our free space cache
+	 * so we don't leak the space
+	 */
+	while (block_group && (start < block_group->key.objectid +
+			       block_group->key.offset)) {
+		ret = find_first_extent_bit(unpin, start, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret) {
+			ret = 0;
+			break;
+		}
+
+		/* This pinned extent is out of our range */
+		if (start >= block_group->key.objectid +
+		    block_group->key.offset)
+			break;
+
+		len = block_group->key.objectid +
+		      block_group->key.offset - start;
+		len = min(len, end + 1 - start);
+
+		entries++;
+		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+		if (ret)
+			goto out_nospc;
+
+		start = end + 1;
+	}
+
+	/* Write out the bitmaps */
+	list_for_each_safe(pos, n, &bitmap_list) {
+		struct btrfs_free_space *entry =
+			list_entry(pos, struct btrfs_free_space, list);
+
+		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
+		if (ret)
+			goto out_nospc;
+		list_del_init(&entry->list);
+	}
+
+	/* Zero out the rest of the pages just to make sure */
+	io_ctl_zero_remaining_pages(&io_ctl);
+
+	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+				0, i_size_read(inode), &cached_state);
+	io_ctl_drop_pages(&io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
@@ -817,7 +862,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 	if (ret < 0) {
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
 		goto out;
@@ -830,7 +875,8 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 	if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
 	    found_key.offset != offset) {
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+				 inode->i_size - 1,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
 				 GFP_NOFS);
@@ -848,13 +894,24 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
 	err = 0;
 out:
-	kfree(pages);
+	io_ctl_free(&io_ctl);
 	if (err) {
-		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
+		invalidate_inode_pages2(inode->i_mapping);
 		BTRFS_I(inode)->generation = 0;
 	}
 	btrfs_update_inode(trans, root, inode);
 	return err;
+
+out_nospc:
+	list_for_each_safe(pos, n, &bitmap_list) {
+		struct btrfs_free_space *entry =
+			list_entry(pos, struct btrfs_free_space, list);
+		list_del_init(&entry->list);
+	}
+	io_ctl_drop_pages(&io_ctl);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+	goto out;
 }
 
 int btrfs_write_out_cache(struct btrfs_root *root,