 fs/btrfs/extent_io.c | 89 ++++++++++++++++++++++++++++++++---------------------------------------------------------
 1 file changed, 32 insertions(+), 57 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fe1d6c3424a5..b306b3a88fc7 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1961,28 +1961,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 }
 
 /*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
-	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
-		unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
-				 struct page *page)
-{
-	end_page_writeback(page);
-}
-
-/*
  * When IO fails, either with EIO or csum verification fails, we
  * try other mirrors that might have a good copy of the data. This
  * io_failure_record is used to record state as we go through all the
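Note: the helpers deleted above guarded against sub-page IO. check_page_locked() re-tested EXTENT_LOCKED across the page's byte range before unlocking, so a page shared by several extent ranges was only unlocked once the last range finished. The premise of this patch (see the comments added in the hunks below) is that btrfs always issues full-page reads and writes, so a completing bvec accounts for the whole page and that re-check is dead logic. A minimal userspace model of the removed check, with all names hypothetical and an assumed four blocks per page:

/*
 * Userspace model (not kernel code) of what check_page_locked()
 * verified: treat a page as a set of blocks and only unlock the page
 * once no block in it is still covered by a locked extent range.
 */
#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 4	/* e.g. four 1 KiB blocks in a 4 KiB page */

static bool page_range_unlocked(const bool locked[BLOCKS_PER_PAGE])
{
	for (int i = 0; i < BLOCKS_PER_PAGE; i++)
		if (locked[i])
			return false;	/* an extent still locks part of the page */
	return true;			/* safe to unlock the page */
}

int main(void)
{
	bool locked[BLOCKS_PER_PAGE] = { false, true, false, false };

	printf("%s\n", page_range_unlocked(locked) ? "unlock" : "keep locked");
	return 0;
}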
@@ -2411,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	int whole_page;
 
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = page_offset(page) + bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
+		/* We always issue full-page reads, but if some block
+		 * in a page fails to read, blk_update_request() will
+		 * advance bv_offset and adjust bv_len to compensate.
+		 * Print a warning for nonzero offsets, and an error
+		 * if they don't add up to a full page.  */
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+			printk("%s page write in btrfs with offset %u and length %u\n",
+			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
+			       bvec->bv_offset, bvec->bv_len);
 
-		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-			whole_page = 1;
-		else
-			whole_page = 0;
+		start = page_offset(page);
+		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
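Note: the new diagnostic distinguishes two shapes of non-whole bvec, following the added comment. When blk_update_request() advances bv_offset after a failed block, offset plus length still lands on the page boundary, which is the expected "incomplete" case logged at KERN_INFO; a bvec that does not add up to a full page should not happen and is flagged "partial" at KERN_ERR. A standalone sketch of that classification, with a hypothetical helper name and an assumed 4 KiB page-cache granularity:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u	/* assumed page size */

/* Mirror of the ternary in the new printk: "partial" is the KERN_ERR
 * case (offset and length do not add up to a full page), "incomplete"
 * the KERN_INFO case (offset advanced but the bvec still ends at the
 * page boundary). */
static const char *classify(unsigned int bv_offset, unsigned int bv_len)
{
	if (bv_offset == 0 && bv_len == PAGE_CACHE_SIZE)
		return "whole";		/* common case: no message at all */
	return bv_offset + bv_len != PAGE_CACHE_SIZE ? "partial"
						     : "incomplete";
}

int main(void)
{
	printf("%s\n", classify(0, 4096));	/* whole */
	printf("%s\n", classify(512, 3584));	/* incomplete */
	printf("%s\n", classify(512, 1024));	/* partial */
	return 0;
}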
@@ -2431,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		if (end_extent_writepage(page, err, start, end))
 			continue;
 
-		if (whole_page)
-			end_page_writeback(page);
-		else
-			check_page_writeback(tree, page);
+		end_page_writeback(page);
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
@@ -2459,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	int whole_page;
 	int mirror;
 	int ret;
 
@@ -2477,13 +2456,19 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			 io_bio->mirror_num);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = page_offset(page) + bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
+		/* We always issue full-page reads, but if some block
+		 * in a page fails to read, blk_update_request() will
+		 * advance bv_offset and adjust bv_len to compensate.
+		 * Print a warning for nonzero offsets, and an error
+		 * if they don't add up to a full page.  */
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+			printk("%s page read in btrfs with offset %u and length %u\n",
+			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
+			       bvec->bv_offset, bvec->bv_len);
 
-		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-			whole_page = 1;
-		else
-			whole_page = 0;
+		start = page_offset(page);
+		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
 		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
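Note: both completion handlers also change how start and end are derived. start is now the page's own offset rather than page_offset(page) + bv_offset, and end is the inclusive last byte the bvec reaches within the page, so the completed range always begins at the page boundary. Worked arithmetic under an assumed 4 KiB page, with plain variables standing in for page_offset(page) and the bvec fields:

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u	/* assumed page size */

int main(void)
{
	uint64_t page_offset = 3 * (uint64_t)PAGE_CACHE_SIZE;	/* page index 3 */
	unsigned int bv_offset = 512, bv_len = 3584;

	/* Old: range starts bv_offset bytes into the page. */
	uint64_t old_start = page_offset + bv_offset;
	uint64_t old_end = old_start + bv_len - 1;

	/* New: range pinned to the page start, inclusive end. */
	uint64_t new_start = page_offset;
	uint64_t new_end = new_start + bv_offset + bv_len - 1;

	/* The old form skipped the first bv_offset bytes of the page. */
	printf("old [%llu, %llu] vs new [%llu, %llu]\n",
	       (unsigned long long)old_start, (unsigned long long)old_end,
	       (unsigned long long)new_start, (unsigned long long)new_end);
	return 0;
}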
@@ -2542,23 +2527,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		}
 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
-		if (whole_page) {
-			if (uptodate) {
-				SetPageUptodate(page);
-			} else {
-				ClearPageUptodate(page);
-				SetPageError(page);
-			}
-			unlock_page(page);
+		if (uptodate) {
+			SetPageUptodate(page);
 		} else {
-			if (uptodate) {
-				check_page_uptodate(tree, page);
-			} else {
-				ClearPageUptodate(page);
-				SetPageError(page);
-			}
-			check_page_locked(tree, page);
+			ClearPageUptodate(page);
+			SetPageError(page);
 		}
+		unlock_page(page);
 	} while (bvec <= bvec_end);
 
 	bio_put(bio);
@@ -4022,7 +3997,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		last_for_get_extent = isize;
 	}
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
 			 &cached_state);
 
 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4109,7 +4084,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
 	free_extent_map(em);
 out:
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
 			     &cached_state, GFP_NOFS);
 	return ret;
 }
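Note: the last two hunks fix an off-by-one in extent_fiemap(). The extent-io-tree interfaces take an inclusive end offset, so a range of len bytes starting at start ends at start + len - 1; passing start + len locked (and later unlocked) the first byte of the following range as well. The arithmetic spelled out as a small self-contained C check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0, len = 4096;

	uint64_t wrong = start + len;		/* first byte of the NEXT range */
	uint64_t right = start + len - 1;	/* inclusive last byte of this one */

	printf("start + len     = %llu (%llu bytes locked)\n",
	       (unsigned long long)wrong,
	       (unsigned long long)(wrong - start + 1));
	printf("start + len - 1 = %llu (%llu bytes locked)\n",
	       (unsigned long long)right,
	       (unsigned long long)(right - start + 1));
	return 0;
}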