about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fusionio.com>2013-10-18 11:44:46 -0400
committerChris Mason <chris.mason@fusionio.com>2013-11-11 21:58:56 -0500
commit7f4ca37c486733da008778a1f4058fbc194a4fdd (patch)
tree491f6ffb135f7906428371cb9d064278ec5c8f48 /fs
parent4277a9c3b3665f2830c55ece015163867b9414cc (diff)
Btrfs: fix up seek_hole/seek_data handling
Whoever wrote this was braindead. Also it doesn't work right if you have VACANCY's since we assumed you would only have that at the end of the file, which won't be the case in the near future. I tested this with generic/285 and generic/286 as well as the btrfs tests that use fssum since it uses seek_hole/seek_data to verify things are ok. Thanks, Signed-off-by: Josef Bacik <jbacik@fusionio.com> Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/file.c94
1 file changed, 19 insertions(+), 75 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 72da4df53c9a..bf3465cf455a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2405,14 +2405,12 @@ out_reserve_fail:
2405static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) 2405static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2406{ 2406{
2407 struct btrfs_root *root = BTRFS_I(inode)->root; 2407 struct btrfs_root *root = BTRFS_I(inode)->root;
2408 struct extent_map *em; 2408 struct extent_map *em = NULL;
2409 struct extent_state *cached_state = NULL; 2409 struct extent_state *cached_state = NULL;
2410 u64 lockstart = *offset; 2410 u64 lockstart = *offset;
2411 u64 lockend = i_size_read(inode); 2411 u64 lockend = i_size_read(inode);
2412 u64 start = *offset; 2412 u64 start = *offset;
2413 u64 orig_start = *offset;
2414 u64 len = i_size_read(inode); 2413 u64 len = i_size_read(inode);
2415 u64 last_end = 0;
2416 int ret = 0; 2414 int ret = 0;
2417 2415
2418 lockend = max_t(u64, root->sectorsize, lockend); 2416 lockend = max_t(u64, root->sectorsize, lockend);
@@ -2429,89 +2427,35 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2429 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, 2427 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2430 &cached_state); 2428 &cached_state);
2431 2429
2432 /* 2430 while (start < inode->i_size) {
2433 * Delalloc is such a pain. If we have a hole and we have pending
2434 * delalloc for a portion of the hole we will get back a hole that
2435 * exists for the entire range since it hasn't been actually written
2436 * yet. So to take care of this case we need to look for an extent just
2437 * before the position we want in case there is outstanding delalloc
2438 * going on here.
2439 */
2440 if (whence == SEEK_HOLE && start != 0) {
2441 if (start <= root->sectorsize)
2442 em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2443 root->sectorsize, 0);
2444 else
2445 em = btrfs_get_extent_fiemap(inode, NULL, 0,
2446 start - root->sectorsize,
2447 root->sectorsize, 0);
2448 if (IS_ERR(em)) {
2449 ret = PTR_ERR(em);
2450 goto out;
2451 }
2452 last_end = em->start + em->len;
2453 if (em->block_start == EXTENT_MAP_DELALLOC)
2454 last_end = min_t(u64, last_end, inode->i_size);
2455 free_extent_map(em);
2456 }
2457
2458 while (1) {
2459 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); 2431 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2460 if (IS_ERR(em)) { 2432 if (IS_ERR(em)) {
2461 ret = PTR_ERR(em); 2433 ret = PTR_ERR(em);
2434 em = NULL;
2462 break; 2435 break;
2463 } 2436 }
2464 2437
2465 if (em->block_start == EXTENT_MAP_HOLE) { 2438 if (whence == SEEK_HOLE &&
2466 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { 2439 (em->block_start == EXTENT_MAP_HOLE ||
2467 if (last_end <= orig_start) { 2440 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2468 free_extent_map(em); 2441 break;
2469 ret = -ENXIO; 2442 else if (whence == SEEK_DATA &&
2470 break; 2443 (em->block_start != EXTENT_MAP_HOLE &&
2471 } 2444 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2472 } 2445 break;
2473
2474 if (whence == SEEK_HOLE) {
2475 *offset = start;
2476 free_extent_map(em);
2477 break;
2478 }
2479 } else {
2480 if (whence == SEEK_DATA) {
2481 if (em->block_start == EXTENT_MAP_DELALLOC) {
2482 if (start >= inode->i_size) {
2483 free_extent_map(em);
2484 ret = -ENXIO;
2485 break;
2486 }
2487 }
2488
2489 if (!test_bit(EXTENT_FLAG_PREALLOC,
2490 &em->flags)) {
2491 *offset = start;
2492 free_extent_map(em);
2493 break;
2494 }
2495 }
2496 }
2497 2446
2498 start = em->start + em->len; 2447 start = em->start + em->len;
2499 last_end = em->start + em->len;
2500
2501 if (em->block_start == EXTENT_MAP_DELALLOC)
2502 last_end = min_t(u64, last_end, inode->i_size);
2503
2504 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2505 free_extent_map(em);
2506 ret = -ENXIO;
2507 break;
2508 }
2509 free_extent_map(em); 2448 free_extent_map(em);
2449 em = NULL;
2510 cond_resched(); 2450 cond_resched();
2511 } 2451 }
2512 if (!ret) 2452 free_extent_map(em);
2513 *offset = min(*offset, inode->i_size); 2453 if (!ret) {
2514out: 2454 if (whence == SEEK_DATA && start >= inode->i_size)
2455 ret = -ENXIO;
2456 else
2457 *offset = min_t(loff_t, start, inode->i_size);
2458 }
2515 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 2459 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2516 &cached_state, GFP_NOFS); 2460 &cached_state, GFP_NOFS);
2517 return ret; 2461 return ret;