Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--	fs/btrfs/inode.c	114
1 file changed, 113 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8c2d5d036bd6..48f1d1b96450 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -385,6 +385,86 @@ out:
 	return ret;
 }
 
+struct io_failure_record {
+	struct page *page;
+	u64 start;
+	u64 len;
+	u64 logical;
+	int last_mirror;
+};
+
+int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
+				  struct page *page, u64 start, u64 end,
+				  struct extent_state *state)
+{
+	struct io_failure_record *failrec = NULL;
+	u64 private;
+	struct extent_map *em;
+	struct inode *inode = page->mapping->host;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+	struct bio *bio;
+	int num_copies;
+	int ret;
+	u64 logical;
+
+	ret = get_state_private(failure_tree, start, &private);
+	if (ret) {
+		size_t pg_offset = start - page_offset(page);
+		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
+		if (!failrec)
+			return -ENOMEM;
+		failrec->start = start;
+		failrec->len = end - start + 1;
+		failrec->last_mirror = 0;
+
+		em = btrfs_get_extent(inode, NULL, pg_offset, start,
+				      failrec->len, 0);
+
+		if (!em || IS_ERR(em)) {
+			kfree(failrec);
+			return -EIO;
+		}
+		logical = start - em->start;
+		logical = em->block_start + logical;
+		failrec->logical = logical;
+		free_extent_map(em);
+		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
+				EXTENT_DIRTY, GFP_NOFS);
+		set_state_private(failure_tree, start, (u64)failrec);
+	} else {
+		failrec = (struct io_failure_record *)private;
+	}
+	num_copies = btrfs_num_copies(
+			&BTRFS_I(inode)->root->fs_info->mapping_tree,
+			failrec->logical, failrec->len);
+	failrec->last_mirror++;
+	if (!state) {
+		spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
+		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
+						    failrec->start,
+						    EXTENT_LOCKED);
+		if (state && state->start != failrec->start)
+			state = NULL;
+		spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
+	}
+	if (!state || failrec->last_mirror > num_copies) {
+		set_state_private(failure_tree, failrec->start, 0);
+		clear_extent_bits(failure_tree, failrec->start,
+				  failrec->start + failrec->len - 1,
+				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+		kfree(failrec);
+		return -EIO;
+	}
+	bio = bio_alloc(GFP_NOFS, 1);
+	bio->bi_private = state;
+	bio->bi_end_io = failed_bio->bi_end_io;
+	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_bdev = failed_bio->bi_bdev;
+	bio_add_page(bio, page, failrec->len, start - page_offset(page));
+	btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
+	return 0;
+}
+
 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 			       struct extent_state *state)
 {
@@ -419,6 +499,29 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	}
 	kunmap_atomic(kaddr, KM_IRQ0);
 	local_irq_restore(flags);
+
+	/* if the io failure tree for this inode is non-empty,
+	 * check to see if we've recovered from a failed IO
+	 */
+	private = 0;
+	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
+			     (u64)-1, 1, EXTENT_DIRTY)) {
+		u64 private_failure;
+		struct io_failure_record *failure;
+		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
+					start, &private_failure);
+		if (ret == 0) {
+			failure = (struct io_failure_record *)private_failure;
+			set_state_private(&BTRFS_I(inode)->io_failure_tree,
+					  failure->start, 0);
+			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+					  failure->start,
+					  failure->start + failure->len - 1,
+					  EXTENT_DIRTY | EXTENT_LOCKED,
+					  GFP_NOFS);
+			kfree(failure);
+		}
+	}
 	return 0;
 
 zeroit:
@@ -429,7 +532,7 @@ zeroit:
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_IRQ0);
 	local_irq_restore(flags);
-	return 0;
+	return -EIO;
 }
 
 void btrfs_read_locked_inode(struct inode *inode)
@@ -1271,6 +1374,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 			     inode->i_mapping, GFP_NOFS);
+	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+			     inode->i_mapping, GFP_NOFS);
 	return 0;
 }
 
@@ -1578,6 +1683,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 			     inode->i_mapping, GFP_NOFS);
+	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+			     inode->i_mapping, GFP_NOFS);
 	BTRFS_I(inode)->delalloc_bytes = 0;
 	BTRFS_I(inode)->root = root;
 
@@ -1803,6 +1910,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 		extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 		extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 				     inode->i_mapping, GFP_NOFS);
+		extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+				     inode->i_mapping, GFP_NOFS);
 		BTRFS_I(inode)->delalloc_bytes = 0;
 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 	}
@@ -2972,6 +3081,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 		extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 		extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 				     inode->i_mapping, GFP_NOFS);
+		extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+				     inode->i_mapping, GFP_NOFS);
 		BTRFS_I(inode)->delalloc_bytes = 0;
 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
 	}
@@ -3070,6 +3181,7 @@ static struct extent_io_ops btrfs_extent_io_ops = {
 	.merge_bio_hook = btrfs_merge_bio_hook,
 	.readpage_io_hook = btrfs_readpage_io_hook,
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
+	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 	.set_bit_hook = btrfs_set_bit_hook,
 	.clear_bit_hook = btrfs_clear_bit_hook,
 };
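
Not part of the patch: a minimal userspace sketch of the retry policy the new
readpage_io_failed_hook implements. A per-range failure record remembers the
last mirror that was submitted; each failed read advances to the next mirror,
and the range is only given up on (returning -EIO) once every copy reported by
btrfs_num_copies() has been tried. All names below are illustrative stand-ins,
not kernel or btrfs APIs.

/* Build with: cc -Wall retry_sketch.c -o retry_sketch */
#include <stdio.h>

struct failure_record {
	int last_mirror;	/* last mirror number submitted, 0 = none yet */
};

/* Returns the mirror to retry from, or -1 when all copies are exhausted
 * (the point where the hook would return -EIO). */
static int next_mirror(struct failure_record *rec, int num_copies)
{
	rec->last_mirror++;
	if (rec->last_mirror > num_copies)
		return -1;
	return rec->last_mirror;
}

int main(void)
{
	struct failure_record rec = { .last_mirror = 0 };
	int num_copies = 2;	/* e.g. two copies of the data on a mirrored fs */
	int mirror;

	/* Simulate repeated read failures on the same byte range. */
	while ((mirror = next_mirror(&rec, num_copies)) != -1)
		printf("resubmitting read from mirror %d\n", mirror);
	printf("all %d copies failed, giving up with -EIO\n", num_copies);
	return 0;
}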