author		Raz Ben-Jehuda(caro) <raziebe@gmail.com>	2006-12-10 05:20:46 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-10 12:57:20 -0500
commit		f679623f50545bc0577caf2d0f8675b61162f059 (patch)
tree		7253c48db142ec63e6f22df12dfa2babb3e6129c /drivers/md/raid5.c
parent		23032a0eb97c8eaae8ac9d17373b53b19d0f5413 (diff)
[PATCH] md: handle bypassing the read cache (assuming nothing fails)
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	78
1 file changed, 78 insertions, 0 deletions
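
A note on the boundary test the hunk below introduces: in_chunk_boundary() accepts a read only if it starts and ends inside a single chunk, and the mask arithmetic assumes the chunk size is a power of two, so that sector & (chunk_sectors - 1) yields the offset within the chunk. The following is a minimal user-space sketch of the same check with hypothetical values (64 KiB chunks, i.e. 128 sectors of 512 bytes); fits_in_one_chunk() and its inputs are illustrative stand-ins, not part of the patch.

/* Sketch of the boundary check performed by in_chunk_boundary() below.
 * Values are hypothetical: a 64 KiB chunk is 128 sectors of 512 bytes.
 */
#include <stdio.h>

static int fits_in_one_chunk(unsigned long long sector,
			     unsigned int bio_sectors,
			     unsigned int chunk_sectors)
{
	/* offset of the first sector within its chunk (power-of-two mask) */
	unsigned int offset = sector & (chunk_sectors - 1);

	/* the read fits iff it does not run past the end of the chunk */
	return chunk_sectors >= offset + bio_sectors;
}

int main(void)
{
	unsigned int chunk_sectors = 128;	/* 64 KiB chunk */

	/* 8-sector (4 KiB) read at sector 120: 120 + 8 = 128 exactly
	 * reaches the chunk end, so it is accepted. */
	printf("%d\n", fits_in_one_chunk(120, 8, chunk_sectors));	/* 1 */

	/* the same read at sector 124 would cross into the next chunk
	 * (124 + 8 = 132 > 128), so it is rejected. */
	printf("%d\n", fits_in_one_chunk(124, 8, chunk_sectors));	/* 0 */

	return 0;
}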
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b86ceba04f6f..269b7771a30b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2633,6 +2633,84 @@ static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
 	return max;
 }
 
+
+static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
+{
+	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	unsigned int chunk_sectors = mddev->chunk_size >> 9;
+	unsigned int bio_sectors = bio->bi_size >> 9;
+
+	return  chunk_sectors >=
+		((sector & (chunk_sectors - 1)) + bio_sectors);
+}
+
+/*
+ *  The "raid5_align_endio" should check if the read succeeded and if it
+ *  did, call bio_endio on the original bio (having bio_put the new bio
+ *  first).
+ *  If the read failed..
+ */
+int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+{
+	struct bio* raid_bi = bi->bi_private;
+	if (bi->bi_size)
+		return 1;
+	bio_put(bi);
+	bio_endio(raid_bi, bytes, error);
+	return 0;
+}
+
+static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+{
+	mddev_t *mddev = q->queuedata;
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+	const unsigned int raid_disks = conf->raid_disks;
+	const unsigned int data_disks = raid_disks - 1;
+	unsigned int dd_idx, pd_idx;
+	struct bio* align_bi;
+	mdk_rdev_t *rdev;
+
+	if (!in_chunk_boundary(mddev, raid_bio)) {
+		printk("chunk_aligned_read : non aligned\n");
+		return 0;
+	}
+	/*
+	 * use bio_clone to make a copy of the bio
+	 */
+	align_bi = bio_clone(raid_bio, GFP_NOIO);
+	if (!align_bi)
+		return 0;
+	/*
+	 * set bi_end_io to a new function, and set bi_private to the
+	 * original bio.
+	 */
+	align_bi->bi_end_io  = raid5_align_endio;
+	align_bi->bi_private = raid_bio;
+	/*
+	 * compute position
+	 */
+	align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector,
+					raid_disks,
+					data_disks,
+					&dd_idx,
+					&pd_idx,
+					conf);
+
+	rcu_read_lock();
+	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+	if (rdev && test_bit(In_sync, &rdev->flags)) {
+		align_bi->bi_bdev = rdev->bdev;
+		atomic_inc(&rdev->nr_pending);
+		rcu_read_unlock();
+		generic_make_request(align_bi);
+		return 1;
+	} else {
+		rcu_read_unlock();
+		return 0;
+	}
+}
+
+
 static int make_request(request_queue_t *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
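
All 78 insertions in this change land in the hunk above; the make_request() lines at the end are unchanged context, so wiring the bypass into the request path is not part of this diff. As a standalone illustration of the completion handling the patch sets up (the clone carries a pointer back to the original in a private field, and the clone's end_io finishes the original and drops the clone, as raid5_align_endio() does via bi_private, bio_endio() and bio_put()), here is a minimal user-space sketch; struct req, clone_end_io() and the other names are hypothetical stand-ins, not kernel APIs.

/* User-space sketch (hypothetical types) of the clone-and-redirect
 * completion pattern used by the patch above.
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
	void (*end_io)(struct req *r, int error);
	void *private;		/* clone keeps a pointer to the original here */
	const char *name;
};

static void original_end_io(struct req *r, int error)
{
	printf("%s completed, error=%d\n", r->name, error);
}

/* plays the role of raid5_align_endio() */
static void clone_end_io(struct req *clone, int error)
{
	struct req *orig = clone->private;

	free(clone);			/* bio_put(bi)              */
	orig->end_io(orig, error);	/* bio_endio(raid_bi, ...)  */
}

int main(void)
{
	struct req orig = { original_end_io, NULL, "original read" };
	struct req *clone = malloc(sizeof(*clone));

	if (!clone)
		return 1;
	clone->end_io = clone_end_io;
	clone->private = &orig;		/* align_bi->bi_private = raid_bio */
	clone->name = "clone";

	/* pretend the lower-level device completed the clone successfully */
	clone->end_io(clone, 0);
	return 0;
}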