aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJonathan Brassow <jbrassow@redhat.com>2013-03-07 17:22:01 -0500
committerNeilBrown <neilb@suse.de>2013-03-19 22:16:57 -0400
commite3620a3ad52609f64a2402e4b59300afb4b83b77 (patch)
treec707eed6157a41e7f913c40eb1db1341473dfca7
parentce7d363aaf1e28be8406a2976220944ca487e8ca (diff)
MD RAID5: Avoid accessing gendisk or queue structs when not available
MD RAID5: Fix kernel oops when RAID4/5/6 is used via device-mapper

Commit a9add5d (v3.8-rc1) added blktrace calls to the RAID4/5/6 driver.

However, when device-mapper is used to create RAID4/5/6 arrays, the
mddev->gendisk and mddev->queue fields are not setup.  Therefore, calling
things like trace_block_bio_remap will cause a kernel oops.  This patch
conditionalizes those calls on whether the proper fields exist to make the
calls.  (Device-mapper will call trace_block_bio_remap on its own.)

This patch is suitable for the 3.8.y stable kernel.

Cc: stable@vger.kernel.org (v3.8+)
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r--drivers/md/raid5.c33
1 files changed, 20 insertions, 13 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5601dda1bc40..52ba88a10668 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -674,9 +674,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
-			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
-					      bi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+						      bi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(bi);
 		}
 		if (rrdev) {
@@ -704,9 +706,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
 			rbi->bi_next = NULL;
-			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
-					      rbi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+						      rbi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(rbi);
 		}
 		if (!rdev && !rrdev) {
@@ -2835,8 +2838,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	set_bit(STRIPE_HANDLE, &sh->state);
 	if (rmw < rcw && rmw > 0) {
 		/* prefer read-modify-write, but need to get some data */
-		blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
-				  (unsigned long long)sh->sector, rmw);
+		if (conf->mddev->queue)
+			blk_add_trace_msg(conf->mddev->queue,
+					  "raid5 rmw %llu %d",
+					  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if ((dev->towrite || i == sh->pd_idx) &&
@@ -2886,7 +2891,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 			}
 		}
 	}
-	if (rcw)
+	if (rcw && conf->mddev->queue)
 		blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
 				  (unsigned long long)sh->sector,
 				  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3993,9 +3998,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
-		trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
-				      align_bi, disk_devt(mddev->gendisk),
-				      raid_bio->bi_sector);
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+					      align_bi, disk_devt(mddev->gendisk),
+					      raid_bio->bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4089,7 +4095,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 		}
 		spin_unlock_irq(&conf->device_lock);
 	}
-	trace_block_unplug(mddev->queue, cnt, !from_schedule);
+	if (mddev->queue)
+		trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }
 