author     NeilBrown <neilb@suse.de>   2012-10-30 20:59:09 -0400
committer  NeilBrown <neilb@suse.de>   2012-12-17 18:22:21 -0500
commit     a9add5d92b64ea57fb4c3b557c3891cdeb15fa0c
tree       d025f0fbba2389550acadc4489c3c9c59cd12d97 /drivers/md
parent     749586b7d34df910118bff2c248d08877d772e81
md/raid5: add blktrace calls
This makes it easier to trace what raid5 is doing.

Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid5.c | 38
1 file changed, 35 insertions(+), 3 deletions(-)
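
To put the hunks below in context: the patch wires the existing block-layer tracepoints into raid5's bio paths, so remaps to member disks and completions of the original bios appear in the array's blktrace stream, alongside free-form "raid5 rmw"/"raid5 rcw" messages emitted with blk_add_trace_msg(). A minimal sketch of that pattern follows, assuming the 3.7-era block tracepoint API used in this patch; submit_bio_traced() and end_bio_traced() are illustrative names only, not functions from raid5.c (the patch open-codes these calls at each site):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Illustrative helpers only -- raid5.c open-codes these tracepoint calls. */
static void submit_bio_traced(struct bio *bio, dev_t array_devt,
			      sector_t array_sector)
{
	/* Record that a bio originally aimed at the md array (array_devt,
	 * array_sector) is being redirected to a member device, then submit it. */
	trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
			      array_devt, array_sector);
	generic_make_request(bio);
}

static void end_bio_traced(struct bio *bio)
{
	/* Log the completion in the blktrace stream before ending the bio. */
	trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, 0);
	bio_endio(bio, 0);
}
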
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2cf23f23ddc..ffebc1e8f48 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
 #include "md.h"
 #include "raid5.h"
 #include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
 		bi->bi_size = 0;
+		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+					 bi, 0);
 		bio_endio(bi, 0);
 		bi = return_bi;
 	}
@@ -671,6 +675,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+					      bi, disk_devt(conf->mddev->gendisk),
+					      sh->dev[i].sector);
 			generic_make_request(bi);
 		}
 		if (rrdev) {
@@ -698,6 +705,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
 			rbi->bi_next = NULL;
+			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+					      rbi, disk_devt(conf->mddev->gendisk),
+					      sh->dev[i].sector);
 			generic_make_request(rbi);
 		}
 		if (!rdev && !rrdev) {
@@ -2855,8 +2865,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
 		 (unsigned long long)sh->sector, rmw, rcw);
 	set_bit(STRIPE_HANDLE, &sh->state);
-	if (rmw < rcw && rmw > 0)
+	if (rmw < rcw && rmw > 0) {
 		/* prefer read-modify-write, but need to get some data */
+		blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+				  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if ((dev->towrite || i == sh->pd_idx) &&
@@ -2867,7 +2879,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 				if (
 				    test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 					pr_debug("Read_old block "
 						 "%d for r-m-w\n", i);
 					set_bit(R5_LOCKED, &dev->flags);
 					set_bit(R5_Wantread, &dev->flags);
 					s->locked++;
@@ -2877,8 +2889,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 				}
 			}
 		}
+	}
 	if (rcw <= rmw && rcw > 0) {
 		/* want reconstruct write, but need to get some data */
+		int qread =0;
 		rcw = 0;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -2897,12 +2911,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 					set_bit(R5_LOCKED, &dev->flags);
 					set_bit(R5_Wantread, &dev->flags);
 					s->locked++;
+					qread++;
 				} else {
 					set_bit(STRIPE_DELAYED, &sh->state);
 					set_bit(STRIPE_HANDLE, &sh->state);
 				}
 			}
 		}
+		if (rcw)
+			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+					  (unsigned long long)sh->sector,
+					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
 	}
 	/* now if nothing is locked, and if we have enough data,
 	 * we can start a write request
@@ -3900,6 +3919,8 @@ static void raid5_align_endio(struct bio *bi, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 
 	if (!error && uptodate) {
+		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+					 raid_bi, 0);
 		bio_endio(raid_bi, 0);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_stripe);
@@ -4004,6 +4025,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
+		trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+				      align_bi, disk_devt(mddev->gendisk),
+				      raid_bio->bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4078,6 +4102,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 	struct stripe_head *sh;
 	struct mddev *mddev = cb->cb.data;
 	struct r5conf *conf = mddev->private;
+	int cnt = 0;
 
 	if (cb->list.next && !list_empty(&cb->list)) {
 		spin_lock_irq(&conf->device_lock);
@@ -4092,9 +4117,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 			smp_mb__before_clear_bit();
 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
 			__release_stripe(conf, sh);
+			cnt++;
 		}
 		spin_unlock_irq(&conf->device_lock);
 	}
+	trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }
 
@@ -4352,6 +4379,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		if ( rw == WRITE )
 			md_write_end(mddev);
 
+		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+					 bi, 0);
 		bio_endio(bi, 0);
 	}
 }
@@ -4728,8 +4757,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		handled++;
 	}
 	remaining = raid5_dec_bi_active_stripes(raid_bio);
-	if (remaining == 0)
+	if (remaining == 0) {
+		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+					 raid_bio, 0);
 		bio_endio(raid_bio, 0);
+	}
 	if (atomic_dec_and_test(&conf->active_aligned_reads))
 		wake_up(&conf->wait_for_stripe);
 	return handled;