Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d8555bf3e1d..19d77a026639 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
 #include "md.h"
 #include "raid5.h"
 #include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
 		bi->bi_size = 0;
+		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+					 bi, 0);
 		bio_endio(bi, 0);
 		bi = return_bi;
 	}
@@ -670,6 +674,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+					      bi, disk_devt(conf->mddev->gendisk),
+					      sh->dev[i].sector);
 			generic_make_request(bi);
 		}
 		if (rrdev) {
@@ -697,6 +704,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
 			rbi->bi_next = NULL;
+			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+					      rbi, disk_devt(conf->mddev->gendisk),
+					      sh->dev[i].sector);
 			generic_make_request(rbi);
 		}
 		if (!rdev && !rrdev) {
@@ -2853,8 +2863,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
 		 (unsigned long long)sh->sector, rmw, rcw);
 	set_bit(STRIPE_HANDLE, &sh->state);
-	if (rmw < rcw && rmw > 0)
+	if (rmw < rcw && rmw > 0) {
 		/* prefer read-modify-write, but need to get some data */
+		blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+				  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if ((dev->towrite || i == sh->pd_idx) &&
@@ -2865,7 +2877,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 				if (
 				    test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 					pr_debug("Read_old block "
-						"%d for r-m-w\n", i);
+						 "%d for r-m-w\n", i);
 					set_bit(R5_LOCKED, &dev->flags);
 					set_bit(R5_Wantread, &dev->flags);
 					s->locked++;
@@ -2875,8 +2887,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 				}
 			}
 		}
+	}
 	if (rcw <= rmw && rcw > 0) {
 		/* want reconstruct write, but need to get some data */
+		int qread = 0;
 		rcw = 0;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -2895,12 +2909,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 					set_bit(R5_LOCKED, &dev->flags);
 					set_bit(R5_Wantread, &dev->flags);
 					s->locked++;
+					qread++;
 				} else {
 					set_bit(STRIPE_DELAYED, &sh->state);
 					set_bit(STRIPE_HANDLE, &sh->state);
 				}
 			}
 		}
+		if (rcw)
+			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+					  (unsigned long long)sh->sector,
+					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
 	}
 	/* now if nothing is locked, and if we have enough data,
 	 * we can start a write request
@@ -3222,10 +3241,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
 
 	}
 	/* done submitting copies, wait for them to complete */
-	if (tx) {
-		async_tx_ack(tx);
-		dma_wait_for_async_tx(tx);
-	}
+	async_tx_quiesce(&tx);
 }
 
 /*
@@ -3901,6 +3917,8 @@ static void raid5_align_endio(struct bio *bi, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 
 	if (!error && uptodate) {
+		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+					 raid_bi, 0);
 		bio_endio(raid_bi, 0);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_stripe);
@@ -4005,6 +4023,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
+		trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+				      align_bi, disk_devt(mddev->gendisk),
+				      raid_bio->bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4079,6 +4100,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 	struct stripe_head *sh;
 	struct mddev *mddev = cb->cb.data;
 	struct r5conf *conf = mddev->private;
+	int cnt = 0;
 
 	if (cb->list.next && !list_empty(&cb->list)) {
 		spin_lock_irq(&conf->device_lock);
@@ -4093,9 +4115,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 			smp_mb__before_clear_bit();
 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
 			__release_stripe(conf, sh);
+			cnt++;
 		}
 		spin_unlock_irq(&conf->device_lock);
 	}
+	trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }
 
@@ -4353,6 +4377,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		if ( rw == WRITE )
 			md_write_end(mddev);
 
+		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+					 bi, 0);
 		bio_endio(bi, 0);
 	}
 }
@@ -4729,8 +4755,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		handled++;
 	}
 	remaining = raid5_dec_bi_active_stripes(raid_bio);
-	if (remaining == 0)
+	if (remaining == 0) {
+		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+					 raid_bio, 0);
 		bio_endio(raid_bio, 0);
+	}
 	if (atomic_dec_and_test(&conf->active_aligned_reads))
 		wake_up(&conf->wait_for_stripe);
 	return handled;