author     Shaohua Li <shli@kernel.org>    2012-07-19 02:01:31 -0400
committer  NeilBrown <neilb@suse.de>       2012-07-19 02:01:31 -0400
commit     e7836bd6f60d659830b87804f7f4026edfe0f1d1 (patch)
tree       48b8be28ae6d47697307d6fbce9d35c4fd1506f0 /drivers/md/raid5.c
parent     4eb788df670ef30a19b7ea15b107ea440544bc80 (diff)
raid5: lockless access to the raid5-overridden bi_phys_segments
Raid5 overrides bio->bi_phys_segments, and accesses to it are made with device_lock held, which is unnecessary. We can make the accesses lockless.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
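The patch replaces locked read-modify-write of the overloaded bi_phys_segments field with atomic operations on the same 32-bit word: the low 16 bits carry a biased count of active stripes, the high 16 bits a count of processed stripes. As a minimal sketch of that packing scheme, here is an illustrative userspace C analogue using C11 stdatomic in place of the kernel's atomic_t API (all names below are hypothetical, not the kernel's):

/*
 * Userspace sketch (not kernel code) of the counter packing this patch
 * makes lockless: one 32-bit atomic word holds a biased count of active
 * stripes in its low 16 bits and a count of processed stripes in its
 * high 16 bits.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int segments;

static unsigned int processed_stripes(void)
{
        return (atomic_load(&segments) >> 16) & 0xffff;
}

static unsigned int dec_active_stripes(void)
{
        /* fetch_sub returns the old value, so subtract 1 for the new one */
        return (atomic_fetch_sub(&segments, 1) - 1) & 0xffff;
}

static void inc_active_stripes(void)
{
        atomic_fetch_add(&segments, 1);
}

static void set_processed_stripes(unsigned int cnt)
{
        unsigned int old, new;

        /* cmpxchg loop: replace the high half, leave the low half alone */
        do {
                old = atomic_load(&segments);
                new = (old & 0xffff) | (cnt << 16);
        } while (!atomic_compare_exchange_weak(&segments, &old, new));
}

int main(void)
{
        atomic_store(&segments, 1);     /* biased count of active stripes */
        inc_active_stripes();           /* one stripe attached to the bio */
        set_processed_stripes(5);
        printf("processed = %u\n", processed_stripes());  /* 5 */
        printf("active = %u\n", dec_active_stripes());    /* 1: stripe done */
        printf("active = %u\n", dec_active_stripes());    /* 0: bias dropped, bio may complete */
        return 0;
}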
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 848034666342..6ef1eeb68f7c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
  * We maintain a biased count of active stripes in the bottom 16 bits of
  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
  */
-static inline int raid5_bi_phys_segments(struct bio *bio)
+static inline int raid5_bi_processed_stripes(struct bio *bio)
 {
-        return bio->bi_phys_segments & 0xffff;
+        atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+        return (atomic_read(segments) >> 16) & 0xffff;
 }
 
-static inline int raid5_bi_hw_segments(struct bio *bio)
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
 {
-        return (bio->bi_phys_segments >> 16) & 0xffff;
+        atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+        return atomic_sub_return(1, segments) & 0xffff;
 }
 
-static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
 {
-        --bio->bi_phys_segments;
-        return raid5_bi_phys_segments(bio);
+        atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+        atomic_inc(segments);
 }
 
-static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+        unsigned int cnt)
 {
-        unsigned short val = raid5_bi_hw_segments(bio);
+        atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+        int old, new;
 
-        --val;
-        bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
-        return val;
+        do {
+                old = atomic_read(segments);
+                new = (old & 0xffff) | (cnt << 16);
+        } while (atomic_cmpxchg(segments, old, new) != old);
 }
 
-static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
 {
-        bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+        atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+        atomic_set(segments, cnt);
 }
 
 /* Find first data disk in a raid6 stripe */
@@ -781,7 +787,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                 while (rbi && rbi->bi_sector <
                         dev->sector + STRIPE_SECTORS) {
                         rbi2 = r5_next_bio(rbi, dev->sector);
-                        if (!raid5_dec_bi_phys_segments(rbi)) {
+                        if (!raid5_dec_bi_active_stripes(rbi)) {
                                 rbi->bi_next = return_bi;
                                 return_bi = rbi;
                         }
@@ -2367,7 +2373,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
         if (*bip)
                 bi->bi_next = *bip;
         *bip = bi;
-        bi->bi_phys_segments++;
+        raid5_inc_bi_active_stripes(bi);
 
         if (forwrite) {
                 /* check if page is covered */
@@ -2464,7 +2470,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                         struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                        if (!raid5_dec_bi_phys_segments(bi)) {
+                        if (!raid5_dec_bi_active_stripes(bi)) {
                                 md_write_end(conf->mddev);
                                 bi->bi_next = *return_bi;
                                 *return_bi = bi;
@@ -2479,7 +2485,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                         struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                        if (!raid5_dec_bi_phys_segments(bi)) {
+                        if (!raid5_dec_bi_active_stripes(bi)) {
                                 md_write_end(conf->mddev);
                                 bi->bi_next = *return_bi;
                                 *return_bi = bi;
@@ -2503,7 +2509,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                                 struct bio *nextbi =
                                         r5_next_bio(bi, sh->dev[i].sector);
                                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                                if (!raid5_dec_bi_phys_segments(bi)) {
+                                if (!raid5_dec_bi_active_stripes(bi)) {
                                         bi->bi_next = *return_bi;
                                         *return_bi = bi;
                                 }
@@ -2722,7 +2728,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                 while (wbi && wbi->bi_sector <
                                         dev->sector + STRIPE_SECTORS) {
                                         wbi2 = r5_next_bio(wbi, dev->sector);
-                                        if (!raid5_dec_bi_phys_segments(wbi)) {
+                                        if (!raid5_dec_bi_active_stripes(wbi)) {
                                                 md_write_end(conf->mddev);
                                                 wbi->bi_next = *return_bi;
                                                 *return_bi = wbi;
@@ -3798,7 +3804,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
                  * this sets the active strip count to 1 and the processed
                  * strip count to zero (upper 8 bits)
                  */
-                bi->bi_phys_segments = 1; /* biased count of active stripes */
+                raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
         }
 
         return bi;
@@ -4133,9 +4139,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                 }
         }
 
-        spin_lock_irq(&conf->device_lock);
-        remaining = raid5_dec_bi_phys_segments(bi);
-        spin_unlock_irq(&conf->device_lock);
+        remaining = raid5_dec_bi_active_stripes(bi);
         if (remaining == 0) {
 
                 if ( rw == WRITE )
@@ -4491,7 +4495,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
              sector += STRIPE_SECTORS,
                       scnt++) {
 
-                if (scnt < raid5_bi_hw_segments(raid_bio))
+                if (scnt < raid5_bi_processed_stripes(raid_bio))
                         /* already done this stripe */
                         continue;
 
@@ -4499,14 +4503,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 
                 if (!sh) {
                         /* failed to get a stripe - must wait */
-                        raid5_set_bi_hw_segments(raid_bio, scnt);
+                        raid5_set_bi_processed_stripes(raid_bio, scnt);
                         conf->retry_read_aligned = raid_bio;
                         return handled;
                 }
 
                 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
                         release_stripe(sh);
-                        raid5_set_bi_hw_segments(raid_bio, scnt);
+                        raid5_set_bi_processed_stripes(raid_bio, scnt);
                         conf->retry_read_aligned = raid_bio;
                         return handled;
                 }
@@ -4515,9 +4519,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                 release_stripe(sh);
                 handled++;
         }
-        spin_lock_irq(&conf->device_lock);
-        remaining = raid5_dec_bi_phys_segments(raid_bio);
-        spin_unlock_irq(&conf->device_lock);
+        remaining = raid5_dec_bi_active_stripes(raid_bio);
         if (remaining == 0)
                 bio_endio(raid_bio, 0);
         if (atomic_dec_and_test(&conf->active_aligned_reads))
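A note on why the plain decrement in raid5_dec_bi_active_stripes is safe even though atomic_sub_return() operates on the whole 32-bit word: a decrement could only borrow into the processed-stripes half if the active half reached zero while decrements were still outstanding, and the bias of 1 set in remove_bio_from_retry guarantees the low 16 bits stay at least 1 until the final decrement, which is also the point at which the bio may be completed.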