Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	64
1 file changed, 43 insertions, 21 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8912407a4dd0..da583bb43c84 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 {
 	struct stripe_head *sh;
 	int hash = stripe_hash_locks_hash(sector);
+	int inc_empty_inactive_list_flag;
 
 	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 				atomic_inc(&conf->active_stripes);
 			BUG_ON(list_empty(&sh->lru) &&
 			       !test_bit(STRIPE_EXPANDING, &sh->state));
+			inc_empty_inactive_list_flag = 0;
+			if (!list_empty(conf->inactive_list + hash))
+				inc_empty_inactive_list_flag = 1;
 			list_del_init(&sh->lru);
+			if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+				atomic_inc(&conf->empty_inactive_list_nr);
 			if (sh->group) {
 				sh->group->stripes_cnt--;
 				sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 	sector_t head_sector, tmp_sec;
 	int hash;
 	int dd_idx;
+	int inc_empty_inactive_list_flag;
 
 	/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
 	tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 				atomic_inc(&conf->active_stripes);
 			BUG_ON(list_empty(&head->lru) &&
 			       !test_bit(STRIPE_EXPANDING, &head->state));
+			inc_empty_inactive_list_flag = 0;
+			if (!list_empty(conf->inactive_list + hash))
+				inc_empty_inactive_list_flag = 1;
 			list_del_init(&head->lru);
+			if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+				atomic_inc(&conf->empty_inactive_list_nr);
 			if (head->group) {
 				head->group->stripes_cnt--;
 				head->group = NULL;
@@ -993,7 +1005,6 @@ again:
 
 		set_bit(STRIPE_IO_STARTED, &sh->state);
 
-		bio_reset(bi);
 		bi->bi_bdev = rdev->bdev;
 		bio_set_op_attrs(bi, op, op_flags);
 		bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
 
 		set_bit(STRIPE_IO_STARTED, &sh->state);
 
-		bio_reset(rbi);
 		rbi->bi_bdev = rrdev->bdev;
 		bio_set_op_attrs(rbi, op, op_flags);
 		BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	put_cpu();
 }
 
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+	int disks)
 {
 	struct stripe_head *sh;
+	int i;
 
 	sh = kmem_cache_zalloc(sc, gfp);
 	if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
 		INIT_LIST_HEAD(&sh->batch_list);
 		INIT_LIST_HEAD(&sh->lru);
 		atomic_set(&sh->count, 1);
+		for (i = 0; i < disks; i++) {
+			struct r5dev *dev = &sh->dev[i];
+
+			bio_init(&dev->req);
+			dev->req.bi_io_vec = &dev->vec;
+			dev->req.bi_max_vecs = 1;
+
+			bio_init(&dev->rreq);
+			dev->rreq.bi_io_vec = &dev->rvec;
+			dev->rreq.bi_max_vecs = 1;
+		}
 	}
 	return sh;
 }
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
 	struct stripe_head *sh;
 
-	sh = alloc_stripe(conf->slab_cache, gfp);
+	sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
 	if (!sh)
 		return 0;
 
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	mutex_lock(&conf->cache_size_mutex);
 
 	for (i = conf->max_nr_stripes; i; i--) {
-		nsh = alloc_stripe(sc, GFP_KERNEL);
+		nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
 		if (!nsh)
 			break;
 
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
 		bi->bi_error);
 	if (i == disks) {
+		bio_reset(bi);
 		BUG();
 		return;
 	}
@@ -2402,6 +2426,7 @@ static void raid5_end_read_request(struct bio * bi)
 	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	raid5_release_stripe(sh);
+	bio_reset(bi);
 }
 
 static void raid5_end_write_request(struct bio *bi)
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
 		bi->bi_error);
 	if (i == disks) {
+		bio_reset(bi);
 		BUG();
 		return;
 	}
@@ -2479,22 +2505,13 @@ static void raid5_end_write_request(struct bio *bi)
 
 	if (sh->batch_head && sh != sh->batch_head)
 		raid5_release_stripe(sh->batch_head);
+	bio_reset(bi);
 }
 
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 	struct r5dev *dev = &sh->dev[i];
 
-	bio_init(&dev->req);
-	dev->req.bi_io_vec = &dev->vec;
-	dev->req.bi_max_vecs = 1;
-	dev->req.bi_private = sh;
-
-	bio_init(&dev->rreq);
-	dev->rreq.bi_io_vec = &dev->rvec;
-	dev->rreq.bi_max_vecs = 1;
-	dev->rreq.bi_private = sh;
-
 	dev->flags = 0;
 	dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
@@ -4628,7 +4645,9 @@ finish:
 	}
 
 	if (!bio_list_empty(&s.return_bi)) {
-		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+		    (s.failed <= conf->max_degraded ||
+		     conf->mddev->external == 0)) {
 			spin_lock_irq(&conf->device_lock);
 			bio_list_merge(&conf->return_bi, &s.return_bi);
 			spin_unlock_irq(&conf->device_lock);
@@ -6826,11 +6845,14 @@ static int raid5_run(struct mddev *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
-		printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
-			mdname(mddev));
-		mddev->ro = 1;
-		set_disk_ro(mddev->gendisk, 1);
+	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+		if (!journal_dev) {
+			pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+			       mdname(mddev));
+			mddev->ro = 1;
+			set_disk_ro(mddev->gendisk, 1);
+		} else if (mddev->recovery_cp == MaxSector)
+			set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
 	}
 
 	conf->min_offset_diff = min_offset_diff;