 drivers/md/raid1.c | 13
 drivers/md/raid5.c | 90
 2 files changed, 54 insertions(+), 49 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
+		int uptodate;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse */
+		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
+		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
 		bio_reset(b);
+		if (!uptodate)
+			clear_bit(BIO_UPTODATE, &b->bi_flags);
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
+		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
+		/* Now we can 'fixup' the BIO_UPTODATE flag */
+		set_bit(BIO_UPTODATE, &sbio->bi_flags);
 
-		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+		if (uptodate) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			      && uptodate)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
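
The raid1.c hunks fix a bio-reuse subtlety in process_checks(): bio_reset() wipes bi_flags and, in kernels of this vintage, leaves the bio marked up to date, so a read error recorded in BIO_UPTODATE would silently vanish before the mirrors are compared. The patch saves the flag before the reset, re-clears it afterwards if the read had failed, and later consults the cached value when deciding which devices need a rewrite. A minimal standalone sketch of the save/restore pattern; fake_bio, fake_bio_reset() and FLAG_UPTODATE are illustrative stand-ins, not the block-layer API:

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_UPTODATE (1UL << 0)	/* stands in for BIO_UPTODATE */

	struct fake_bio {
		unsigned long flags;
		int vcnt;
	};

	/* Models bio_reset(): wipes the bio and marks it up to date again,
	 * which is exactly what would lose a recorded read error. */
	static void fake_bio_reset(struct fake_bio *b)
	{
		b->vcnt = 0;
		b->flags = FLAG_UPTODATE;
	}

	static void fixup_for_reuse(struct fake_bio *b)
	{
		bool uptodate = b->flags & FLAG_UPTODATE;	/* save first */

		fake_bio_reset(b);
		if (!uptodate)
			b->flags &= ~FLAG_UPTODATE;	/* re-record the failure */
	}

	int main(void)
	{
		struct fake_bio b = { .flags = 0 };	/* a failed read */

		fixup_for_reuse(&b);
		printf("uptodate after reuse: %d\n",
		       !!(b.flags & FLAG_UPTODATE));	/* prints 0 */
		return 0;
	}
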
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
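
Two things change in the hunk above: the open-coded per-CPU allocation and teardown are factored into alloc_scratch_buffer()/free_scratch_buffer(), and raid5_free_percpu() now unregisters the hotplug notifier before freeing the buffers. The order matters: once the callback source is quiesced, no CPU-up event can repopulate memory that is about to be released. A single-threaded model of that teardown invariant; the names are illustrative, not the kernel API:

	#include <assert.h>
	#include <stdlib.h>

	struct subsys {
		int notifier_registered;
		void *percpu_buf;
	};

	/* Models a CPU_UP_PREPARE callback that allocates per-CPU state. */
	static void cpu_up_event(struct subsys *s)
	{
		if (!s->notifier_registered)
			return;			/* event source quiesced */
		if (!s->percpu_buf)
			s->percpu_buf = malloc(64);
	}

	/* Mirrors raid5_free_percpu(): stop callbacks, then free. */
	static void subsys_teardown(struct subsys *s)
	{
		s->notifier_registered = 0;	/* cf. unregister_cpu_notifier() */
		free(s->percpu_buf);		/* cf. free_scratch_buffer() loop */
		s->percpu_buf = NULL;
	}

	int main(void)
	{
		struct subsys s = { .notifier_registered = 1 };

		cpu_up_event(&s);		/* callback allocates */
		subsys_teardown(&s);
		cpu_up_event(&s);		/* late event: now a no-op */
		assert(s.percpu_buf == NULL);	/* nothing reallocated */
		return 0;
	}
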
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	conf->cpu_notify.notifier_call = raid456_cpu_notify;
+	conf->cpu_notify.priority = 0;
+	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
+#endif
 
 	get_online_cpus();
-	err = 0;
 	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
 			break;
 		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
 	}
-#ifdef CONFIG_HOTPLUG_CPU
-	conf->cpu_notify.notifier_call = raid456_cpu_notify;
-	conf->cpu_notify.priority = 0;
-	if (err == 0)
-		err = register_cpu_notifier(&conf->cpu_notify);
-#endif
 	put_online_cpus();
 
 	return err;
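
raid5_alloc_percpu() gets the mirror-image fix: the notifier is registered before the per-CPU buffers are populated, closing the window in which a CPU could come online after the for_each_present_cpu() loop had already run but before the notifier existed, leaving that CPU without scratch space. Running both the loop and the callback for the same CPU is harmless precisely because alloc_scratch_buffer() is idempotent: the !percpu->spare_page and !percpu->scribble checks make a second call a no-op. A sketch of that idempotence, assuming plain malloc()/free() in place of alloc_page()/kmalloc() and their kernel counterparts:

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative model of the scratch-buffer pair; not kernel code. */
	struct scratch {
		void *spare_page;	/* cf. percpu->spare_page (RAID6 only) */
		void *scribble;		/* cf. percpu->scribble */
	};

	static void free_scratch(struct scratch *s)
	{
		free(s->spare_page);
		free(s->scribble);
		s->spare_page = NULL;
		s->scribble = NULL;
	}

	static int alloc_scratch(struct scratch *s, int is_raid6, size_t len)
	{
		if (is_raid6 && !s->spare_page)
			s->spare_page = malloc(4096);
		if (!s->scribble)
			s->scribble = malloc(len);

		/* On partial failure, release everything, as the helper does. */
		if (!s->scribble || (is_raid6 && !s->spare_page)) {
			free_scratch(s);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct scratch s = { 0 };

		/* The hotplug callback and the setup loop may both run for
		 * one CPU; the !pointer checks make the second call a no-op. */
		alloc_scratch(&s, 1, 256);
		void *first = s.scribble;
		alloc_scratch(&s, 1, 256);
		printf("same buffer: %d\n", s.scribble == first);	/* 1 */
		free_scratch(&s);
		return 0;
	}
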