aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/raid1.c20
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c20
-rw-r--r--drivers/md/raid10.h2
4 files changed, 42 insertions, 1 deletion
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e023a25acf54..d8957d74fd25 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -45,6 +45,11 @@
45 */ 45 */
46#define NR_RAID1_BIOS 256 46#define NR_RAID1_BIOS 256
47 47
48/* When there are this many requests queued to be written by
49 * the raid1 thread, we become 'congested' to provide back-pressure
50 * for writeback.
51 */
52static int max_queued_requests = 1024;
48 53
49static void allow_barrier(struct r1conf *conf); 54static void allow_barrier(struct r1conf *conf);
50static void lower_barrier(struct r1conf *conf); 55static void lower_barrier(struct r1conf *conf);
@@ -598,6 +603,10 @@ int md_raid1_congested(struct mddev *mddev, int bits)
598 struct r1conf *conf = mddev->private; 603 struct r1conf *conf = mddev->private;
599 int i, ret = 0; 604 int i, ret = 0;
600 605
606 if ((bits & (1 << BDI_async_congested)) &&
607 conf->pending_count >= max_queued_requests)
608 return 1;
609
601 rcu_read_lock(); 610 rcu_read_lock();
602 for (i = 0; i < mddev->raid_disks; i++) { 611 for (i = 0; i < mddev->raid_disks; i++) {
603 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 612 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -638,10 +647,12 @@ static void flush_pending_writes(struct r1conf *conf)
638 if (conf->pending_bio_list.head) { 647 if (conf->pending_bio_list.head) {
639 struct bio *bio; 648 struct bio *bio;
640 bio = bio_list_get(&conf->pending_bio_list); 649 bio = bio_list_get(&conf->pending_bio_list);
650 conf->pending_count = 0;
641 spin_unlock_irq(&conf->device_lock); 651 spin_unlock_irq(&conf->device_lock);
642 /* flush any pending bitmap writes to 652 /* flush any pending bitmap writes to
643 * disk before proceeding w/ I/O */ 653 * disk before proceeding w/ I/O */
644 bitmap_unplug(conf->mddev->bitmap); 654 bitmap_unplug(conf->mddev->bitmap);
655 wake_up(&conf->wait_barrier);
645 656
646 while (bio) { /* submit pending writes */ 657 while (bio) { /* submit pending writes */
647 struct bio *next = bio->bi_next; 658 struct bio *next = bio->bi_next;
@@ -945,6 +956,11 @@ read_again:
945 /* 956 /*
946 * WRITE: 957 * WRITE:
947 */ 958 */
959 if (conf->pending_count >= max_queued_requests) {
960 md_wakeup_thread(mddev->thread);
961 wait_event(conf->wait_barrier,
962 conf->pending_count < max_queued_requests);
963 }
948 /* first select target devices under rcu_lock and 964 /* first select target devices under rcu_lock and
949 * inc refcount on their rdev. Record them by setting 965 * inc refcount on their rdev. Record them by setting
950 * bios[x] to bio 966 * bios[x] to bio
@@ -1108,6 +1124,7 @@ read_again:
1108 atomic_inc(&r1_bio->remaining); 1124 atomic_inc(&r1_bio->remaining);
1109 spin_lock_irqsave(&conf->device_lock, flags); 1125 spin_lock_irqsave(&conf->device_lock, flags);
1110 bio_list_add(&conf->pending_bio_list, mbio); 1126 bio_list_add(&conf->pending_bio_list, mbio);
1127 conf->pending_count++;
1111 spin_unlock_irqrestore(&conf->device_lock, flags); 1128 spin_unlock_irqrestore(&conf->device_lock, flags);
1112 } 1129 }
1113 /* Mustn't call r1_bio_write_done before this next test, 1130 /* Mustn't call r1_bio_write_done before this next test,
@@ -2418,6 +2435,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2418 init_waitqueue_head(&conf->wait_barrier); 2435 init_waitqueue_head(&conf->wait_barrier);
2419 2436
2420 bio_list_init(&conf->pending_bio_list); 2437 bio_list_init(&conf->pending_bio_list);
2438 conf->pending_count = 0;
2421 2439
2422 conf->last_used = -1; 2440 conf->last_used = -1;
2423 for (i = 0; i < conf->raid_disks; i++) { 2441 for (i = 0; i < conf->raid_disks; i++) {
@@ -2776,3 +2794,5 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
2776MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2794MODULE_ALIAS("md-personality-3"); /* RAID1 */
2777MODULE_ALIAS("md-raid1"); 2795MODULE_ALIAS("md-raid1");
2778MODULE_ALIAS("md-level-1"); 2796MODULE_ALIAS("md-level-1");
2797
2798module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5b16d09817df..c732b6cce935 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -46,6 +46,7 @@ struct r1conf {
46 46
47 /* queue pending writes to be submitted on unplug */ 47 /* queue pending writes to be submitted on unplug */
48 struct bio_list pending_bio_list; 48 struct bio_list pending_bio_list;
49 int pending_count;
49 50
50 /* for use when syncing mirrors: 51 /* for use when syncing mirrors:
51 * We don't allow both normal IO and resync/recovery IO at 52 * We don't allow both normal IO and resync/recovery IO at
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8427ff1c5af1..9496463ca5df 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -58,6 +58,12 @@
58 */ 58 */
59#define NR_RAID10_BIOS 256 59#define NR_RAID10_BIOS 256
60 60
61/* When there are this many requests queued to be written by
62 * the raid10 thread, we become 'congested' to provide back-pressure
63 * for writeback.
64 */
65static int max_queued_requests = 1024;
66
61static void allow_barrier(struct r10conf *conf); 67static void allow_barrier(struct r10conf *conf);
62static void lower_barrier(struct r10conf *conf); 68static void lower_barrier(struct r10conf *conf);
63 69
@@ -681,6 +687,10 @@ static int raid10_congested(void *data, int bits)
681 struct r10conf *conf = mddev->private; 687 struct r10conf *conf = mddev->private;
682 int i, ret = 0; 688 int i, ret = 0;
683 689
690 if ((bits & (1 << BDI_async_congested)) &&
691 conf->pending_count >= max_queued_requests)
692 return 1;
693
684 if (mddev_congested(mddev, bits)) 694 if (mddev_congested(mddev, bits))
685 return 1; 695 return 1;
686 rcu_read_lock(); 696 rcu_read_lock();
@@ -706,10 +716,12 @@ static void flush_pending_writes(struct r10conf *conf)
706 if (conf->pending_bio_list.head) { 716 if (conf->pending_bio_list.head) {
707 struct bio *bio; 717 struct bio *bio;
708 bio = bio_list_get(&conf->pending_bio_list); 718 bio = bio_list_get(&conf->pending_bio_list);
719 conf->pending_count = 0;
709 spin_unlock_irq(&conf->device_lock); 720 spin_unlock_irq(&conf->device_lock);
710 /* flush any pending bitmap writes to disk 721 /* flush any pending bitmap writes to disk
711 * before proceeding w/ I/O */ 722 * before proceeding w/ I/O */
712 bitmap_unplug(conf->mddev->bitmap); 723 bitmap_unplug(conf->mddev->bitmap);
724 wake_up(&conf->wait_barrier);
713 725
714 while (bio) { /* submit pending writes */ 726 while (bio) { /* submit pending writes */
715 struct bio *next = bio->bi_next; 727 struct bio *next = bio->bi_next;
@@ -996,6 +1008,11 @@ read_again:
996 /* 1008 /*
997 * WRITE: 1009 * WRITE:
998 */ 1010 */
1011 if (conf->pending_count >= max_queued_requests) {
1012 md_wakeup_thread(mddev->thread);
1013 wait_event(conf->wait_barrier,
1014 conf->pending_count < max_queued_requests);
1015 }
999 /* first select target devices under rcu_lock and 1016 /* first select target devices under rcu_lock and
1000 * inc refcount on their rdev. Record them by setting 1017 * inc refcount on their rdev. Record them by setting
1001 * bios[x] to bio 1018 * bios[x] to bio
@@ -1129,6 +1146,7 @@ retry_write:
1129 atomic_inc(&r10_bio->remaining); 1146 atomic_inc(&r10_bio->remaining);
1130 spin_lock_irqsave(&conf->device_lock, flags); 1147 spin_lock_irqsave(&conf->device_lock, flags);
1131 bio_list_add(&conf->pending_bio_list, mbio); 1148 bio_list_add(&conf->pending_bio_list, mbio);
1149 conf->pending_count++;
1132 spin_unlock_irqrestore(&conf->device_lock, flags); 1150 spin_unlock_irqrestore(&conf->device_lock, flags);
1133 } 1151 }
1134 1152
@@ -3086,3 +3104,5 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
3086MODULE_ALIAS("md-personality-9"); /* RAID10 */ 3104MODULE_ALIAS("md-personality-9"); /* RAID10 */
3087MODULE_ALIAS("md-raid10"); 3105MODULE_ALIAS("md-raid10");
3088MODULE_ALIAS("md-level-10"); 3106MODULE_ALIAS("md-level-10");
3107
3108module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 35489a569597..7facfdf841f4 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -42,7 +42,7 @@ struct r10conf {
42 struct list_head retry_list; 42 struct list_head retry_list;
43 /* queue pending writes and submit them on unplug */ 43 /* queue pending writes and submit them on unplug */
44 struct bio_list pending_bio_list; 44 struct bio_list pending_bio_list;
45 45 int pending_count;
46 46
47 spinlock_t resync_lock; 47 spinlock_t resync_lock;
48 int nr_pending; 48 int nr_pending;