author    Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:52:07 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:52:07 -0500
commit    7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree      33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /drivers/md
parent    73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay
users have been converted to use the new API for that. So let's kill off the
old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
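For context, a minimal sketch of the on-stack plugging API this commit converts to (the same blk_start_plug()/blk_finish_plug() pairing the dm-kcopyd hunk below adopts). The submit_batch() helper and its bio array are hypothetical; struct blk_plug, blk_start_plug(), blk_finish_plug(), and generic_make_request() are the real interfaces of that era's block layer:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper showing the replacement for per-queue plugging:
 * the plug lives on the submitter's stack, collects bios in a per-task
 * list, and blk_finish_plug() pushes the whole batch to the devices.
 * No queue state and no unplug_fn callback are involved.
 */
static void submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;   /* on-stack, per-task plug */
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                generic_make_request(bios[i]);  /* queued behind the plug */
        blk_finish_plug(&plug); /* flush everything batched above */
}
```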
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c     |   3
-rw-r--r--  drivers/md/dm-crypt.c   |   9
-rw-r--r--  drivers/md/dm-kcopyd.c  |  52
-rw-r--r--  drivers/md/dm-raid.c    |   2
-rw-r--r--  drivers/md/dm-raid1.c   |   2
-rw-r--r--  drivers/md/dm-table.c   |  24
-rw-r--r--  drivers/md/dm.c         |  33
-rw-r--r--  drivers/md/linear.c     |  17
-rw-r--r--  drivers/md/md.c         |   7
-rw-r--r--  drivers/md/multipath.c  |  31
-rw-r--r--  drivers/md/raid0.c      |  16
-rw-r--r--  drivers/md/raid1.c      |  83
-rw-r--r--  drivers/md/raid10.c     |  87
-rw-r--r--  drivers/md/raid5.c      |  62
-rw-r--r--  drivers/md/raid5.h      |   2
15 files changed, 58 insertions, 372 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59f..54bfc274b39a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			md_unplug(bitmap->mddev);
-			schedule();
+			io_schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_unplug(struct crypt_config *cc)
-{
-	blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
 	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-	if (!clone) {
-		kcryptd_unplug(cc);
+	if (!clone)
 		return 1;
-	}
 
 	crypt_inc_pending(io);
 
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..400cf35094a4 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
-	/*
-	 * Block devices to unplug.
-	 * Non-NULL pointer means that a block device has some pending requests
-	 * and needs to be unplugged.
-	 */
-	struct block_device *unplug[2];
-
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-	if (kc->unplug[rw] != NULL) {
-		blk_unplug(bdev_get_queue(kc->unplug[rw]));
-		kc->unplug[rw] = NULL;
-	}
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-			   struct block_device *bdev)
-{
-	if (likely(kc->unplug[rw] == bdev))
-		return;
-	unplug(kc, rw);
-	kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ) {
+	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-		prepare_unplug(job->kc, READ, job->source.bdev);
-	} else {
+	else {
 		if (job->num_dests > 1)
 			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-		if (!(io_req.bi_rw & REQ_UNPLUG))
-			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
 	}
 
 	return r;
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work)
 {
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
+	struct blk_plug plug;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list. io jobs call wake when they complete and it all
 	 * starts again.
-	 *
-	 * Note that io_jobs add block devices to the unplug array,
-	 * this array is cleared with "unplug" calls. It is thus
-	 * forbidden to run complete_jobs after io_jobs and before
-	 * unplug because the block device could be destroyed in
-	 * job completion callback.
 	 */
+	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
-	unplug(kc, READ);
-	unplug(kc, WRITE);
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	memset(kc->unplug, 0, sizeof(kc->unplug));
-
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-	md_raid5_unplug_device(rs->md.private);
+	md_raid5_kick_device(rs->md.private);
 }
 
 /*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
-
-	dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb965..f50a7b952257 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			blk_unplug(q);
-		else
-			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->unplug_fn)
-			cb->unplug_fn(cb);
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..d22b9905c168 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
@@ -1627,7 +1625,7 @@
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;
@@ -1647,11 +1645,8 @@ requeued:
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..38861b5b9d90 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	return maxsectors << 9;
 }
 
-static void linear_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf;
-	int i;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i=0; i < mddev->raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		blk_unplug(r_queue);
-	}
-	rcu_read_unlock();
-}
-
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -225,7 +209,6 @@ static int linear_run (mddev_t *mddev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-	mddev->queue->unplug_fn = linear_unplug;
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..ca0d79c264b9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4812,7 +4812,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 	__md_stop_writes(mddev);
 	md_stop(mddev);
 	mddev->queue->merge_bvec_fn = NULL;
-	mddev->queue->unplug_fn = NULL;
 	mddev->queue->backing_dev_info.congested_fn = NULL;
 
 	/* tell userspace to handle 'inactive' */
@@ -6669,8 +6668,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 void md_unplug(mddev_t *mddev)
 {
-	if (mddev->queue)
-		blk_unplug(mddev->queue);
 	if (mddev->plug)
 		mddev->plug->unplug_fn(mddev->plug);
 }
@@ -6853,7 +6850,6 @@ void md_do_sync(mddev_t *mddev)
 			    >= mddev->resync_max - mddev->curr_resync_completed
 				    )) {
 			/* time to update curr_resync_completed */
-			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
 			mddev->curr_resync_completed = j;
@@ -6929,7 +6925,6 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		md_unplug(mddev);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6948,8 +6943,6 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	md_unplug(mddev);
-
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..1cc8ed44e4ad 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	multipath_conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)
-		    && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
-	unplug_slaves(q->queuedata);
-}
-
-
 static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;
@@ -518,7 +488,6 @@ static int multipath_run (mddev_t *mddev)
 	 */
 	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = multipath_unplug;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..6338c0fe6208 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i;
-
-	for (i=0; i < raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-		blk_unplug(r_queue);
-	}
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			mdname(mddev),
 			(unsigned long long)smallest->sectors);
 	}
-	mddev->queue->unplug_fn = raid0_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..b67d822d57ae 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
 #define NR_RAID1_BIOS 256
 
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	r1bio_t *r1_bio;
 	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
 
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
-	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio && pi->mddev)
-		unplug_slaves(pi->mddev);
-
-	return r1_bio;
+	return kzalloc(size, gfp_flags);
 }
 
 static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int i, j;
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
-	if (!r1_bio) {
-		unplug_slaves(pi->mddev);
+	if (!r1_bio)
 		return NULL;
-	}
 
 	/*
 	 * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	return new_disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(mddev);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid1_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits)
 }
 
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);
 
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		blk_remove_plug(conf->mddev->queue);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
 }
 
 /* Barriers....
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid1_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid1_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid1_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync)
+	if (do_sync || !bitmap)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;
 
 	md_check_recovery(mddev);
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
 
 		mddev = r1_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+		if (test_bit(R1BIO_IsSync, &r1_bio->state))
 			sync_request_write(mddev, r1_bio);
-			unplug = 1;
-		} else {
+		else {
 			int disk;
 
 			/* we got a read error. Maybe the drive is bad. Maybe just
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
 				bio->bi_end_io = raid1_end_read_request;
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r1_bio;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }
 
 
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev)
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = raid1_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..e79f1c5bf71b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
  */
 #define NR_RAID10_BIOS 256
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	conf_t *conf = data;
-	r10bio_t *r10_bio;
 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
 
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
-	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio && conf->mddev)
-		unplug_slaves(conf->mddev);
-
-	return r10_bio;
+	return kzalloc(size, gfp_flags);
 }
 
 static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int nalloc;
 
 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
-	if (!r10_bio) {
-		unplug_slaves(conf->mddev);
+	if (!r10_bio)
 		return NULL;
-	}
 
 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 		nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
 	return disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i < conf->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(q->queuedata);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits)
 	return ret;
 }
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);
 
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		blk_remove_plug(conf->mddev->queue);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
 }
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
 	/* No wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid10_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid10_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync)
+	if (do_sync || !mddev->bitmap)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;
 
 	md_check_recovery(mddev);
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
 
 		mddev = r10_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+		if (test_bit(R10BIO_IsSync, &r10_bio->state))
 			sync_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
 			recovery_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else {
+		else {
 			int mirror;
 			/* we got a read error. Maybe the drive is bad. Maybe just
 			 * the block and we can fix it.
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = raid10_end_read_request;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }
 
 
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
 
-	mddev->queue->unplug_fn = raid10_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..e867ee42b152 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
 	return 0;
 }
 
-static void unplug_slaves(mddev_t *mddev);
-
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
 		  int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 				     < (conf->max_nr_stripes *3/4)
 				    || !conf->inactive_blocked),
 				    conf->device_lock,
-				    md_raid5_unplug_device(conf)
-			);
+				    md_raid5_kick_device(conf));
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
 				    conf->device_lock,
-				    unplug_slaves(conf->mddev)
-			);
+				    blk_flush_plug(current));
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
 	}
 }
 
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
 {
-	raid5_conf_t *conf = mddev->private;
-	int i;
-	int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
-	rcu_read_lock();
-	for (i = 0; i < devs; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&conf->device_lock, flags);
-
-	if (plugger_remove_plug(&conf->plug)) {
-		conf->seq_flush++;
-		raid5_activate_delayed(conf);
-	}
+	blk_flush_plug(current);
+	raid5_activate_delayed(conf);
 	md_wakeup_thread(conf->mddev->thread);
-
-	spin_unlock_irqrestore(&conf->device_lock, flags);
-
-	unplug_slaves(conf->mddev);
 }
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
 
 static void raid5_unplug(struct plug_handle *plug)
 {
 	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-	md_raid5_unplug_device(conf);
-}
 
-static void raid5_unplug_queue(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	md_raid5_unplug_device(mddev->private);
+	md_raid5_kick_device(conf);
 }
 
 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 				 * add failed due to overlap. Flush everything
 				 * and wait a while
 				 */
-				md_raid5_unplug_device(conf);
+				md_raid5_kick_device(conf);
 				release_stripe(sh);
 				schedule();
 				goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
-		unplug_slaves(mddev);
 
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
 	spin_unlock_irq(&conf->device_lock);
 
 	async_tx_issue_pending_all();
-	unplug_slaves(mddev);
 
 	pr_debug("--- raid5d inactive\n");
 }
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev)
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 		mddev->queue->queue_lock = &conf->device_lock;
-		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2ace0582b409..8d563a4f022a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
 }
 
 extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
 extern int raid5_set_cache_size(mddev_t *mddev, int size);
 #endif