path: root/drivers/md
author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-24 13:16:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-24 13:16:26 -0400
commit     6c5103890057b1bb781b26b7aae38d33e4c517d8 (patch)
tree       e6e57961dcddcb5841acb34956e70b9dc696a880 /drivers/md
parent     3dab04e6978e358ad2307bca563fabd6c5d2c58b (diff)
parent     9d2e157d970a73b3f270b631828e03eb452d525e (diff)
Merge branch 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.39/core' of git://git.kernel.dk/linux-2.6-block: (65 commits)
  Documentation/iostats.txt: bit-size reference etc.
  cfq-iosched: removing unnecessary think time checking
  cfq-iosched: Don't clear queue stats when preempt.
  blk-throttle: Reset group slice when limits are changed
  blk-cgroup: Only give unaccounted_time under debug
  cfq-iosched: Don't set active queue in preempt
  block: fix non-atomic access to genhd inflight structures
  block: attempt to merge with existing requests on plug flush
  block: NULL dereference on error path in __blkdev_get()
  cfq-iosched: Don't update group weights when on service tree
  fs: assign sb->s_bdi to default_backing_dev_info if the bdi is going away
  block: Require subsystems to explicitly allocate bio_set integrity mempool
  jbd2: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  jbd: finish conversion from WRITE_SYNC_PLUG to WRITE_SYNC and explicit plugging
  fs: make fsync_buffers_list() plug
  mm: make generic_writepages() use plugging
  blk-cgroup: Add unaccounted time to timeslice_used.
  block: fixup plugging stubs for !CONFIG_BLOCK
  block: remove obsolete comments for blkdev_issue_zeroout.
  blktrace: Use rq->cmd_flags directly in blk_add_trace_rq.
  ...

Fix up conflicts in fs/{aio.c,super.c}
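The common thread across the drivers/md changes below is the removal of the per-queue unplug callbacks (->unplug_fn, blk_unplug()) and the REQ_UNPLUG bio flag in favour of explicit on-stack plugging. A minimal sketch of the new pattern, with illustrative names (submit_bio_batch is not a function from this series):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative only: issue a batch of already-built bios under one
 * on-stack plug so the block layer can merge and sort them before
 * they reach the device.
 */
static void submit_bio_batch(int rw, struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(rw, bios[i]);
	blk_finish_plug(&plug);
}

The plug is flushed at blk_finish_plug(), or earlier by the scheduler if the task blocks, which is what lets the call sites below drop their manual unplug calls.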
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/bitmap.c5
-rw-r--r--drivers/md/dm-crypt.c9
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-kcopyd.c55
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-table.c31
-rw-r--r--drivers/md/dm.c52
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/linear.c20
-rw-r--r--drivers/md/md.c20
-rw-r--r--drivers/md/multipath.c38
-rw-r--r--drivers/md/raid0.c19
-rw-r--r--drivers/md/raid1.c91
-rw-r--r--drivers/md/raid10.c97
-rw-r--r--drivers/md/raid5.c63
-rw-r--r--drivers/md/raid5.h2
17 files changed, 102 insertions, 408 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a2ce0b2da281..5c9362792f1d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
347 atomic_inc(&bitmap->pending_writes); 347 atomic_inc(&bitmap->pending_writes);
348 set_buffer_locked(bh); 348 set_buffer_locked(bh);
349 set_buffer_mapped(bh); 349 set_buffer_mapped(bh);
350 submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh); 350 submit_bh(WRITE | REQ_SYNC, bh);
351 bh = bh->b_this_page; 351 bh = bh->b_this_page;
352 } 352 }
353 353
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1339 prepare_to_wait(&bitmap->overflow_wait, &__wait, 1339 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1340 TASK_UNINTERRUPTIBLE); 1340 TASK_UNINTERRUPTIBLE);
1341 spin_unlock_irq(&bitmap->lock); 1341 spin_unlock_irq(&bitmap->lock);
1342 md_unplug(bitmap->mddev); 1342 io_schedule();
1343 schedule();
1344 finish_wait(&bitmap->overflow_wait, &__wait); 1343 finish_wait(&bitmap->overflow_wait, &__wait);
1345 continue; 1344 continue;
1346 } 1345 }
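The bitmap overflow wait above drops md_unplug() + schedule() in favour of io_schedule() because, with on-stack plugging, the scheduler flushes the sleeping task's plug for it. A hedged, self-contained sketch of that wait shape (overflow_wait and the helper name are illustrative, not the bitmap.c code):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(overflow_wait);	/* illustrative wait queue */

/*
 * Illustrative only: block until woken.  io_schedule() lets the
 * scheduler flush the caller's on-stack plug (if any) before the task
 * sleeps, so no explicit unplug call is needed ahead of the wait.
 */
static void wait_for_overflow_to_clear(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&overflow_wait, &wait, TASK_UNINTERRUPTIBLE);
	io_schedule();
	finish_wait(&overflow_wait, &wait);
}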
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
991 clone->bi_destructor = dm_crypt_bio_destructor; 991 clone->bi_destructor = dm_crypt_bio_destructor;
992} 992}
993 993
994static void kcryptd_unplug(struct crypt_config *cc)
995{
996 blk_unplug(bdev_get_queue(cc->dev->bdev));
997}
998
999static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 994static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1000{ 995{
1001 struct crypt_config *cc = io->target->private; 996 struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1008 * one in order to decrypt the whole bio data *afterwards*. 1003 * one in order to decrypt the whole bio data *afterwards*.
1009 */ 1004 */
1010 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); 1005 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
1011 if (!clone) { 1006 if (!clone)
1012 kcryptd_unplug(cc);
1013 return 1; 1007 return 1;
1014 }
1015 1008
1016 crypt_inc_pending(io); 1009 crypt_inc_pending(io);
1017 1010
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 136d4f71a116..76a5af00a26b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
352 BUG_ON(num_regions > DM_IO_MAX_REGIONS); 352 BUG_ON(num_regions > DM_IO_MAX_REGIONS);
353 353
354 if (sync) 354 if (sync)
355 rw |= REQ_SYNC | REQ_UNPLUG; 355 rw |= REQ_SYNC;
356 356
357 /* 357 /*
358 * For multiple regions we need to be careful to rewind 358 * For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..1bb73a13ca40 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
37 unsigned int nr_pages; 37 unsigned int nr_pages;
38 unsigned int nr_free_pages; 38 unsigned int nr_free_pages;
39 39
40 /*
41 * Block devices to unplug.
42 * Non-NULL pointer means that a block device has some pending requests
43 * and needs to be unplugged.
44 */
45 struct block_device *unplug[2];
46
47 struct dm_io_client *io_client; 40 struct dm_io_client *io_client;
48 41
49 wait_queue_head_t destroyq; 42 wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
315 return 0; 308 return 0;
316} 309}
317 310
318/*
319 * Unplug the block device at the specified index.
320 */
321static void unplug(struct dm_kcopyd_client *kc, int rw)
322{
323 if (kc->unplug[rw] != NULL) {
324 blk_unplug(bdev_get_queue(kc->unplug[rw]));
325 kc->unplug[rw] = NULL;
326 }
327}
328
329/*
330 * Prepare block device unplug. If there's another device
331 * to be unplugged at the same array index, we unplug that
332 * device first.
333 */
334static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
335 struct block_device *bdev)
336{
337 if (likely(kc->unplug[rw] == bdev))
338 return;
339 unplug(kc, rw);
340 kc->unplug[rw] = bdev;
341}
342
343static void complete_io(unsigned long error, void *context) 311static void complete_io(unsigned long error, void *context)
344{ 312{
345 struct kcopyd_job *job = (struct kcopyd_job *) context; 313 struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
386 .client = job->kc->io_client, 354 .client = job->kc->io_client,
387 }; 355 };
388 356
389 if (job->rw == READ) { 357 if (job->rw == READ)
390 r = dm_io(&io_req, 1, &job->source, NULL); 358 r = dm_io(&io_req, 1, &job->source, NULL);
391 prepare_unplug(job->kc, READ, job->source.bdev); 359 else
392 } else {
393 if (job->num_dests > 1)
394 io_req.bi_rw |= REQ_UNPLUG;
395 r = dm_io(&io_req, job->num_dests, job->dests, NULL); 360 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
396 if (!(io_req.bi_rw & REQ_UNPLUG))
397 prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
398 }
399 361
400 return r; 362 return r;
401} 363}
@@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
466{ 428{
467 struct dm_kcopyd_client *kc = container_of(work, 429 struct dm_kcopyd_client *kc = container_of(work,
468 struct dm_kcopyd_client, kcopyd_work); 430 struct dm_kcopyd_client, kcopyd_work);
431 struct blk_plug plug;
469 432
470 /* 433 /*
471 * The order that these are called is *very* important. 434 * The order that these are called is *very* important.
@@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
473 * Pages jobs when successful will jump onto the io jobs 436 * Pages jobs when successful will jump onto the io jobs
474 * list. io jobs call wake when they complete and it all 437 * list. io jobs call wake when they complete and it all
475 * starts again. 438 * starts again.
476 *
477 * Note that io_jobs add block devices to the unplug array,
478 * this array is cleared with "unplug" calls. It is thus
479 * forbidden to run complete_jobs after io_jobs and before
480 * unplug because the block device could be destroyed in
481 * job completion callback.
482 */ 439 */
440 blk_start_plug(&plug);
483 process_jobs(&kc->complete_jobs, kc, run_complete_job); 441 process_jobs(&kc->complete_jobs, kc, run_complete_job);
484 process_jobs(&kc->pages_jobs, kc, run_pages_job); 442 process_jobs(&kc->pages_jobs, kc, run_pages_job);
485 process_jobs(&kc->io_jobs, kc, run_io_job); 443 process_jobs(&kc->io_jobs, kc, run_io_job);
486 unplug(kc, READ); 444 blk_finish_plug(&plug);
487 unplug(kc, WRITE);
488} 445}
489 446
490/* 447/*
@@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
665 INIT_LIST_HEAD(&kc->io_jobs); 622 INIT_LIST_HEAD(&kc->io_jobs);
666 INIT_LIST_HEAD(&kc->pages_jobs); 623 INIT_LIST_HEAD(&kc->pages_jobs);
667 624
668 memset(kc->unplug, 0, sizeof(kc->unplug));
669
670 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); 625 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
671 if (!kc->job_pool) 626 if (!kc->job_pool)
672 goto bad_slab; 627 goto bad_slab;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
394{ 394{
395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
396 396
397 md_raid5_unplug_device(rs->md.private); 397 md_raid5_kick_device(rs->md.private);
398} 398}
399 399
400/* 400/*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
842 do_reads(ms, &reads); 842 do_reads(ms, &reads);
843 do_writes(ms, &writes); 843 do_writes(ms, &writes);
844 do_failures(ms, &failures); 844 do_failures(ms, &failures);
845
846 dm_table_unplug_all(ms->ti->table);
847} 845}
848 846
849/*----------------------------------------------------------------- 847/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb965..416d4e258df6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -55,6 +55,7 @@ struct dm_table {
55 struct dm_target *targets; 55 struct dm_target *targets;
56 56
57 unsigned discards_supported:1; 57 unsigned discards_supported:1;
58 unsigned integrity_supported:1;
58 59
59 /* 60 /*
60 * Indicates the rw permissions for the new logical 61 * Indicates the rw permissions for the new logical
@@ -859,7 +860,7 @@ int dm_table_alloc_md_mempools(struct dm_table *t)
859 return -EINVAL; 860 return -EINVAL;
860 } 861 }
861 862
862 t->mempools = dm_alloc_md_mempools(type); 863 t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
863 if (!t->mempools) 864 if (!t->mempools)
864 return -ENOMEM; 865 return -ENOMEM;
865 866
@@ -935,8 +936,10 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
935 struct dm_dev_internal *dd; 936 struct dm_dev_internal *dd;
936 937
937 list_for_each_entry(dd, devices, list) 938 list_for_each_entry(dd, devices, list)
938 if (bdev_get_integrity(dd->dm_dev.bdev)) 939 if (bdev_get_integrity(dd->dm_dev.bdev)) {
940 t->integrity_supported = 1;
939 return blk_integrity_register(dm_disk(md), NULL); 941 return blk_integrity_register(dm_disk(md), NULL);
942 }
940 943
941 return 0; 944 return 0;
942} 945}
@@ -1275,29 +1278,6 @@ int dm_table_any_busy_target(struct dm_table *t)
1275 return 0; 1278 return 0;
1276} 1279}
1277 1280
1278void dm_table_unplug_all(struct dm_table *t)
1279{
1280 struct dm_dev_internal *dd;
1281 struct list_head *devices = dm_table_get_devices(t);
1282 struct dm_target_callbacks *cb;
1283
1284 list_for_each_entry(dd, devices, list) {
1285 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1286 char b[BDEVNAME_SIZE];
1287
1288 if (likely(q))
1289 blk_unplug(q);
1290 else
1291 DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
1292 dm_device_name(t->md),
1293 bdevname(dd->dm_dev.bdev, b));
1294 }
1295
1296 list_for_each_entry(cb, &t->target_callbacks, list)
1297 if (cb->unplug_fn)
1298 cb->unplug_fn(cb);
1299}
1300
1301struct mapped_device *dm_table_get_md(struct dm_table *t) 1281struct mapped_device *dm_table_get_md(struct dm_table *t)
1302{ 1282{
1303 return t->md; 1283 return t->md;
@@ -1345,4 +1325,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
1345EXPORT_SYMBOL(dm_table_get_md); 1325EXPORT_SYMBOL(dm_table_get_md);
1346EXPORT_SYMBOL(dm_table_put); 1326EXPORT_SYMBOL(dm_table_put);
1347EXPORT_SYMBOL(dm_table_get); 1327EXPORT_SYMBOL(dm_table_get);
1348EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..0cf68b478878 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -477,7 +477,8 @@ static void start_io_acct(struct dm_io *io)
477 cpu = part_stat_lock(); 477 cpu = part_stat_lock();
478 part_round_stats(cpu, &dm_disk(md)->part0); 478 part_round_stats(cpu, &dm_disk(md)->part0);
479 part_stat_unlock(); 479 part_stat_unlock();
480 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 480 atomic_set(&dm_disk(md)->part0.in_flight[rw],
481 atomic_inc_return(&md->pending[rw]));
481} 482}
482 483
483static void end_io_acct(struct dm_io *io) 484static void end_io_acct(struct dm_io *io)
@@ -497,8 +498,8 @@ static void end_io_acct(struct dm_io *io)
497 * After this is decremented the bio must not be touched if it is 498 * After this is decremented the bio must not be touched if it is
498 * a flush. 499 * a flush.
499 */ 500 */
500 dm_disk(md)->part0.in_flight[rw] = pending = 501 pending = atomic_dec_return(&md->pending[rw]);
501 atomic_dec_return(&md->pending[rw]); 502 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
502 pending += atomic_read(&md->pending[rw^0x1]); 503 pending += atomic_read(&md->pending[rw^0x1]);
503 504
504 /* nudge anyone waiting on suspend queue */ 505 /* nudge anyone waiting on suspend queue */
@@ -807,8 +808,6 @@ void dm_requeue_unmapped_request(struct request *clone)
807 dm_unprep_request(rq); 808 dm_unprep_request(rq);
808 809
809 spin_lock_irqsave(q->queue_lock, flags); 810 spin_lock_irqsave(q->queue_lock, flags);
810 if (elv_queue_empty(q))
811 blk_plug_device(q);
812 blk_requeue_request(q, rq); 811 blk_requeue_request(q, rq);
813 spin_unlock_irqrestore(q->queue_lock, flags); 812 spin_unlock_irqrestore(q->queue_lock, flags);
814 813
@@ -1613,10 +1612,10 @@ static void dm_request_fn(struct request_queue *q)
1613 * number of in-flight I/Os after the queue is stopped in 1612 * number of in-flight I/Os after the queue is stopped in
1614 * dm_suspend(). 1613 * dm_suspend().
1615 */ 1614 */
1616 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1615 while (!blk_queue_stopped(q)) {
1617 rq = blk_peek_request(q); 1616 rq = blk_peek_request(q);
1618 if (!rq) 1617 if (!rq)
1619 goto plug_and_out; 1618 goto delay_and_out;
1620 1619
1621 /* always use block 0 to find the target for flushes for now */ 1620 /* always use block 0 to find the target for flushes for now */
1622 pos = 0; 1621 pos = 0;
@@ -1627,7 +1626,7 @@ static void dm_request_fn(struct request_queue *q)
1627 BUG_ON(!dm_target_is_valid(ti)); 1626 BUG_ON(!dm_target_is_valid(ti));
1628 1627
1629 if (ti->type->busy && ti->type->busy(ti)) 1628 if (ti->type->busy && ti->type->busy(ti))
1630 goto plug_and_out; 1629 goto delay_and_out;
1631 1630
1632 blk_start_request(rq); 1631 blk_start_request(rq);
1633 clone = rq->special; 1632 clone = rq->special;
@@ -1647,11 +1646,8 @@ requeued:
1647 BUG_ON(!irqs_disabled()); 1646 BUG_ON(!irqs_disabled());
1648 spin_lock(q->queue_lock); 1647 spin_lock(q->queue_lock);
1649 1648
1650plug_and_out: 1649delay_and_out:
1651 if (!elv_queue_empty(q)) 1650 blk_delay_queue(q, HZ / 10);
1652 /* Some requests still remain, retry later */
1653 blk_plug_device(q);
1654
1655out: 1651out:
1656 dm_table_put(map); 1652 dm_table_put(map);
1657 1653
@@ -1680,20 +1676,6 @@ static int dm_lld_busy(struct request_queue *q)
1680 return r; 1676 return r;
1681} 1677}
1682 1678
1683static void dm_unplug_all(struct request_queue *q)
1684{
1685 struct mapped_device *md = q->queuedata;
1686 struct dm_table *map = dm_get_live_table(md);
1687
1688 if (map) {
1689 if (dm_request_based(md))
1690 generic_unplug_device(q);
1691
1692 dm_table_unplug_all(map);
1693 dm_table_put(map);
1694 }
1695}
1696
1697static int dm_any_congested(void *congested_data, int bdi_bits) 1679static int dm_any_congested(void *congested_data, int bdi_bits)
1698{ 1680{
1699 int r = bdi_bits; 1681 int r = bdi_bits;
@@ -1817,7 +1799,6 @@ static void dm_init_md_queue(struct mapped_device *md)
1817 md->queue->backing_dev_info.congested_data = md; 1799 md->queue->backing_dev_info.congested_data = md;
1818 blk_queue_make_request(md->queue, dm_request); 1800 blk_queue_make_request(md->queue, dm_request);
1819 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1801 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1820 md->queue->unplug_fn = dm_unplug_all;
1821 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1802 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1822 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); 1803 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
1823} 1804}
@@ -2263,8 +2244,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2263 int r = 0; 2244 int r = 0;
2264 DECLARE_WAITQUEUE(wait, current); 2245 DECLARE_WAITQUEUE(wait, current);
2265 2246
2266 dm_unplug_all(md->queue);
2267
2268 add_wait_queue(&md->wait, &wait); 2247 add_wait_queue(&md->wait, &wait);
2269 2248
2270 while (1) { 2249 while (1) {
@@ -2539,7 +2518,6 @@ int dm_resume(struct mapped_device *md)
2539 2518
2540 clear_bit(DMF_SUSPENDED, &md->flags); 2519 clear_bit(DMF_SUSPENDED, &md->flags);
2541 2520
2542 dm_table_unplug_all(map);
2543 r = 0; 2521 r = 0;
2544out: 2522out:
2545 dm_table_put(map); 2523 dm_table_put(map);
@@ -2643,9 +2621,10 @@ int dm_noflush_suspending(struct dm_target *ti)
2643} 2621}
2644EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2622EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2645 2623
2646struct dm_md_mempools *dm_alloc_md_mempools(unsigned type) 2624struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2647{ 2625{
2648 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); 2626 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2627 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
2649 2628
2650 if (!pools) 2629 if (!pools)
2651 return NULL; 2630 return NULL;
@@ -2662,13 +2641,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2662 if (!pools->tio_pool) 2641 if (!pools->tio_pool)
2663 goto free_io_pool_and_out; 2642 goto free_io_pool_and_out;
2664 2643
2665 pools->bs = (type == DM_TYPE_BIO_BASED) ? 2644 pools->bs = bioset_create(pool_size, 0);
2666 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2667 if (!pools->bs) 2645 if (!pools->bs)
2668 goto free_tio_pool_and_out; 2646 goto free_tio_pool_and_out;
2669 2647
2648 if (integrity && bioset_integrity_create(pools->bs, pool_size))
2649 goto free_bioset_and_out;
2650
2670 return pools; 2651 return pools;
2671 2652
2653free_bioset_and_out:
2654 bioset_free(pools->bs);
2655
2672free_tio_pool_and_out: 2656free_tio_pool_and_out:
2673 mempool_destroy(pools->tio_pool); 2657 mempool_destroy(pools->tio_pool);
2674 2658
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 0c2dd5f4af76..1aaf16746da8 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -149,7 +149,7 @@ void dm_kcopyd_exit(void);
149/* 149/*
150 * Mempool operations 150 * Mempool operations
151 */ 151 */
152struct dm_md_mempools *dm_alloc_md_mempools(unsigned type); 152struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
153void dm_free_md_mempools(struct dm_md_mempools *pools); 153void dm_free_md_mempools(struct dm_md_mempools *pools);
154 154
155#endif 155#endif
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0ed7f6bc2a7f..abfb59a61ede 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
87 return maxsectors << 9; 87 return maxsectors << 9;
88} 88}
89 89
90static void linear_unplug(struct request_queue *q)
91{
92 mddev_t *mddev = q->queuedata;
93 linear_conf_t *conf;
94 int i;
95
96 rcu_read_lock();
97 conf = rcu_dereference(mddev->private);
98
99 for (i=0; i < mddev->raid_disks; i++) {
100 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
101 blk_unplug(r_queue);
102 }
103 rcu_read_unlock();
104}
105
106static int linear_congested(void *data, int bits) 90static int linear_congested(void *data, int bits)
107{ 91{
108 mddev_t *mddev = data; 92 mddev_t *mddev = data;
@@ -224,11 +208,9 @@ static int linear_run (mddev_t *mddev)
224 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 208 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
225 209
226 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); 210 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
227 mddev->queue->unplug_fn = linear_unplug;
228 mddev->queue->backing_dev_info.congested_fn = linear_congested; 211 mddev->queue->backing_dev_info.congested_fn = linear_congested;
229 mddev->queue->backing_dev_info.congested_data = mddev; 212 mddev->queue->backing_dev_info.congested_data = mddev;
230 md_integrity_register(mddev); 213 return md_integrity_register(mddev);
231 return 0;
232} 214}
233 215
234static void free_conf(struct rcu_head *head) 216static void free_conf(struct rcu_head *head)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ad7723b172..06ecea751a39 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
780 bio->bi_end_io = super_written; 780 bio->bi_end_io = super_written;
781 781
782 atomic_inc(&mddev->pending_writes); 782 atomic_inc(&mddev->pending_writes);
783 submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA, 783 submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
784 bio);
785} 784}
786 785
787void md_super_wait(mddev_t *mddev) 786void md_super_wait(mddev_t *mddev)
@@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
809 struct completion event; 808 struct completion event;
810 int ret; 809 int ret;
811 810
812 rw |= REQ_SYNC | REQ_UNPLUG; 811 rw |= REQ_SYNC;
813 812
814 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 813 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
815 rdev->meta_bdev : rdev->bdev; 814 rdev->meta_bdev : rdev->bdev;
@@ -1804,8 +1803,12 @@ int md_integrity_register(mddev_t *mddev)
1804 mdname(mddev)); 1803 mdname(mddev));
1805 return -EINVAL; 1804 return -EINVAL;
1806 } 1805 }
1807 printk(KERN_NOTICE "md: data integrity on %s enabled\n", 1806 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1808 mdname(mddev)); 1807 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1808 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
1809 mdname(mddev));
1810 return -EINVAL;
1811 }
1809 return 0; 1812 return 0;
1810} 1813}
1811EXPORT_SYMBOL(md_integrity_register); 1814EXPORT_SYMBOL(md_integrity_register);
@@ -4817,7 +4820,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4817 __md_stop_writes(mddev); 4820 __md_stop_writes(mddev);
4818 md_stop(mddev); 4821 md_stop(mddev);
4819 mddev->queue->merge_bvec_fn = NULL; 4822 mddev->queue->merge_bvec_fn = NULL;
4820 mddev->queue->unplug_fn = NULL;
4821 mddev->queue->backing_dev_info.congested_fn = NULL; 4823 mddev->queue->backing_dev_info.congested_fn = NULL;
4822 4824
4823 /* tell userspace to handle 'inactive' */ 4825 /* tell userspace to handle 'inactive' */
@@ -6692,8 +6694,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
6692 6694
6693void md_unplug(mddev_t *mddev) 6695void md_unplug(mddev_t *mddev)
6694{ 6696{
6695 if (mddev->queue)
6696 blk_unplug(mddev->queue);
6697 if (mddev->plug) 6697 if (mddev->plug)
6698 mddev->plug->unplug_fn(mddev->plug); 6698 mddev->plug->unplug_fn(mddev->plug);
6699} 6699}
@@ -6876,7 +6876,6 @@ void md_do_sync(mddev_t *mddev)
6876 >= mddev->resync_max - mddev->curr_resync_completed 6876 >= mddev->resync_max - mddev->curr_resync_completed
6877 )) { 6877 )) {
6878 /* time to update curr_resync_completed */ 6878 /* time to update curr_resync_completed */
6879 md_unplug(mddev);
6880 wait_event(mddev->recovery_wait, 6879 wait_event(mddev->recovery_wait,
6881 atomic_read(&mddev->recovery_active) == 0); 6880 atomic_read(&mddev->recovery_active) == 0);
6882 mddev->curr_resync_completed = j; 6881 mddev->curr_resync_completed = j;
@@ -6952,7 +6951,6 @@ void md_do_sync(mddev_t *mddev)
6952 * about not overloading the IO subsystem. (things like an 6951 * about not overloading the IO subsystem. (things like an
6953 * e2fsck being done on the RAID array should execute fast) 6952 * e2fsck being done on the RAID array should execute fast)
6954 */ 6953 */
6955 md_unplug(mddev);
6956 cond_resched(); 6954 cond_resched();
6957 6955
6958 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 6956 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6971,8 +6969,6 @@ void md_do_sync(mddev_t *mddev)
6971 * this also signals 'finished resyncing' to md_stop 6969 * this also signals 'finished resyncing' to md_stop
6972 */ 6970 */
6973 out: 6971 out:
6974 md_unplug(mddev);
6975
6976 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6972 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6977 6973
6978 /* tell personality that we are finished */ 6974 /* tell personality that we are finished */
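md_integrity_register() above now creates the integrity mempool for the array's bio_set itself, since "block: Require subsystems to explicitly allocate bio_set integrity mempool" removed the implicit allocation. A hedged sketch of that setup (the helper name is illustrative):

#include <linux/bio.h>

/*
 * Illustrative only: create a bio_set and, when the underlying devices
 * support integrity metadata, attach an integrity mempool to it
 * explicitly; the block layer no longer does this behind the caller's
 * back.
 */
static struct bio_set *create_bs_with_integrity(int want_integrity)
{
	struct bio_set *bs;

	bs = bioset_create(BIO_POOL_SIZE, 0);
	if (!bs)
		return NULL;

	if (want_integrity && bioset_integrity_create(bs, BIO_POOL_SIZE)) {
		bioset_free(bs);
		return NULL;
	}
	return bs;
}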
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3a62d440e27b..c35890990985 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
106 rdev_dec_pending(rdev, conf->mddev); 106 rdev_dec_pending(rdev, conf->mddev);
107} 107}
108 108
109static void unplug_slaves(mddev_t *mddev)
110{
111 multipath_conf_t *conf = mddev->private;
112 int i;
113
114 rcu_read_lock();
115 for (i=0; i<mddev->raid_disks; i++) {
116 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
117 if (rdev && !test_bit(Faulty, &rdev->flags)
118 && atomic_read(&rdev->nr_pending)) {
119 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
120
121 atomic_inc(&rdev->nr_pending);
122 rcu_read_unlock();
123
124 blk_unplug(r_queue);
125
126 rdev_dec_pending(rdev, mddev);
127 rcu_read_lock();
128 }
129 }
130 rcu_read_unlock();
131}
132
133static void multipath_unplug(struct request_queue *q)
134{
135 unplug_slaves(q->queuedata);
136}
137
138
139static int multipath_make_request(mddev_t *mddev, struct bio * bio) 109static int multipath_make_request(mddev_t *mddev, struct bio * bio)
140{ 110{
141 multipath_conf_t *conf = mddev->private; 111 multipath_conf_t *conf = mddev->private;
@@ -345,7 +315,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
345 p->rdev = rdev; 315 p->rdev = rdev;
346 goto abort; 316 goto abort;
347 } 317 }
348 md_integrity_register(mddev); 318 err = md_integrity_register(mddev);
349 } 319 }
350abort: 320abort:
351 321
@@ -517,10 +487,12 @@ static int multipath_run (mddev_t *mddev)
517 */ 487 */
518 md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); 488 md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
519 489
520 mddev->queue->unplug_fn = multipath_unplug;
521 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 490 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
522 mddev->queue->backing_dev_info.congested_data = mddev; 491 mddev->queue->backing_dev_info.congested_data = mddev;
523 md_integrity_register(mddev); 492
493 if (md_integrity_register(mddev))
494 goto out_free_conf;
495
524 return 0; 496 return 0;
525 497
526out_free_conf: 498out_free_conf:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c0ac457f1218..e86bf3682e1e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
25#include "raid0.h" 25#include "raid0.h"
26#include "raid5.h" 26#include "raid5.h"
27 27
28static void raid0_unplug(struct request_queue *q)
29{
30 mddev_t *mddev = q->queuedata;
31 raid0_conf_t *conf = mddev->private;
32 mdk_rdev_t **devlist = conf->devlist;
33 int raid_disks = conf->strip_zone[0].nb_dev;
34 int i;
35
36 for (i=0; i < raid_disks; i++) {
37 struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
38
39 blk_unplug(r_queue);
40 }
41}
42
43static int raid0_congested(void *data, int bits) 28static int raid0_congested(void *data, int bits)
44{ 29{
45 mddev_t *mddev = data; 30 mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
272 mdname(mddev), 257 mdname(mddev),
273 (unsigned long long)smallest->sectors); 258 (unsigned long long)smallest->sectors);
274 } 259 }
275 mddev->queue->unplug_fn = raid0_unplug;
276 mddev->queue->backing_dev_info.congested_fn = raid0_congested; 260 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
277 mddev->queue->backing_dev_info.congested_data = mddev; 261 mddev->queue->backing_dev_info.congested_data = mddev;
278 262
@@ -395,8 +379,7 @@ static int raid0_run(mddev_t *mddev)
395 379
396 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); 380 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
397 dump_zones(mddev); 381 dump_zones(mddev);
398 md_integrity_register(mddev); 382 return md_integrity_register(mddev);
399 return 0;
400} 383}
401 384
402static int raid0_stop(mddev_t *mddev) 385static int raid0_stop(mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 06cd712807d0..c2a21ae56d97 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
52#define NR_RAID1_BIOS 256 52#define NR_RAID1_BIOS 256
53 53
54 54
55static void unplug_slaves(mddev_t *mddev);
56
57static void allow_barrier(conf_t *conf); 55static void allow_barrier(conf_t *conf);
58static void lower_barrier(conf_t *conf); 56static void lower_barrier(conf_t *conf);
59 57
60static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 58static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
61{ 59{
62 struct pool_info *pi = data; 60 struct pool_info *pi = data;
63 r1bio_t *r1_bio;
64 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 61 int size = offsetof(r1bio_t, bios[pi->raid_disks]);
65 62
66 /* allocate a r1bio with room for raid_disks entries in the bios array */ 63 /* allocate a r1bio with room for raid_disks entries in the bios array */
67 r1_bio = kzalloc(size, gfp_flags); 64 return kzalloc(size, gfp_flags);
68 if (!r1_bio && pi->mddev)
69 unplug_slaves(pi->mddev);
70
71 return r1_bio;
72} 65}
73 66
74static void r1bio_pool_free(void *r1_bio, void *data) 67static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
91 int i, j; 84 int i, j;
92 85
93 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 86 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
94 if (!r1_bio) { 87 if (!r1_bio)
95 unplug_slaves(pi->mddev);
96 return NULL; 88 return NULL;
97 }
98 89
99 /* 90 /*
100 * Allocate bios : 1 for reading, n-1 for writing 91 * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
520 return new_disk; 511 return new_disk;
521} 512}
522 513
523static void unplug_slaves(mddev_t *mddev)
524{
525 conf_t *conf = mddev->private;
526 int i;
527
528 rcu_read_lock();
529 for (i=0; i<mddev->raid_disks; i++) {
530 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
531 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
532 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
533
534 atomic_inc(&rdev->nr_pending);
535 rcu_read_unlock();
536
537 blk_unplug(r_queue);
538
539 rdev_dec_pending(rdev, mddev);
540 rcu_read_lock();
541 }
542 }
543 rcu_read_unlock();
544}
545
546static void raid1_unplug(struct request_queue *q)
547{
548 mddev_t *mddev = q->queuedata;
549
550 unplug_slaves(mddev);
551 md_wakeup_thread(mddev->thread);
552}
553
554static int raid1_congested(void *data, int bits) 514static int raid1_congested(void *data, int bits)
555{ 515{
556 mddev_t *mddev = data; 516 mddev_t *mddev = data;
@@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits)
580} 540}
581 541
582 542
583static int flush_pending_writes(conf_t *conf) 543static void flush_pending_writes(conf_t *conf)
584{ 544{
585 /* Any writes that have been queued but are awaiting 545 /* Any writes that have been queued but are awaiting
586 * bitmap updates get flushed here. 546 * bitmap updates get flushed here.
587 * We return 1 if any requests were actually submitted.
588 */ 547 */
589 int rv = 0;
590
591 spin_lock_irq(&conf->device_lock); 548 spin_lock_irq(&conf->device_lock);
592 549
593 if (conf->pending_bio_list.head) { 550 if (conf->pending_bio_list.head) {
594 struct bio *bio; 551 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 552 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
600 spin_unlock_irq(&conf->device_lock); 553 spin_unlock_irq(&conf->device_lock);
601 /* flush any pending bitmap writes to 554 /* flush any pending bitmap writes to
602 * disk before proceeding w/ I/O */ 555 * disk before proceeding w/ I/O */
@@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
608 generic_make_request(bio); 561 generic_make_request(bio);
609 bio = next; 562 bio = next;
610 } 563 }
611 rv = 1;
612 } else 564 } else
613 spin_unlock_irq(&conf->device_lock); 565 spin_unlock_irq(&conf->device_lock);
614 return rv; 566}
567
568static void md_kick_device(mddev_t *mddev)
569{
570 blk_flush_plug(current);
571 md_wakeup_thread(mddev->thread);
615} 572}
616 573
617/* Barriers.... 574/* Barriers....
@@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf)
643 600
644 /* Wait until no block IO is waiting */ 601 /* Wait until no block IO is waiting */
645 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 602 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
646 conf->resync_lock, 603 conf->resync_lock, md_kick_device(conf->mddev));
647 raid1_unplug(conf->mddev->queue));
648 604
649 /* block any new IO from starting */ 605 /* block any new IO from starting */
650 conf->barrier++; 606 conf->barrier++;
@@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf)
652 /* Now wait for all pending IO to complete */ 608 /* Now wait for all pending IO to complete */
653 wait_event_lock_irq(conf->wait_barrier, 609 wait_event_lock_irq(conf->wait_barrier,
654 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 610 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
655 conf->resync_lock, 611 conf->resync_lock, md_kick_device(conf->mddev));
656 raid1_unplug(conf->mddev->queue));
657 612
658 spin_unlock_irq(&conf->resync_lock); 613 spin_unlock_irq(&conf->resync_lock);
659} 614}
@@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf)
675 conf->nr_waiting++; 630 conf->nr_waiting++;
676 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 631 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
677 conf->resync_lock, 632 conf->resync_lock,
678 raid1_unplug(conf->mddev->queue)); 633 md_kick_device(conf->mddev));
679 conf->nr_waiting--; 634 conf->nr_waiting--;
680 } 635 }
681 conf->nr_pending++; 636 conf->nr_pending++;
@@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf)
712 conf->nr_pending == conf->nr_queued+1, 667 conf->nr_pending == conf->nr_queued+1,
713 conf->resync_lock, 668 conf->resync_lock,
714 ({ flush_pending_writes(conf); 669 ({ flush_pending_writes(conf);
715 raid1_unplug(conf->mddev->queue); })); 670 md_kick_device(conf->mddev); }));
716 spin_unlock_irq(&conf->resync_lock); 671 spin_unlock_irq(&conf->resync_lock);
717} 672}
718static void unfreeze_array(conf_t *conf) 673static void unfreeze_array(conf_t *conf)
@@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
962 atomic_inc(&r1_bio->remaining); 917 atomic_inc(&r1_bio->remaining);
963 spin_lock_irqsave(&conf->device_lock, flags); 918 spin_lock_irqsave(&conf->device_lock, flags);
964 bio_list_add(&conf->pending_bio_list, mbio); 919 bio_list_add(&conf->pending_bio_list, mbio);
965 blk_plug_device_unlocked(mddev->queue);
966 spin_unlock_irqrestore(&conf->device_lock, flags); 920 spin_unlock_irqrestore(&conf->device_lock, flags);
967 } 921 }
968 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); 922 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
971 /* In case raid1d snuck in to freeze_array */ 925 /* In case raid1d snuck in to freeze_array */
972 wake_up(&conf->wait_barrier); 926 wake_up(&conf->wait_barrier);
973 927
974 if (do_sync) 928 if (do_sync || !bitmap)
975 md_wakeup_thread(mddev->thread); 929 md_wakeup_thread(mddev->thread);
976 930
977 return 0; 931 return 0;
@@ -1178,7 +1132,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
1178 p->rdev = rdev; 1132 p->rdev = rdev;
1179 goto abort; 1133 goto abort;
1180 } 1134 }
1181 md_integrity_register(mddev); 1135 err = md_integrity_register(mddev);
1182 } 1136 }
1183abort: 1137abort:
1184 1138
@@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
1561 unsigned long flags; 1515 unsigned long flags;
1562 conf_t *conf = mddev->private; 1516 conf_t *conf = mddev->private;
1563 struct list_head *head = &conf->retry_list; 1517 struct list_head *head = &conf->retry_list;
1564 int unplug=0;
1565 mdk_rdev_t *rdev; 1518 mdk_rdev_t *rdev;
1566 1519
1567 md_check_recovery(mddev); 1520 md_check_recovery(mddev);
@@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
1569 for (;;) { 1522 for (;;) {
1570 char b[BDEVNAME_SIZE]; 1523 char b[BDEVNAME_SIZE];
1571 1524
1572 unplug += flush_pending_writes(conf); 1525 flush_pending_writes(conf);
1573 1526
1574 spin_lock_irqsave(&conf->device_lock, flags); 1527 spin_lock_irqsave(&conf->device_lock, flags);
1575 if (list_empty(head)) { 1528 if (list_empty(head)) {
@@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
1583 1536
1584 mddev = r1_bio->mddev; 1537 mddev = r1_bio->mddev;
1585 conf = mddev->private; 1538 conf = mddev->private;
1586 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1539 if (test_bit(R1BIO_IsSync, &r1_bio->state))
1587 sync_request_write(mddev, r1_bio); 1540 sync_request_write(mddev, r1_bio);
1588 unplug = 1; 1541 else {
1589 } else {
1590 int disk; 1542 int disk;
1591 1543
1592 /* we got a read error. Maybe the drive is bad. Maybe just 1544 /* we got a read error. Maybe the drive is bad. Maybe just
@@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
1636 bio->bi_end_io = raid1_end_read_request; 1588 bio->bi_end_io = raid1_end_read_request;
1637 bio->bi_rw = READ | do_sync; 1589 bio->bi_rw = READ | do_sync;
1638 bio->bi_private = r1_bio; 1590 bio->bi_private = r1_bio;
1639 unplug = 1;
1640 generic_make_request(bio); 1591 generic_make_request(bio);
1641 } 1592 }
1642 } 1593 }
1643 cond_resched(); 1594 cond_resched();
1644 } 1595 }
1645 if (unplug)
1646 unplug_slaves(mddev);
1647} 1596}
1648 1597
1649 1598
@@ -2066,11 +2015,9 @@ static int run(mddev_t *mddev)
2066 2015
2067 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2016 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2068 2017
2069 mddev->queue->unplug_fn = raid1_unplug;
2070 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2018 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2071 mddev->queue->backing_dev_info.congested_data = mddev; 2019 mddev->queue->backing_dev_info.congested_data = mddev;
2072 md_integrity_register(mddev); 2020 return md_integrity_register(mddev);
2073 return 0;
2074} 2021}
2075 2022
2076static int stop(mddev_t *mddev) 2023static int stop(mddev_t *mddev)
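raid1's unplug_slaves()/raid1_unplug() pair is replaced by md_kick_device(), which only flushes the calling task's own plug and wakes the raid1d thread. A small illustrative sketch of the same idea outside the raid1 code (the wait queue, counter, and helper name are assumptions, not from this diff):

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(barrier_wait);	/* illustrative */
static atomic_t pending_io = ATOMIC_INIT(0);	/* illustrative */

/*
 * Illustrative only, in the spirit of md_kick_device() above: before
 * waiting for in-flight I/O to drain, push out anything sitting in the
 * calling task's own plug so the wait can actually make progress.
 */
static void kick_and_wait_for_idle(void)
{
	blk_flush_plug(current);
	wait_event(barrier_wait, atomic_read(&pending_io) == 0);
}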
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 747d061d8e05..f7b62370b374 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
57 */ 57 */
58#define NR_RAID10_BIOS 256 58#define NR_RAID10_BIOS 256
59 59
60static void unplug_slaves(mddev_t *mddev);
61
62static void allow_barrier(conf_t *conf); 60static void allow_barrier(conf_t *conf);
63static void lower_barrier(conf_t *conf); 61static void lower_barrier(conf_t *conf);
64 62
65static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 63static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
66{ 64{
67 conf_t *conf = data; 65 conf_t *conf = data;
68 r10bio_t *r10_bio;
69 int size = offsetof(struct r10bio_s, devs[conf->copies]); 66 int size = offsetof(struct r10bio_s, devs[conf->copies]);
70 67
71 /* allocate a r10bio with room for raid_disks entries in the bios array */ 68 /* allocate a r10bio with room for raid_disks entries in the bios array */
72 r10_bio = kzalloc(size, gfp_flags); 69 return kzalloc(size, gfp_flags);
73 if (!r10_bio && conf->mddev)
74 unplug_slaves(conf->mddev);
75
76 return r10_bio;
77} 70}
78 71
79static void r10bio_pool_free(void *r10_bio, void *data) 72static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
106 int nalloc; 99 int nalloc;
107 100
108 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 101 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
109 if (!r10_bio) { 102 if (!r10_bio)
110 unplug_slaves(conf->mddev);
111 return NULL; 103 return NULL;
112 }
113 104
114 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) 105 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
115 nalloc = conf->copies; /* resync */ 106 nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
597 return disk; 588 return disk;
598} 589}
599 590
600static void unplug_slaves(mddev_t *mddev)
601{
602 conf_t *conf = mddev->private;
603 int i;
604
605 rcu_read_lock();
606 for (i=0; i < conf->raid_disks; i++) {
607 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
608 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
609 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
610
611 atomic_inc(&rdev->nr_pending);
612 rcu_read_unlock();
613
614 blk_unplug(r_queue);
615
616 rdev_dec_pending(rdev, mddev);
617 rcu_read_lock();
618 }
619 }
620 rcu_read_unlock();
621}
622
623static void raid10_unplug(struct request_queue *q)
624{
625 mddev_t *mddev = q->queuedata;
626
627 unplug_slaves(q->queuedata);
628 md_wakeup_thread(mddev->thread);
629}
630
631static int raid10_congested(void *data, int bits) 591static int raid10_congested(void *data, int bits)
632{ 592{
633 mddev_t *mddev = data; 593 mddev_t *mddev = data;
@@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits)
649 return ret; 609 return ret;
650} 610}
651 611
652static int flush_pending_writes(conf_t *conf) 612static void flush_pending_writes(conf_t *conf)
653{ 613{
654 /* Any writes that have been queued but are awaiting 614 /* Any writes that have been queued but are awaiting
655 * bitmap updates get flushed here. 615 * bitmap updates get flushed here.
656 * We return 1 if any requests were actually submitted.
657 */ 616 */
658 int rv = 0;
659
660 spin_lock_irq(&conf->device_lock); 617 spin_lock_irq(&conf->device_lock);
661 618
662 if (conf->pending_bio_list.head) { 619 if (conf->pending_bio_list.head) {
663 struct bio *bio; 620 struct bio *bio;
664 bio = bio_list_get(&conf->pending_bio_list); 621 bio = bio_list_get(&conf->pending_bio_list);
665 /* Spinlock only taken to quiet a warning */
666 spin_lock(conf->mddev->queue->queue_lock);
667 blk_remove_plug(conf->mddev->queue);
668 spin_unlock(conf->mddev->queue->queue_lock);
669 spin_unlock_irq(&conf->device_lock); 622 spin_unlock_irq(&conf->device_lock);
670 /* flush any pending bitmap writes to disk 623 /* flush any pending bitmap writes to disk
671 * before proceeding w/ I/O */ 624 * before proceeding w/ I/O */
@@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
677 generic_make_request(bio); 630 generic_make_request(bio);
678 bio = next; 631 bio = next;
679 } 632 }
680 rv = 1;
681 } else 633 } else
682 spin_unlock_irq(&conf->device_lock); 634 spin_unlock_irq(&conf->device_lock);
683 return rv;
684} 635}
636
637static void md_kick_device(mddev_t *mddev)
638{
639 blk_flush_plug(current);
640 md_wakeup_thread(mddev->thread);
641}
642
685/* Barriers.... 643/* Barriers....
686 * Sometimes we need to suspend IO while we do something else, 644 * Sometimes we need to suspend IO while we do something else,
687 * either some resync/recovery, or reconfigure the array. 645 * either some resync/recovery, or reconfigure the array.
@@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
711 669
712 /* Wait until no block IO is waiting (unless 'force') */ 670 /* Wait until no block IO is waiting (unless 'force') */
713 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 671 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
714 conf->resync_lock, 672 conf->resync_lock, md_kick_device(conf->mddev));
715 raid10_unplug(conf->mddev->queue));
716 673
717 /* block any new IO from starting */ 674 /* block any new IO from starting */
718 conf->barrier++; 675 conf->barrier++;
@@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
720 /* No wait for all pending IO to complete */ 677 /* No wait for all pending IO to complete */
721 wait_event_lock_irq(conf->wait_barrier, 678 wait_event_lock_irq(conf->wait_barrier,
722 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 679 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
723 conf->resync_lock, 680 conf->resync_lock, md_kick_device(conf->mddev));
724 raid10_unplug(conf->mddev->queue));
725 681
726 spin_unlock_irq(&conf->resync_lock); 682 spin_unlock_irq(&conf->resync_lock);
727} 683}
@@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf)
742 conf->nr_waiting++; 698 conf->nr_waiting++;
743 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 699 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
744 conf->resync_lock, 700 conf->resync_lock,
745 raid10_unplug(conf->mddev->queue)); 701 md_kick_device(conf->mddev));
746 conf->nr_waiting--; 702 conf->nr_waiting--;
747 } 703 }
748 conf->nr_pending++; 704 conf->nr_pending++;
@@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf)
779 conf->nr_pending == conf->nr_queued+1, 735 conf->nr_pending == conf->nr_queued+1,
780 conf->resync_lock, 736 conf->resync_lock,
781 ({ flush_pending_writes(conf); 737 ({ flush_pending_writes(conf);
782 raid10_unplug(conf->mddev->queue); })); 738 md_kick_device(conf->mddev); }));
783 spin_unlock_irq(&conf->resync_lock); 739 spin_unlock_irq(&conf->resync_lock);
784} 740}
785 741
@@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
974 atomic_inc(&r10_bio->remaining); 930 atomic_inc(&r10_bio->remaining);
975 spin_lock_irqsave(&conf->device_lock, flags); 931 spin_lock_irqsave(&conf->device_lock, flags);
976 bio_list_add(&conf->pending_bio_list, mbio); 932 bio_list_add(&conf->pending_bio_list, mbio);
977 blk_plug_device_unlocked(mddev->queue);
978 spin_unlock_irqrestore(&conf->device_lock, flags); 933 spin_unlock_irqrestore(&conf->device_lock, flags);
979 } 934 }
980 935
@@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
991 /* In case raid10d snuck in to freeze_array */ 946 /* In case raid10d snuck in to freeze_array */
992 wake_up(&conf->wait_barrier); 947 wake_up(&conf->wait_barrier);
993 948
994 if (do_sync) 949 if (do_sync || !mddev->bitmap)
995 md_wakeup_thread(mddev->thread); 950 md_wakeup_thread(mddev->thread);
996 951
997 return 0; 952 return 0;
@@ -1233,7 +1188,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1233 p->rdev = rdev; 1188 p->rdev = rdev;
1234 goto abort; 1189 goto abort;
1235 } 1190 }
1236 md_integrity_register(mddev); 1191 err = md_integrity_register(mddev);
1237 } 1192 }
1238abort: 1193abort:
1239 1194
@@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
1684 unsigned long flags; 1639 unsigned long flags;
1685 conf_t *conf = mddev->private; 1640 conf_t *conf = mddev->private;
1686 struct list_head *head = &conf->retry_list; 1641 struct list_head *head = &conf->retry_list;
1687 int unplug=0;
1688 mdk_rdev_t *rdev; 1642 mdk_rdev_t *rdev;
1689 1643
1690 md_check_recovery(mddev); 1644 md_check_recovery(mddev);
@@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
1692 for (;;) { 1646 for (;;) {
1693 char b[BDEVNAME_SIZE]; 1647 char b[BDEVNAME_SIZE];
1694 1648
1695 unplug += flush_pending_writes(conf); 1649 flush_pending_writes(conf);
1696 1650
1697 spin_lock_irqsave(&conf->device_lock, flags); 1651 spin_lock_irqsave(&conf->device_lock, flags);
1698 if (list_empty(head)) { 1652 if (list_empty(head)) {
@@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
1706 1660
1707 mddev = r10_bio->mddev; 1661 mddev = r10_bio->mddev;
1708 conf = mddev->private; 1662 conf = mddev->private;
1709 if (test_bit(R10BIO_IsSync, &r10_bio->state)) { 1663 if (test_bit(R10BIO_IsSync, &r10_bio->state))
1710 sync_request_write(mddev, r10_bio); 1664 sync_request_write(mddev, r10_bio);
1711 unplug = 1; 1665 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
1712 } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1713 recovery_request_write(mddev, r10_bio); 1666 recovery_request_write(mddev, r10_bio);
1714 unplug = 1; 1667 else {
1715 } else {
1716 int mirror; 1668 int mirror;
1717 /* we got a read error. Maybe the drive is bad. Maybe just 1669 /* we got a read error. Maybe the drive is bad. Maybe just
1718 * the block and we can fix it. 1670 * the block and we can fix it.
@@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
1759 bio->bi_rw = READ | do_sync; 1711 bio->bi_rw = READ | do_sync;
1760 bio->bi_private = r10_bio; 1712 bio->bi_private = r10_bio;
1761 bio->bi_end_io = raid10_end_read_request; 1713 bio->bi_end_io = raid10_end_read_request;
1762 unplug = 1;
1763 generic_make_request(bio); 1714 generic_make_request(bio);
1764 } 1715 }
1765 } 1716 }
1766 cond_resched(); 1717 cond_resched();
1767 } 1718 }
1768 if (unplug)
1769 unplug_slaves(mddev);
1770} 1719}
1771 1720
1772 1721
@@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev)
2377 md_set_array_sectors(mddev, size); 2326 md_set_array_sectors(mddev, size);
2378 mddev->resync_max_sectors = size; 2327 mddev->resync_max_sectors = size;
2379 2328
2380 mddev->queue->unplug_fn = raid10_unplug;
2381 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 2329 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2382 mddev->queue->backing_dev_info.congested_data = mddev; 2330 mddev->queue->backing_dev_info.congested_data = mddev;
2383 2331
@@ -2395,7 +2343,10 @@ static int run(mddev_t *mddev)
2395 2343
2396 if (conf->near_copies < conf->raid_disks) 2344 if (conf->near_copies < conf->raid_disks)
2397 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2345 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2398 md_integrity_register(mddev); 2346
2347 if (md_integrity_register(mddev))
2348 goto out_free_conf;
2349
2399 return 0; 2350 return 0;
2400 2351
2401out_free_conf: 2352out_free_conf:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78536fdbd87f..e867ee42b152 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
433 return 0; 433 return 0;
434} 434}
435 435
436static void unplug_slaves(mddev_t *mddev);
437
438static struct stripe_head * 436static struct stripe_head *
439get_active_stripe(raid5_conf_t *conf, sector_t sector, 437get_active_stripe(raid5_conf_t *conf, sector_t sector,
440 int previous, int noblock, int noquiesce) 438 int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
463 < (conf->max_nr_stripes *3/4) 461 < (conf->max_nr_stripes *3/4)
464 || !conf->inactive_blocked), 462 || !conf->inactive_blocked),
465 conf->device_lock, 463 conf->device_lock,
466 md_raid5_unplug_device(conf) 464 md_raid5_kick_device(conf));
467 );
468 conf->inactive_blocked = 0; 465 conf->inactive_blocked = 0;
469 } else 466 } else
470 init_stripe(sh, sector, previous); 467 init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
1473 wait_event_lock_irq(conf->wait_for_stripe, 1470 wait_event_lock_irq(conf->wait_for_stripe,
1474 !list_empty(&conf->inactive_list), 1471 !list_empty(&conf->inactive_list),
1475 conf->device_lock, 1472 conf->device_lock,
1476 unplug_slaves(conf->mddev) 1473 blk_flush_plug(current));
1477 );
1478 osh = get_free_stripe(conf); 1474 osh = get_free_stripe(conf);
1479 spin_unlock_irq(&conf->device_lock); 1475 spin_unlock_irq(&conf->device_lock);
1480 atomic_set(&nsh->count, 1); 1476 atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
3645 } 3641 }
3646} 3642}
3647 3643
3648static void unplug_slaves(mddev_t *mddev) 3644void md_raid5_kick_device(raid5_conf_t *conf)
3649{ 3645{
3650 raid5_conf_t *conf = mddev->private; 3646 blk_flush_plug(current);
3651 int i; 3647 raid5_activate_delayed(conf);
3652 int devs = max(conf->raid_disks, conf->previous_raid_disks);
3653
3654 rcu_read_lock();
3655 for (i = 0; i < devs; i++) {
3656 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3657 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3658 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3659
3660 atomic_inc(&rdev->nr_pending);
3661 rcu_read_unlock();
3662
3663 blk_unplug(r_queue);
3664
3665 rdev_dec_pending(rdev, mddev);
3666 rcu_read_lock();
3667 }
3668 }
3669 rcu_read_unlock();
3670}
3671
3672void md_raid5_unplug_device(raid5_conf_t *conf)
3673{
3674 unsigned long flags;
3675
3676 spin_lock_irqsave(&conf->device_lock, flags);
3677
3678 if (plugger_remove_plug(&conf->plug)) {
3679 conf->seq_flush++;
3680 raid5_activate_delayed(conf);
3681 }
3682 md_wakeup_thread(conf->mddev->thread); 3648 md_wakeup_thread(conf->mddev->thread);
3683
3684 spin_unlock_irqrestore(&conf->device_lock, flags);
3685
3686 unplug_slaves(conf->mddev);
3687} 3649}
3688EXPORT_SYMBOL_GPL(md_raid5_unplug_device); 3650EXPORT_SYMBOL_GPL(md_raid5_kick_device);
3689 3651
3690static void raid5_unplug(struct plug_handle *plug) 3652static void raid5_unplug(struct plug_handle *plug)
3691{ 3653{
3692 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); 3654 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
3693 md_raid5_unplug_device(conf);
3694}
3695 3655
3696static void raid5_unplug_queue(struct request_queue *q) 3656 md_raid5_kick_device(conf);
3697{
3698 mddev_t *mddev = q->queuedata;
3699 md_raid5_unplug_device(mddev->private);
3700} 3657}
3701 3658
3702int md_raid5_congested(mddev_t *mddev, int bits) 3659int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4100 * add failed due to overlap. Flush everything 4057 * add failed due to overlap. Flush everything
4101 * and wait a while 4058 * and wait a while
4102 */ 4059 */
4103 md_raid5_unplug_device(conf); 4060 md_raid5_kick_device(conf);
4104 release_stripe(sh); 4061 release_stripe(sh);
4105 schedule(); 4062 schedule();
4106 goto retry; 4063 goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4365 4322
4366 if (sector_nr >= max_sector) { 4323 if (sector_nr >= max_sector) {
4367 /* just being told to finish up .. nothing much to do */ 4324 /* just being told to finish up .. nothing much to do */
4368 unplug_slaves(mddev);
4369 4325
4370 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4326 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4371 end_reshape(conf); 4327 end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
4569 spin_unlock_irq(&conf->device_lock); 4525 spin_unlock_irq(&conf->device_lock);
4570 4526
4571 async_tx_issue_pending_all(); 4527 async_tx_issue_pending_all();
4572 unplug_slaves(mddev);
4573 4528
4574 pr_debug("--- raid5d inactive\n"); 4529 pr_debug("--- raid5d inactive\n");
4575} 4530}
@@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev)
5204 5159
5205 mddev->queue->backing_dev_info.congested_data = mddev; 5160 mddev->queue->backing_dev_info.congested_data = mddev;
5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5161 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5207 mddev->queue->unplug_fn = raid5_unplug_queue; 5162 mddev->queue->queue_lock = &conf->device_lock;
5208 5163
5209 chunk_size = mddev->chunk_sectors << 9; 5164 chunk_size = mddev->chunk_sectors << 9;
5210 blk_queue_io_min(mddev->queue, chunk_size); 5165 blk_queue_io_min(mddev->queue, chunk_size);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2ace0582b409..8d563a4f022a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
503} 503}
504 504
505extern int md_raid5_congested(mddev_t *mddev, int bits); 505extern int md_raid5_congested(mddev_t *mddev, int bits);
506extern void md_raid5_unplug_device(raid5_conf_t *conf); 506extern void md_raid5_kick_device(raid5_conf_t *conf);
507extern int raid5_set_cache_size(mddev_t *mddev, int size); 507extern int raid5_set_cache_size(mddev_t *mddev, int size);
508#endif 508#endif