Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-mpath.c    8
-rw-r--r--   drivers/md/dm-raid1.c    1
-rw-r--r--   drivers/md/dm-stripe.c   4
-rw-r--r--   drivers/md/dm.c         34
4 files changed, 31 insertions, 16 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 4840733cd903..3d7f4923cd13 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -441,13 +441,13 @@ static void process_queued_ios(struct work_struct *work)
         __choose_pgpath(m);
 
         pgpath = m->current_pgpath;
-        m->pgpath_to_activate = m->current_pgpath;
 
         if ((pgpath && !m->queue_io) ||
             (!pgpath && !m->queue_if_no_path))
                 must_queue = 0;
 
-        if (m->pg_init_required && !m->pg_init_in_progress) {
+        if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
+                m->pgpath_to_activate = pgpath;
                 m->pg_init_count++;
                 m->pg_init_required = 0;
                 m->pg_init_in_progress = 1;
@@ -708,6 +708,10 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
                 m->hw_handler_name = NULL;
                 return -EINVAL;
         }
+
+        if (hw_argc > 1)
+                DMWARN("Ignoring user-specified arguments for "
+                       "hardware handler \"%s\"", m->hw_handler_name);
         consume(as, hw_argc - 1);
 
         return 0;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9d7b53ed75b2..ec43f9fa4b2a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1032,6 +1032,7 @@ static void mirror_dtr(struct dm_target *ti)
 
         del_timer_sync(&ms->timer);
         flush_workqueue(ms->kmirrord_wq);
+        flush_scheduled_work();
         dm_kcopyd_client_destroy(ms->kcopyd_client);
         destroy_workqueue(ms->kmirrord_wq);
         free_context(ms, ti, ms->nr_mirrors);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a2d068dbe9e2..9e4ef88d421e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -320,8 +320,10 @@ int __init dm_stripe_init(void)
         int r;
 
         r = dm_register_target(&stripe_target);
-        if (r < 0)
+        if (r < 0) {
                 DMWARN("target registration failed");
+                return r;
+        }
 
         kstriped = create_singlethread_workqueue("kstriped");
         if (!kstriped) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6963ad148408..c99e4728ff41 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -375,7 +375,7 @@ static void start_io_acct(struct dm_io *io)
         dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
-static int end_io_acct(struct dm_io *io)
+static void end_io_acct(struct dm_io *io)
 {
         struct mapped_device *md = io->md;
         struct bio *bio = io->bio;
@@ -391,7 +391,9 @@ static int end_io_acct(struct dm_io *io)
         dm_disk(md)->part0.in_flight = pending =
                 atomic_dec_return(&md->pending);
 
-        return !pending;
+        /* nudge anyone waiting on suspend queue */
+        if (!pending)
+                wake_up(&md->wait);
 }
 
 /*
@@ -499,9 +501,7 @@ static void dec_pending(struct dm_io *io, int error)
                 spin_unlock_irqrestore(&io->md->pushback_lock, flags);
         }
 
-        if (end_io_acct(io))
-                /* nudge anyone waiting on suspend queue */
-                wake_up(&io->md->wait);
+        end_io_acct(io);
 
         if (io->error != DM_ENDIO_REQUEUE) {
                 blk_add_trace_bio(io->md->queue, io->bio,
@@ -937,16 +937,24 @@ static void dm_unplug_all(struct request_queue *q)
 
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
-        int r;
-        struct mapped_device *md = (struct mapped_device *) congested_data;
-        struct dm_table *map = dm_get_table(md);
+        int r = bdi_bits;
+        struct mapped_device *md = congested_data;
+        struct dm_table *map;
 
-        if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
-                r = bdi_bits;
-        else
-                r = dm_table_any_congested(map, bdi_bits);
+        atomic_inc(&md->pending);
+
+        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+                map = dm_get_table(md);
+                if (map) {
+                        r = dm_table_any_congested(map, bdi_bits);
+                        dm_table_put(map);
+                }
+        }
+
+        if (!atomic_dec_return(&md->pending))
+                /* nudge anyone waiting on suspend queue */
+                wake_up(&md->wait);
 
-        dm_table_put(map);
         return r;
 }
 
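For readability, here is dm_any_congested() as it reads with the hunk above applied, assembled directly from the added and context lines (indentation approximated; surrounding dm.c definitions such as DMF_BLOCK_IO, dm_get_table() and dm_table_put() are taken as given by the patch context). The md->pending count is now held across the table lookup and md->wait is woken when it drops to zero, which, going by the "suspend queue" comment in the hunk, presumably lets a waiting suspend account for in-flight congestion queries.

static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r = bdi_bits;
        struct mapped_device *md = congested_data;
        struct dm_table *map;

        /* held across the table lookup; dropped before returning */
        atomic_inc(&md->pending);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                map = dm_get_table(md);
                if (map) {
                        r = dm_table_any_congested(map, bdi_bits);
                        dm_table_put(map);
                }
        }

        if (!atomic_dec_return(&md->pending))
                /* nudge anyone waiting on suspend queue */
                wake_up(&md->wait);

        return r;
}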