Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-mpath.c	68
-rw-r--r--	drivers/md/dm-raid1.c	1
-rw-r--r--	drivers/md/dm-snap.c	6
-rw-r--r--	drivers/md/dm-table.c	1
-rw-r--r--	drivers/md/dm.c	27
5 files changed, 63 insertions(+), 40 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0c1b8520ef86..785806bdb248 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
+	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -72,7 +73,7 @@ struct multipath {
 
 	unsigned queue_io;		/* Must we queue all I/O? */
 	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
-	unsigned suspended;		/* Has dm core suspended our I/O? */
+	unsigned saved_queue_if_no_path;/* Saved state during suspension */
 
 	struct work_struct process_queued_ios;
 	struct bio_list queued_ios;
@@ -304,11 +305,12 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		m->queue_size--;
 
 	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended)) {
+	    (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		bio_list_add(&m->queued_ios, bio);
 		m->queue_size++;
-		if (m->pg_init_required || !m->queue_io)
+		if ((m->pg_init_required && !m->pg_init_in_progress) ||
+		    !m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = 0;
@@ -333,8 +335,9 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	m->saved_queue_if_no_path = m->queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
+	if (!m->queue_if_no_path && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -379,25 +382,31 @@ static void process_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
 	struct hw_handler *hwh = &m->hw_handler;
-	struct pgpath *pgpath;
-	unsigned init_required, must_queue = 0;
+	struct pgpath *pgpath = NULL;
+	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	if (!m->queue_size)
+		goto out;
+
 	if (!m->current_pgpath)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path && !m->suspended))
-		must_queue = 1;
+	if ((pgpath && !m->queue_io) ||
+	    (!pgpath && !m->queue_if_no_path))
+		must_queue = 0;
 
-	init_required = m->pg_init_required;
-	if (init_required)
+	if (m->pg_init_required && !m->pg_init_in_progress) {
 		m->pg_init_required = 0;
+		m->pg_init_in_progress = 1;
+		init_required = 1;
+	}
 
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (init_required)
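
The net effect of the map_io() and process_queued_ios() changes above is a single-flight guard: a pg_init is claimed under m->lock only when one is required and none is already in flight, and dm_pg_init_complete() further down clears the flag when the hardware handler reports back. A minimal userspace sketch of that claim/complete pattern, with a pthread mutex standing in for the spinlock and all names purely illustrative:

    /* Single-flight guard sketch: only one caller may run the init at a
     * time; in_progress plays the role of pg_init_in_progress here.
     * Illustrative only, not kernel API. */
    #include <pthread.h>
    #include <stdio.h>

    struct guard {
            pthread_mutex_t lock;
            int required;           /* like m->pg_init_required */
            int in_progress;        /* like m->pg_init_in_progress */
    };

    /* Returns 1 if the caller won the right to run the init. */
    static int claim(struct guard *g)
    {
            int won = 0;

            pthread_mutex_lock(&g->lock);
            if (g->required && !g->in_progress) {
                    g->required = 0;
                    g->in_progress = 1;
                    won = 1;
            }
            pthread_mutex_unlock(&g->lock);
            return won;
    }

    /* Completion side, like dm_pg_init_complete() clearing the flag. */
    static void complete_init(struct guard *g)
    {
            pthread_mutex_lock(&g->lock);
            g->in_progress = 0;
            pthread_mutex_unlock(&g->lock);
    }

    int main(void)
    {
            struct guard g = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

            printf("first claim: %d\n", claim(&g));   /* 1: runs the init */
            printf("second claim: %d\n", claim(&g));  /* 0: already in flight */
            complete_init(&g);
            return 0;
    }
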
@@ -752,6 +761,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 static void multipath_dtr(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
+
+	flush_workqueue(kmultipathd);
 	free_multipath(m);
 }
 
@@ -765,6 +776,9 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
 	struct mpath_io *mpio;
 	struct multipath *m = (struct multipath *) ti->private;
 
+	if (bio_barrier(bio))
+		return -EOPNOTSUPP;
+
 	mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
 	dm_bio_record(&mpio->details, bio);
 
@@ -837,7 +851,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	pgpath->path.is_active = 1;
 
 	m->current_pgpath = NULL;
-	if (!m->nr_valid_paths++)
+	if (!m->nr_valid_paths++ && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	queue_work(kmultipathd, &m->trigger_event);
@@ -963,12 +977,13 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
 		bypass_pg(m, pg, 1);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!err_flags)
-		m->queue_io = 0;
-	else {
+	if (err_flags) {
 		m->current_pgpath = NULL;
 		m->current_pg = NULL;
-	}
+	} else if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	m->pg_init_in_progress = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }
@@ -988,9 +1003,12 @@ static int do_end_io(struct multipath *m, struct bio *bio,
 	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
 		return error;
 
+	if (error == -EOPNOTSUPP)
+		return error;
+
 	spin_lock(&m->lock);
 	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path || m->suspended) {
+		if (!m->queue_if_no_path) {
 			spin_unlock(&m->lock);
 			return -EIO;
 		} else {
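
With barriers now rejected up front in multipath_map(), do_end_io() has to let -EOPNOTSUPP pass through untouched: it says nothing about path health, so it must not be queued for retry or used to trigger failover. A hedged sketch of that error triage, with invented names:

    /* Error triage sketch: some completion codes are not path failures.
     * -EOPNOTSUPP comes from the barrier rejection above and must reach
     * the submitter unchanged.  Illustrative, not the kernel's API. */
    #include <errno.h>
    #include <stdio.h>

    static int is_path_failure(int error)
    {
            switch (error) {
            case 0:                 /* success */
            case -EOPNOTSUPP:       /* unsupported request; path is fine */
                    return 0;
            default:
                    return 1;       /* candidate for failing the path */
            }
    }

    int main(void)
    {
            printf("-EIO fails path: %d\n", is_path_failure(-EIO));
            printf("-EOPNOTSUPP fails path: %d\n", is_path_failure(-EOPNOTSUPP));
            return 0;
    }
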
@@ -1051,27 +1069,27 @@ static int multipath_end_io(struct dm_target *ti, struct bio *bio,
 
 /*
  * Suspend can't complete until all the I/O is processed so if
- * the last path failed we will now error any queued I/O.
+ * the last path fails we must error any remaining I/O.
+ * Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
  */
 static void multipath_presuspend(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 1;
-	if (m->queue_if_no_path)
-		queue_work(kmultipathd, &m->process_queued_ios);
-	spin_unlock_irqrestore(&m->lock, flags);
+	queue_if_no_path(m, 0);
 }
 
+/*
+ * Restore the queue_if_no_path setting.
+ */
 static void multipath_resume(struct dm_target *ti)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-	m->suspended = 0;
+	m->queue_if_no_path = m->saved_queue_if_no_path;
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
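
The suspended flag is gone entirely; in its place, presuspend calls queue_if_no_path(m, 0), which first copies the current value into saved_queue_if_no_path, and resume copies it back. A simplified, lock-free sketch of that save/restore idiom (field names follow the patch, everything else is illustrative):

    /* Save/restore sketch: suspension forces queuing off so pending I/O
     * is errored and suspend can drain, then resume restores the user's
     * setting.  No locking here; the real code holds m->lock. */
    #include <assert.h>

    struct mpath {
            int queue_if_no_path;
            int saved_queue_if_no_path;
    };

    static void set_queue_if_no_path(struct mpath *m, int value)
    {
            m->saved_queue_if_no_path = m->queue_if_no_path; /* remember */
            m->queue_if_no_path = value;
    }

    static void presuspend(struct mpath *m)
    {
            set_queue_if_no_path(m, 0); /* stop queuing; error queued I/O */
    }

    static void resume(struct mpath *m)
    {
            m->queue_if_no_path = m->saved_queue_if_no_path; /* put it back */
    }

    int main(void)
    {
            struct mpath m = { .queue_if_no_path = 1 };

            presuspend(&m);
            assert(m.queue_if_no_path == 0);
            resume(&m);
            assert(m.queue_if_no_path == 1); /* user setting survives */
            return 0;
    }

As the new comment in the hunk above warns, if freeze_bdev fails partway through a suspend the saved value has already been consumed, so userspace must reset queue_if_no_path by hand.
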
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6e3cf7e13451..12031c9d3f1e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1060,6 +1060,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ti->private = ms;
+	ti->split_io = ms->rh.region_size;
 
 	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
 	if (r) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7e691ab9a748..ab54f99b7c3b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -777,7 +777,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 
 	/* Full snapshots are not usable */
 	if (!s->valid)
-		return -1;
+		return -EIO;
 
 	/*
 	 * Write to snapshot - higher level takes care of RW/RO
@@ -931,6 +931,10 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 		if (!snap->valid)
 			continue;
 
+		/* Nothing to do if writing beyond end of snapshot */
+		if (bio->bi_sector >= dm_table_get_size(snap->table))
+			continue;
+
 		down_write(&snap->lock);
 
 		/*
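
A snapshot can be shorter than its origin, so a write landing at or past the snapshot's end needs no exception copy; dm_table_get_size() (newly exported from dm-table.c below) supplies the device length in 512-byte sectors. A small sketch of the skip logic, with invented names:

    /* Bounds-test sketch for __origin_write(): skip any snapshot whose
     * table ends before the write's sector.  Types simplified; the real
     * code takes snap->lock before doing exception work. */
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct snap { const char *name; int valid; sector_t size; };

    static void origin_write(struct snap *snaps, int n, sector_t bio_sector)
    {
            for (int i = 0; i < n; i++) {
                    if (!snaps[i].valid)
                            continue;
                    /* Nothing to do if writing beyond end of snapshot */
                    if (bio_sector >= snaps[i].size)
                            continue;
                    printf("%s: start exception copy\n", snaps[i].name);
            }
    }

    int main(void)
    {
            struct snap snaps[] = {
                    { "small", 1, 1024 },   /* 512 KiB snapshot */
                    { "large", 1, 8192 },
            };
            origin_write(snaps, 2, 2048); /* hits only "large" */
            return 0;
    }
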
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 18e9b9953fcd..a5a4c0ed8a14 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -943,6 +943,7 @@ EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
 EXPORT_SYMBOL(dm_table_event);
+EXPORT_SYMBOL(dm_table_get_size);
 EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f6b03957efc7..54fabbf06678 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -384,7 +384,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* error the io and bail out */
 		struct dm_io *io = tio->io;
 		free_tio(tio->io->md, tio);
-		dec_pending(io, -EIO);
+		dec_pending(io, r);
 		bio_put(clone);
 	}
 }
@@ -966,23 +966,20 @@ static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-	int r;
+	int r = -EINVAL;
 
 	down_write(&md->lock);
 
 	/* device must be suspended */
-	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
-		up_write(&md->lock);
-		return -EPERM;
-	}
+	if (!test_bit(DMF_SUSPENDED, &md->flags))
+		goto out;
 
 	__unbind(md);
 	r = __bind(md, table);
-	if (r)
-		return r;
 
+out:
 	up_write(&md->lock);
-	return 0;
+	return r;
 }
 
 /*
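
Besides tidying the flow, this rewrite fixes a real leak: the old "if (r) return r;" path returned with md->lock still write-held. Funnelling every exit through out: makes the unlock unconditional. A userspace sketch of the same single-exit shape, with a pthread rwlock in place of md->lock and invented names:

    /* Single-exit sketch: every path, including the early -EINVAL
     * bail-out and a failed bind, releases the lock exactly once. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t md_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int suspended = 1;

    static int swap_table(int (*bind)(void))
    {
            int r = -EINVAL;

            pthread_rwlock_wrlock(&md_lock);

            if (!suspended)
                    goto out;       /* early exit still unlocks below */

            r = bind();             /* success or failure, fall through */
    out:
            pthread_rwlock_unlock(&md_lock);
            return r;
    }

    static int bind_ok(void) { return 0; }

    int main(void)
    {
            printf("swap: %d\n", swap_table(bind_ok));
            return 0;
    }
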
@@ -1055,14 +1052,17 @@ int dm_suspend(struct mapped_device *md)
 	if (test_bit(DMF_BLOCK_IO, &md->flags))
 		goto out_read_unlock;
 
-	error = __lock_fs(md);
-	if (error)
-		goto out_read_unlock;
-
 	map = dm_get_table(md);
 	if (map)
+		/* This does not get reverted if there's an error later. */
 		dm_table_presuspend_targets(map);
 
+	error = __lock_fs(md);
+	if (error) {
+		dm_table_put(map);
+		goto out_read_unlock;
+	}
+
 	up_read(&md->lock);
 
 	/*
@@ -1121,7 +1121,6 @@ int dm_suspend(struct mapped_device *md)
 	return 0;
 
 out_unfreeze:
-	/* FIXME Undo dm_table_presuspend_targets */
 	__unlock_fs(md);
 	clear_bit(DMF_BLOCK_IO, &md->flags);
 out_write_unlock: