author     Alasdair G Kergon <agk@redhat.com>        2005-07-12 18:53:04 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-07-12 19:19:11 -0400
commit     c3cd4f6b275da0f594797b73f721a4185335478f
tree       f7a3f0945993b02bb94fb770f2be4b01260b3ead
parent     436d41087d047b61f8ab0604dc74fff3240a8933
[PATCH] device-mapper multipath: Fix pg initialisation races
Prevent more than one priority group initialisation function from being
outstanding at once; otherwise the completion functions interfere with each
other. Also, reloading the table could reference a freed pointer.

Only reset queue_io in pg_init_complete if another pg_init isn't required.

Skip process_queued_ios if the queue is empty, so that we only trigger a
pg_init if there's I/O.

Signed-off-by: Lars Marowsky-Bree <lmb@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 drivers/md/dm-mpath.c | 37 +++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)
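For reference, the serialisation pattern this patch introduces can be
distilled into a small standalone sketch: a single in-progress flag, tested
and set under the lock, ensures only one initialisation is ever outstanding,
and the completion path only releases queued I/O when no further round was
requested in the meantime. This is an illustrative userspace analogue, not
the kernel code: the struct and function names (mpath_sketch, pg_init_start,
pg_init_done) are hypothetical, a pthread mutex stands in for the multipath
spinlock, and the real driver defers the actual work to a workqueue.

/*
 * Userspace sketch of the pg_init_in_progress guard (hypothetical names;
 * pthread mutex standing in for the kernel spinlock).
 */
#include <pthread.h>
#include <stdio.h>

struct mpath_sketch {
	pthread_mutex_t lock;
	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* only one pg_init at once */
	unsigned queue_io;		/* hold I/O until init completes */
};

/* Test-and-set under the lock: returns 1 only for the caller allowed
 * to start an initialisation; concurrent callers are refused. */
static int pg_init_start(struct mpath_sketch *m)
{
	int start = 0;

	pthread_mutex_lock(&m->lock);
	if (m->pg_init_required && !m->pg_init_in_progress) {
		m->pg_init_required = 0;
		m->pg_init_in_progress = 1;
		start = 1;
	}
	pthread_mutex_unlock(&m->lock);
	return start;
}

/* Completion: release queued I/O only if no further init round was
 * requested while this one ran (mirroring the new pg_init_complete
 * logic), then clear the flag so the next round may start. */
static void pg_init_done(struct mpath_sketch *m, int err)
{
	pthread_mutex_lock(&m->lock);
	if (!err && !m->pg_init_required)
		m->queue_io = 0;
	m->pg_init_in_progress = 0;
	pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	struct mpath_sketch m = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pg_init_required = 1,
		.queue_io = 1,
	};

	printf("first start:  %d\n", pg_init_start(&m));  /* 1: wins */
	printf("second start: %d\n", pg_init_start(&m));  /* 0: refused */
	pg_init_done(&m, 0);
	printf("queue_io:     %u\n", m.queue_io);         /* 0: released */
	return 0;
}

The key point the sketch demonstrates is the one the commit message states:
the second caller of pg_init_start() is refused while the first
initialisation is outstanding, so completion functions can never interfere
with each other.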
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 98da8eee2d2c..785806bdb248 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
+	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -308,7 +309,8 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
 		/* Queue for the daemon to resubmit */
 		bio_list_add(&m->queued_ios, bio);
 		m->queue_size++;
-		if (m->pg_init_required || !m->queue_io)
+		if ((m->pg_init_required && !m->pg_init_in_progress) ||
+		    !m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = 0;
@@ -335,7 +337,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
 
 	m->saved_queue_if_no_path = m->queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
+	if (!m->queue_if_no_path && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -380,25 +382,31 @@ static void process_queued_ios(void *data)
 {
 	struct multipath *m = (struct multipath *) data;
 	struct hw_handler *hwh = &m->hw_handler;
-	struct pgpath *pgpath;
-	unsigned init_required, must_queue = 0;
+	struct pgpath *pgpath = NULL;
+	unsigned init_required = 0, must_queue = 1;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	if (!m->queue_size)
+		goto out;
+
 	if (!m->current_pgpath)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path))
-		must_queue = 1;
+	if ((pgpath && !m->queue_io) ||
+	    (!pgpath && !m->queue_if_no_path))
+		must_queue = 0;
 
-	init_required = m->pg_init_required;
-	if (init_required)
-		m->pg_init_required = 0;
+	if (m->pg_init_required && !m->pg_init_in_progress) {
+		m->pg_init_required = 0;
+		m->pg_init_in_progress = 1;
+		init_required = 1;
+	}
 
+out:
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (init_required)
@@ -843,7 +851,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	pgpath->path.is_active = 1;
 
 	m->current_pgpath = NULL;
-	if (!m->nr_valid_paths++)
+	if (!m->nr_valid_paths++ && m->queue_size)
 		queue_work(kmultipathd, &m->process_queued_ios);
 
 	queue_work(kmultipathd, &m->trigger_event);
@@ -969,12 +977,13 @@ void dm_pg_init_complete(struct path *path, unsigned err_flags)
 		bypass_pg(m, pg, 1);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!err_flags)
-		m->queue_io = 0;
-	else {
+	if (err_flags) {
 		m->current_pgpath = NULL;
 		m->current_pg = NULL;
-	}
+	} else if (!m->pg_init_required)
+		m->queue_io = 0;
+
+	m->pg_init_in_progress = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 	spin_unlock_irqrestore(&m->lock, flags);
 }