author		Mike Snitzer <snitzer@redhat.com>	2011-01-13 14:59:46 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2011-01-13 14:59:46 -0500
commit		09c9d4c9b6a2b5909ae3c6265e4cd3820b636863 (patch)
tree		340925bf6af03a51f13f165cf68263be247c9d25 /drivers
parent		c217649bf2d60ac119afd71d938278cffd55962b (diff)
dm mpath: disable blk_abort_queue
Revert commit 224cb3e981f1b2f9f93dbd49eaef505d17d894c2
("dm: Call blk_abort_queue on failed paths").
Multipath began to use blk_abort_queue() to allow for lower-latency path
deactivation.  This was found to cause list corruption: the command gets
blk_abort_queue()/timeout handling run on it, and the SCSI error handler
is somehow able to complete it and run scsi_queue_insert() while
scsi_request_fn() is still processing the request.
https://www.redhat.com/archives/dm-devel/2010-November/msg00085.html
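
For context, the mechanism being reverted is small.  Condensed from the hunks
removed below (a sketch for orientation, not a drop-in excerpt of dm-mpath.c),
path failure queued a work item that aborted all outstanding requests on the
failed path's request queue:

	/* struct pgpath carried a dedicated work item for path deactivation. */
	struct work_struct deactivate_path;

	/* The work item aborted all outstanding requests on the failed path's queue. */
	static void deactivate_path(struct work_struct *work)
	{
		struct pgpath *pgpath =
			container_of(work, struct pgpath, deactivate_path);

		blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
	}

	/* ...and fail_path() scheduled it as soon as a path was marked failed: */
	queue_work(kmultipathd, &pgpath->deactivate_path);

Forcing block-layer timeout handling from here is what exposes the race
described above between the SCSI error handler and scsi_request_fn(); the
revert simply removes the dm-mpath trigger for that path.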
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Mike Anderson <andmike@linux.vnet.ibm.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: stable@kernel.org
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-mpath.c	| 12 ------------
1 file changed, 0 insertions, 12 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda90ad4..406091f9692b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -33,7 +33,6 @@ struct pgpath {
 	unsigned fail_count;		/* Cumulative failure count */
 
 	struct dm_path path;
-	struct work_struct deactivate_path;
 	struct work_struct activate_path;
 };
 
@@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void)
 
 	if (pgpath) {
 		pgpath->is_active = 1;
-		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
 		INIT_WORK(&pgpath->activate_path, activate_path);
 	}
 
@@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath)
 	kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, deactivate_path);
-
-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
 	struct priority_group *pg;
@@ -995,7 +984,6 @@ static int fail_path(struct pgpath *pgpath)
 		       pgpath->path.dev->name, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
-	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);