diff options
author    NeilBrown <neilb@suse.de>  2011-01-13 17:14:33 -0500
committer NeilBrown <neilb@suse.de>  2011-01-13 17:14:33 -0500
commit    0ca69886a8273ac1350143d562280bfcbe4760dc (patch)
tree      98acbe1e5682e4455bf7d2e7a9413b5a3fd43b2a /drivers
parent    067032bc628598606056412594042564fcf09e22 (diff)
md: Ensure no IO request to get md device before it is properly initialised.
When an md device is in the process of coming on line it is possible
for an IO request (typically a partition table probe) to get through
before the array is fully initialised, which can cause unexpected
behaviour (e.g. a crash).
So explicitly record when the array is ready for IO and don't allow IO
through until then.
There is no possibility for a similar problem when the array is going
off-line as there must only be one 'open' at that time, and it is busy
off-lining the array and so cannot send IO requests. So no memory
barrier is needed in md_stop().
This has been a bug since commit 409c57f3801 in 2.6.30, which
introduced md_make_request. Before then, each personality would
register its own make_request_fn when it was ready.
This is suitable for any stable kernel from 2.6.30.y onwards.
Cc: <stable@kernel.org>
Signed-off-by: NeilBrown <neilb@suse.de>
Reported-by: "Hawrylewicz Czarnowski, Przemyslaw" <przemyslaw.hawrylewicz.czarnowski@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/md.c | 8
-rw-r--r--  drivers/md/md.h | 3
2 files changed, 8 insertions, 3 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c index a3019121dc57..540347c538f9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -288,10 +288,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
288 | int rv; | 288 | int rv; |
289 | int cpu; | 289 | int cpu; |
290 | 290 | ||
291 | if (mddev == NULL || mddev->pers == NULL) { | 291 | if (mddev == NULL || mddev->pers == NULL |
292 | || !mddev->ready) { | ||
292 | bio_io_error(bio); | 293 | bio_io_error(bio); |
293 | return 0; | 294 | return 0; |
294 | } | 295 | } |
296 | smp_rmb(); /* Ensure implications of 'active' are visible */ | ||
295 | rcu_read_lock(); | 297 | rcu_read_lock(); |
296 | if (mddev->suspended) { | 298 | if (mddev->suspended) { |
297 | DEFINE_WAIT(__wait); | 299 | DEFINE_WAIT(__wait); |
@@ -4564,7 +4566,8 @@ int md_run(mddev_t *mddev) | |||
4564 | mddev->safemode_timer.data = (unsigned long) mddev; | 4566 | mddev->safemode_timer.data = (unsigned long) mddev; |
4565 | mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ | 4567 | mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ |
4566 | mddev->in_sync = 1; | 4568 | mddev->in_sync = 1; |
4567 | 4569 | smp_wmb(); | |
4570 | mddev->ready = 1; | ||
4568 | list_for_each_entry(rdev, &mddev->disks, same_set) | 4571 | list_for_each_entry(rdev, &mddev->disks, same_set) |
4569 | if (rdev->raid_disk >= 0) { | 4572 | if (rdev->raid_disk >= 0) { |
4570 | char nm[20]; | 4573 | char nm[20]; |
@@ -4725,6 +4728,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); | |||
4725 | 4728 | ||
4726 | void md_stop(mddev_t *mddev) | 4729 | void md_stop(mddev_t *mddev) |
4727 | { | 4730 | { |
4731 | mddev->ready = 0; | ||
4728 | mddev->pers->stop(mddev); | 4732 | mddev->pers->stop(mddev); |
4729 | if (mddev->pers->sync_request && mddev->to_remove == NULL) | 4733 | if (mddev->pers->sync_request && mddev->to_remove == NULL) |
4730 | mddev->to_remove = &md_redundancy_group; | 4734 | mddev->to_remove = &md_redundancy_group; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index d05bab55df4e..229675a604f7 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -148,7 +148,8 @@ struct mddev_s | |||
148 | * are happening, so run/ | 148 | * are happening, so run/ |
149 | * takeover/stop are not safe | 149 | * takeover/stop are not safe |
150 | */ | 150 | */ |
151 | 151 | int ready; /* See when safe to pass | |
152 | * IO requests down */ | ||
152 | struct gendisk *gendisk; | 153 | struct gendisk *gendisk; |
153 | 154 | ||
154 | struct kobject kobj; | 155 | struct kobject kobj; |