Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-mpath.c  12
-rw-r--r--   drivers/md/dm-table.c   5
-rw-r--r--   drivers/md/dm.c         5
-rw-r--r--   drivers/md/md.c        44
-rw-r--r--   drivers/md/md.h         3
-rw-r--r--   drivers/md/raid1.c      1
-rw-r--r--   drivers/md/raid10.c     2
7 files changed, 35 insertions, 37 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda90ad4..406091f9692b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -33,7 +33,6 @@ struct pgpath {
 	unsigned fail_count;		/* Cumulative failure count */
 
 	struct dm_path path;
-	struct work_struct deactivate_path;
 	struct work_struct activate_path;
 };
 
@@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void)
 
 	if (pgpath) {
 		pgpath->is_active = 1;
-		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
 		INIT_WORK(&pgpath->activate_path, activate_path);
 	}
 
@@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath)
 	kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, deactivate_path);
-
-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
 	struct priority_group *pg;
@@ -995,7 +984,6 @@ static int fail_path(struct pgpath *pgpath)
 		      pgpath->path.dev->name, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
-	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
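Note: the lines removed above made up one self-contained deferred-work mechanism, a compact example of the embedded work_struct pattern. Reassembled from the deleted hunks (surrounding members, declarations and locking elided; each fragment is annotated with where it lived):

#include <linux/workqueue.h>
#include <linux/blkdev.h>	/* blk_abort_queue() */

struct pgpath {
	/* ... other members ... */
	struct dm_path path;
	struct work_struct deactivate_path;	/* the work item being removed */
	struct work_struct activate_path;
};

/* the handler recovered its owning pgpath from the embedded work item */
static void deactivate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, deactivate_path);

	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
}

/* alloc_pgpath() bound the item to its handler ... */
INIT_WORK(&pgpath->deactivate_path, deactivate_path);

/* ... and fail_path() queued it on the multipath workqueue */
queue_work(kmultipathd, &pgpath->deactivate_path);

With the whole chain gone, failing a path no longer aborts outstanding requests on the underlying queue via blk_abort_queue().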
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9fc07d7a4b9..87e4e78790c0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1136,11 +1136,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 	else
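The hunk above drops the explicit QUEUE_FLAG_CLUSTER handling from dm_table_set_restrictions(); since the full limits structure is copied wholesale one line earlier, the clustering setting evidently now travels inside struct queue_limits rather than as a separate queue flag. A sketch of the surviving tail of the function, reconstructed from the context lines (the second half of the signature and the leading comment are filled in from the surrounding source, not from this hunk):

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	/*
	 * Copy table's limits to the DM device's request_queue.
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	/* ... remaining restriction handling unchanged ... */
}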
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ac384b2a6a33..a173db5fc76a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2111,13 +2111,14 @@ static void event_callback(void *context)
 	wake_up(&md->eventq);
 }
 
+/*
+ * Protected by md->suspend_lock obtained by dm_swap_table().
+ */
 static void __set_size(struct mapped_device *md, sector_t size)
{
 	set_capacity(md->disk, size);
 
-	mutex_lock(&md->bdev->bd_inode->i_mutex);
 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
 /*
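The dm.c hunk trades the i_mutex round trip for a documented locking rule: the caller already holds md->suspend_lock (taken by dm_swap_table()), so the block inode size can be updated without taking another lock here. The resulting helper, read directly off the hunk:

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}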
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f20d13e717d5..012859708a1b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -220,11 +220,14 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
-	if (mddev == NULL || mddev->pers == NULL) {
+	if (mddev == NULL || mddev->pers == NULL
+	    || !mddev->ready) {
 		bio_io_error(bio);
 		return 0;
 	}
+	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
 	if (mddev->suspended || mddev->barrier) {
 		DEFINE_WAIT(__wait);
@@ -242,12 +245,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * save the sectors now since our bio can
+	 * go away inside make_request
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
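The two md_make_request() hunks above carry two independent fixes: bios are rejected until mddev->ready is set (the smp_rmb() evidently pairs with the smp_wmb() added in md_run() further down), and the request size is latched into a local before the personality's make_request is called, since the personality may complete and free the bio, after which bio_sectors(bio) must not be used for accounting. The accounting path after the change, pieced together from the hunks:

	unsigned int sectors;
	...
	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();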
@@ -1329,7 +1336,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
-	return num_sectors / 2; /* kB for sysfs */
+	return num_sectors;
 }
 
 
@@ -1697,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
-	return num_sectors / 2; /* kB for sysfs */
+	return num_sectors;
 }
 
 static struct super_type super_types[] = {
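Both rdev_size_change variants now return the new size in sectors instead of halving it to KiB on the way out, so the return value is in the same unit as the num_sectors argument. For a hypothetical 1 TiB component device (illustrative numbers, not taken from the patch):

	sector_t num_sectors = 2147483648ULL;	/* 1 TiB = 2^31 512-byte sectors */

	/* before: num_sectors / 2  -> 1073741824 (KiB) */
	/* after:  num_sectors      -> 2147483648 (sectors) */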
@@ -2172,6 +2179,8 @@ repeat:
 	if (!mddev->persistent) {
 		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
 		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+		if (!mddev->external)
+			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 		wake_up(&mddev->sb_wait);
 		return;
 	}
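For an array without persistent metadata, the superblock-update path above now also clears MD_CHANGE_PENDING (unless the metadata is externally managed) before waking sb_wait. Anything sleeping on sb_wait until MD_CHANGE_PENDING drops (typically a writer in md_write_start(), assuming the usual wait_event() there) would otherwise never be released on a non-persistent array. The resulting early-exit block:

	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
		wake_up(&mddev->sb_wait);
		return;
	}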
@@ -3107,7 +3116,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
 		char nm[20];
 		if (rdev->raid_disk < 0)
 			continue;
-		if (rdev->new_raid_disk > mddev->raid_disks)
+		if (rdev->new_raid_disk >= mddev->raid_disks)
 			rdev->new_raid_disk = -1;
 		if (rdev->new_raid_disk == rdev->raid_disk)
 			continue;
@@ -4287,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);
 
 	disk = alloc_disk(1 << shift);
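The level_store() change is an off-by-one fix: valid slots run from 0 to mddev->raid_disks - 1, so a new_raid_disk equal to raid_disks is already out of range and must be discarded; the old '>' test let it through. The md_alloc() hunk removes the now-unneeded QUEUE_FLAG_CLUSTER initialisation, matching the dm-table.c hunk above. The corrected bound check, with a concrete example in the comment:

	/* slots are 0 .. raid_disks-1, e.g. raid_disks == 4 permits 0,1,2,3 only */
	if (rdev->new_raid_disk >= mddev->raid_disks)
		rdev->new_raid_disk = -1;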
@@ -4555,7 +4561,8 @@ int md_run(mddev_t *mddev)
 	mddev->safemode_timer.data = (unsigned long) mddev;
 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
-
+	smp_wmb();
+	mddev->ready = 1;
 	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0) {
 			char nm[20];
@@ -4717,6 +4724,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
 
 void md_stop(mddev_t *mddev)
 {
+	mddev->ready = 0;
 	mddev->pers->stop(mddev);
 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
 		mddev->to_remove = &md_redundancy_group;
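Together with the md_make_request() hunk earlier and the new ready field added to mddev_s in md.h below, these two hunks form a small publish/consume handshake: md_run() completes all initialisation, issues smp_wmb(), and only then sets ready; md_make_request() fails bios while ready is zero and issues smp_rmb() once it sees it set, so everything stored before the smp_wmb() is visible before the first bio is processed; md_stop() clears ready again before calling into the personality's stop routine. In outline (an excerpted sketch of the pairing, not the full functions):

	/* publisher: end of md_run(), after all setup stores */
	mddev->in_sync = 1;
	smp_wmb();		/* order setup stores before the flag */
	mddev->ready = 1;

	/* consumer: top of md_make_request() */
	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return 0;
	}
	smp_rmb();		/* pairs with the smp_wmb() above */

	/* teardown: md_stop() */
	mddev->ready = 0;
	mddev->pers->stop(mddev);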
@@ -5148,17 +5156,21 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 				PTR_ERR(rdev));
 			return PTR_ERR(rdev);
 		}
-		/* set save_raid_disk if appropriate */
+		/* set saved_raid_disk if appropriate */
 		if (!mddev->persistent) {
 			if (info->state & (1<<MD_DISK_SYNC) &&
-			    info->raid_disk < mddev->raid_disks)
+			    info->raid_disk < mddev->raid_disks) {
 				rdev->raid_disk = info->raid_disk;
-			else
+				set_bit(In_sync, &rdev->flags);
+			} else
 				rdev->raid_disk = -1;
 		} else
 			super_types[mddev->major_version].
 				validate_super(mddev, rdev);
-		rdev->saved_raid_disk = rdev->raid_disk;
+		if (test_bit(In_sync, &rdev->flags))
+			rdev->saved_raid_disk = rdev->raid_disk;
+		else
+			rdev->saved_raid_disk = -1;
 
 		clear_bit(In_sync, &rdev->flags); /* just to be sure */
 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
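The add_new_disk() hunk above tightens how the recovery starting point is recorded for a hot-added device: saved_raid_disk is kept only when the device is actually In_sync (marked MD_DISK_SYNC by the ioctl caller for a non-persistent array, or validated as in-sync from its superblock); otherwise it is forced to -1, so the device is treated as a fresh spare rather than as a candidate for an in-place re-add. The resulting selection logic, read off the hunk:

		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks) {
				rdev->raid_disk = info->raid_disk;
				set_bit(In_sync, &rdev->flags);
			} else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = rdev->raid_disk;
		else
			rdev->saved_raid_disk = -1;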
@@ -6036,8 +6048,8 @@ static int md_thread(void * arg)
 			 thread->timeout);
 
 		clear_bit(THREAD_WAKEUP, &thread->flags);
-
-		thread->run(thread->mddev);
+		if (!kthread_should_stop())
+			thread->run(thread->mddev);
 	}
 
 	return 0;
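The md_thread() change makes the worker recheck kthread_should_stop() after every wakeup before calling the personality's run hook, presumably so a wakeup that races with md_unregister_thread() cannot call back into code that is being torn down. The loop body after the change:

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
			thread->run(thread->mddev);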
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 3931299788dc..563ede31d5fc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -149,7 +149,8 @@ struct mddev_s
 						       * are happening, so run/
 						       * takeover/stop are not safe
 						       */
-
+	int				ready; /* See when safe to pass
+						* IO requests down */
 	struct gendisk			*gendisk;
 
 	struct kobject			kobj;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0b830bbe1d8b..d8b2d7b0c3be 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1210,6 +1210,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 		 * is not possible.
 		 */
 		if (!test_bit(Faulty, &rdev->flags) &&
+		    !mddev->recovery_disabled &&
 		    mddev->degraded < conf->raid_disks) {
 			err = -EBUSY;
 			goto abort;
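In raid1_remove_disk(), the -EBUSY refusal to hot-remove a non-faulty device now also requires that recovery has not been disabled on this mddev; once mddev->recovery_disabled is set, the device may be removed even though the array is not fully degraded. The amended condition:

		if (!test_bit(Faulty, &rdev->flags) &&
		    !mddev->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}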
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 84718383124d..838c275fd3c8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2396,13 +2396,13 @@ static int run(mddev_t *mddev)
 	return 0;
 
 out_free_conf:
+	md_unregister_thread(mddev->thread);
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf);
 	mddev->private = NULL;
-	md_unregister_thread(mddev->thread);
 out:
 	return -EIO;
 }
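The raid10 change is purely an ordering fix in run()'s error path: the md thread is stopped first, before conf, its mirrors array and the r10bio mempool are freed, so a still-running thread cannot touch structures that are being torn down. The error path after the change:

out_free_conf:
	md_unregister_thread(mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;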