Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/bitmap.c    45
 -rw-r--r--  drivers/md/dm-mpath.c  16
 -rw-r--r--  drivers/md/dm.c        12
 -rw-r--r--  drivers/md/md.c        10
 4 files changed, 68 insertions, 15 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7e65bad522cb..ac89a5deaca2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -238,15 +238,47 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
 
 }
 
+static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+	/* Iterate the disks of an mddev, using rcu to protect access to the
+	 * linked list, and raising the refcount of devices we return to ensure
+	 * they don't disappear while in use.
+	 * As devices are only added or removed when raid_disk is < 0 and
+	 * nr_pending is 0 and In_sync is clear, the entries we return will
+	 * still be in the same position on the list when we re-enter
+	 * list_for_each_continue_rcu.
+	 */
+	struct list_head *pos;
+	rcu_read_lock();
+	if (rdev == NULL)
+		/* start at the beginning */
+		pos = &mddev->disks;
+	else {
+		/* release the previous rdev and start from there. */
+		rdev_dec_pending(rdev, mddev);
+		pos = &rdev->same_set;
+	}
+	list_for_each_continue_rcu(pos, &mddev->disks) {
+		rdev = list_entry(pos, mdk_rdev_t, same_set);
+		if (rdev->raid_disk >= 0 &&
+		    test_bit(In_sync, &rdev->flags) &&
+		    !test_bit(Faulty, &rdev->flags)) {
+			/* this is a usable device */
+			atomic_inc(&rdev->nr_pending);
+			rcu_read_unlock();
+			return rdev;
+		}
+	}
+	rcu_read_unlock();
+	return NULL;
+}
+
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-	mdk_rdev_t *rdev;
+	mdk_rdev_t *rdev = NULL;
 	mddev_t *mddev = bitmap->mddev;
 
-	rcu_read_lock();
-	rdev_for_each_rcu(rdev, mddev)
-		if (test_bit(In_sync, &rdev->flags)
-		    && !test_bit(Faulty, &rdev->flags)) {
+	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
@@ -281,8 +313,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 			       + page->index * (PAGE_SIZE/512),
 			       size,
 			       page);
 	}
-	rcu_read_unlock();
 
 	if (wait)
 		md_super_wait(mddev);
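
The next_active_rdev() helper added above is a handoff-style iterator: each call releases the reference pinned by the previous call, resumes the RCU-protected walk from that position, and pins the next usable device before dropping rcu_read_lock(), so the caller may sleep (e.g. in md_super_wait()) between steps. The commit's own comment explains why the released entry remains a valid resume point: devices only leave the list once nr_pending is zero and In_sync is clear. Below is a minimal user-space sketch of the same pattern, with a plain mutex standing in for RCU and every name invented for illustration; the mutex makes the resume-point invariant trivial, where the kernel version relies on the nr_pending rule instead.

    #include <pthread.h>
    #include <stddef.h>

    struct node {
    	struct node *next;
    	int refs;		/* pinned while a caller holds this node */
    	int usable;		/* stand-in for In_sync && !Faulty */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;	/* assumed to be populated elsewhere */

    /* Pass NULL to start; pass the previous return value to continue. */
    struct node *next_usable(struct node *prev)
    {
    	struct node *n;

    	pthread_mutex_lock(&list_lock);
    	if (prev) {
    		prev->refs--;		/* like rdev_dec_pending() */
    		n = prev->next;		/* resume from the old position */
    	} else {
    		n = head;		/* start at the beginning */
    	}
    	for (; n; n = n->next) {
    		if (n->usable) {
    			n->refs++;	/* like atomic_inc(&rdev->nr_pending) */
    			pthread_mutex_unlock(&list_lock);
    			return n;	/* pinned: safe to use unlocked */
    		}
    	}
    	pthread_mutex_unlock(&list_lock);
    	return NULL;
    }

Callers loop exactly as the reworked write_sb_page() does, for (n = next_usable(NULL); n; n = next_usable(n)) { ... }, and may block between iterations because the returned node stays pinned.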
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 71dd65aa31b6..c2fcf28b4c70 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 
 	const char *hw_handler_name;
 	struct work_struct activate_path;
+	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
+	unsigned long flags;
 	struct pgpath *pgpath, *tmp;
 	struct multipath *m = ti->private;
 
@@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 		if (m->hw_handler_name)
 			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
+		spin_lock_irqsave(&m->lock, flags);
+		if (m->pgpath_to_activate == pgpath)
+			m->pgpath_to_activate = NULL;
+		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }
@@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
+	m->pgpath_to_activate = m->current_pgpath;
 
 	if ((pgpath && !m->queue_io) ||
 	    (!pgpath && !m->queue_if_no_path))
@@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work)
 	int ret;
 	struct multipath *m =
 		container_of(work, struct multipath, activate_path);
-	struct dm_path *path = &m->current_pgpath->path;
+	struct dm_path *path;
+	unsigned long flags;
 
+	spin_lock_irqsave(&m->lock, flags);
+	path = &m->pgpath_to_activate->path;
+	m->pgpath_to_activate = NULL;
+	spin_unlock_irqrestore(&m->lock, flags);
+	if (!path)
+		return;
 	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
 	pg_init_done(path, ret);
 }
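
The dm-mpath hunks close a race between the workqueue and path teardown: instead of letting activate_path() chase m->current_pgpath (which may be freed underneath it), process_queued_ios() publishes the path in m->pgpath_to_activate under m->lock, the worker consumes and clears it under the same lock, and free_pgpaths() revokes it before freeing. A hypothetical user-space sketch of that publish/consume/revoke shape, using a pthread mutex and invented names:

    #include <pthread.h>
    #include <stddef.h>

    struct path;			/* opaque for this sketch */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct path *path_to_activate;	/* like m->pgpath_to_activate */

    void publish(struct path *p)	/* like process_queued_ios() */
    {
    	pthread_mutex_lock(&lock);
    	path_to_activate = p;
    	pthread_mutex_unlock(&lock);
    }

    void worker(void)			/* like activate_path() */
    {
    	struct path *p;

    	pthread_mutex_lock(&lock);
    	p = path_to_activate;		/* take ownership... */
    	path_to_activate = NULL;	/* ...and clear the slot */
    	pthread_mutex_unlock(&lock);
    	if (!p)
    		return;			/* teardown already revoked it */
    	/* activate p here, outside the lock */
    }

    void revoke(struct path *p)		/* like free_pgpaths() */
    {
    	pthread_mutex_lock(&lock);
    	if (path_to_activate == p)
    		path_to_activate = NULL;
    	pthread_mutex_unlock(&lock);
    	/* now safe to free p */
    }

One design point the sketch makes deliberately: it tests the stashed pointer itself before touching any member. The hunk above instead computes &m->pgpath_to_activate->path first; in C, &p->member of a NULL p is a small non-NULL offset whenever the member does not sit at offset zero, so if path is not the first member of struct pgpath the later if (!path) test cannot fire.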
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e11878..ace998ce59f6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct dm_table *map = dm_get_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
-	int max_size;
+	int max_size = 0;
 
 	if (unlikely(!map))
-		return 0;
+		goto out;
 
 	ti = dm_table_find_target(map, bvm->bi_sector);
+	if (!dm_target_is_valid(ti))
+		goto out_table;
 
 	/*
 	 * Find maximum amount of I/O that won't need splitting
@@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	if (max_size && ti->type->merge)
 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
 
+out_table:
+	dm_table_put(map);
+
+out:
 	/*
 	 * Always allow an entire first page
 	 */
 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
 		max_size = biovec->bv_len;
 
-	dm_table_put(map);
-
 	return max_size;
 }
 
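
The dm.c change is a textbook conversion to the kernel's centralized-exit goto idiom: the result is initialized to the failure value up front, and each early exit jumps to a label that performs exactly the cleanup owed at that point, so the dm_table_put() reference drop cannot be missed on the new !dm_target_is_valid(ti) path. A self-contained toy version of the idiom, with all names invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct table { int has_target; };

    static struct table *get_table(void)
    {
    	struct table *t = malloc(sizeof(*t));
    	if (t)
    		t->has_target = 1;
    	return t;
    }

    static void put_table(struct table *t) { free(t); }

    int max_io_size(void)
    {
    	struct table *map = get_table();
    	int max_size = 0;		/* failure value up front */

    	if (!map)
    		goto out;		/* no reference taken: skip the put */

    	if (!map->has_target)
    		goto out_table;		/* reference taken: must drop it */

    	max_size = 4096;		/* the interesting work */

    out_table:
    	put_table(map);
    out:
    	return max_size;
    }

    int main(void)
    {
    	printf("max_io_size() = %d\n", max_io_size());
    	return 0;
    }

The label ladder mirrors acquisition order: a failure before the resource is taken jumps past its release, a failure after it jumps to it.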
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cfadc5bd2ba..deeac4b44173 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3841,8 +3841,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
 	del_timer_sync(&mddev->safemode_timer);
 
-	invalidate_partition(disk, 0);
-
 	switch(mode) {
 	case 1: /* readonly */
 		err = -ENXIO;
@@ -5763,7 +5761,11 @@ void md_do_sync(mddev_t *mddev)
 				 * time 'round when curr_resync == 2
 				 */
 				continue;
-			prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+			/* We need to wait 'interruptible' so as not to
+			 * contribute to the load average, and not to
+			 * be caught by 'softlockup'
+			 */
+			prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    mddev2->curr_resync >= mddev->curr_resync) {
 				printk(KERN_INFO "md: delaying %s of %s"
@@ -5771,6 +5773,8 @@ void md_do_sync(mddev_t *mddev)
 					" share one or more physical units)\n",
 					desc, mdname(mddev), mdname(mddev2));
 				mddev_put(mddev2);
+				if (signal_pending(current))
+					flush_signals(current);
 				schedule();
 				finish_wait(&resync_wait, &wq);
 				goto try_again;
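
The md.c hunks pair a TASK_INTERRUPTIBLE wait (so an idle resync thread neither inflates the load average nor trips the softlockup detector) with a flush_signals() call: an interruptible schedule() returns immediately while a signal is pending, so without the flush a stray signal would turn the delay loop into a busy spin. A loose user-space analogue, with SIGUSR1 standing in for the stray signal and sigtimedwait() playing the role of flush_signals():

    #define _POSIX_C_SOURCE 200809L
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
    	sigset_t set;
    	struct timespec zero = { .tv_sec = 0, .tv_nsec = 0 };
    	struct timespec t = { .tv_sec = 1, .tv_nsec = 0 };

    	/* Block SIGUSR1 so it queues as "pending" instead of running
    	 * a handler -- the rough shape of a kernel thread's signals. */
    	sigemptyset(&set);
    	sigaddset(&set, SIGUSR1);
    	sigprocmask(SIG_BLOCK, &set, NULL);

    	raise(SIGUSR1);		/* leave one instance pending */

    	/* Rough analogue of flush_signals(): consume whatever is
    	 * pending without blocking (zero timeout), so later waits
    	 * are real waits rather than immediate returns. */
    	while (sigtimedwait(&set, NULL, &zero) == SIGUSR1)
    		puts("drained a pending SIGUSR1");

    	/* An interruptible wait: restart on EINTR instead of spinning. */
    	while (nanosleep(&t, &t) == -1 && errno == EINTR)
    		;
    	return 0;
    }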
