author		Ingo Molnar <mingo@elte.hu>	2008-10-10 03:25:29 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-10 03:25:29 -0400
commit		8eb95f28f66b1a5461fdbcc9a1ee9068fb2cf2b6 (patch)
tree		e42d0e004b07f86d38de930fc163f3ca7d121f9c /drivers/md
parent		d7451fca18e2ec62641ae4bbfe946069f13765a3 (diff)
parent		3fa8749e584b55f1180411ab1b51117190bac1e5 (diff)

Merge commit 'v2.6.27' into timers/hpet
Diffstat (limited to 'drivers/md')

 -rw-r--r--  drivers/md/dm-mpath.c | 16 +++++++++++++++-
 -rw-r--r--  drivers/md/dm.c       | 12 ++++++++----
 -rw-r--r--  drivers/md/md.c       |  8 +++++++-

 3 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 71dd65aa31b6..c2fcf28b4c70 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -63,6 +63,7 @@ struct multipath {
 
 	const char *hw_handler_name;
 	struct work_struct activate_path;
+	struct pgpath *pgpath_to_activate;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
+	unsigned long flags;
 	struct pgpath *pgpath, *tmp;
 	struct multipath *m = ti->private;
 
@@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 		if (m->hw_handler_name)
 			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
+		spin_lock_irqsave(&m->lock, flags);
+		if (m->pgpath_to_activate == pgpath)
+			m->pgpath_to_activate = NULL;
+		spin_unlock_irqrestore(&m->lock, flags);
 		free_pgpath(pgpath);
 	}
 }
@@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work)
 		__choose_pgpath(m);
 
 	pgpath = m->current_pgpath;
+	m->pgpath_to_activate = m->current_pgpath;
 
 	if ((pgpath && !m->queue_io) ||
 	    (!pgpath && !m->queue_if_no_path))
@@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work)
 	int ret;
 	struct multipath *m =
 		container_of(work, struct multipath, activate_path);
-	struct dm_path *path = &m->current_pgpath->path;
+	struct dm_path *path;
+	unsigned long flags;
 
+	spin_lock_irqsave(&m->lock, flags);
+	path = &m->pgpath_to_activate->path;
+	m->pgpath_to_activate = NULL;
+	spin_unlock_irqrestore(&m->lock, flags);
+	if (!path)
+		return;
 	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
 	pg_init_done(path, ret);
 }
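
The dm-mpath hunks above close a use-after-free race: activate_path() runs from a workqueue and previously dereferenced m->current_pgpath, which free_pgpaths() could free underneath it. The fix routes the handoff through a dedicated pointer, m->pgpath_to_activate, that is only read, written, or cleared under m->lock, so the free path can revoke a pending activation before the worker sees it. Below is a minimal userspace sketch of that lock-protected handoff, with a pthread mutex standing in for the kernel spinlock; the names (pending_path, queue_activation, worker) are illustrative, not from the driver.

    /* Sketch of the pattern: a worker only touches an object through a
     * lock-protected "pending" pointer that the free path can clear first. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct path { int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct path *pending_path;      /* plays the pgpath_to_activate role */

    static void queue_activation(struct path *p)
    {
        pthread_mutex_lock(&lock);
        pending_path = p;                  /* snapshot taken under the lock */
        pthread_mutex_unlock(&lock);
    }

    static void free_path(struct path *p)
    {
        pthread_mutex_lock(&lock);
        if (pending_path == p)             /* revoke a not-yet-run activation */
            pending_path = NULL;
        pthread_mutex_unlock(&lock);
        free(p);
    }

    static void *worker(void *unused)
    {
        struct path *p;

        (void)unused;
        pthread_mutex_lock(&lock);
        p = pending_path;                  /* claim the pending path, if any */
        pending_path = NULL;
        pthread_mutex_unlock(&lock);

        if (p)                             /* may already have been revoked */
            printf("activating path %d\n", p->id);
        return NULL;
    }

    int main(void)
    {
        struct path *p = malloc(sizeof(*p));
        pthread_t t;

        p->id = 1;
        queue_activation(p);
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        free_path(p);                      /* safe: worker already claimed it */
        return 0;
    }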
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e11878..ace998ce59f6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct dm_table *map = dm_get_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
-	int max_size;
+	int max_size = 0;
 
 	if (unlikely(!map))
-		return 0;
+		goto out;
 
 	ti = dm_table_find_target(map, bvm->bi_sector);
+	if (!dm_target_is_valid(ti))
+		goto out_table;
 
 	/*
 	 * Find maximum amount of I/O that won't need splitting
@@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	if (max_size && ti->type->merge)
 		max_size = ti->type->merge(ti, bvm, biovec, max_size);
 
+out_table:
+	dm_table_put(map);
+
+out:
 	/*
 	 * Always allow an entire first page
 	 */
 	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
 		max_size = biovec->bv_len;
 
-	dm_table_put(map);
-
 	return max_size;
 }
 
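The dm.c hunks restructure dm_merge_bvec() so every exit path drops the table reference and still applies the "always allow an entire first page" rule: max_size starts at 0, an invalid target jumps to out_table (which still calls dm_table_put()), and a missing map jumps straight to out. Below is a compilable sketch of that goto-funnel cleanup style, with stand-in types and helpers rather than the real dm API.

    /* Sketch: exits funnel through labels so the reference count is always
     * dropped and the final adjustment runs on every path. */
    #include <stdio.h>

    struct target { int valid; int max; };
    struct table  { struct target tgt; int refs; };

    static struct table the_table = { { 1, 512 }, 0 };

    static struct table *get_table(void)  { the_table.refs++; return &the_table; }
    static void put_table(struct table *t) { t->refs--; }

    static int merge_limit(long sector, int first_page_len)
    {
        struct table *t = get_table();
        struct target *ti;
        int max_size = 0;                 /* safe value for every early exit */

        (void)sector;                     /* a real lookup would use this */
        if (!t)
            goto out;                     /* no reference taken, nothing to put */

        ti = &t->tgt;                     /* stands in for the target lookup */
        if (!ti->valid)
            goto out_table;               /* must still drop the table reference */

        max_size = ti->max;

    out_table:
        put_table(t);
    out:
        if (max_size < first_page_len)    /* always allow an entire first page */
            max_size = first_page_len;
        return max_size;
    }

    int main(void)
    {
        int limit = merge_limit(0, 4096);

        printf("limit=%d, refs=%d\n", limit, the_table.refs);
        return 0;
    }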
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4790c83d78d0..deeac4b44173 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5761,7 +5761,11 @@ void md_do_sync(mddev_t *mddev)
 				 * time 'round when curr_resync == 2
 				 */
 				continue;
-			prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+			/* We need to wait 'interruptible' so as not to
+			 * contribute to the load average, and not to
+			 * be caught by 'softlockup'
+			 */
+			prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    mddev2->curr_resync >= mddev->curr_resync) {
 				printk(KERN_INFO "md: delaying %s of %s"
@@ -5769,6 +5773,8 @@ void md_do_sync(mddev_t *mddev)
 				    " share one or more physical units)\n",
 				    desc, mdname(mddev), mdname(mddev2));
 				mddev_put(mddev2);
+				if (signal_pending(current))
+					flush_signals(current);
 				schedule();
 				finish_wait(&resync_wait, &wq);
 				goto try_again;
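
The md.c hunks switch the resync thread's turn-waiting to an interruptible sleep, so a resync that is merely queued behind another neither inflates the load average nor trips the softlockup watchdog. Because an interruptible sleep returns immediately while a signal is pending, any stale signal is flushed first so schedule() does not degenerate into a busy loop. Below is a small userspace analogue of that "drain stale wakeups before blocking" step, using a blocked POSIX signal as the wakeup and sigtimedwait() as the interruptible wait; the structure is illustrative, not the kernel mechanism.

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    /* Consume any already-queued SIGUSR1 without blocking: the analogue of
     * "if (signal_pending(current)) flush_signals(current);" above. */
    static void drain_pending(const sigset_t *set)
    {
        struct timespec zero = { 0, 0 };

        while (sigtimedwait(set, NULL, &zero) > 0)
            ;
    }

    int main(void)
    {
        sigset_t set;
        struct timespec timeout = { 2, 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);  /* hold SIGUSR1 as "pending" */

        raise(SIGUSR1);                      /* a stale wakeup from earlier */

        /* Without this drain, the wait below returns at once: the busy-spin
         * that the md patch avoids. With it, we sleep the full timeout. */
        drain_pending(&set);
        if (sigtimedwait(&set, NULL, &timeout) < 0)
            printf("no signal pending: slept the full timeout\n");
        else
            printf("woken immediately by a stale signal\n");
        return 0;
    }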