Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c       3
-rw-r--r--  drivers/md/dm-table.c    10
-rw-r--r--  drivers/md/linear.c       3
-rw-r--r--  drivers/md/md.c           4
-rw-r--r--  drivers/md/multipath.c    3
-rw-r--r--  drivers/md/raid0.c        3
-rw-r--r--  drivers/md/raid1.c        3
-rw-r--r--  drivers/md/raid10.c       3
-rw-r--r--  drivers/md/raid5.c       33
-rw-r--r--  drivers/md/raid6algos.c   4
-rw-r--r--  drivers/md/raid6mmx.c     2
-rw-r--r--  drivers/md/raid6sse1.c    2
-rw-r--r--  drivers/md/raid6sse2.c    4
-rw-r--r--  drivers/md/raid6x86.h     2
14 files changed, 40 insertions(+), 39 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7c426d07a555..1b1ef3130e6e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			bitmap->mddev->queue
-				->unplug_fn(bitmap->mddev->queue);
+			blk_unplug(bitmap->mddev->queue);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
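
Every unplug call site below follows the same conversion: the open-coded "check ->unplug_fn, then call it" sequence becomes a single blk_unplug() call. A minimal sketch of the block-layer helper these hunks assume (simplified; the in-tree version, introduced alongside this conversion, also emits a blktrace unplug event):

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined,
	 * so the NULL check formerly done by each caller lives here
	 */
	if (q && q->unplug_fn)
		q->unplug_fn(q);
}

Besides shrinking the callers, centralising the check gives the block layer a single place to hook tracing for unplug events.
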
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8939e6105088..e298d8d11f24 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -102,6 +102,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
 	lhs->no_cluster |= rhs->no_cluster;
 }
 
@@ -566,6 +568,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
 
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -707,6 +711,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
 		rs->seg_boundary_mask = -1;
+	if (!rs->bounce_pfn)
+		rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
@@ -891,6 +897,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
 		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
 	else
@@ -993,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 
-		if (q->unplug_fn)
-			q->unplug_fn(q);
+		blk_unplug(q);
 	}
 }
 
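
The three bounce_pfn hunks rely on min_not_zero() treating zero as "limit not set": when restrictions from stacked devices are combined, the smaller nonzero bounce boundary wins, and check_for_valid_limits() converts a still-unset value to -1 (every page usable, no bouncing). A sketch of the assumed macro semantics, matching the min_not_zero() already used for seg_boundary_mask above:

#define min_not_zero(x, y) \
	((x) == 0 ? (y) : ((y) == 0 ? (x) : min((x), (y))))

For example, a table stacked over one device that must bounce highmem pages and another with no restriction now inherits the stricter limit, instead of leaving q->bounce_pfn at the queue default.
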
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 56a11f6c127b..3dac1cfb8189 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)
 
 	for (i=0; i < mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 808cd9549456..cef9ebd5a046 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		mddev->queue->unplug_fn(mddev->queue);
+		blk_unplug(mddev->queue);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	mddev->queue->unplug_fn(mddev->queue);
+	blk_unplug(mddev->queue);
 
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index b35731cceac6..eb631ebed686 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,8 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
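
The unplug_slaves() hunks in multipath, raid1, raid10 and raid5 all share the same RCU pattern: the component rdev is pinned with a reference so the RCU read lock can be dropped around the call into the lower queue. A condensed reconstruction of the loop, based on the hunk context here (lines outside the shown context are assumptions drawn from the md drivers of this era):

static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);

		if (rdev && !test_bit(Faulty, &rdev->flags)
		    && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue =
				bdev_get_queue(rdev->bdev);

			/* pin the rdev before leaving the RCU section */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
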
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c111105fc2dc..f8e591708d1f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,8 +35,7 @@ static void raid0_unplug(struct request_queue *q)
 	for (i=0; i<mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85478d6a9c1a..4a69c416e045 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -549,8 +549,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fc6607acb6e4..5cdcc9386200 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -593,8 +593,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 80a67d789b72..a5aad8cad843 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -688,7 +688,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 }
 
 static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+	unsigned long pending)
 {
 	int disks = sh->disks;
 	int pd_idx = sh->pd_idx, i;
@@ -696,7 +697,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	/* check if prexor is active which means only process blocks
 	 * that are part of a read-modify-write (Wantprexor)
 	 */
-	int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -773,7 +774,8 @@ static void ops_complete_write(void *stripe_head_ref)
 }
 
 static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+	unsigned long pending)
 {
 	/* kernel stack size limits the total number of disks */
 	int disks = sh->disks;
@@ -781,7 +783,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest;
-	int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
 	unsigned long flags;
 	dma_async_tx_callback callback;
 
@@ -808,7 +810,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	}
 
 	/* check whether this postxor is part of a write */
-	callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+	callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
 		ops_complete_write : ops_complete_postxor;
 
 	/* 1/ if we prexor'd then the dest is reused as a source
@@ -896,12 +898,12 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
 		tx = ops_run_prexor(sh, tx);
 
 	if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
-		tx = ops_run_biodrain(sh, tx);
+		tx = ops_run_biodrain(sh, tx, pending);
 		overlap_clear++;
 	}
 
 	if (test_bit(STRIPE_OP_POSTXOR, &pending))
-		ops_run_postxor(sh, tx);
+		ops_run_postxor(sh, tx, pending);
 
 	if (test_bit(STRIPE_OP_CHECK, &pending))
 		ops_run_check(sh);
@@ -2624,6 +2626,13 @@ static void handle_stripe5(struct stripe_head *sh)
 	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	/* Now to look around and see what can be done */
 
+	/* clean-up completed biofill operations */
+	if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
+		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
+		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
+		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
+	}
+
 	rcu_read_lock();
 	for (i=disks; i--; ) {
 		mdk_rdev_t *rdev;
@@ -2897,13 +2906,6 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	/* Now to look around and see what can be done */
 
-	/* clean-up completed biofill operations */
-	if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
-		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
-		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
-		clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
-	}
-
 	rcu_read_lock();
 	for (i=disks; i--; ) {
 		mdk_rdev_t *rdev;
@@ -3186,8 +3188,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
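
The raid5.c changes are a race fix rather than a cleanup. raid5_run_ops() receives a snapshot of sh->ops.pending taken while the stripe lock was held, but ops_run_biodrain() and ops_run_postxor() were re-reading sh->ops.pending directly — and other contexts may modify those bits once the lock is dropped, including the biofill clean-up that this diff moves from the RAID6 path (handle_stripe6(), where these ops never run) into handle_stripe5() where it belongs. Threading the snapshot through keeps every decision within one run self-consistent. Schematically (a sketch of the calling convention implied by the hunks, not a verbatim excerpt):

	unsigned long pending;

	spin_lock(&sh->lock);
	/* ... decide which operations to start ... */
	pending = sh->ops.pending;	/* snapshot taken under the lock */
	spin_unlock(&sh->lock);

	/* helpers test only the snapshot; sh->ops.pending may be
	 * updated concurrently from here on */
	raid5_run_ops(sh, pending);
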
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 926576156578..77a6e4bf503d 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -52,7 +52,7 @@ const struct raid6_calls * const raid6_algos[] = {
 	&raid6_intx16,
 	&raid6_intx32,
 #endif
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 	&raid6_mmxx1,
 	&raid6_mmxx2,
 	&raid6_sse1x1,
@@ -60,7 +60,7 @@ const struct raid6_calls * const raid6_algos[] = {
 	&raid6_sse2x1,
 	&raid6_sse2x2,
 #endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && !defined(__arch_um__)
 	&raid6_sse2x1,
 	&raid6_sse2x2,
 	&raid6_sse2x4,
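
The remaining hunks all add the same !defined(__arch_um__) guard. A User-Mode Linux kernel is built by an x86 toolchain (so __i386__ or __x86_64__ is defined), yet it runs as an ordinary host process and cannot perform the kernel-mode FPU/MMX/SSE state save and restore that raid6x86.h relies on, so only the portable integer algorithms remain eligible. Schematically, for a hypothetical UML build:

#if defined(__x86_64__) && !defined(__arch_um__)
	/* native x86-64 kernel: SSE2 syndrome implementations compiled in */
#else
	/* UML or non-x86: the benchmark picks among raid6_intx1..intx32 */
#endif
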
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
index 6181a5a3365a..d4e4a1bd70ad 100644
--- a/drivers/md/raid6mmx.c
+++ b/drivers/md/raid6mmx.c
@@ -16,7 +16,7 @@
  * MMX implementation of RAID-6 syndrome functions
  */
 
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
index f0a1ba8f40ba..0666237276ff 100644
--- a/drivers/md/raid6sse1.c
+++ b/drivers/md/raid6sse1.c
@@ -21,7 +21,7 @@
  * worthwhile as a separate implementation.
  */
 
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
index 0f019762a7c3..b034ad868039 100644
--- a/drivers/md/raid6sse2.c
+++ b/drivers/md/raid6sse2.c
@@ -17,7 +17,7 @@
  *
  */
 
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
@@ -161,7 +161,7 @@ const struct raid6_calls raid6_sse2x2 = {
 
 #endif
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__arch_um__)
 
 /*
  * Unrolled-by-4 SSE2 implementation
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
index 9111950414ff..99fea7a70ca7 100644
--- a/drivers/md/raid6x86.h
+++ b/drivers/md/raid6x86.h
@@ -19,7 +19,7 @@
 #ifndef LINUX_RAID_RAID6X86_H
 #define LINUX_RAID_RAID6X86_H
 
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
 
 #ifdef __KERNEL__ /* Real code */
25 25