author     Ingo Molnar <mingo@elte.hu>    2012-03-01 04:26:41 -0500
committer  Ingo Molnar <mingo@elte.hu>    2012-03-01 04:26:43 -0500
commit     7e4d960993331e92567f0180e45322a93e6780ba (patch)
tree       4d7444035303fc0b545e88afbd894176344fb2a3 /block
parent     de5bdff7a72acc281219be2b8edeeca1fd81c542 (diff)
parent     164974a8f2a482f1abcb027c6d1a89dd79b14297 (diff)

Merge branch 'linus' into sched/core

Merge reason: we'll queue up dependent patches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c        2
-rw-r--r--  block/blk-core.c         33
-rw-r--r--  block/blk-ioc.c         111
-rw-r--r--  block/blk-merge.c        37
-rw-r--r--  block/blk.h               2
-rw-r--r--  block/bsg.c               3
-rw-r--r--  block/cfq-iosched.c      24
-rw-r--r--  block/elevator.c         55
-rw-r--r--  block/partitions/ldm.c   11
9 files changed, 101 insertions, 177 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444..75642a352a8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 		if (ioc) {
 			ioc_cgroup_changed(ioc);
-			put_io_context(ioc, NULL);
+			put_io_context(ioc);
 		}
 	}
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
@@ -872,13 +872,15 @@ retry:
 	spin_unlock_irq(q->queue_lock);
 
 	/* create icq if missing */
-	if (unlikely(et->icq_cache && !icq))
+	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
+		if (!icq)
+			goto fail_icq;
+	}
 
-	/* rqs are guaranteed to have icq on elv_set_request() if requested */
-	if (likely(!et->icq_cache || icq))
-		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
 
+fail_icq:
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1255,13 +1255,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * on %current's plugged list. Returns %true if merge was successful,
  * otherwise %false.
  *
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock. Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while may have elvpriv data, is not
+ * added on the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside queue lock. Only check basic
+ * merging parameters without querying the elevator.
  */
 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			       unsigned int *request_count)
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		(*request_count)++;
 
-		if (rq->q != q)
+		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = elv_try_merge(rq, bio);
+		el_ret = blk_try_merge(rq, bio);
 		if (el_ret == ELEVATOR_BACK_MERGE) {
 			ret = bio_attempt_back_merge(q, rq, bio);
 			if (ret)
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	if (el_ret == ELEVATOR_BACK_MERGE) {
 		if (bio_attempt_back_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
 		}
 	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
 		if (bio_attempt_front_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
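
Note on the blk-core.c changes above: bio_attempt_back/front_merge() may now run from the lock-free plug path, so they no longer call elv_bio_merged(); blk_queue_bio() notifies the elevator itself, right after a successful merge and while still holding queue_lock, and the plug path only checks blk_rq_merge_ok()/blk_try_merge(). Below is a minimal userspace sketch of that split, not kernel code; the toy_req/try_back_merge names and the pthread mutex standing in for queue_lock are illustrative assumptions.

/* build: cc -std=c11 -pthread plug_merge_sketch.c */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queue_lock_held;		/* poor man's lockdep_assert_held() */

struct toy_req {
	unsigned long end_sector;	/* one past the request's last sector */
	unsigned int  elv_notifications;/* times the "elevator" was told */
};

/* Elevator hook: in this model it must only run under queue_lock. */
static void elv_bio_merged_sketch(struct toy_req *req)
{
	assert(queue_lock_held);
	req->elv_notifications++;
}

/* Basic geometric back merge; safe with or without the lock. */
static bool try_back_merge(struct toy_req *req, unsigned long sector,
			   unsigned int nr_sectors)
{
	if (req->end_sector != sector)
		return false;
	req->end_sector += nr_sectors;
	return true;
}

int main(void)
{
	struct toy_req req = { .end_sector = 1000 };

	/* Plug path: no queue_lock, so merge on geometry only and do NOT
	 * call into the elevator. */
	try_back_merge(&req, 1000, 8);

	/* Queue submission path: queue_lock is held, so the elevator is
	 * notified right after a successful merge. */
	pthread_mutex_lock(&queue_lock);
	queue_lock_held = true;
	if (try_back_merge(&req, 1008, 8))
		elv_bio_merged_sketch(&req);
	queue_lock_held = false;
	pthread_mutex_unlock(&queue_lock);

	printf("end=%lu, elevator notified %u time(s)\n",
	       req.end_sector, req.elv_notifications);
	return 0;
}
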
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 27a06e00eaec..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
+	if (et->ops.elevator_exit_icq_fn)
 		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -98,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
 	struct request_queue *last_q = NULL;
+	unsigned long flags;
 
-	spin_lock_irq(&ioc->lock);
+	/*
+	 * Exiting icq may call into put_io_context() through elevator
+	 * which will trigger lockdep warning. The ioc's are guaranteed to
+	 * be different, use a different locking subclass here. Use
+	 * irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -121,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
 			 */
 			if (last_q) {
 				spin_unlock(last_q->queue_lock);
-				spin_unlock_irq(&ioc->lock);
+				spin_unlock_irqrestore(&ioc->lock, flags);
 				blk_put_queue(last_q);
 			} else {
-				spin_unlock_irq(&ioc->lock);
+				spin_unlock_irqrestore(&ioc->lock, flags);
 			}
 
 			last_q = this_q;
-			spin_lock_irq(this_q->queue_lock);
-			spin_lock(&ioc->lock);
+			spin_lock_irqsave(this_q->queue_lock, flags);
+			spin_lock_nested(&ioc->lock, 1);
 			continue;
 		}
 		ioc_exit_icq(icq);
@@ -137,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
 
 	if (last_q) {
 		spin_unlock(last_q->queue_lock);
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		blk_put_queue(last_q);
 	} else {
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
 
 	kmem_cache_free(iocontext_cachep, ioc);
@@ -149,79 +138,29 @@ static void ioc_release_fn(struct work_struct *work)
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc. This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages. The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency. This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock. Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			if (!spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
-
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
-		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -236,7 +175,7 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	put_io_context(ioc);
 }
 
 /**
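
The blk-ioc.c side of the series drops the trylock fast path: the final put_io_context() only schedules release_work, and all the reverse-order double locking happens in the workqueue, so callers are free to hold a queue_lock when they drop the last reference. Below is a rough userspace analogue of that refcount-plus-deferred-release pattern (C11 atomics plus a pthread standing in for the kernel workqueue; put_io_context_sketch and friends are made-up names, not the kernel API).

/* build: cc -std=c11 -pthread ioc_put_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_context {
	atomic_long refcount;
	int nr_icqs;			/* stand-in for ioc->icq_list */
	pthread_t release_worker;	/* stand-in for release_work */
};

/* Deferred teardown: runs with no caller-held locks, so it is free to
 * take per-queue locks in whatever order it needs. */
static void *ioc_release_fn(void *arg)
{
	struct io_context *ioc = arg;

	printf("release worker: exiting %d icqs\n", ioc->nr_icqs);
	return NULL;
}

/* Drop a reference. The last holder never tears icqs down inline (it may
 * already hold a queue lock); it only kicks the worker. */
static void put_io_context_sketch(struct io_context *ioc)
{
	if (!ioc)
		return;
	if (atomic_fetch_sub(&ioc->refcount, 1) != 1)
		return;			/* not the last reference */
	pthread_create(&ioc->release_worker, NULL, ioc_release_fn, ioc);
}

int main(void)
{
	struct io_context *ioc = calloc(1, sizeof(*ioc));

	if (!ioc)
		return 1;
	atomic_init(&ioc->refcount, 2);
	ioc->nr_icqs = 3;

	put_io_context_sketch(ioc);		/* refcount 2 -> 1: no-op */
	put_io_context_sketch(ioc);		/* last put: defer to worker */
	pthread_join(ioc->release_worker, NULL);
	free(ioc);
	return 0;
}
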
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	return attempt_merge(q, rq, next);
 }
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq))
+		return false;
+
+	/* don't merge file system requests and discard requests */
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+		return false;
+
+	/* don't merge discard requests and secure discard requests */
+	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+		return false;
+
+	/* different data direction or already started, don't merge */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return false;
+
+	/* must be same device and not a special request */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return false;
+
+	/* only merge integrity protected bio into ditto rq */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
+		return false;
+
+	return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+		return ELEVATOR_BACK_MERGE;
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+		return ELEVATOR_FRONT_MERGE;
+	return ELEVATOR_NO_MERGE;
+}
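
The new blk_try_merge() reduces merge classification to sector arithmetic: a bio starting exactly where the request ends is a back merge, a bio ending exactly where the request starts is a front merge, anything else is no merge. Below is a self-contained toy model of that check; the toy_rq/toy_bio structs only loosely mirror blk_rq_pos()/blk_rq_sectors() and bi_sector/bio_sectors().

#include <stdio.h>

typedef unsigned long long sector_t;

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

struct toy_rq  { sector_t pos;    unsigned sectors; };	/* blk_rq_pos()/blk_rq_sectors() */
struct toy_bio { sector_t sector; unsigned sectors; };	/* bi_sector/bio_sectors() */

static int toy_try_merge(const struct toy_rq *rq, const struct toy_bio *bio)
{
	if (rq->pos + rq->sectors == bio->sector)
		return BACK_MERGE;	/* bio starts right after rq ends */
	if (rq->pos == bio->sector + bio->sectors)
		return FRONT_MERGE;	/* bio ends right where rq starts */
	return NO_MERGE;
}

int main(void)
{
	struct toy_rq rq = { .pos = 1000, .sectors = 8 };
	struct toy_bio back  = { .sector = 1008, .sectors = 8 };
	struct toy_bio front = { .sector = 992,  .sectors = 8 };
	struct toy_bio gap   = { .sector = 2048, .sectors = 8 };

	printf("back:  %d\n", toy_try_merge(&rq, &back));	/* 1 = BACK_MERGE  */
	printf("front: %d\n", toy_try_merge(&rq, &front));	/* 2 = FRONT_MERGE */
	printf("gap:   %d\n", toy_try_merge(&rq, &gap));	/* 0 = NO_MERGE    */
	return 0;
}
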
diff --git a/block/blk.h b/block/blk.h
index df5b59acbdff..d45be871329e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
diff --git a/block/bsg.c b/block/bsg.c
index 4cf703fd98bb..ff64ae3bacee 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -983,7 +983,8 @@ void bsg_unregister_queue(struct request_queue *q)
 
 	mutex_lock(&bsg_mutex);
 	idr_remove(&bsg_minor_idr, bcd->minor);
-	sysfs_remove_link(&q->kobj, "bsg");
+	if (q->kobj.sd)
+		sysfs_remove_link(&q->kobj, "bsg");
 	device_unregister(bcd->class_dev);
 	bcd->class_dev = NULL;
 	kref_put(&bcd->ref, bsg_kref_release_function);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee55019066a1..d0ba50533668 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1699,18 +1699,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with and allow
-	 * merge only if rq is queued there. This function can be called
-	 * from plug merge without queue_lock. In such cases, ioc of @rq
-	 * and %current are guaranteed to be equal. Avoid lookup which
-	 * requires queue_lock by using @rq's cic.
+	 * merge only if rq is queued there.
 	 */
-	if (current->io_context == RQ_CIC(rq)->icq.ioc) {
-		cic = RQ_CIC(rq);
-	} else {
-		cic = cfq_cic_lookup(cfqd, current->io_context);
-		if (!cic)
-			return false;
-	}
+	cic = cfq_cic_lookup(cfqd, current->io_context);
+	if (!cic)
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	return cfqq == RQ_CFQQ(rq);
@@ -1794,7 +1787,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+		put_io_context(cfqd->active_cic->icq.ioc);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -3117,17 +3110,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
+	cfq_slice_expired(cfqd, 1);
 
 	/*
 	 * workload type is changed, don't save slice, otherwise preempt
 	 * doesn't happen
 	 */
-	if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+	if (old_type != cfqq_type(cfqq))
 		cfqq->cfqg->saved_workload_slice = 0;
 
-	cfq_slice_expired(cfqd, 1);
-
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
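
The cfq_preempt_queue() hunk reorders the function so that the old workload type is sampled before cfq_slice_expired() runs (expiry clears cfqd->active_queue, as the __cfq_slice_expired hunk above shows) and saved_workload_slice is zeroed only afterwards, so expiry cannot overwrite the zeroing. Below is a toy model of that ordering, under the assumption that expiry saves the remaining slice into the shared group, which is how I read __cfq_slice_expired(); the names are made up and this is not CFQ code.

#include <stdio.h>

enum wl_type { SYNC_WL, ASYNC_WL };

struct toy_group { unsigned long saved_slice; };
struct toy_queue { enum wl_type type; struct toy_group *grp; };
struct toy_sched { struct toy_queue *active; unsigned long remaining; };

/* Expiry saves whatever slice is left and drops the active queue,
 * roughly like __cfq_slice_expired(). */
static void slice_expired(struct toy_sched *sd)
{
	sd->active->grp->saved_slice = sd->remaining;
	sd->active = NULL;
}

static void preempt_queue(struct toy_sched *sd, struct toy_queue *newq)
{
	/* Sample the old type first: expiry clears sd->active. */
	enum wl_type old_type = sd->active->type;

	slice_expired(sd);

	/* Zero the saved slice only AFTER expiry, so it is not overwritten. */
	if (old_type != newq->type)
		newq->grp->saved_slice = 0;

	sd->active = newq;
}

int main(void)
{
	struct toy_group g = { 0 };
	struct toy_queue a = { .type = SYNC_WL,  .grp = &g };
	struct toy_queue b = { .type = ASYNC_WL, .grp = &g };
	struct toy_sched sd = { .active = &a, .remaining = 30 };

	preempt_queue(&sd, &b);
	printf("saved_slice=%lu (stays 0 when the workload type changed)\n",
	       g.saved_slice);
	return 0;
}
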
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
-	/*
-	 * Don't merge file system requests and discard requests
-	 */
-	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-		return 0;
-
-	/*
-	 * Don't merge discard requests and secure discard requests
-	 */
-	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
-		return 0;
-
-	/*
-	 * different data direction or already started, don't merge
-	 */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return 0;
-
-	/*
-	 * must be same device and not a special request
-	 */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-		return 0;
-
-	/*
-	 * only merge integrity protected bio into ditto rq
-	 */
-	if (bio_integrity(bio) != blk_integrity_rq(rq))
+	if (!blk_rq_merge_ok(rq, bio))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-	int ret = ELEVATOR_NO_MERGE;
-
-	/*
-	 * we can merge and sequence is ok, check if it's possible
-	 */
-	if (elv_rq_merge_ok(__rq, bio)) {
-		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
-			ret = ELEVATOR_BACK_MERGE;
-		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
-			ret = ELEVATOR_FRONT_MERGE;
-	}
-
-	return ret;
-}
-
 static struct elevator_type *elevator_find(const char *name)
 {
 	struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge) {
-		ret = elv_try_merge(q->last_merge, bio);
+	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index bd8ae788f689..e507cfbd044e 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -2,7 +2,7 @@
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2012 Anton Altaparmakov
  * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
  *
  * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
@@ -1341,20 +1341,17 @@ found:
 		ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
 		return false;
 	}
-
 	if (f->map & (1 << rec)) {
 		ldm_error ("Duplicate VBLK, part %d.", rec);
 		f->map &= 0x7F;			/* Mark the group as broken */
 		return false;
 	}
-
 	f->map |= (1 << rec);
-
+	if (!rec)
+		memcpy(f->data, data, VBLK_SIZE_HEAD);
 	data += VBLK_SIZE_HEAD;
 	size -= VBLK_SIZE_HEAD;
-
-	memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
-
+	memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size);
 	return true;
 }
 
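
The rewritten ldm_frag_add() copy packs a VBLK group buffer as the header of fragment 0 followed by the per-fragment payloads at VBLK_SIZE_HEAD + rec * payload_size, with size already reduced by the header length when the final memcpy runs. Below is a small stand-alone model of that layout, using toy constants rather than the real LDM structures.

#include <stdio.h>
#include <string.h>

#define HEAD 4		/* stands in for VBLK_SIZE_HEAD */
#define PAYLOAD 8	/* per-fragment payload after the header */

/* Reassemble one fragment into the group buffer: only fragment 0
 * contributes its header; every fragment's payload lands at
 * HEAD + rec * payload_size, mirroring the new memcpy in ldm_frag_add(). */
static void frag_add(char *group, int rec, const char *data, size_t size)
{
	if (rec == 0)
		memcpy(group, data, HEAD);
	data += HEAD;
	size -= HEAD;
	memcpy(group + HEAD + (size_t)rec * size, data, size);
}

int main(void)
{
	char group[HEAD + 2 * PAYLOAD + 1] = { 0 };
	const char frag0[] = "HDR0AAAAAAAA";	/* 4-byte header + 8-byte payload */
	const char frag1[] = "HDR1BBBBBBBB";

	frag_add(group, 0, frag0, sizeof(frag0) - 1);
	frag_add(group, 1, frag1, sizeof(frag1) - 1);
	printf("%s\n", group);			/* HDR0AAAAAAAABBBBBBBB */
	return 0;
}
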